Posted to common-commits@hadoop.apache.org by st...@apache.org on 2017/08/31 16:40:40 UTC

[01/50] [abbrv] hadoop git commit: YARN-6237. Move UID constant to TimelineReaderUtils (Rohith Sharma K S via Varun Saxena)

Repository: hadoop
Updated Branches:
  refs/heads/HADOOP-13345 845cd3699 -> 11e5f54fe


YARN-6237. Move UID constant to TimelineReaderUtils (Rohith Sharma K S via Varun Saxena)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/18b3a80d
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/18b3a80d
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/18b3a80d

Branch: refs/heads/HADOOP-13345
Commit: 18b3a80df7e028dfa0d1fa2d48b9f9ac1401ec54
Parents: c3bd8d6
Author: Varun Saxena <va...@apache.org>
Authored: Thu Mar 9 01:06:54 2017 +0530
Committer: Varun Saxena <va...@apache.org>
Committed: Wed Aug 30 11:29:52 2017 +0530

----------------------------------------------------------------------
 .../TestTimelineReaderWebServicesHBaseStorage.java      | 12 ++++++------
 .../timelineservice/reader/TimelineReaderManager.java   | 12 ++++--------
 .../timelineservice/reader/TimelineReaderUtils.java     |  3 +++
 .../reader/TestTimelineReaderWebServices.java           |  4 ++--
 4 files changed, 15 insertions(+), 16 deletions(-)
----------------------------------------------------------------------
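
In practical terms the change is mechanical: every consumer that read the UID out of an entity's info map via TimelineReaderManager.UID_KEY now reads it via TimelineReaderUtils.UID_KEY (the value is still the string "UID", as the TimelineReaderUtils hunk below shows). A minimal consumer-side sketch, assuming the YARN timeline service classes touched by this commit are on the classpath; the wrapper class name is illustrative only:

    import java.util.Map;

    import org.apache.hadoop.yarn.api.records.timelineservice.TimelineEntity;
    import org.apache.hadoop.yarn.server.timelineservice.reader.TimelineReaderUtils;

    public final class UidLookupExample {
      private UidLookupExample() {
      }

      /**
       * Extracts the reader-generated UID from an entity's info map.
       * Before YARN-6237 callers read TimelineReaderManager.UID_KEY;
       * the constant now lives in TimelineReaderUtils.
       */
      public static String getUid(TimelineEntity entity) {
        Map<String, Object> info = entity.getInfo();
        return info == null
            ? null
            : (String) info.get(TimelineReaderUtils.UID_KEY);
      }
    }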


http://git-wip-us.apache.org/repos/asf/hadoop/blob/18b3a80d/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase-tests/src/test/java/org/apache/hadoop/yarn/server/timelineservice/reader/TestTimelineReaderWebServicesHBaseStorage.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase-tests/src/test/java/org/apache/hadoop/yarn/server/timelineservice/reader/TestTimelineReaderWebServicesHBaseStorage.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase-tests/src/test/java/org/apache/hadoop/yarn/server/timelineservice/reader/TestTimelineReaderWebServicesHBaseStorage.java
index b2fe267..6b0f95e 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase-tests/src/test/java/org/apache/hadoop/yarn/server/timelineservice/reader/TestTimelineReaderWebServicesHBaseStorage.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase-tests/src/test/java/org/apache/hadoop/yarn/server/timelineservice/reader/TestTimelineReaderWebServicesHBaseStorage.java
@@ -657,7 +657,7 @@ public class TestTimelineReaderWebServicesHBaseStorage
       List<String> listFlowUIDs = new ArrayList<String>();
       for (FlowActivityEntity entity : flowEntities) {
         String flowUID =
-            (String)entity.getInfo().get(TimelineReaderManager.UID_KEY);
+            (String) entity.getInfo().get(TimelineReaderUtils.UID_KEY);
         listFlowUIDs.add(flowUID);
         assertEquals(TimelineUIDConverter.FLOW_UID.encodeUID(
             new TimelineReaderContext(entity.getCluster(), entity.getUser(),
@@ -681,7 +681,7 @@ public class TestTimelineReaderWebServicesHBaseStorage
         assertNotNull(frEntities);
         for (FlowRunEntity entity : frEntities) {
           String flowRunUID =
-              (String)entity.getInfo().get(TimelineReaderManager.UID_KEY);
+              (String) entity.getInfo().get(TimelineReaderUtils.UID_KEY);
           listFlowRunUIDs.add(flowRunUID);
           assertEquals(TimelineUIDConverter.FLOWRUN_UID.encodeUID(
               new TimelineReaderContext("cluster1", entity.getUser(),
@@ -713,7 +713,7 @@ public class TestTimelineReaderWebServicesHBaseStorage
         assertNotNull(appEntities);
         for (TimelineEntity entity : appEntities) {
           String appUID =
-              (String)entity.getInfo().get(TimelineReaderManager.UID_KEY);
+              (String) entity.getInfo().get(TimelineReaderUtils.UID_KEY);
           listAppUIDs.add(appUID);
           assertEquals(TimelineUIDConverter.APPLICATION_UID.encodeUID(
               new TimelineReaderContext(context.getClusterId(),
@@ -746,7 +746,7 @@ public class TestTimelineReaderWebServicesHBaseStorage
         assertNotNull(entities);
         for (TimelineEntity entity : entities) {
           String entityUID =
-              (String)entity.getInfo().get(TimelineReaderManager.UID_KEY);
+              (String) entity.getInfo().get(TimelineReaderUtils.UID_KEY);
           listEntityUIDs.add(entityUID);
           assertEquals(TimelineUIDConverter.GENERIC_ENTITY_UID.encodeUID(
               new TimelineReaderContext(context.getClusterId(),
@@ -827,7 +827,7 @@ public class TestTimelineReaderWebServicesHBaseStorage
         assertNotNull(entity.getInfo());
         assertEquals(2, entity.getInfo().size());
         String uid =
-            (String) entity.getInfo().get(TimelineReaderManager.UID_KEY);
+            (String) entity.getInfo().get(TimelineReaderUtils.UID_KEY);
         assertNotNull(uid);
         assertTrue(uid.equals(appUIDWithFlowInfo + "!type1!0!entity1")
             || uid.equals(appUIDWithFlowInfo + "!type1!0!entity2"));
@@ -855,7 +855,7 @@ public class TestTimelineReaderWebServicesHBaseStorage
         assertNotNull(entity.getInfo());
         assertEquals(2, entity.getInfo().size());
         String uid =
-            (String) entity.getInfo().get(TimelineReaderManager.UID_KEY);
+            (String) entity.getInfo().get(TimelineReaderUtils.UID_KEY);
         assertNotNull(uid);
         assertTrue(uid.equals(appUIDWithoutFlowInfo + "!type1!0!entity1")
             || uid.equals(appUIDWithoutFlowInfo + "!type1!0!entity2"));

http://git-wip-us.apache.org/repos/asf/hadoop/blob/18b3a80d/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/reader/TimelineReaderManager.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/reader/TimelineReaderManager.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/reader/TimelineReaderManager.java
index 66e4cbf..ee827da 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/reader/TimelineReaderManager.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/reader/TimelineReaderManager.java
@@ -32,8 +32,6 @@ import org.apache.hadoop.yarn.api.records.timelineservice.TimelineEntityType;
 import org.apache.hadoop.yarn.conf.YarnConfiguration;
 import org.apache.hadoop.yarn.server.timelineservice.storage.TimelineReader;
 
-import com.google.common.annotations.VisibleForTesting;
-
 /**
  * This class wraps over the timeline reader store implementation. It does some
  * non trivial manipulation of the timeline data before or after getting
@@ -43,8 +41,6 @@ import com.google.common.annotations.VisibleForTesting;
 @Unstable
 public class TimelineReaderManager extends AbstractService {
 
-  @VisibleForTesting
-  public static final String UID_KEY = "UID";
   private TimelineReader reader;
 
   public TimelineReaderManager(TimelineReader timelineReader) {
@@ -94,18 +90,18 @@ public class TimelineReaderManager extends AbstractService {
         FlowActivityEntity activityEntity = (FlowActivityEntity)entity;
         context.setUserId(activityEntity.getUser());
         context.setFlowName(activityEntity.getFlowName());
-        entity.setUID(UID_KEY,
+        entity.setUID(TimelineReaderUtils.UID_KEY,
             TimelineUIDConverter.FLOW_UID.encodeUID(context));
         return;
       case YARN_FLOW_RUN:
         FlowRunEntity runEntity = (FlowRunEntity)entity;
         context.setFlowRunId(runEntity.getRunId());
-        entity.setUID(UID_KEY,
+        entity.setUID(TimelineReaderUtils.UID_KEY,
             TimelineUIDConverter.FLOWRUN_UID.encodeUID(context));
         return;
       case YARN_APPLICATION:
         context.setAppId(entity.getId());
-        entity.setUID(UID_KEY,
+        entity.setUID(TimelineReaderUtils.UID_KEY,
             TimelineUIDConverter.APPLICATION_UID.encodeUID(context));
         return;
       default:
@@ -115,7 +111,7 @@ public class TimelineReaderManager extends AbstractService {
     context.setEntityType(entity.getType());
     context.setEntityIdPrefix(entity.getIdPrefix());
     context.setEntityId(entity.getId());
-    entity.setUID(UID_KEY,
+    entity.setUID(TimelineReaderUtils.UID_KEY,
         TimelineUIDConverter.GENERIC_ENTITY_UID.encodeUID(context));
   }
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/18b3a80d/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/reader/TimelineReaderUtils.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/reader/TimelineReaderUtils.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/reader/TimelineReaderUtils.java
index 8f92433..4fd8468 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/reader/TimelineReaderUtils.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/reader/TimelineReaderUtils.java
@@ -47,6 +47,9 @@ public final class TimelineReaderUtils {
 
   public static final String FROMID_KEY = "FROM_ID";
 
+  @VisibleForTesting
+  public static final String UID_KEY = "UID";
+
   /**
    * Split the passed string along the passed delimiter character while looking
    * for escape char to interpret the splitted parts correctly. For delimiter or

http://git-wip-us.apache.org/repos/asf/hadoop/blob/18b3a80d/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/test/java/org/apache/hadoop/yarn/server/timelineservice/reader/TestTimelineReaderWebServices.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/test/java/org/apache/hadoop/yarn/server/timelineservice/reader/TestTimelineReaderWebServices.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/test/java/org/apache/hadoop/yarn/server/timelineservice/reader/TestTimelineReaderWebServices.java
index 4bd37f8..f760834 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/test/java/org/apache/hadoop/yarn/server/timelineservice/reader/TestTimelineReaderWebServices.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/test/java/org/apache/hadoop/yarn/server/timelineservice/reader/TestTimelineReaderWebServices.java
@@ -238,7 +238,7 @@ public class TestTimelineReaderWebServices {
       assertEquals(3, entity.getConfigs().size());
       assertEquals(3, entity.getMetrics().size());
       assertTrue("UID should be present",
-          entity.getInfo().containsKey(TimelineReaderManager.UID_KEY));
+          entity.getInfo().containsKey(TimelineReaderUtils.UID_KEY));
       // Includes UID.
       assertEquals(3, entity.getInfo().size());
       // No events will be returned as events are not part of fields.
@@ -265,7 +265,7 @@ public class TestTimelineReaderWebServices {
       assertEquals(3, entity.getConfigs().size());
       assertEquals(3, entity.getMetrics().size());
       assertTrue("UID should be present",
-          entity.getInfo().containsKey(TimelineReaderManager.UID_KEY));
+          entity.getInfo().containsKey(TimelineReaderUtils.UID_KEY));
       // Includes UID.
       assertEquals(3, entity.getInfo().size());
       assertEquals(2, entity.getEvents().size());




[08/50] [abbrv] hadoop git commit: YARN-6146. Add Builder methods for TimelineEntityFilters (Haibo Chen via Varun Saxena)

Posted by st...@apache.org.
YARN-6146. Add Builder methods for TimelineEntityFilters (Haibo Chen via Varun Saxena)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/b87b72b4
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/b87b72b4
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/b87b72b4

Branch: refs/heads/HADOOP-13345
Commit: b87b72b40a3cd4e124d6c941276481747133895f
Parents: 44999aa
Author: Varun Saxena <va...@apache.org>
Authored: Thu Mar 23 14:35:37 2017 +0530
Committer: Varun Saxena <va...@apache.org>
Committed: Wed Aug 30 11:29:52 2017 +0530

----------------------------------------------------------------------
 .../storage/TestHBaseTimelineStorageApps.java   | 190 ++++++++---------
 .../TestHBaseTimelineStorageEntities.java       | 202 +++++++++----------
 .../flow/TestHBaseStorageFlowActivity.java      |   9 +-
 .../storage/flow/TestHBaseStorageFlowRun.java   |  48 +++--
 .../storage/reader/TimelineEntityReader.java    |   2 +-
 .../reader/TimelineEntityFilters.java           | 160 ++++++++-------
 .../reader/TimelineReaderWebServices.java       |   6 +-
 .../reader/TimelineReaderWebServicesUtils.java  |  44 +++-
 .../TestFileSystemTimelineReaderImpl.java       | 100 +++++----
 9 files changed, 381 insertions(+), 380 deletions(-)
----------------------------------------------------------------------
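
The Builder replaces the old nine-argument TimelineEntityFilters constructor, so call sites name only the filters they actually set instead of passing positional nulls, as the test hunks below illustrate. A minimal before/after sketch using builder methods that appear in this diff (createdTimeBegin, createTimeEnd, build); the wrapper class name is illustrative only:

    import org.apache.hadoop.yarn.server.timelineservice.reader.TimelineEntityFilters;

    public final class EntityFiltersBuilderExample {
      private EntityFiltersBuilderExample() {
      }

      public static TimelineEntityFilters createdTimeWindowFilters() {
        // Before YARN-6146 the same filter took nine positional
        // arguments, most of them null:
        //   new TimelineEntityFilters(null, 1425016502000L,
        //       1425016502040L, null, null, null, null, null, null);
        // With the builder, only the created-time window is named; all
        // other filters keep their defaults.
        return new TimelineEntityFilters.Builder()
            .createdTimeBegin(1425016502000L)
            .createTimeEnd(1425016502040L)
            .build();
      }
    }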


http://git-wip-us.apache.org/repos/asf/hadoop/blob/b87b72b4/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase-tests/src/test/java/org/apache/hadoop/yarn/server/timelineservice/storage/TestHBaseTimelineStorageApps.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase-tests/src/test/java/org/apache/hadoop/yarn/server/timelineservice/storage/TestHBaseTimelineStorageApps.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase-tests/src/test/java/org/apache/hadoop/yarn/server/timelineservice/storage/TestHBaseTimelineStorageApps.java
index 4b1147d..7eb9ad1 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase-tests/src/test/java/org/apache/hadoop/yarn/server/timelineservice/storage/TestHBaseTimelineStorageApps.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase-tests/src/test/java/org/apache/hadoop/yarn/server/timelineservice/storage/TestHBaseTimelineStorageApps.java
@@ -660,7 +660,7 @@ public class TestHBaseTimelineStorageApps {
         new TimelineReaderContext("cluster1", "user1", "some_flow_name",
         1002345678919L, null, TimelineEntityType.YARN_APPLICATION.toString(),
         null),
-        new TimelineEntityFilters(),
+        new TimelineEntityFilters.Builder().build(),
         new TimelineDataToRetrieve(null, null, EnumSet.of(Field.ALL), null));
     assertEquals(3, entities.size());
     int cfgCnt = 0;
@@ -697,8 +697,8 @@ public class TestHBaseTimelineStorageApps {
         new TimelineReaderContext("cluster1", "user1", "some_flow_name",
         1002345678919L, null, TimelineEntityType.YARN_APPLICATION.toString(),
         null),
-        new TimelineEntityFilters(null, 1425016502000L, 1425016502040L, null,
-        null, null, null, null, null),
+        new TimelineEntityFilters.Builder().createdTimeBegin(1425016502000L)
+            .createTimeEnd(1425016502040L).build(),
         new TimelineDataToRetrieve());
     assertEquals(3, entities.size());
     for (TimelineEntity entity : entities) {
@@ -714,8 +714,8 @@ public class TestHBaseTimelineStorageApps {
         new TimelineReaderContext("cluster1", "user1", "some_flow_name",
         1002345678919L, null, TimelineEntityType.YARN_APPLICATION.toString(),
         null),
-        new TimelineEntityFilters(null, 1425016502015L, null, null, null, null,
-        null, null, null),
+        new TimelineEntityFilters.Builder().createdTimeBegin(1425016502015L)
+            .build(),
         new TimelineDataToRetrieve());
     assertEquals(2, entities.size());
     for (TimelineEntity entity : entities) {
@@ -729,8 +729,8 @@ public class TestHBaseTimelineStorageApps {
         new TimelineReaderContext("cluster1", "user1", "some_flow_name",
         1002345678919L, null, TimelineEntityType.YARN_APPLICATION.toString(),
         null),
-        new TimelineEntityFilters(null, null, 1425016502015L, null, null, null,
-        null, null, null),
+        new TimelineEntityFilters.Builder().createTimeEnd(1425016502015L)
+            .build(),
         new TimelineDataToRetrieve());
     assertEquals(1, entities.size());
     for (TimelineEntity entity : entities) {
@@ -757,7 +757,7 @@ public class TestHBaseTimelineStorageApps {
         new TimelineReaderContext("cluster1", "user1", "some_flow_name",
         1002345678919L, null, TimelineEntityType.YARN_APPLICATION.toString(),
         null),
-        new TimelineEntityFilters(),
+        new TimelineEntityFilters.Builder().build(),
         new TimelineDataToRetrieve());
     assertEquals(3, es1.size());
     for (TimelineEntity e : es1) {
@@ -783,7 +783,7 @@ public class TestHBaseTimelineStorageApps {
         new TimelineReaderContext("cluster1", "user1", "some_flow_name",
         1002345678919L, null, TimelineEntityType.YARN_APPLICATION.toString(),
         null),
-        new TimelineEntityFilters(),
+        new TimelineEntityFilters.Builder().build(),
         new TimelineDataToRetrieve(
         null, null, EnumSet.of(Field.IS_RELATED_TO, Field.METRICS), null));
     assertEquals(3, es1.size());
@@ -813,8 +813,7 @@ public class TestHBaseTimelineStorageApps {
         new TimelineReaderContext("cluster1", "user1", "some_flow_name",
         1002345678919L, null, TimelineEntityType.YARN_APPLICATION.toString(),
         null),
-        new TimelineEntityFilters(null, null, null, null, irt, null, null, null,
-        null),
+        new TimelineEntityFilters.Builder().isRelatedTo(irt).build(),
         new TimelineDataToRetrieve(null, null, EnumSet.of(Field.ALL), null));
     assertEquals(2, entities.size());
     int isRelatedToCnt = 0;
@@ -839,8 +838,7 @@ public class TestHBaseTimelineStorageApps {
         new TimelineReaderContext("cluster1", "user1", "some_flow_name",
         1002345678919L, null, TimelineEntityType.YARN_APPLICATION.toString(),
         null),
-        new TimelineEntityFilters(null, null, null, null, irt1, null, null,
-        null, null),
+        new TimelineEntityFilters.Builder().isRelatedTo(irt1).build(),
         new TimelineDataToRetrieve());
     assertEquals(1, entities.size());
     isRelatedToCnt = 0;
@@ -863,8 +861,7 @@ public class TestHBaseTimelineStorageApps {
         new TimelineReaderContext("cluster1", "user1", "some_flow_name",
         1002345678919L, null, TimelineEntityType.YARN_APPLICATION.toString(),
         null),
-        new TimelineEntityFilters(null, null, null, null, irt2, null, null,
-        null, null),
+        new TimelineEntityFilters.Builder().isRelatedTo(irt2).build(),
         new TimelineDataToRetrieve());
     assertEquals(2, entities.size());
     isRelatedToCnt = 0;
@@ -886,8 +883,7 @@ public class TestHBaseTimelineStorageApps {
         new TimelineReaderContext("cluster1", "user1", "some_flow_name",
         1002345678919L, null, TimelineEntityType.YARN_APPLICATION.toString(),
         null),
-        new TimelineEntityFilters(null, null, null, null, irt3, null, null,
-        null, null),
+        new TimelineEntityFilters.Builder().isRelatedTo(irt3).build(),
         new TimelineDataToRetrieve());
     assertEquals(1, entities.size());
     isRelatedToCnt = 0;
@@ -910,8 +906,7 @@ public class TestHBaseTimelineStorageApps {
         new TimelineReaderContext("cluster1", "user1", "some_flow_name",
         1002345678919L, null, TimelineEntityType.YARN_APPLICATION.toString(),
         null),
-        new TimelineEntityFilters(null, null, null, null, irt4, null, null,
-        null, null),
+        new TimelineEntityFilters.Builder().isRelatedTo(irt4).build(),
         new TimelineDataToRetrieve());
     assertEquals(0, entities.size());
 
@@ -923,8 +918,7 @@ public class TestHBaseTimelineStorageApps {
         new TimelineReaderContext("cluster1", "user1", "some_flow_name",
         1002345678919L, null, TimelineEntityType.YARN_APPLICATION.toString(),
         null),
-        new TimelineEntityFilters(null, null, null, null, irt5, null, null,
-        null, null),
+        new TimelineEntityFilters.Builder().isRelatedTo(irt5).build(),
         new TimelineDataToRetrieve());
     assertEquals(0, entities.size());
 
@@ -944,8 +938,7 @@ public class TestHBaseTimelineStorageApps {
         new TimelineReaderContext("cluster1", "user1", "some_flow_name",
         1002345678919L, null, TimelineEntityType.YARN_APPLICATION.toString(),
         null),
-        new TimelineEntityFilters(null, null, null, null, irt6, null, null,
-        null, null),
+        new TimelineEntityFilters.Builder().isRelatedTo(irt6).build(),
         new TimelineDataToRetrieve());
     assertEquals(1, entities.size());
     isRelatedToCnt = 0;
@@ -972,8 +965,7 @@ public class TestHBaseTimelineStorageApps {
         new TimelineReaderContext("cluster1", "user1", "some_flow_name",
         1002345678919L, null, TimelineEntityType.YARN_APPLICATION.toString(),
         null),
-        new TimelineEntityFilters(null, null, null, rt, null, null, null, null,
-        null),
+        new TimelineEntityFilters.Builder().relatesTo(rt).build(),
         new TimelineDataToRetrieve(null, null, EnumSet.of(Field.ALL), null));
     assertEquals(2, entities.size());
     int relatesToCnt = 0;
@@ -998,8 +990,7 @@ public class TestHBaseTimelineStorageApps {
         new TimelineReaderContext("cluster1", "user1", "some_flow_name",
         1002345678919L, null, TimelineEntityType.YARN_APPLICATION.toString(),
         null),
-        new TimelineEntityFilters(null, null, null, rt1, null, null, null, null,
-        null),
+        new TimelineEntityFilters.Builder().relatesTo(rt1).build(),
         new TimelineDataToRetrieve());
     assertEquals(1, entities.size());
     relatesToCnt = 0;
@@ -1022,8 +1013,7 @@ public class TestHBaseTimelineStorageApps {
         new TimelineReaderContext("cluster1", "user1", "some_flow_name",
         1002345678919L, null, TimelineEntityType.YARN_APPLICATION.toString(),
         null),
-        new TimelineEntityFilters(null, null, null, rt2, null, null, null, null,
-        null),
+        new TimelineEntityFilters.Builder().relatesTo(rt2).build(),
         new TimelineDataToRetrieve());
     assertEquals(2, entities.size());
     relatesToCnt = 0;
@@ -1045,8 +1035,7 @@ public class TestHBaseTimelineStorageApps {
         new TimelineReaderContext("cluster1", "user1", "some_flow_name",
         1002345678919L, null, TimelineEntityType.YARN_APPLICATION.toString(),
         null),
-        new TimelineEntityFilters(null, null, null, rt3, null, null, null, null,
-        null),
+        new TimelineEntityFilters.Builder().relatesTo(rt3).build(),
         new TimelineDataToRetrieve());
     assertEquals(1, entities.size());
     relatesToCnt = 0;
@@ -1069,8 +1058,7 @@ public class TestHBaseTimelineStorageApps {
         new TimelineReaderContext("cluster1", "user1", "some_flow_name",
         1002345678919L, null, TimelineEntityType.YARN_APPLICATION.toString(),
         null),
-        new TimelineEntityFilters(null, null, null, rt4, null, null, null, null,
-        null),
+        new TimelineEntityFilters.Builder().relatesTo(rt4).build(),
         new TimelineDataToRetrieve());
     assertEquals(0, entities.size());
 
@@ -1082,8 +1070,7 @@ public class TestHBaseTimelineStorageApps {
         new TimelineReaderContext("cluster1", "user1", "some_flow_name",
         1002345678919L, null, TimelineEntityType.YARN_APPLICATION.toString(),
         null),
-        new TimelineEntityFilters(null, null, null, rt5, null, null, null, null,
-        null),
+        new TimelineEntityFilters.Builder().relatesTo(rt5).build(),
         new TimelineDataToRetrieve());
     assertEquals(0, entities.size());
 
@@ -1103,8 +1090,7 @@ public class TestHBaseTimelineStorageApps {
         new TimelineReaderContext("cluster1", "user1", "some_flow_name",
         1002345678919L, null, TimelineEntityType.YARN_APPLICATION.toString(),
         null),
-        new TimelineEntityFilters(null, null, null, rt6, null, null, null, null,
-        null),
+        new TimelineEntityFilters.Builder().relatesTo(rt6).build(),
         new TimelineDataToRetrieve());
     assertEquals(1, entities.size());
     relatesToCnt = 0;
@@ -1140,8 +1126,7 @@ public class TestHBaseTimelineStorageApps {
         new TimelineReaderContext("cluster1", "user1", "some_flow_name",
         1002345678919L, null, TimelineEntityType.YARN_APPLICATION.toString(),
         null),
-        new TimelineEntityFilters(null, null, null, rt7, null, null, null, null,
-        null),
+        new TimelineEntityFilters.Builder().relatesTo(rt7).build(),
         new TimelineDataToRetrieve());
     assertEquals(1, entities.size());
     relatesToCnt = 0;
@@ -1178,8 +1163,8 @@ public class TestHBaseTimelineStorageApps {
         new TimelineReaderContext("cluster1", "user1", "some_flow_name",
         1002345678919L, null, TimelineEntityType.YARN_APPLICATION.toString(),
         null),
-        new TimelineEntityFilters(null, null, null, relatesTo, isRelatedTo,
-        null, null, null, eventFilter),
+        new TimelineEntityFilters.Builder().relatesTo(relatesTo)
+            .isRelatedTo(isRelatedTo).eventFilters(eventFilter).build(),
         new TimelineDataToRetrieve());
     assertEquals(1, entities.size());
     int eventCnt = 0;
@@ -1216,8 +1201,8 @@ public class TestHBaseTimelineStorageApps {
         new TimelineReaderContext("cluster1", "user1", "some_flow_name",
         1002345678919L, null, TimelineEntityType.YARN_APPLICATION.toString(),
         null),
-        new TimelineEntityFilters(null, null, null, null, null, null,
-        confFilterList, null, null),
+        new TimelineEntityFilters.Builder().configFilters(confFilterList)
+            .build(),
         new TimelineDataToRetrieve(null, null, EnumSet.of(Field.CONFIGS),
         null));
     assertEquals(2, entities.size());
@@ -1231,8 +1216,8 @@ public class TestHBaseTimelineStorageApps {
         new TimelineReaderContext("cluster1", "user1", "some_flow_name",
         1002345678919L, null, TimelineEntityType.YARN_APPLICATION.toString(),
         null),
-        new TimelineEntityFilters(null, null, null, null, null, null,
-        confFilterList, null, null),
+        new TimelineEntityFilters.Builder().configFilters(confFilterList)
+            .build(),
         new TimelineDataToRetrieve(null, null, EnumSet.of(Field.ALL), null));
     assertEquals(2, entities.size());
     cfgCnt = 0;
@@ -1248,8 +1233,8 @@ public class TestHBaseTimelineStorageApps {
         new TimelineReaderContext("cluster1", "user1", "some_flow_name",
         1002345678919L, null, TimelineEntityType.YARN_APPLICATION.toString(),
         null),
-        new TimelineEntityFilters(null, null, null, null, null, null,
-        confFilterList1, null, null),
+        new TimelineEntityFilters.Builder().configFilters(confFilterList1)
+            .build(),
         new TimelineDataToRetrieve(null, null, EnumSet.of(Field.CONFIGS),
         null));
     assertEquals(1, entities.size());
@@ -1268,8 +1253,8 @@ public class TestHBaseTimelineStorageApps {
         new TimelineReaderContext("cluster1", "user1", "some_flow_name",
         1002345678919L, null, TimelineEntityType.YARN_APPLICATION.toString(),
         null),
-        new TimelineEntityFilters(null, null, null, null, null, null,
-        confFilterList2, null, null),
+        new TimelineEntityFilters.Builder().configFilters(confFilterList2)
+            .build(),
         new TimelineDataToRetrieve(null, null, EnumSet.of(Field.CONFIGS),
         null));
     assertEquals(0, entities.size());
@@ -1281,8 +1266,8 @@ public class TestHBaseTimelineStorageApps {
         new TimelineReaderContext("cluster1", "user1", "some_flow_name",
         1002345678919L, null, TimelineEntityType.YARN_APPLICATION.toString(),
         null),
-        new TimelineEntityFilters(null, null, null, null, null, null,
-        confFilterList3, null, null),
+        new TimelineEntityFilters.Builder().configFilters(confFilterList3)
+            .build(),
         new TimelineDataToRetrieve(null, null, EnumSet.of(Field.CONFIGS),
         null));
     assertEquals(0, entities.size());
@@ -1294,8 +1279,8 @@ public class TestHBaseTimelineStorageApps {
         new TimelineReaderContext("cluster1", "user1", "some_flow_name",
         1002345678919L, null, TimelineEntityType.YARN_APPLICATION.toString(),
         null),
-        new TimelineEntityFilters(null, null, null, null, null, null,
-        confFilterList4, null, null),
+        new TimelineEntityFilters.Builder().configFilters(confFilterList4)
+            .build(),
         new TimelineDataToRetrieve(null, null, EnumSet.of(Field.CONFIGS),
         null));
     assertEquals(0, entities.size());
@@ -1307,8 +1292,8 @@ public class TestHBaseTimelineStorageApps {
         new TimelineReaderContext("cluster1", "user1", "some_flow_name",
         1002345678919L, null, TimelineEntityType.YARN_APPLICATION.toString(),
         null),
-        new TimelineEntityFilters(null, null, null, null, null, null,
-        confFilterList5, null, null),
+        new TimelineEntityFilters.Builder().configFilters(confFilterList5)
+            .build(),
         new TimelineDataToRetrieve(null, null, EnumSet.of(Field.CONFIGS),
         null));
     assertEquals(3, entities.size());
@@ -1325,8 +1310,7 @@ public class TestHBaseTimelineStorageApps {
         new TimelineReaderContext("cluster1", "user1", "some_flow_name",
         1002345678919L, null, TimelineEntityType.YARN_APPLICATION.toString(),
         null),
-        new TimelineEntityFilters(null, null, null, null, null, null, null,
-        null, ef),
+        new TimelineEntityFilters.Builder().eventFilters(ef).build(),
         new TimelineDataToRetrieve(null, null, EnumSet.of(Field.ALL), null));
     assertEquals(1, entities.size());
     int eventCnt = 0;
@@ -1347,8 +1331,8 @@ public class TestHBaseTimelineStorageApps {
         new TimelineReaderContext("cluster1", "user1", "some_flow_name",
         1002345678919L, null, TimelineEntityType.YARN_APPLICATION.toString(),
         null),
-        new TimelineEntityFilters(null, null, null, null, null, null, null,
-        null, ef1), new TimelineDataToRetrieve());
+        new TimelineEntityFilters.Builder().eventFilters(ef1).build(),
+        new TimelineDataToRetrieve());
     assertEquals(1, entities.size());
     eventCnt = 0;
     for (TimelineEntity timelineEntity : entities) {
@@ -1366,8 +1350,7 @@ public class TestHBaseTimelineStorageApps {
         new TimelineReaderContext("cluster1", "user1", "some_flow_name",
         1002345678919L, null, TimelineEntityType.YARN_APPLICATION.toString(),
         null),
-        new TimelineEntityFilters(null, null, null, null, null, null, null,
-        null, ef2),
+        new TimelineEntityFilters.Builder().eventFilters(ef2).build(),
         new TimelineDataToRetrieve());
     assertEquals(2, entities.size());
     eventCnt = 0;
@@ -1390,8 +1373,7 @@ public class TestHBaseTimelineStorageApps {
         new TimelineReaderContext("cluster1", "user1", "some_flow_name",
         1002345678919L, null, TimelineEntityType.YARN_APPLICATION.toString(),
         null),
-        new TimelineEntityFilters(null, null, null, null, null, null, null,
-        null, ef3),
+        new TimelineEntityFilters.Builder().eventFilters(ef3).build(),
         new TimelineDataToRetrieve());
     assertEquals(0, entities.size());
 
@@ -1408,8 +1390,7 @@ public class TestHBaseTimelineStorageApps {
         new TimelineReaderContext("cluster1", "user1", "some_flow_name",
         1002345678919L, null, TimelineEntityType.YARN_APPLICATION.toString(),
         null),
-        new TimelineEntityFilters(null, null, null, null, null, null, null,
-        null, ef4),
+        new TimelineEntityFilters.Builder().eventFilters(ef4).build(),
         new TimelineDataToRetrieve());
     assertEquals(1, entities.size());
     eventCnt = 0;
@@ -1430,8 +1411,7 @@ public class TestHBaseTimelineStorageApps {
         new TimelineReaderContext("cluster1", "user1", "some_flow_name",
         1002345678919L, null, TimelineEntityType.YARN_APPLICATION.toString(),
         null),
-        new TimelineEntityFilters(null, null, null, null, null, null, null,
-        null, ef5),
+        new TimelineEntityFilters.Builder().eventFilters(ef5).build(),
         new TimelineDataToRetrieve());
     assertEquals(1, entities.size());
     eventCnt = 0;
@@ -1460,7 +1440,7 @@ public class TestHBaseTimelineStorageApps {
         new TimelineReaderContext("cluster1", "user1", "some_flow_name",
         1002345678919L, null, TimelineEntityType.YARN_APPLICATION.toString(),
         null) ,
-        new TimelineEntityFilters(),
+        new TimelineEntityFilters.Builder().build(),
         new TimelineDataToRetrieve(list, null, null, null));
     int cfgCnt = 0;
     for (TimelineEntity entity : es1) {
@@ -1485,8 +1465,8 @@ public class TestHBaseTimelineStorageApps {
         new TimelineReaderContext("cluster1", "user1", "some_flow_name",
         1002345678919L, null, TimelineEntityType.YARN_APPLICATION.toString(),
         null),
-        new TimelineEntityFilters(null, null, null, null, null, null,
-        confFilterList, null, null),
+        new TimelineEntityFilters.Builder().configFilters(confFilterList)
+            .build(),
         new TimelineDataToRetrieve(list, null, null, null));
     assertEquals(1, entities.size());
     int cfgCnt = 0;
@@ -1518,8 +1498,8 @@ public class TestHBaseTimelineStorageApps {
         new TimelineReaderContext("cluster1", "user1", "some_flow_name",
         1002345678919L, null, TimelineEntityType.YARN_APPLICATION.toString(),
         null),
-        new TimelineEntityFilters(null, null, null, null, null, null,
-        confFilterList1, null, null),
+        new TimelineEntityFilters.Builder().configFilters(confFilterList1)
+            .build(),
         new TimelineDataToRetrieve(confsToRetrieve, null, null, null));
     assertEquals(2, entities.size());
     cfgCnt = 0;
@@ -1549,8 +1529,8 @@ public class TestHBaseTimelineStorageApps {
         new TimelineReaderContext("cluster1", "user1", "some_flow_name",
         1002345678919L, null, TimelineEntityType.YARN_APPLICATION.toString(),
         null),
-        new TimelineEntityFilters(null, null, null, null, null, null, null,
-        metricFilterList, null),
+        new TimelineEntityFilters.Builder().metricFilters(metricFilterList)
+            .build(),
         new TimelineDataToRetrieve(null, null, EnumSet.of(Field.METRICS),
         null));
     assertEquals(2, entities.size());
@@ -1564,8 +1544,8 @@ public class TestHBaseTimelineStorageApps {
         new TimelineReaderContext("cluster1", "user1", "some_flow_name",
         1002345678919L, null, TimelineEntityType.YARN_APPLICATION.toString(),
         null),
-        new TimelineEntityFilters(null, null, null, null, null, null, null,
-        metricFilterList, null),
+        new TimelineEntityFilters.Builder().metricFilters(metricFilterList)
+            .build(),
         new TimelineDataToRetrieve(null, null, EnumSet.of(Field.ALL), null));
     assertEquals(2, entities.size());
     metricCnt = 0;
@@ -1583,8 +1563,8 @@ public class TestHBaseTimelineStorageApps {
         new TimelineReaderContext("cluster1", "user1", "some_flow_name",
         1002345678919L, null, TimelineEntityType.YARN_APPLICATION.toString(),
         null),
-        new TimelineEntityFilters(null, null, null, null, null, null, null,
-        metricFilterList1, null),
+        new TimelineEntityFilters.Builder().metricFilters(metricFilterList1)
+            .build(),
         new TimelineDataToRetrieve(null, null, EnumSet.of(Field.METRICS),
         null));
     assertEquals(1, entities.size());
@@ -1603,8 +1583,8 @@ public class TestHBaseTimelineStorageApps {
         new TimelineReaderContext("cluster1", "user1", "some_flow_name",
         1002345678919L, null, TimelineEntityType.YARN_APPLICATION.toString(),
         null),
-        new TimelineEntityFilters(null, null, null, null, null, null, null,
-        metricFilterList2, null),
+        new TimelineEntityFilters.Builder().metricFilters(metricFilterList2)
+            .build(),
         new TimelineDataToRetrieve(null, null, EnumSet.of(Field.METRICS),
         null));
     assertEquals(0, entities.size());
@@ -1616,8 +1596,8 @@ public class TestHBaseTimelineStorageApps {
         new TimelineReaderContext("cluster1", "user1", "some_flow_name",
         1002345678919L, null, TimelineEntityType.YARN_APPLICATION.toString(),
         null),
-        new TimelineEntityFilters(null, null, null, null, null, null, null,
-        metricFilterList3, null),
+        new TimelineEntityFilters.Builder().metricFilters(metricFilterList3)
+            .build(),
         new TimelineDataToRetrieve(null, null, EnumSet.of(Field.METRICS),
         null));
     assertEquals(0, entities.size());
@@ -1629,8 +1609,8 @@ public class TestHBaseTimelineStorageApps {
         new TimelineReaderContext("cluster1", "user1", "some_flow_name",
         1002345678919L, null, TimelineEntityType.YARN_APPLICATION.toString(),
         null),
-        new TimelineEntityFilters(null, null, null, null, null, null, null,
-        metricFilterList4, null),
+        new TimelineEntityFilters.Builder().metricFilters(metricFilterList4)
+            .build(),
         new TimelineDataToRetrieve(null, null, EnumSet.of(Field.METRICS),
         null));
     assertEquals(0, entities.size());
@@ -1642,8 +1622,8 @@ public class TestHBaseTimelineStorageApps {
         new TimelineReaderContext("cluster1", "user1", "some_flow_name",
         1002345678919L, null, TimelineEntityType.YARN_APPLICATION.toString(),
         null),
-        new TimelineEntityFilters(null, null, null, null, null, null, null,
-        metricFilterList5, null),
+        new TimelineEntityFilters.Builder().metricFilters(metricFilterList5)
+            .build(),
         new TimelineDataToRetrieve(null, null, EnumSet.of(Field.METRICS),
         null));
     assertEquals(3, entities.size());
@@ -1665,7 +1645,7 @@ public class TestHBaseTimelineStorageApps {
         new TimelineReaderContext("cluster1", "user1", "some_flow_name",
         1002345678919L, null, TimelineEntityType.YARN_APPLICATION.toString(),
         null),
-        new TimelineEntityFilters(),
+        new TimelineEntityFilters.Builder().build(),
         new TimelineDataToRetrieve(null, list, null, null));
     int metricCnt = 0;
     for (TimelineEntity entity : es1) {
@@ -1690,8 +1670,8 @@ public class TestHBaseTimelineStorageApps {
         new TimelineReaderContext("cluster1", "user1", "some_flow_name",
         1002345678919L, null, TimelineEntityType.YARN_APPLICATION.toString(),
         null),
-        new TimelineEntityFilters(null, null, null, null, null, null, null,
-        metricFilterList, null),
+        new TimelineEntityFilters.Builder().metricFilters(metricFilterList)
+            .build(),
         new TimelineDataToRetrieve(null, list, null, null));
     int metricCnt = 0;
     assertEquals(1, entities.size());
@@ -1716,8 +1696,8 @@ public class TestHBaseTimelineStorageApps {
         new TimelineReaderContext("cluster1", "user1", "some_flow_name",
         1002345678919L, null, TimelineEntityType.YARN_APPLICATION.toString(),
         null),
-        new TimelineEntityFilters(null, null, null, null, null, null, null,
-        metricFilterList1, null),
+        new TimelineEntityFilters.Builder().metricFilters(metricFilterList1)
+            .build(),
         new TimelineDataToRetrieve(null, metricsToRetrieve, null, null));
     metricCnt = 0;
     assertEquals(2, entities.size());
@@ -1733,8 +1713,9 @@ public class TestHBaseTimelineStorageApps {
     entities = reader.getEntities(new TimelineReaderContext("cluster1", "user1",
         "some_flow_name", 1002345678919L, null,
         TimelineEntityType.YARN_APPLICATION.toString(), null),
-        new TimelineEntityFilters(null, null, null, null, null, null, null,
-        metricFilterList1, null), new TimelineDataToRetrieve(null,
+        new TimelineEntityFilters.Builder().metricFilters(metricFilterList1)
+            .build(),
+        new TimelineDataToRetrieve(null,
         metricsToRetrieve, EnumSet.of(Field.METRICS), Integer.MAX_VALUE));
     metricCnt = 0;
     int metricValCnt = 0;
@@ -1769,8 +1750,7 @@ public class TestHBaseTimelineStorageApps {
         new TimelineReaderContext("cluster1", "user1", "some_flow_name",
         1002345678919L, null, TimelineEntityType.YARN_APPLICATION.toString(),
         null),
-        new TimelineEntityFilters(null, null, null, null, null, infoFilterList,
-        null, null, null),
+        new TimelineEntityFilters.Builder().infoFilters(infoFilterList).build(),
         new TimelineDataToRetrieve(null, null, EnumSet.of(Field.INFO), null));
     assertEquals(2, entities.size());
     int infoCnt = 0;
@@ -1786,8 +1766,8 @@ public class TestHBaseTimelineStorageApps {
         new TimelineReaderContext("cluster1", "user1", "some_flow_name",
         1002345678919L, null, TimelineEntityType.YARN_APPLICATION.toString(),
         null),
-        new TimelineEntityFilters(null, null, null, null, null, infoFilterList1,
-        null, null, null),
+        new TimelineEntityFilters.Builder().infoFilters(infoFilterList1)
+            .build(),
         new TimelineDataToRetrieve(null, null, EnumSet.of(Field.INFO), null));
     assertEquals(1, entities.size());
     infoCnt = 0;
@@ -1805,8 +1785,8 @@ public class TestHBaseTimelineStorageApps {
         new TimelineReaderContext("cluster1", "user1", "some_flow_name",
         1002345678919L, null, TimelineEntityType.YARN_APPLICATION.toString(),
         null),
-        new TimelineEntityFilters(null, null, null, null, null, infoFilterList2,
-        null, null, null),
+        new TimelineEntityFilters.Builder().infoFilters(infoFilterList2)
+            .build(),
         new TimelineDataToRetrieve(null, null, EnumSet.of(Field.INFO), null));
     assertEquals(0, entities.size());
 
@@ -1817,8 +1797,8 @@ public class TestHBaseTimelineStorageApps {
         new TimelineReaderContext("cluster1", "user1", "some_flow_name",
         1002345678919L, null, TimelineEntityType.YARN_APPLICATION.toString(),
         null),
-        new TimelineEntityFilters(null, null, null, null, null, infoFilterList3,
-        null, null, null),
+        new TimelineEntityFilters.Builder().infoFilters(infoFilterList3)
+            .build(),
         new TimelineDataToRetrieve(null, null, EnumSet.of(Field.INFO), null));
     assertEquals(0, entities.size());
 
@@ -1829,8 +1809,8 @@ public class TestHBaseTimelineStorageApps {
         new TimelineReaderContext("cluster1", "user1", "some_flow_name",
         1002345678919L, null, TimelineEntityType.YARN_APPLICATION.toString(),
         null),
-        new TimelineEntityFilters(null, null, null, null, null, infoFilterList4,
-        null, null, null),
+        new TimelineEntityFilters.Builder().infoFilters(infoFilterList4)
+            .build(),
         new TimelineDataToRetrieve(null, null, EnumSet.of(Field.INFO), null));
     assertEquals(0, entities.size());
 
@@ -1841,8 +1821,8 @@ public class TestHBaseTimelineStorageApps {
         new TimelineReaderContext("cluster1", "user1", "some_flow_name",
         1002345678919L, null, TimelineEntityType.YARN_APPLICATION.toString(),
         null),
-        new TimelineEntityFilters(null, null, null, null, null, infoFilterList5,
-        null, null, null),
+        new TimelineEntityFilters.Builder().infoFilters(infoFilterList5)
+            .build(),
         new TimelineDataToRetrieve(null, null, EnumSet.of(Field.INFO), null));
     assertEquals(3, entities.size());
   }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/b87b72b4/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase-tests/src/test/java/org/apache/hadoop/yarn/server/timelineservice/storage/TestHBaseTimelineStorageEntities.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase-tests/src/test/java/org/apache/hadoop/yarn/server/timelineservice/storage/TestHBaseTimelineStorageEntities.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase-tests/src/test/java/org/apache/hadoop/yarn/server/timelineservice/storage/TestHBaseTimelineStorageEntities.java
index 1237fb3..380d352 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase-tests/src/test/java/org/apache/hadoop/yarn/server/timelineservice/storage/TestHBaseTimelineStorageEntities.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase-tests/src/test/java/org/apache/hadoop/yarn/server/timelineservice/storage/TestHBaseTimelineStorageEntities.java
@@ -300,7 +300,7 @@ public class TestHBaseTimelineStorageEntities {
       Set<TimelineEntity> es1 = reader.getEntities(
           new TimelineReaderContext(cluster, user, flow, runid, appName,
           entity.getType(), null),
-          new TimelineEntityFilters(),
+          new TimelineEntityFilters.Builder().build(),
           new TimelineDataToRetrieve(null, null, EnumSet.of(Field.ALL),
           Integer.MAX_VALUE));
       assertNotNull(e1);
@@ -455,7 +455,7 @@ public class TestHBaseTimelineStorageEntities {
       Set<TimelineEntity> es1 = reader.getEntities(
           new TimelineReaderContext(cluster, user, flow, runid, appName,
           entity.getType(), null),
-          new TimelineEntityFilters(),
+          new TimelineEntityFilters.Builder().build(),
           new TimelineDataToRetrieve(null, null, EnumSet.of(Field.ALL), null));
       assertNotNull(e1);
       assertEquals(1, es1.size());
@@ -553,7 +553,7 @@ public class TestHBaseTimelineStorageEntities {
     Set<TimelineEntity> entities = reader.getEntities(
         new TimelineReaderContext("cluster1", "user1", "some_flow_name",
         1002345678919L, "application_1231111111_1111", "world",
-        null), new TimelineEntityFilters(),
+        null), new TimelineEntityFilters.Builder().build(),
         new TimelineDataToRetrieve(null, null, EnumSet.of(Field.ALL), null));
     assertEquals(3, entities.size());
     int cfgCnt = 0;
@@ -589,8 +589,9 @@ public class TestHBaseTimelineStorageEntities {
     Set<TimelineEntity> entities = reader.getEntities(
         new TimelineReaderContext("cluster1", "user1", "some_flow_name",
         1002345678919L, "application_1231111111_1111", "world", null),
-        new TimelineEntityFilters(null, 1425016502000L, 1425016502040L, null,
-        null, null, null, null, null), new TimelineDataToRetrieve());
+        new TimelineEntityFilters.Builder().createdTimeBegin(1425016502000L)
+            .createTimeEnd(1425016502040L).build(),
+        new TimelineDataToRetrieve());
     assertEquals(3, entities.size());
     for (TimelineEntity entity : entities) {
       if (!entity.getId().equals("hello") && !entity.getId().equals("hello1") &&
@@ -602,8 +603,9 @@ public class TestHBaseTimelineStorageEntities {
     entities = reader.getEntities(
         new TimelineReaderContext("cluster1", "user1", "some_flow_name",
         1002345678919L, "application_1231111111_1111", "world", null),
-        new TimelineEntityFilters(null, 1425016502015L, null, null, null, null,
-        null, null, null), new TimelineDataToRetrieve());
+        new TimelineEntityFilters.Builder().createdTimeBegin(1425016502015L)
+            .build(),
+        new TimelineDataToRetrieve());
     assertEquals(2, entities.size());
     for (TimelineEntity entity : entities) {
       if (!entity.getId().equals("hello1") &&
@@ -614,8 +616,9 @@ public class TestHBaseTimelineStorageEntities {
     entities = reader.getEntities(
         new TimelineReaderContext("cluster1", "user1", "some_flow_name",
         1002345678919L, "application_1231111111_1111", "world",  null),
-        new TimelineEntityFilters(null, null, 1425016502015L, null, null, null,
-        null, null, null), new TimelineDataToRetrieve());
+        new TimelineEntityFilters.Builder().createTimeEnd(1425016502015L)
+            .build(),
+        new TimelineDataToRetrieve());
     assertEquals(1, entities.size());
     for (TimelineEntity entity : entities) {
       if (!entity.getId().equals("hello")) {
@@ -647,8 +650,9 @@ public class TestHBaseTimelineStorageEntities {
     Set<TimelineEntity> entities = reader.getEntities(
         new TimelineReaderContext("cluster1", "user1", "some_flow_name",
         1002345678919L, "application_1231111111_1111", "world", null),
-        new TimelineEntityFilters(null, null, null, relatesTo, isRelatedTo,
-        null, null, null, eventFilter), new TimelineDataToRetrieve());
+        new TimelineEntityFilters.Builder().relatesTo(relatesTo)
+            .isRelatedTo(isRelatedTo).eventFilters(eventFilter).build(),
+        new TimelineDataToRetrieve());
     assertEquals(1, entities.size());
     int eventCnt = 0;
     int isRelatedToCnt = 0;
@@ -676,8 +680,7 @@ public class TestHBaseTimelineStorageEntities {
     Set<TimelineEntity> entities = reader.getEntities(
         new TimelineReaderContext("cluster1", "user1", "some_flow_name",
         1002345678919L, "application_1231111111_1111", "world", null),
-        new TimelineEntityFilters(null, null, null, null, null, null, null,
-        null, ef),
+        new TimelineEntityFilters.Builder().eventFilters(ef).build(),
         new TimelineDataToRetrieve(null, null, EnumSet.of(Field.ALL), null));
     assertEquals(1, entities.size());
     int eventCnt = 0;
@@ -697,8 +700,7 @@ public class TestHBaseTimelineStorageEntities {
     entities = reader.getEntities(
         new TimelineReaderContext("cluster1", "user1", "some_flow_name",
         1002345678919L, "application_1231111111_1111", "world", null),
-        new TimelineEntityFilters(null, null, null, null, null, null, null,
-        null, ef1),
+        new TimelineEntityFilters.Builder().eventFilters(ef1).build(),
         new TimelineDataToRetrieve());
     assertEquals(1, entities.size());
     eventCnt = 0;
@@ -716,8 +718,7 @@ public class TestHBaseTimelineStorageEntities {
     entities = reader.getEntities(
         new TimelineReaderContext("cluster1", "user1", "some_flow_name",
         1002345678919L, "application_1231111111_1111", "world", null),
-        new TimelineEntityFilters(null, null, null, null, null, null, null,
-        null, ef2),
+        new TimelineEntityFilters.Builder().eventFilters(ef2).build(),
         new TimelineDataToRetrieve());
     assertEquals(2, entities.size());
     eventCnt = 0;
@@ -738,8 +739,7 @@ public class TestHBaseTimelineStorageEntities {
     entities = reader.getEntities(
         new TimelineReaderContext("cluster1", "user1", "some_flow_name",
         1002345678919L, "application_1231111111_1111", "world", null),
-        new TimelineEntityFilters(null, null, null, null, null, null, null,
-        null, ef3),
+        new TimelineEntityFilters.Builder().eventFilters(ef3).build(),
         new TimelineDataToRetrieve());
     assertEquals(0, entities.size());
 
@@ -755,8 +755,7 @@ public class TestHBaseTimelineStorageEntities {
     entities = reader.getEntities(
         new TimelineReaderContext("cluster1", "user1", "some_flow_name",
         1002345678919L, "application_1231111111_1111", "world", null),
-        new TimelineEntityFilters(null, null, null, null, null, null, null,
-        null, ef4),
+        new TimelineEntityFilters.Builder().eventFilters(ef4).build(),
         new TimelineDataToRetrieve());
     assertEquals(1, entities.size());
     eventCnt = 0;
@@ -776,8 +775,7 @@ public class TestHBaseTimelineStorageEntities {
     entities = reader.getEntities(
         new TimelineReaderContext("cluster1", "user1", "some_flow_name",
         1002345678919L, "application_1231111111_1111", "world", null),
-        new TimelineEntityFilters(null, null, null, null, null, null, null,
-        null, ef5),
+        new TimelineEntityFilters.Builder().eventFilters(ef5).build(),
         new TimelineDataToRetrieve());
     assertEquals(1, entities.size());
     eventCnt = 0;
@@ -802,8 +800,7 @@ public class TestHBaseTimelineStorageEntities {
     Set<TimelineEntity> entities = reader.getEntities(
         new TimelineReaderContext("cluster1", "user1", "some_flow_name",
         1002345678919L, "application_1231111111_1111", "world", null),
-        new TimelineEntityFilters(null, null, null, null, irt, null, null, null,
-        null),
+        new TimelineEntityFilters.Builder().isRelatedTo(irt).build(),
         new TimelineDataToRetrieve(null, null, EnumSet.of(Field.ALL), null));
     assertEquals(2, entities.size());
     int isRelatedToCnt = 0;
@@ -826,8 +823,7 @@ public class TestHBaseTimelineStorageEntities {
     entities = reader.getEntities(
         new TimelineReaderContext("cluster1", "user1", "some_flow_name",
         1002345678919L, "application_1231111111_1111", "world", null),
-        new TimelineEntityFilters(null, null, null, null, irt1, null, null,
-        null, null),
+        new TimelineEntityFilters.Builder().isRelatedTo(irt1).build(),
         new TimelineDataToRetrieve());
     assertEquals(1, entities.size());
     isRelatedToCnt = 0;
@@ -849,8 +845,7 @@ public class TestHBaseTimelineStorageEntities {
     entities = reader.getEntities(
         new TimelineReaderContext("cluster1", "user1", "some_flow_name",
         1002345678919L, "application_1231111111_1111", "world", null),
-        new TimelineEntityFilters(null, null, null, null, irt2, null, null,
-        null, null),
+        new TimelineEntityFilters.Builder().isRelatedTo(irt2).build(),
         new TimelineDataToRetrieve());
     assertEquals(2, entities.size());
     isRelatedToCnt = 0;
@@ -870,8 +865,7 @@ public class TestHBaseTimelineStorageEntities {
     entities = reader.getEntities(
         new TimelineReaderContext("cluster1", "user1", "some_flow_name",
         1002345678919L, "application_1231111111_1111", "world", null),
-        new TimelineEntityFilters(null, null, null, null, irt3, null, null,
-        null, null),
+        new TimelineEntityFilters.Builder().isRelatedTo(irt3).build(),
         new TimelineDataToRetrieve());
     assertEquals(1, entities.size());
     isRelatedToCnt = 0;
@@ -893,8 +887,7 @@ public class TestHBaseTimelineStorageEntities {
     entities = reader.getEntities(
         new TimelineReaderContext("cluster1", "user1", "some_flow_name",
         1002345678919L, "application_1231111111_1111", "world", null),
-        new TimelineEntityFilters(null, null, null, null, irt4, null, null,
-        null, null),
+        new TimelineEntityFilters.Builder().isRelatedTo(irt4).build(),
         new TimelineDataToRetrieve());
     assertEquals(0, entities.size());
 
@@ -905,8 +898,7 @@ public class TestHBaseTimelineStorageEntities {
     entities = reader.getEntities(
         new TimelineReaderContext("cluster1", "user1", "some_flow_name",
         1002345678919L, "application_1231111111_1111", "world", null),
-        new TimelineEntityFilters(null, null, null, null, irt5, null, null,
-        null, null),
+        new TimelineEntityFilters.Builder().isRelatedTo(irt5).build(),
         new TimelineDataToRetrieve());
     assertEquals(0, entities.size());
 
@@ -925,8 +917,7 @@ public class TestHBaseTimelineStorageEntities {
     entities = reader.getEntities(
         new TimelineReaderContext("cluster1", "user1", "some_flow_name",
         1002345678919L, "application_1231111111_1111", "world", null),
-        new TimelineEntityFilters(null, null, null, null, irt6, null, null,
-        null, null),
+        new TimelineEntityFilters.Builder().isRelatedTo(irt6).build(),
         new TimelineDataToRetrieve());
     assertEquals(1, entities.size());
     isRelatedToCnt = 0;
@@ -951,8 +942,7 @@ public class TestHBaseTimelineStorageEntities {
     Set<TimelineEntity> entities = reader.getEntities(
         new TimelineReaderContext("cluster1", "user1", "some_flow_name",
         1002345678919L, "application_1231111111_1111", "world", null),
-        new TimelineEntityFilters(null, null, null, rt, null, null, null, null,
-        null),
+        new TimelineEntityFilters.Builder().relatesTo(rt).build(),
         new TimelineDataToRetrieve(null, null, EnumSet.of(Field.ALL), null));
     assertEquals(2, entities.size());
     int relatesToCnt = 0;
@@ -975,8 +965,7 @@ public class TestHBaseTimelineStorageEntities {
     entities = reader.getEntities(
         new TimelineReaderContext("cluster1", "user1", "some_flow_name",
         1002345678919L, "application_1231111111_1111", "world", null),
-        new TimelineEntityFilters(null, null, null, rt1, null, null, null, null,
-        null),
+        new TimelineEntityFilters.Builder().relatesTo(rt1).build(),
         new TimelineDataToRetrieve());
     assertEquals(1, entities.size());
     relatesToCnt = 0;
@@ -998,8 +987,7 @@ public class TestHBaseTimelineStorageEntities {
     entities = reader.getEntities(
         new TimelineReaderContext("cluster1", "user1", "some_flow_name",
         1002345678919L, "application_1231111111_1111", "world", null),
-        new TimelineEntityFilters(null, null, null, rt2, null, null, null, null,
-        null),
+        new TimelineEntityFilters.Builder().relatesTo(rt2).build(),
         new TimelineDataToRetrieve());
     assertEquals(2, entities.size());
     relatesToCnt = 0;
@@ -1019,8 +1007,7 @@ public class TestHBaseTimelineStorageEntities {
     entities = reader.getEntities(
         new TimelineReaderContext("cluster1", "user1", "some_flow_name",
         1002345678919L, "application_1231111111_1111", "world", null),
-        new TimelineEntityFilters(null, null, null, rt3, null, null, null, null,
-        null),
+        new TimelineEntityFilters.Builder().relatesTo(rt3).build(),
         new TimelineDataToRetrieve());
     assertEquals(1, entities.size());
     relatesToCnt = 0;
@@ -1042,8 +1029,7 @@ public class TestHBaseTimelineStorageEntities {
     entities = reader.getEntities(
         new TimelineReaderContext("cluster1", "user1", "some_flow_name",
         1002345678919L, "application_1231111111_1111", "world", null),
-        new TimelineEntityFilters(null, null, null, rt4, null, null, null, null,
-        null),
+        new TimelineEntityFilters.Builder().relatesTo(rt4).build(),
         new TimelineDataToRetrieve());
     assertEquals(0, entities.size());
 
@@ -1054,8 +1040,7 @@ public class TestHBaseTimelineStorageEntities {
     entities = reader.getEntities(
         new TimelineReaderContext("cluster1", "user1", "some_flow_name",
         1002345678919L, "application_1231111111_1111", "world", null),
-        new TimelineEntityFilters(null, null, null, rt5, null, null, null, null,
-        null),
+        new TimelineEntityFilters.Builder().relatesTo(rt5).build(),
         new TimelineDataToRetrieve());
     assertEquals(0, entities.size());
 
@@ -1074,8 +1059,7 @@ public class TestHBaseTimelineStorageEntities {
     entities = reader.getEntities(
         new TimelineReaderContext("cluster1", "user1", "some_flow_name",
         1002345678919L, "application_1231111111_1111", "world", null),
-        new TimelineEntityFilters(null, null, null, rt6, null, null, null, null,
-        null),
+        new TimelineEntityFilters.Builder().relatesTo(rt6).build(),
         new TimelineDataToRetrieve());
     assertEquals(1, entities.size());
     relatesToCnt = 0;
@@ -1110,8 +1094,7 @@ public class TestHBaseTimelineStorageEntities {
     entities = reader.getEntities(
         new TimelineReaderContext("cluster1", "user1", "some_flow_name",
         1002345678919L, "application_1231111111_1111", "world", null),
-        new TimelineEntityFilters(null, null, null, rt7, null, null, null, null,
-        null),
+        new TimelineEntityFilters.Builder().relatesTo(rt7).build(),
         new TimelineDataToRetrieve());
     assertEquals(1, entities.size());
     relatesToCnt = 0;
@@ -1138,7 +1121,7 @@ public class TestHBaseTimelineStorageEntities {
     Set<TimelineEntity> es1 = reader.getEntities(
         new TimelineReaderContext("cluster1", "user1", "some_flow_name",
         1002345678919L, "application_1231111111_1111", "world", null),
-        new TimelineEntityFilters(),
+        new TimelineEntityFilters.Builder().build(),
         new TimelineDataToRetrieve());
     assertEquals(3, es1.size());
     for (TimelineEntity e : es1) {
@@ -1162,7 +1145,7 @@ public class TestHBaseTimelineStorageEntities {
     Set<TimelineEntity> es1 = reader.getEntities(
         new TimelineReaderContext("cluster1", "user1", "some_flow_name",
         1002345678919L, "application_1231111111_1111", "world", null),
-        new TimelineEntityFilters(),
+        new TimelineEntityFilters.Builder().build(),
         new TimelineDataToRetrieve(
         null, null, EnumSet.of(Field.IS_RELATED_TO, Field.METRICS), null));
     assertEquals(3, es1.size());
@@ -1193,7 +1176,7 @@ public class TestHBaseTimelineStorageEntities {
     Set<TimelineEntity> es1 = reader.getEntities(
         new TimelineReaderContext("cluster1", "user1", "some_flow_name",
         1002345678919L, "application_1231111111_1111", "world", null),
-        new TimelineEntityFilters(),
+        new TimelineEntityFilters.Builder().build(),
         new TimelineDataToRetrieve(list, null, null, null));
     int cfgCnt = 0;
     for (TimelineEntity entity : es1) {
@@ -1223,8 +1206,8 @@ public class TestHBaseTimelineStorageEntities {
     Set<TimelineEntity> entities = reader.getEntities(
         new TimelineReaderContext("cluster1", "user1", "some_flow_name",
         1002345678919L, "application_1231111111_1111", "world", null),
-        new TimelineEntityFilters(null, null, null, null, null, null,
-        confFilterList, null, null),
+        new TimelineEntityFilters.Builder().configFilters(confFilterList)
+            .build(),
         new TimelineDataToRetrieve(null, null, EnumSet.of(Field.CONFIGS),
         null));
     assertEquals(2, entities.size());
@@ -1237,8 +1220,8 @@ public class TestHBaseTimelineStorageEntities {
     entities = reader.getEntities(
         new TimelineReaderContext("cluster1", "user1", "some_flow_name",
         1002345678919L, "application_1231111111_1111", "world", null),
-        new TimelineEntityFilters(null, null, null, null, null, null,
-        confFilterList, null, null),
+        new TimelineEntityFilters.Builder().configFilters(confFilterList)
+            .build(),
         new TimelineDataToRetrieve(null, null, EnumSet.of(Field.ALL), null));
     assertEquals(2, entities.size());
     cfgCnt = 0;
@@ -1253,8 +1236,8 @@ public class TestHBaseTimelineStorageEntities {
     entities = reader.getEntities(
         new TimelineReaderContext("cluster1", "user1", "some_flow_name",
         1002345678919L, "application_1231111111_1111", "world", null),
-        new TimelineEntityFilters(null, null, null, null, null, null,
-        confFilterList1, null, null),
+        new TimelineEntityFilters.Builder().configFilters(confFilterList1)
+            .build(),
         new TimelineDataToRetrieve(null, null, EnumSet.of(Field.CONFIGS),
         null));
     assertEquals(1, entities.size());
@@ -1272,8 +1255,8 @@ public class TestHBaseTimelineStorageEntities {
     entities = reader.getEntities(
         new TimelineReaderContext("cluster1", "user1", "some_flow_name",
         1002345678919L, "application_1231111111_1111", "world", null),
-        new TimelineEntityFilters(null, null, null, null, null, null,
-        confFilterList2, null, null),
+        new TimelineEntityFilters.Builder().configFilters(confFilterList2)
+            .build(),
         new TimelineDataToRetrieve(null, null, EnumSet.of(Field.CONFIGS),
         null));
     assertEquals(0, entities.size());
@@ -1284,8 +1267,8 @@ public class TestHBaseTimelineStorageEntities {
     entities = reader.getEntities(
         new TimelineReaderContext("cluster1", "user1", "some_flow_name",
         1002345678919L, "application_1231111111_1111", "world", null),
-        new TimelineEntityFilters(null, null, null, null, null, null,
-        confFilterList3, null, null),
+        new TimelineEntityFilters.Builder().configFilters(confFilterList3)
+            .build(),
         new TimelineDataToRetrieve(null, null, EnumSet.of(Field.CONFIGS),
         null));
     assertEquals(0, entities.size());
@@ -1294,11 +1277,11 @@ public class TestHBaseTimelineStorageEntities {
         new TimelineKeyValueFilter(
         TimelineCompareOp.NOT_EQUAL, "dummy_config", "value1"));
     entities = reader.getEntities(
-            new TimelineReaderContext("cluster1", "user1", "some_flow_name",
+        new TimelineReaderContext("cluster1", "user1", "some_flow_name",
             1002345678919L, "application_1231111111_1111", "world", null),
-            new TimelineEntityFilters(null, null, null, null, null, null,
-            confFilterList4, null, null),
-            new TimelineDataToRetrieve(null, null, EnumSet.of(Field.CONFIGS),
+        new TimelineEntityFilters.Builder().configFilters(confFilterList4)
+            .build(),
+        new TimelineDataToRetrieve(null, null, EnumSet.of(Field.CONFIGS),
             null));
     assertEquals(0, entities.size());
 
@@ -1308,8 +1291,8 @@ public class TestHBaseTimelineStorageEntities {
     entities = reader.getEntities(
         new TimelineReaderContext("cluster1", "user1", "some_flow_name",
         1002345678919L, "application_1231111111_1111", "world", null),
-        new TimelineEntityFilters(null, null, null, null, null, null,
-        confFilterList5, null, null),
+        new TimelineEntityFilters.Builder().configFilters(confFilterList5)
+            .build(),
         new TimelineDataToRetrieve(null, null, EnumSet.of(Field.CONFIGS),
         null));
     assertEquals(3, entities.size());
@@ -1326,8 +1309,8 @@ public class TestHBaseTimelineStorageEntities {
     Set<TimelineEntity> entities = reader.getEntities(
         new TimelineReaderContext("cluster1", "user1", "some_flow_name",
         1002345678919L, "application_1231111111_1111", "world", null),
-        new TimelineEntityFilters(null, null, null, null, null, null,
-        confFilterList, null, null),
+        new TimelineEntityFilters.Builder().configFilters(confFilterList)
+            .build(),
         new TimelineDataToRetrieve(list, null, null, null));
     assertEquals(1, entities.size());
     int cfgCnt = 0;
@@ -1357,8 +1340,8 @@ public class TestHBaseTimelineStorageEntities {
     entities = reader.getEntities(
         new TimelineReaderContext("cluster1", "user1", "some_flow_name",
         1002345678919L, "application_1231111111_1111", "world", null),
-        new TimelineEntityFilters(null, null, null, null, null, null,
-        confFilterList1, null, null),
+        new TimelineEntityFilters.Builder().configFilters(confFilterList1)
+            .build(),
         new TimelineDataToRetrieve(confsToRetrieve, null, null, null));
     assertEquals(2, entities.size());
     cfgCnt = 0;
@@ -1386,7 +1369,7 @@ public class TestHBaseTimelineStorageEntities {
     Set<TimelineEntity> es1 = reader.getEntities(
         new TimelineReaderContext("cluster1", "user1", "some_flow_name",
         1002345678919L, "application_1231111111_1111", "world", null),
-        new TimelineEntityFilters(),
+        new TimelineEntityFilters.Builder().build(),
         new TimelineDataToRetrieve(null, list, null, null));
     int metricCnt = 0;
     for (TimelineEntity entity : es1) {
@@ -1414,8 +1397,8 @@ public class TestHBaseTimelineStorageEntities {
     Set<TimelineEntity> entities = reader.getEntities(
         new TimelineReaderContext("cluster1", "user1", "some_flow_name",
         1002345678919L, "application_1231111111_1111", "world", null),
-        new TimelineEntityFilters(null, null, null, null, null, null, null,
-        metricFilterList, null),
+        new TimelineEntityFilters.Builder().metricFilters(metricFilterList)
+            .build(),
         new TimelineDataToRetrieve(null, null, EnumSet.of(Field.METRICS),
         null));
     assertEquals(2, entities.size());
@@ -1428,8 +1411,8 @@ public class TestHBaseTimelineStorageEntities {
     entities = reader.getEntities(
         new TimelineReaderContext("cluster1", "user1", "some_flow_name",
         1002345678919L, "application_1231111111_1111", "world", null),
-        new TimelineEntityFilters(null, null, null, null, null, null, null,
-        metricFilterList, null),
+        new TimelineEntityFilters.Builder().metricFilters(metricFilterList)
+            .build(),
         new TimelineDataToRetrieve(null, null, EnumSet.of(Field.ALL), null));
     assertEquals(2, entities.size());
     metricCnt = 0;
@@ -1446,8 +1429,8 @@ public class TestHBaseTimelineStorageEntities {
     entities = reader.getEntities(
         new TimelineReaderContext("cluster1", "user1", "some_flow_name",
         1002345678919L, "application_1231111111_1111", "world", null),
-        new TimelineEntityFilters(null, null, null, null, null, null, null,
-        metricFilterList1, null),
+        new TimelineEntityFilters.Builder().metricFilters(metricFilterList1)
+            .build(),
         new TimelineDataToRetrieve(null, null, EnumSet.of(Field.METRICS),
         null));
     assertEquals(1, entities.size());
@@ -1465,8 +1448,8 @@ public class TestHBaseTimelineStorageEntities {
     entities = reader.getEntities(
         new TimelineReaderContext("cluster1", "user1", "some_flow_name",
         1002345678919L, "application_1231111111_1111", "world", null),
-        new TimelineEntityFilters(null, null, null, null, null, null, null,
-        metricFilterList2, null),
+        new TimelineEntityFilters.Builder().metricFilters(metricFilterList2)
+            .build(),
         new TimelineDataToRetrieve(null, null, EnumSet.of(Field.METRICS),
         null));
     assertEquals(0, entities.size());
@@ -1477,8 +1460,8 @@ public class TestHBaseTimelineStorageEntities {
     entities = reader.getEntities(
         new TimelineReaderContext("cluster1", "user1", "some_flow_name",
         1002345678919L, "application_1231111111_1111", "world", null),
-        new TimelineEntityFilters(null, null, null, null, null, null, null,
-        metricFilterList3, null),
+        new TimelineEntityFilters.Builder().metricFilters(metricFilterList3)
+            .build(),
         new TimelineDataToRetrieve(null, null, EnumSet.of(Field.METRICS),
         null));
     assertEquals(0, entities.size());
@@ -1489,8 +1472,8 @@ public class TestHBaseTimelineStorageEntities {
     entities = reader.getEntities(
         new TimelineReaderContext("cluster1", "user1", "some_flow_name",
         1002345678919L, "application_1231111111_1111", "world", null),
-        new TimelineEntityFilters(null, null, null, null, null, null, null,
-        metricFilterList4, null),
+        new TimelineEntityFilters.Builder().metricFilters(metricFilterList4)
+            .build(),
         new TimelineDataToRetrieve(null, null, EnumSet.of(Field.METRICS),
         null));
     assertEquals(0, entities.size());
@@ -1501,8 +1484,8 @@ public class TestHBaseTimelineStorageEntities {
     entities = reader.getEntities(
         new TimelineReaderContext("cluster1", "user1", "some_flow_name",
         1002345678919L, "application_1231111111_1111", "world", null),
-        new TimelineEntityFilters(null, null, null, null, null, null, null,
-        metricFilterList5, null),
+        new TimelineEntityFilters.Builder().metricFilters(metricFilterList5)
+            .build(),
         new TimelineDataToRetrieve(null, null, EnumSet.of(Field.METRICS),
         null));
     assertEquals(3, entities.size());
@@ -1519,8 +1502,8 @@ public class TestHBaseTimelineStorageEntities {
     Set<TimelineEntity> entities = reader.getEntities(
         new TimelineReaderContext("cluster1", "user1", "some_flow_name",
         1002345678919L, "application_1231111111_1111", "world", null),
-        new TimelineEntityFilters(null, null, null, null, null, null, null,
-        metricFilterList, null),
+        new TimelineEntityFilters.Builder().metricFilters(metricFilterList)
+            .build(),
         new TimelineDataToRetrieve(null, list, null, null));
     assertEquals(1, entities.size());
     int metricCnt = 0;
@@ -1548,8 +1531,8 @@ public class TestHBaseTimelineStorageEntities {
     entities = reader.getEntities(
         new TimelineReaderContext("cluster1", "user1", "some_flow_name",
         1002345678919L, "application_1231111111_1111", "world", null),
-        new TimelineEntityFilters(null, null, null, null, null, null, null,
-        metricFilterList1, null),
+        new TimelineEntityFilters.Builder().metricFilters(metricFilterList1)
+            .build(),
         new TimelineDataToRetrieve(
         null, metricsToRetrieve, EnumSet.of(Field.METRICS), null));
     assertEquals(2, entities.size());
@@ -1567,8 +1550,10 @@ public class TestHBaseTimelineStorageEntities {
 
     entities = reader.getEntities(new TimelineReaderContext("cluster1", "user1",
         "some_flow_name", 1002345678919L, "application_1231111111_1111",
-        "world", null), new TimelineEntityFilters(null, null, null, null, null,
-        null, null, metricFilterList1, null), new TimelineDataToRetrieve(null,
+        "world", null),
+        new TimelineEntityFilters.Builder().metricFilters(metricFilterList1)
+            .build(),
+        new TimelineDataToRetrieve(null,
         metricsToRetrieve, EnumSet.of(Field.METRICS), Integer.MAX_VALUE));
     assertEquals(2, entities.size());
     metricCnt = 0;
@@ -1602,8 +1587,7 @@ public class TestHBaseTimelineStorageEntities {
     Set<TimelineEntity> entities = reader.getEntities(
         new TimelineReaderContext("cluster1", "user1", "some_flow_name",
         1002345678919L, "application_1231111111_1111", "world", null),
-        new TimelineEntityFilters(null, null, null, null, null, infoFilterList,
-        null, null, null),
+        new TimelineEntityFilters.Builder().infoFilters(infoFilterList).build(),
         new TimelineDataToRetrieve(null, null, EnumSet.of(Field.INFO), null));
     assertEquals(2, entities.size());
     int infoCnt = 0;
@@ -1618,8 +1602,8 @@ public class TestHBaseTimelineStorageEntities {
     entities = reader.getEntities(
         new TimelineReaderContext("cluster1", "user1", "some_flow_name",
         1002345678919L, "application_1231111111_1111", "world", null),
-        new TimelineEntityFilters(null, null, null, null, null, infoFilterList1,
-        null, null, null),
+        new TimelineEntityFilters.Builder().infoFilters(infoFilterList1)
+            .build(),
         new TimelineDataToRetrieve(null, null, EnumSet.of(Field.INFO), null));
     assertEquals(1, entities.size());
     infoCnt = 0;
@@ -1636,8 +1620,8 @@ public class TestHBaseTimelineStorageEntities {
     entities = reader.getEntities(
         new TimelineReaderContext("cluster1", "user1", "some_flow_name",
         1002345678919L, "application_1231111111_1111", "world", null),
-        new TimelineEntityFilters(null, null, null, null, null, infoFilterList2,
-        null, null, null),
+        new TimelineEntityFilters.Builder().infoFilters(infoFilterList2)
+            .build(),
         new TimelineDataToRetrieve(null, null, EnumSet.of(Field.INFO), null));
     assertEquals(0, entities.size());
 
@@ -1647,8 +1631,8 @@ public class TestHBaseTimelineStorageEntities {
     entities = reader.getEntities(
         new TimelineReaderContext("cluster1", "user1", "some_flow_name",
         1002345678919L, "application_1231111111_1111", "world", null),
-        new TimelineEntityFilters(null, null, null, null, null, infoFilterList3,
-        null, null, null),
+        new TimelineEntityFilters.Builder().infoFilters(infoFilterList3)
+            .build(),
         new TimelineDataToRetrieve(null, null, EnumSet.of(Field.INFO), null));
     assertEquals(0, entities.size());
 
@@ -1658,8 +1642,8 @@ public class TestHBaseTimelineStorageEntities {
     entities = reader.getEntities(
         new TimelineReaderContext("cluster1", "user1", "some_flow_name",
         1002345678919L, "application_1231111111_1111", "world", null),
-        new TimelineEntityFilters(null, null, null, null, null, infoFilterList4,
-        null, null, null),
+        new TimelineEntityFilters.Builder().infoFilters(infoFilterList4)
+            .build(),
         new TimelineDataToRetrieve(null, null, EnumSet.of(Field.INFO), null));
     assertEquals(0, entities.size());
 
@@ -1669,8 +1653,8 @@ public class TestHBaseTimelineStorageEntities {
     entities = reader.getEntities(
         new TimelineReaderContext("cluster1", "user1", "some_flow_name",
         1002345678919L, "application_1231111111_1111", "world", null),
-        new TimelineEntityFilters(null, null, null, null, null, infoFilterList5,
-        null, null, null),
+        new TimelineEntityFilters.Builder().infoFilters(infoFilterList5)
+            .build(),
         new TimelineDataToRetrieve(null, null, EnumSet.of(Field.INFO), null));
     assertEquals(3, entities.size());
   }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/b87b72b4/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase-tests/src/test/java/org/apache/hadoop/yarn/server/timelineservice/storage/flow/TestHBaseStorageFlowActivity.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase-tests/src/test/java/org/apache/hadoop/yarn/server/timelineservice/storage/flow/TestHBaseStorageFlowActivity.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase-tests/src/test/java/org/apache/hadoop/yarn/server/timelineservice/storage/flow/TestHBaseStorageFlowActivity.java
index 1af47a6..0923105 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase-tests/src/test/java/org/apache/hadoop/yarn/server/timelineservice/storage/flow/TestHBaseStorageFlowActivity.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase-tests/src/test/java/org/apache/hadoop/yarn/server/timelineservice/storage/flow/TestHBaseStorageFlowActivity.java
@@ -184,8 +184,7 @@ public class TestHBaseStorageFlowActivity {
       Set<TimelineEntity> entities = hbr.getEntities(
           new TimelineReaderContext(cluster, null, null, null, null,
           TimelineEntityType.YARN_FLOW_ACTIVITY.toString(), null),
-          new TimelineEntityFilters(10L, null, null, null, null, null,
-          null, null, null),
+          new TimelineEntityFilters.Builder().entityLimit(10L).build(),
           new TimelineDataToRetrieve());
       assertEquals(1, entities.size());
       for (TimelineEntity e : entities) {
@@ -249,8 +248,7 @@ public class TestHBaseStorageFlowActivity {
       Set<TimelineEntity> entities = hbr.getEntities(
           new TimelineReaderContext(cluster, user, flow, null, null,
           TimelineEntityType.YARN_FLOW_ACTIVITY.toString(), null),
-          new TimelineEntityFilters(10L, null, null, null, null, null,
-          null, null, null),
+          new TimelineEntityFilters.Builder().entityLimit(10L).build(),
           new TimelineDataToRetrieve());
       assertEquals(1, entities.size());
       for (TimelineEntity e : entities) {
@@ -377,8 +375,7 @@ public class TestHBaseStorageFlowActivity {
       Set<TimelineEntity> entities = hbr.getEntities(
           new TimelineReaderContext(cluster, null, null, null, null,
           TimelineEntityType.YARN_FLOW_ACTIVITY.toString(), null),
-          new TimelineEntityFilters(10L, null, null, null, null, null,
-          null, null, null),
+          new TimelineEntityFilters.Builder().entityLimit(10L).build(),
           new TimelineDataToRetrieve());
       assertEquals(1, entities.size());
       for (TimelineEntity e : entities) {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/b87b72b4/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase-tests/src/test/java/org/apache/hadoop/yarn/server/timelineservice/storage/flow/TestHBaseStorageFlowRun.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase-tests/src/test/java/org/apache/hadoop/yarn/server/timelineservice/storage/flow/TestHBaseStorageFlowRun.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase-tests/src/test/java/org/apache/hadoop/yarn/server/timelineservice/storage/flow/TestHBaseStorageFlowRun.java
index e376c6c..e1309e7 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase-tests/src/test/java/org/apache/hadoop/yarn/server/timelineservice/storage/flow/TestHBaseStorageFlowRun.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase-tests/src/test/java/org/apache/hadoop/yarn/server/timelineservice/storage/flow/TestHBaseStorageFlowRun.java
@@ -608,7 +608,7 @@ public class TestHBaseStorageFlowRun {
       Set<TimelineEntity> entities = hbr.getEntities(
           new TimelineReaderContext(cluster, user, flow, null, null,
           TimelineEntityType.YARN_FLOW_RUN.toString(), null),
-          new TimelineEntityFilters(),
+          new TimelineEntityFilters.Builder().build(),
           new TimelineDataToRetrieve(null, metricsToRetrieve, null, null));
       assertEquals(2, entities.size());
       int metricCnt = 0;
@@ -669,7 +669,7 @@ public class TestHBaseStorageFlowRun {
       Set<TimelineEntity> entities = hbr.getEntities(
           new TimelineReaderContext(cluster, user, flow, runid, null,
           TimelineEntityType.YARN_FLOW_RUN.toString(), null),
-          new TimelineEntityFilters(),
+          new TimelineEntityFilters.Builder().build(),
           new TimelineDataToRetrieve());
       assertEquals(1, entities.size());
       for (TimelineEntity timelineEntity : entities) {
@@ -679,8 +679,9 @@ public class TestHBaseStorageFlowRun {
       entities = hbr.getEntities(
           new TimelineReaderContext(cluster, user, flow, runid, null,
           TimelineEntityType.YARN_FLOW_RUN.toString(), null),
-          new TimelineEntityFilters(), new TimelineDataToRetrieve(null, null,
-          EnumSet.of(Field.METRICS), null));
+          new TimelineEntityFilters.Builder().build(),
+          new TimelineDataToRetrieve(null, null,
+              EnumSet.of(Field.METRICS), null));
       assertEquals(1, entities.size());
       for (TimelineEntity timelineEntity : entities) {
         Set<TimelineMetric> timelineMetrics = timelineEntity.getMetrics();
@@ -850,8 +851,9 @@ public class TestHBaseStorageFlowRun {
       Set<TimelineEntity> entities = hbr.getEntities(
           new TimelineReaderContext(cluster, user, flow,
           null, null, TimelineEntityType.YARN_FLOW_RUN.toString(), null),
-          new TimelineEntityFilters(null, 1425016501000L, 1425016502001L, null,
-          null, null, null, null, null), new TimelineDataToRetrieve());
+          new TimelineEntityFilters.Builder().createdTimeBegin(1425016501000L)
+              .createTimeEnd(1425016502001L).build(),
+          new TimelineDataToRetrieve());
       assertEquals(2, entities.size());
       for (TimelineEntity entity : entities) {
         if (!entity.getId().equals("user2@flow_name2/1002345678918") &&
@@ -863,8 +865,9 @@ public class TestHBaseStorageFlowRun {
       entities = hbr.getEntities(
           new TimelineReaderContext(cluster, user, flow, null, null,
           TimelineEntityType.YARN_FLOW_RUN.toString(), null),
-          new TimelineEntityFilters(null, 1425016501050L, null, null, null,
-          null, null, null, null), new TimelineDataToRetrieve());
+          new TimelineEntityFilters.Builder().createdTimeBegin(1425016501050L)
+              .build(),
+          new TimelineDataToRetrieve());
       assertEquals(1, entities.size());
       for (TimelineEntity entity : entities) {
         if (!entity.getId().equals("user2@flow_name2/1002345678918")) {
@@ -874,8 +877,9 @@ public class TestHBaseStorageFlowRun {
       entities = hbr.getEntities(
           new TimelineReaderContext(cluster, user, flow, null, null,
           TimelineEntityType.YARN_FLOW_RUN.toString(), null),
-          new TimelineEntityFilters(null, null, 1425016501050L, null, null,
-          null, null, null, null), new TimelineDataToRetrieve());
+          new TimelineEntityFilters.Builder().createTimeEnd(1425016501050L)
+              .build(),
+          new TimelineDataToRetrieve());
       assertEquals(1, entities.size());
       for (TimelineEntity entity : entities) {
         if (!entity.getId().equals("user2@flow_name2/1002345678919")) {
@@ -941,8 +945,9 @@ public class TestHBaseStorageFlowRun {
       Set<TimelineEntity> entities = hbr.getEntities(
           new TimelineReaderContext(cluster, user, flow, null,
           null, TimelineEntityType.YARN_FLOW_RUN.toString(), null),
-          new TimelineEntityFilters(null, null, null, null, null, null, null,
-          metricFilterList, null), new TimelineDataToRetrieve(null, null,
+          new TimelineEntityFilters.Builder().metricFilters(metricFilterList)
+              .build(),
+          new TimelineDataToRetrieve(null, null,
           EnumSet.of(Field.METRICS), null));
       assertEquals(2, entities.size());
       int metricCnt = 0;
@@ -958,8 +963,9 @@ public class TestHBaseStorageFlowRun {
       entities = hbr.getEntities(
           new TimelineReaderContext(cluster, user, flow, null, null,
           TimelineEntityType.YARN_FLOW_RUN.toString(), null),
-          new TimelineEntityFilters(null, null, null, null, null, null, null,
-          metricFilterList1, null), new TimelineDataToRetrieve(null, null,
+          new TimelineEntityFilters.Builder().metricFilters(metricFilterList1)
+              .build(),
+          new TimelineDataToRetrieve(null, null,
           EnumSet.of(Field.METRICS), null));
       assertEquals(1, entities.size());
       metricCnt = 0;
@@ -974,8 +980,9 @@ public class TestHBaseStorageFlowRun {
       entities = hbr.getEntities(
           new TimelineReaderContext(cluster, user, flow, null, null,
           TimelineEntityType.YARN_FLOW_RUN.toString(), null),
-          new TimelineEntityFilters(null, null, null, null, null, null, null,
-          metricFilterList2, null), new TimelineDataToRetrieve(null, null,
+          new TimelineEntityFilters.Builder().metricFilters(metricFilterList2)
+              .build(),
+          new TimelineDataToRetrieve(null, null,
           EnumSet.of(Field.METRICS), null));
       assertEquals(0, entities.size());
 
@@ -984,8 +991,9 @@ public class TestHBaseStorageFlowRun {
       entities = hbr.getEntities(
           new TimelineReaderContext(cluster, user, flow, null, null,
           TimelineEntityType.YARN_FLOW_RUN.toString(), null),
-          new TimelineEntityFilters(null, null, null, null, null, null, null,
-          metricFilterList3, null), new TimelineDataToRetrieve(null, null,
+          new TimelineEntityFilters.Builder().metricFilters(metricFilterList3)
+              .build(),
+          new TimelineDataToRetrieve(null, null,
           EnumSet.of(Field.METRICS), null));
       assertEquals(0, entities.size());
 
@@ -1005,8 +1013,8 @@ public class TestHBaseStorageFlowRun {
       entities = hbr.getEntities(
           new TimelineReaderContext(cluster, user, flow, null, null,
           TimelineEntityType.YARN_FLOW_RUN.toString(), null),
-          new TimelineEntityFilters(null, null, null, null, null, null, null,
-          metricFilterList4, null),
+          new TimelineEntityFilters.Builder().metricFilters(metricFilterList4)
+              .build(),
           new TimelineDataToRetrieve(null, metricsToRetrieve,
           EnumSet.of(Field.ALL), null));
       assertEquals(2, entities.size());

http://git-wip-us.apache.org/repos/asf/hadoop/blob/b87b72b4/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/reader/TimelineEntityReader.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/reader/TimelineEntityReader.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/reader/TimelineEntityReader.java
index c31ccc1..07e8423 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/reader/TimelineEntityReader.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/reader/TimelineEntityReader.java
@@ -186,7 +186,7 @@ public abstract class TimelineEntityReader extends
    */
   protected void createFiltersIfNull() {
     if (filters == null) {
-      filters = new TimelineEntityFilters();
+      filters = new TimelineEntityFilters.Builder().build();
     }
   }
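
A quick semantic note on this default: judging from the builder and the
private constructor in the class diff below, a bare build() reproduces what
the removed no-argument constructor produced, since null builder fields fall
back to the class defaults. A minimal sketch (field values inferred from the
DEFAULT_* constants shown below):

    TimelineEntityFilters defaults = new TimelineEntityFilters.Builder().build();
    // defaults.getLimit()            == 100            (DEFAULT_LIMIT)
    // defaults.getCreatedTimeBegin() == 0L             (DEFAULT_BEGIN_TIME)
    // defaults.getCreatedTimeEnd()   == Long.MAX_VALUE (DEFAULT_END_TIME)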
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/b87b72b4/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/reader/TimelineEntityFilters.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/reader/TimelineEntityFilters.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/reader/TimelineEntityFilters.java
index dc3e3ec..a415d34 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/reader/TimelineEntityFilters.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/reader/TimelineEntityFilters.java
@@ -107,17 +107,17 @@ import org.apache.hadoop.yarn.server.timelineservice.reader.filter.TimelineKeyVa
  */
 @Private
 @Unstable
-public class TimelineEntityFilters {
-  private long limit;
+public final class TimelineEntityFilters {
+  private final long limit;
   private long createdTimeBegin;
   private long createdTimeEnd;
-  private TimelineFilterList relatesTo;
-  private TimelineFilterList isRelatedTo;
-  private TimelineFilterList infoFilters;
-  private TimelineFilterList configFilters;
-  private TimelineFilterList metricFilters;
-  private TimelineFilterList eventFilters;
-  private String fromId;
+  private final TimelineFilterList relatesTo;
+  private final TimelineFilterList isRelatedTo;
+  private final TimelineFilterList infoFilters;
+  private final TimelineFilterList configFilters;
+  private final TimelineFilterList metricFilters;
+  private final TimelineFilterList eventFilters;
+  private final String fromId;
   private static final long DEFAULT_BEGIN_TIME = 0L;
   private static final long DEFAULT_END_TIME = Long.MAX_VALUE;
 
@@ -127,30 +127,14 @@ public class TimelineEntityFilters {
    */
   public static final long DEFAULT_LIMIT = 100;
 
-  public TimelineEntityFilters() {
-    this(null, null, null, null, null, null, null, null, null);
-  }
-
-  public TimelineEntityFilters(Long entityLimit, Long timeBegin, Long timeEnd,
-      TimelineFilterList entityRelatesTo, TimelineFilterList entityIsRelatedTo,
-      TimelineFilterList entityInfoFilters,
-      TimelineFilterList entityConfigFilters,
-      TimelineFilterList entityMetricFilters,
-      TimelineFilterList entityEventFilters, String fromid) {
-    this(entityLimit, timeBegin, timeEnd, entityRelatesTo, entityIsRelatedTo,
-        entityInfoFilters, entityConfigFilters, entityMetricFilters,
-        entityEventFilters);
-    this.fromId = fromid;
-  }
-
-  public TimelineEntityFilters(
+  private TimelineEntityFilters(
       Long entityLimit, Long timeBegin, Long timeEnd,
       TimelineFilterList entityRelatesTo,
       TimelineFilterList entityIsRelatedTo,
       TimelineFilterList entityInfoFilters,
       TimelineFilterList entityConfigFilters,
       TimelineFilterList  entityMetricFilters,
-      TimelineFilterList entityEventFilters) {
+      TimelineFilterList entityEventFilters, String fromId) {
     if (entityLimit == null || entityLimit < 0) {
       this.limit = DEFAULT_LIMIT;
     } else {
@@ -172,97 +156,119 @@ public class TimelineEntityFilters {
     this.configFilters = entityConfigFilters;
     this.metricFilters = entityMetricFilters;
     this.eventFilters = entityEventFilters;
+    this.fromId = fromId;
   }
 
   public long getLimit() {
     return limit;
   }
 
-  public void setLimit(Long entityLimit) {
-    if (entityLimit == null || entityLimit < 0) {
-      this.limit = DEFAULT_LIMIT;
-    } else {
-      this.limit = entityLimit;
-    }
-  }
-
   public long getCreatedTimeBegin() {
     return createdTimeBegin;
   }
 
-  public void setCreatedTimeBegin(Long timeBegin) {
-    if (timeBegin == null || timeBegin < 0) {
-      this.createdTimeBegin = DEFAULT_BEGIN_TIME;
-    } else {
-      this.createdTimeBegin = timeBegin;
-    }
-  }
-
   public long getCreatedTimeEnd() {
     return createdTimeEnd;
   }
 
-  public void setCreatedTimeEnd(Long timeEnd) {
-    if (timeEnd == null || timeEnd < 0) {
-      this.createdTimeEnd = DEFAULT_END_TIME;
-    } else {
-      this.createdTimeEnd = timeEnd;
-    }
-  }
-
   public TimelineFilterList getRelatesTo() {
     return relatesTo;
   }
 
-  public void setRelatesTo(TimelineFilterList relations) {
-    this.relatesTo = relations;
-  }
-
   public TimelineFilterList getIsRelatedTo() {
     return isRelatedTo;
   }
 
-  public void setIsRelatedTo(TimelineFilterList relations) {
-    this.isRelatedTo = relations;
-  }
-
   public TimelineFilterList getInfoFilters() {
     return infoFilters;
   }
 
-  public void setInfoFilters(TimelineFilterList filters) {
-    this.infoFilters = filters;
-  }
-
   public TimelineFilterList getConfigFilters() {
     return configFilters;
   }
 
-  public void setConfigFilters(TimelineFilterList filters) {
-    this.configFilters = filters;
-  }
-
   public TimelineFilterList getMetricFilters() {
     return metricFilters;
   }
 
-  public void setMetricFilters(TimelineFilterList filters) {
-    this.metricFilters = filters;
-  }
-
   public TimelineFilterList getEventFilters() {
     return eventFilters;
   }
 
-  public void setEventFilters(TimelineFilterList filters) {
-    this.eventFilters = filters;
-  }
-
   public String getFromId() {
     return fromId;
   }
 
-  public void setFromId(String fromId) {
-    this.fromId = fromId;
+  /**
+   * A builder class to build an instance of TimelineEntityFilters.
+   */
+  public static class Builder {
+    private Long entityLimit;
+    private Long createdTimeBegin;
+    private Long createdTimeEnd;
+    private TimelineFilterList relatesToFilters;
+    private TimelineFilterList isRelatedToFilters;
+    private TimelineFilterList entityInfoFilters;
+    private TimelineFilterList entityConfigFilters;
+    private TimelineFilterList entityMetricFilters;
+    private TimelineFilterList entityEventFilters;
+    private String entityFromId;
+
+    public Builder entityLimit(Long limit) {
+      this.entityLimit = limit;
+      return this;
+    }
+
+    public Builder createdTimeBegin(Long timeBegin) {
+      this.createdTimeBegin = timeBegin;
+      return this;
+    }
+
+    public Builder createTimeEnd(Long timeEnd) {
+      this.createdTimeEnd = timeEnd;
+      return this;
+    }
+
+    public Builder relatesTo(TimelineFilterList relatesTo) {
+      this.relatesToFilters = relatesTo;
+      return this;
+    }
+
+    public Builder isRelatedTo(TimelineFilterList isRelatedTo) {
+      this.isRelatedToFilters = isRelatedTo;
+      return this;
+    }
+
+    public Builder infoFilters(TimelineFilterList infoFilters) {
+      this.entityInfoFilters = infoFilters;
+      return this;
+    }
+
+    public Builder configFilters(TimelineFilterList configFilters) {
+      this.entityConfigFilters = configFilters;
+      return this;
+    }
+
+    public Builder metricFilters(TimelineFilterList metricFilters) {
+      this.entityMetricFilters = metricFilters;
+      return this;
+    }
+
+    public Builder eventFilters(TimelineFilterList eventFilters) {
+      this.entityEventFilters = eventFilters;
+      return this;
+    }
+
+    public Builder fromId(String fromId) {
+      this.entityFromId = fromId;
+      return this;
+    }
+
+    public TimelineEntityFilters build() {
+      return new TimelineEntityFilters(entityLimit, createdTimeBegin,
+          createdTimeEnd, relatesToFilters, isRelatedToFilters,
+          entityInfoFilters, entityConfigFilters, entityMetricFilters,
+          entityEventFilters, entityFromId);
+    }
   }
 }
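
To summarize the call-site churn above in one place, here is a small,
self-contained sketch of the before/after shape of a caller. It assumes the
classes from this patch are on the classpath; the limit value and the empty
TimelineFilterList are illustrative placeholders, not taken from any one test.

    import org.apache.hadoop.yarn.server.timelineservice.reader.TimelineEntityFilters;
    import org.apache.hadoop.yarn.server.timelineservice.reader.filter.TimelineFilterList;

    public class FiltersBuilderExample {
      public static void main(String[] args) {
        TimelineFilterList metricFilters = new TimelineFilterList();

        // Before this patch: nine positional, mostly-null arguments.
        // new TimelineEntityFilters(10L, null, null, null, null, null, null,
        //     metricFilters, null);

        // After this patch: only the relevant filters are named. Note the
        // builder method is createTimeEnd (not createdTimeEnd) in this patch.
        TimelineEntityFilters filters = new TimelineEntityFilters.Builder()
            .entityLimit(10L)
            .metricFilters(metricFilters)
            .build();

        System.out.println(filters.getLimit()); // prints 10
      }
    }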

http://git-wip-us.apache.org/repos/asf/hadoop/blob/b87b72b4/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/reader/TimelineReaderWebServices.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/reader/TimelineReaderWebServices.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/reader/TimelineReaderWebServices.java
index 94ac948..360ac20 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/reader/TimelineReaderWebServices.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/reader/TimelineReaderWebServices.java
@@ -1393,10 +1393,8 @@ public class TimelineReaderWebServices {
       DateRange range = parseDateRange(dateRange);
       TimelineEntityFilters entityFilters =
           TimelineReaderWebServicesUtils.createTimelineEntityFilters(
-              limit, null, null, null, null, null, null, null, null,
-              fromId);
-      entityFilters.setCreatedTimeBegin(range.dateStart);
-      entityFilters.setCreatedTimeEnd(range.dateEnd);
+              limit, range.dateStart, range.dateEnd,
+              null, null, null, null, null, null, fromId);
       entities = timelineReaderManager.getEntities(
           TimelineReaderWebServicesUtils.createTimelineReaderContext(
           clusterId, null, null, null, null,




[48/50] [abbrv] hadoop git commit: HDFS-12317. HDFS metrics render error in the page of Github. Contributed by Yiqun Lin.

Posted by st...@apache.org.
HDFS-12317. HDFS metrics render error in the page of Github. Contributed by Yiqun Lin.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/ac12e153
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/ac12e153
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/ac12e153

Branch: refs/heads/HADOOP-13345
Commit: ac12e153a8cddb5def3b7f308c7d4d990b5b0436
Parents: 71bbb86
Author: Yiqun Lin <yq...@apache.org>
Authored: Thu Aug 31 17:23:17 2017 +0800
Committer: Yiqun Lin <yq...@apache.org>
Committed: Thu Aug 31 17:23:17 2017 +0800

----------------------------------------------------------------------
 .../hadoop-common/src/site/markdown/Metrics.md  | 24 ++++++++++----------
 1 file changed, 12 insertions(+), 12 deletions(-)
----------------------------------------------------------------------
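
Why this is a rendering fix rather than a wording change: in GitHub-flavored
Markdown the pipe character delimits table columns, and GitHub splits a cell
on a literal | even when it appears inside backticks, so a cell such as
`s(50|75|90|95|99)thPercentileLatency` breaks its row into extra columns. A
hypothetical one-row illustration:

    | `Metric(50|75)thPercentile` | description |   <- renders as three columns
    | `Metric(50/75)thPercentile` | description |   <- renders as the intended two

Escaping each pipe as \| would also work; this commit opts for slashes.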


http://git-wip-us.apache.org/repos/asf/hadoop/blob/ac12e153/hadoop-common-project/hadoop-common/src/site/markdown/Metrics.md
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/site/markdown/Metrics.md b/hadoop-common-project/hadoop-common/src/site/markdown/Metrics.md
index 4543fac..367d9e0 100644
--- a/hadoop-common-project/hadoop-common/src/site/markdown/Metrics.md
+++ b/hadoop-common-project/hadoop-common/src/site/markdown/Metrics.md
@@ -179,8 +179,8 @@ Each metrics record contains tags such as ProcessName, SessionId, and Hostname a
 | `GenerateEDEKTimeAvgTime` | Average time of generating EDEK in milliseconds |
 | `WarmUpEDEKTimeNumOps` | Total number of warming up EDEK |
 | `WarmUpEDEKTimeAvgTime` | Average time of warming up EDEK in milliseconds |
-| `ResourceCheckTime`*num*`s(50|75|90|95|99)thPercentileLatency` | The 50/75/90/95/99th percentile of NameNode resource check latency in milliseconds. Percentile measurement is off by default, by watching no intervals. The intervals are specified by `dfs.metrics.percentiles.intervals`. |
-| `StorageBlockReport`*num*`s(50|75|90|95|99)thPercentileLatency` | The 50/75/90/95/99th percentile of storage block report latency in milliseconds. Percentile measurement is off by default, by watching no intervals. The intervals are specified by `dfs.metrics.percentiles.intervals`. |
+| `ResourceCheckTime`*num*`s(50/75/90/95/99)thPercentileLatency` | The 50/75/90/95/99th percentile of NameNode resource check latency in milliseconds. Percentile measurement is off by default, by watching no intervals. The intervals are specified by `dfs.metrics.percentiles.intervals`. |
+| `StorageBlockReport`*num*`s(50/75/90/95/99)thPercentileLatency` | The 50/75/90/95/99th percentile of storage block report latency in milliseconds. Percentile measurement is off by default, by watching no intervals. The intervals are specified by `dfs.metrics.percentiles.intervals`. |
 
 FSNamesystem
 ------------
@@ -240,8 +240,8 @@ Each metrics record contains tags such as HAState and Hostname as additional inf
 | `NumInMaintenanceLiveDataNodes` | Number of live Datanodes which are in maintenance state |
 | `NumInMaintenanceDeadDataNodes` | Number of dead Datanodes which are in maintenance state |
 | `NumEnteringMaintenanceDataNodes` | Number of Datanodes that are entering the maintenance state |
-| `FSN(Read|Write)Lock`*OperationName*`NumOps` | Total number of acquiring lock by operations |
-| `FSN(Read|Write)Lock`*OperationName*`AvgTime` | Average time of holding the lock by operations in milliseconds |
+| `FSN(Read/Write)Lock`*OperationName*`NumOps` | Total number of acquiring lock by operations |
+| `FSN(Read/Write)Lock`*OperationName*`AvgTime` | Average time of holding the lock by operations in milliseconds |
 
 JournalNode
 -----------
@@ -308,13 +308,13 @@ Each metrics record contains tags such as SessionId and Hostname as additional i
 | `RamDiskBlocksEvictedWithoutRead` | Total number of blocks evicted in memory without ever being read from memory |
 | `RamDiskBlocksEvictionWindowMsNumOps` | Number of blocks evicted in memory|
 | `RamDiskBlocksEvictionWindowMsAvgTime` | Average time of blocks in memory before being evicted in milliseconds |
-| `RamDiskBlocksEvictionWindows`*num*`s(50|75|90|95|99)thPercentileLatency` | The 50/75/90/95/99th percentile of latency between memory write and eviction in milliseconds. Percentile measurement is off by default, by watching no intervals. The intervals are specified by `dfs.metrics.percentiles.intervals`. |
+| `RamDiskBlocksEvictionWindows`*num*`s(50/75/90/95/99)thPercentileLatency` | The 50/75/90/95/99th percentile of latency between memory write and eviction in milliseconds. Percentile measurement is off by default, by watching no intervals. The intervals are specified by `dfs.metrics.percentiles.intervals`. |
 | `RamDiskBlocksLazyPersisted` | Total number of blocks written to disk by lazy writer |
 | `RamDiskBlocksDeletedBeforeLazyPersisted` | Total number of blocks deleted by application before being persisted to disk |
 | `RamDiskBytesLazyPersisted` | Total number of bytes written to disk by lazy writer |
 | `RamDiskBlocksLazyPersistWindowMsNumOps` | Number of blocks written to disk by lazy writer |
 | `RamDiskBlocksLazyPersistWindowMsAvgTime` | Average time of blocks written to disk by lazy writer in milliseconds |
-| `RamDiskBlocksLazyPersistWindows`*num*`s(50|75|90|95|99)thPercentileLatency` | The 50/75/90/95/99th percentile of latency between memory write and disk persist in milliseconds. Percentile measurement is off by default, by watching no intervals. The intervals are specified by `dfs.metrics.percentiles.intervals`. |
+| `RamDiskBlocksLazyPersistWindows`*num*`s(50/75/90/95/99)thPercentileLatency` | The 50/75/90/95/99th percentile of latency between memory write and disk persist in milliseconds. Percentile measurement is off by default, by watching no intervals. The intervals are specified by `dfs.metrics.percentiles.intervals`. |
 | `FsyncCount` | Total number of fsync |
 | `VolumeFailures` | Total number of volume failures occurred |
 | `ReadBlockOpNumOps` | Total number of read operations |
@@ -380,23 +380,23 @@ contains tags such as Hostname as additional information along with metrics.
 | `TotalMetadataOperations` | Total number (monotonically increasing) of metadata operations. Metadata operations include stat, list, mkdir, delete, move, open and posix_fadvise. |
 | `MetadataOperationRateNumOps` | The number of metadata operations within an interval time of metric |
 | `MetadataOperationRateAvgTime` | Mean time of metadata operations in milliseconds |
-| `MetadataOperationLatency`*num*`s(50|75|90|95|99)thPercentileLatency` | The 50/75/90/95/99th percentile of metadata operations latency in milliseconds. Percentile measurement is off by default, by watching no intervals. The intervals are specified by `dfs.metrics.percentiles.intervals`. |
+| `MetadataOperationLatency`*num*`s(50/75/90/95/99)thPercentileLatency` | The 50/75/90/95/99th percentile of metadata operations latency in milliseconds. Percentile measurement is off by default, by watching no intervals. The intervals are specified by `dfs.metrics.percentiles.intervals`. |
 | `TotalDataFileIos` | Total number (monotonically increasing) of data file io operations |
 | `DataFileIoRateNumOps` | The number of data file io operations within an interval time of metric |
 | `DataFileIoRateAvgTime` | Mean time of data file io operations in milliseconds |
-| `DataFileIoLatency`*num*`s(50|75|90|95|99)thPercentileLatency` | The 50/75/90/95/99th percentile of data file io operations latency in milliseconds. Percentile measurement is off by default, by watching no intervals. The intervals are specified by `dfs.metrics.percentiles.intervals`. |
+| `DataFileIoLatency`*num*`s(50/75/90/95/99)thPercentileLatency` | The 50/75/90/95/99th percentile of data file io operations latency in milliseconds. Percentile measurement is off by default, by watching no intervals. The intervals are specified by `dfs.metrics.percentiles.intervals`. |
 | `FlushIoRateNumOps` | The number of file flush io operations within an interval time of metric |
 | `FlushIoRateAvgTime` | Mean time of file flush io operations in milliseconds |
-| `FlushIoLatency`*num*`s(50|75|90|95|99)thPercentileLatency` | The 50/75/90/95/99th percentile of file flush io operations latency in milliseconds. Percentile measurement is off by default, by watching no intervals. The intervals are specified by `dfs.metrics.percentiles.intervals`. |
+| `FlushIoLatency`*num*`s(50/75/90/95/99)thPercentileLatency` | The 50/75/90/95/99th percentile of file flush io operations latency in milliseconds. Percentile measurement is off by default, by watching no intervals. The intervals are specified by `dfs.metrics.percentiles.intervals`. |
 | `SyncIoRateNumOps` | The number of file sync io operations within an interval time of metric |
 | `SyncIoRateAvgTime` | Mean time of file sync io operations in milliseconds |
-| `SyncIoLatency`*num*`s(50|75|90|95|99)thPercentileLatency` | The 50/75/90/95/99th percentile of file sync io operations latency in milliseconds. Percentile measurement is off by default, by watching no intervals. The intervals are specified by `dfs.metrics.percentiles.intervals`. |
+| `SyncIoLatency`*num*`s(50/75/90/95/99)thPercentileLatency` | The 50/75/90/95/99th percentile of file sync io operations latency in milliseconds. Percentile measurement is off by default, by watching no intervals. The intervals are specified by `dfs.metrics.percentiles.intervals`. |
 | `ReadIoRateNumOps` | The number of file read io operations within an interval time of metric |
 | `ReadIoRateAvgTime` | Mean time of file read io operations in milliseconds |
-| `ReadIoLatency`*num*`s(50|75|90|95|99)thPercentileLatency` | The 50/75/90/95/99th percentile of file read io operations latency in milliseconds. Percentile measurement is off by default, by watching no intervals. The intervals are specified by `dfs.metrics.percentiles.intervals`. |
+| `ReadIoLatency`*num*`s(50/75/90/95/99)thPercentileLatency` | The 50/75/90/95/99th percentile of file read io operations latency in milliseconds. Percentile measurement is off by default, by watching no intervals. The intervals are specified by `dfs.metrics.percentiles.intervals`. |
 | `WriteIoRateNumOps` | The number of file write io operations within an interval time of metric |
 | `WriteIoRateAvgTime` | Mean time of file write io operations in milliseconds |
-| `WriteIoLatency`*num*`s(50|75|90|95|99)thPercentileLatency` | The 50/75/90/95/99th percentile of file write io operations latency in milliseconds. Percentile measurement is off by default, by watching no intervals. The intervals are specified by `dfs.metrics.percentiles.intervals`. |
+| `WriteIoLatency`*num*`s(50/75/90/95/99)thPercentileLatency` | The 50/75/90/95/99th percentile of file write io operations latency in milliseconds. Percentile measurement is off by default, by watching no intervals. The intervals are specified by `dfs.metrics.percentiles.intervals`. |
 | `TotalFileIoErrors` | Total number (monotonically increasing) of file io error operations |
 | `FileIoErrorRateNumOps` | The number of file io error operations within an interval time of metric |
 | `FileIoErrorRateAvgTime` | It measures the mean time in milliseconds from the start of an operation to hitting a failure |




[47/50] [abbrv] hadoop git commit: HADOOP-14670. Increase minimum cmake version for all platforms

Posted by st...@apache.org.
HADOOP-14670. Increase minimum cmake version for all platforms

Signed-off-by: Chris Douglas <cd...@apache.org>


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/71bbb86d
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/71bbb86d
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/71bbb86d

Branch: refs/heads/HADOOP-13345
Commit: 71bbb86d69ac474596f5619d22718e9f7ff5f9dc
Parents: ce79f7b
Author: Allen Wittenauer <aw...@apache.org>
Authored: Wed Jul 19 17:51:14 2017 -0700
Committer: Allen Wittenauer <aw...@apache.org>
Committed: Wed Aug 30 21:47:24 2017 -0700

----------------------------------------------------------------------
 BUILDING.txt                                    |  4 +--
 dev-support/docker/Dockerfile                   | 17 ++++++++++---
 .../hadoop-common/HadoopCommon.cmake            | 26 ++++++++++++--------
 .../hadoop-common/src/CMakeLists.txt            |  2 +-
 .../bzip2/org_apache_hadoop_io_compress_bzip2.h |  2 ++
 .../src/CMakeLists.txt                          |  2 +-
 .../src/main/native/fuse-dfs/CMakeLists.txt     |  2 ++
 .../main/native/libhdfs-tests/CMakeLists.txt    |  2 ++
 .../src/main/native/libhdfs/CMakeLists.txt      |  3 +++
 .../src/CMakeLists.txt                          |  2 +-
 .../maven/plugin/cmakebuilder/CompileMojo.java  |  2 --
 hadoop-tools/hadoop-pipes/src/CMakeLists.txt    |  2 +-
 .../src/CMakeLists.txt                          | 22 ++---------------
 start-build-env.sh                              |  2 ++
 14 files changed, 49 insertions(+), 41 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/71bbb86d/BUILDING.txt
----------------------------------------------------------------------
diff --git a/BUILDING.txt b/BUILDING.txt
index f9cc842..14deec8 100644
--- a/BUILDING.txt
+++ b/BUILDING.txt
@@ -7,7 +7,7 @@ Requirements:
 * JDK 1.8+
 * Maven 3.3 or later
 * ProtocolBuffer 2.5.0
-* CMake 2.6 or newer (if compiling native code), must be 3.0 or newer on Mac
+* CMake 3.1 or newer (if compiling native code)
 * Zlib devel (if compiling native code)
 * openssl devel (if compiling native hadoop-pipes and to get the best HDFS encryption performance)
 * Linux FUSE (Filesystem in Userspace) version 2.6 or above (if compiling fuse_dfs)
@@ -345,7 +345,7 @@ Requirements:
 * JDK 1.8+
 * Maven 3.0 or later
 * ProtocolBuffer 2.5.0
-* CMake 2.6 or newer
+* CMake 3.1 or newer
 * Windows SDK 7.1 or Visual Studio 2010 Professional
 * Windows SDK 8.1 (if building CPU rate control for the container executor)
 * zlib headers (if building native code bindings for zlib)
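
A quick, hypothetical sanity check that a build host satisfies the new floor (not part of the patch):

    $ cmake --version | head -1
    cmake version 3.1.0        # anything at or above 3.1 is sufficient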

http://git-wip-us.apache.org/repos/asf/hadoop/blob/71bbb86d/dev-support/docker/Dockerfile
----------------------------------------------------------------------
diff --git a/dev-support/docker/Dockerfile b/dev-support/docker/Dockerfile
index 1775323..31ac611 100644
--- a/dev-support/docker/Dockerfile
+++ b/dev-support/docker/Dockerfile
@@ -35,7 +35,6 @@ ENV DEBCONF_TERSE true
 RUN apt-get -q update && apt-get -q install --no-install-recommends -y \
     build-essential \
     bzip2 \
-    cmake \
     curl \
     doxygen \
     fuse \
@@ -89,11 +88,22 @@ RUN apt-get -q update && apt-get -q install --no-install-recommends -y ant
 ######
 RUN mkdir -p /opt/maven && \
     curl -L -s -S \
-         http://www-us.apache.org/dist/maven/maven-3/3.3.9/binaries/apache-maven-3.3.9-bin.tar.gz \
+         https://www-us.apache.org/dist/maven/maven-3/3.3.9/binaries/apache-maven-3.3.9-bin.tar.gz \
          -o /opt/maven.tar.gz && \
     tar xzf /opt/maven.tar.gz --strip-components 1 -C /opt/maven
 ENV MAVEN_HOME /opt/maven
-ENV PATH "$PATH:/opt/maven/bin"
+ENV PATH "${PATH}:/opt/maven/bin"
+
+######
+# Install cmake
+######
+RUN mkdir -p /opt/cmake && \
+    curl -L -s -S \
+         https://cmake.org/files/v3.1/cmake-3.1.0-Linux-x86_64.tar.gz \
+         -o /opt/cmake.tar.gz && \
+    tar xzf /opt/cmake.tar.gz --strip-components 1 -C /opt/cmake
+ENV CMAKE_HOME /opt/cmake
+ENV PATH "${PATH}:/opt/cmake/bin"
 
 ######
 # Install findbugs
@@ -104,6 +114,7 @@ RUN mkdir -p /opt/findbugs && \
          -o /opt/findbugs.tar.gz && \
     tar xzf /opt/findbugs.tar.gz --strip-components 1 -C /opt/findbugs
 ENV FINDBUGS_HOME /opt/findbugs
+ENV PATH "${PATH}:/opt/findbugs/bin"
 
 ####
 # Install shellcheck

http://git-wip-us.apache.org/repos/asf/hadoop/blob/71bbb86d/hadoop-common-project/hadoop-common/HadoopCommon.cmake
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/HadoopCommon.cmake b/hadoop-common-project/hadoop-common/HadoopCommon.cmake
index c46d9e5..faabeed 100644
--- a/hadoop-common-project/hadoop-common/HadoopCommon.cmake
+++ b/hadoop-common-project/hadoop-common/HadoopCommon.cmake
@@ -117,19 +117,25 @@ macro(hadoop_set_find_shared_library_without_version)
     endif()
 endmacro()
 
-#
-# Configuration.
-#
 
-# Initialise the shared gcc/g++ flags if they aren't already defined.
-if(NOT DEFINED GCC_SHARED_FLAGS)
-    set(GCC_SHARED_FLAGS "-g -O2 -Wall -pthread -D_FILE_OFFSET_BITS=64")
-endif()
+# set the shared compiler flags
+# support for GNU C/C++, add other compilers as necessary
 
-# Add in support other compilers here, if necessary,
-# the assumption is that GCC or a GCC-compatible compiler is being used.
+if (CMAKE_C_COMPILER_ID STREQUAL "GNU")
+  if(NOT DEFINED GCC_SHARED_FLAGS)
+    find_package(Threads REQUIRED)
+    if(CMAKE_USE_PTHREADS_INIT)
+      set(GCC_SHARED_FLAGS "-g -O2 -Wall -pthread -D_FILE_OFFSET_BITS=64")
+    else()
+      set(GCC_SHARED_FLAGS "-g -O2 -Wall -D_FILE_OFFSET_BITS=64")
+    endif()
+  endif()
+elseif (CMAKE_C_COMPILER_ID STREQUAL "Clang" OR
+        CMAKE_C_COMPILER_ID STREQUAL "AppleClang")
+  set(GCC_SHARED_FLAGS "-g -O2 -Wall -D_FILE_OFFSET_BITS=64")
+endif()
 
-# Set the shared GCC-compatible compiler and linker flags.
+# Set the shared linker flags.
 hadoop_add_compiler_flags("${GCC_SHARED_FLAGS}")
 hadoop_add_linker_flags("${LINKER_SHARED_FLAGS}")
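
One note for reviewers: since CMake 3.0 (policy CMP0025), Apple's compiler reports its id as "AppleClang" rather than "Clang", which is why the patch matches both ids. A condensed equivalent, as an illustrative sketch only:

    if(CMAKE_C_COMPILER_ID MATCHES "^(Apple)?Clang$")
      # same flags for Clang and AppleClang; the GNU branch additionally probes for -pthread
      set(GCC_SHARED_FLAGS "-g -O2 -Wall -D_FILE_OFFSET_BITS=64")
    endif()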
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/71bbb86d/hadoop-common-project/hadoop-common/src/CMakeLists.txt
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/CMakeLists.txt b/hadoop-common-project/hadoop-common/src/CMakeLists.txt
index 10b0f23..b9287c0 100644
--- a/hadoop-common-project/hadoop-common/src/CMakeLists.txt
+++ b/hadoop-common-project/hadoop-common/src/CMakeLists.txt
@@ -20,7 +20,7 @@
 # CMake configuration.
 #
 
-cmake_minimum_required(VERSION 2.6 FATAL_ERROR)
+cmake_minimum_required(VERSION 3.1 FATAL_ERROR)
 
 list(APPEND CMAKE_MODULE_PATH ${CMAKE_SOURCE_DIR}/..)
 include(HadoopCommon)

http://git-wip-us.apache.org/repos/asf/hadoop/blob/71bbb86d/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/io/compress/bzip2/org_apache_hadoop_io_compress_bzip2.h
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/io/compress/bzip2/org_apache_hadoop_io_compress_bzip2.h b/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/io/compress/bzip2/org_apache_hadoop_io_compress_bzip2.h
index fa525bd..a1db6c6 100644
--- a/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/io/compress/bzip2/org_apache_hadoop_io_compress_bzip2.h
+++ b/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/io/compress/bzip2/org_apache_hadoop_io_compress_bzip2.h
@@ -27,7 +27,9 @@
 
 #include "org_apache_hadoop.h"
 
+#ifndef HADOOP_BZIP2_LIBRARY
 #define HADOOP_BZIP2_LIBRARY "libbz2.so.1"
+#endif
 
 
 /* A helper macro to convert the java 'stream-handle' to a bz_stream pointer. */

http://git-wip-us.apache.org/repos/asf/hadoop/blob/71bbb86d/hadoop-hdfs-project/hadoop-hdfs-native-client/src/CMakeLists.txt
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/CMakeLists.txt b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/CMakeLists.txt
index 14b62e7..aa690e2 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/CMakeLists.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/CMakeLists.txt
@@ -16,7 +16,7 @@
 # limitations under the License.
 #
 
-cmake_minimum_required(VERSION 2.6 FATAL_ERROR)
+cmake_minimum_required(VERSION 3.1 FATAL_ERROR)
 
 enable_testing()
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/71bbb86d/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/fuse-dfs/CMakeLists.txt
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/fuse-dfs/CMakeLists.txt b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/fuse-dfs/CMakeLists.txt
index 44b18e6..c1ba3ad 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/fuse-dfs/CMakeLists.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/fuse-dfs/CMakeLists.txt
@@ -16,6 +16,8 @@
 # limitations under the License.
 #
 
+cmake_minimum_required(VERSION 3.1 FATAL_ERROR)
+
 set(CMAKE_SKIP_RPATH TRUE)
 
 # Flatten a list into a string.

http://git-wip-us.apache.org/repos/asf/hadoop/blob/71bbb86d/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfs-tests/CMakeLists.txt
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfs-tests/CMakeLists.txt b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfs-tests/CMakeLists.txt
index 08c9f8b..08fc030 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfs-tests/CMakeLists.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfs-tests/CMakeLists.txt
@@ -16,6 +16,8 @@
 # limitations under the License.
 #
 
+cmake_minimum_required(VERSION 3.1 FATAL_ERROR)
+
 include_directories(
     ${CMAKE_CURRENT_SOURCE_DIR}/../libhdfs/include
     ${GENERATED_JAVAH}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/71bbb86d/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfs/CMakeLists.txt
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfs/CMakeLists.txt b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfs/CMakeLists.txt
index a0cd042..2883585 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfs/CMakeLists.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfs/CMakeLists.txt
@@ -15,6 +15,9 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 #
+
+cmake_minimum_required(VERSION 3.1 FATAL_ERROR)
+
 add_definitions(-DLIBHDFS_DLL_EXPORT)
 
 include_directories(

http://git-wip-us.apache.org/repos/asf/hadoop/blob/71bbb86d/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-nativetask/src/CMakeLists.txt
----------------------------------------------------------------------
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-nativetask/src/CMakeLists.txt b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-nativetask/src/CMakeLists.txt
index bbeece9..ae3b9c6 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-nativetask/src/CMakeLists.txt
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-nativetask/src/CMakeLists.txt
@@ -16,7 +16,7 @@
 # limitations under the License.
 #
 
-cmake_minimum_required(VERSION 2.6 FATAL_ERROR)
+cmake_minimum_required(VERSION 3.1 FATAL_ERROR)
 
 list(APPEND CMAKE_MODULE_PATH ${CMAKE_SOURCE_DIR}/../../../../hadoop-common-project/hadoop-common/)
 include(HadoopCommon)

http://git-wip-us.apache.org/repos/asf/hadoop/blob/71bbb86d/hadoop-maven-plugins/src/main/java/org/apache/hadoop/maven/plugin/cmakebuilder/CompileMojo.java
----------------------------------------------------------------------
diff --git a/hadoop-maven-plugins/src/main/java/org/apache/hadoop/maven/plugin/cmakebuilder/CompileMojo.java b/hadoop-maven-plugins/src/main/java/org/apache/hadoop/maven/plugin/cmakebuilder/CompileMojo.java
index 0196352..9b0bde7 100644
--- a/hadoop-maven-plugins/src/main/java/org/apache/hadoop/maven/plugin/cmakebuilder/CompileMojo.java
+++ b/hadoop-maven-plugins/src/main/java/org/apache/hadoop/maven/plugin/cmakebuilder/CompileMojo.java
@@ -96,8 +96,6 @@ public class CompileMojo extends AbstractMojo {
     validatePlatform();
     runCMake();
     runMake();
-    runMake(); // The second make is a workaround for HADOOP-9215.  It can be
-               // removed when cmake 2.6 is no longer supported.
     long end = System.nanoTime();
     getLog().info("cmake compilation finished successfully in " +
           TimeUnit.MILLISECONDS.convert(end - start, TimeUnit.NANOSECONDS) +

http://git-wip-us.apache.org/repos/asf/hadoop/blob/71bbb86d/hadoop-tools/hadoop-pipes/src/CMakeLists.txt
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-pipes/src/CMakeLists.txt b/hadoop-tools/hadoop-pipes/src/CMakeLists.txt
index 3b0b28c..ff660bf 100644
--- a/hadoop-tools/hadoop-pipes/src/CMakeLists.txt
+++ b/hadoop-tools/hadoop-pipes/src/CMakeLists.txt
@@ -16,7 +16,7 @@
 # limitations under the License.
 #
 
-cmake_minimum_required(VERSION 2.6 FATAL_ERROR)
+cmake_minimum_required(VERSION 3.1 FATAL_ERROR)
 
 list(APPEND CMAKE_MODULE_PATH ${CMAKE_SOURCE_DIR}/../../../hadoop-common-project/hadoop-common)
 include(HadoopCommon)

http://git-wip-us.apache.org/repos/asf/hadoop/blob/71bbb86d/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/CMakeLists.txt
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/CMakeLists.txt b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/CMakeLists.txt
index 07c29bf..7f2b00d 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/CMakeLists.txt
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/CMakeLists.txt
@@ -14,7 +14,7 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-cmake_minimum_required(VERSION 2.6 FATAL_ERROR)
+cmake_minimum_required(VERSION 3.1 FATAL_ERROR)
 
 list(APPEND CMAKE_MODULE_PATH ${CMAKE_SOURCE_DIR}/../../../../../hadoop-common-project/hadoop-common)
 include(HadoopCommon)
@@ -29,25 +29,7 @@ set(GTEST_SRC_DIR ${CMAKE_SOURCE_DIR}/../../../../../hadoop-common-project/hadoo
 string(REGEX MATCH . HCD_ONE "${HADOOP_CONF_DIR}")
 string(COMPARE EQUAL ${HCD_ONE} / HADOOP_CONF_DIR_IS_ABS)
 
-if (CMAKE_VERSION VERSION_LESS "3.1")
-  # subset of CMAKE_<LANG>_COMPILER_ID
-  # https://cmake.org/cmake/help/v3.0/variable/CMAKE_LANG_COMPILER_ID.html
-  if (CMAKE_C_COMPILER_ID STREQUAL "GNU" OR
-      CMAKE_C_COMPILER_ID STREQUAL "Clang" OR
-      CMAKE_C_COMPILER_ID STREQUAL "AppleClang")
-    set (CMAKE_C_FLAGS "-std=c99 -Wall -pedantic-errors ${CMAKE_C_FLAGS}")
-  elseif (CMAKE_C_COMPILER_ID STREQUAL "Intel")
-    set (CMAKE_C_FLAGS "-std=c99 -Wall ${CMAKE_C_FLAGS}")
-  elseif (CMAKE_C_COMPILER_ID STREQUAL "SunPro")
-    set (CMAKE_C_FLAGS "-xc99 ${CMAKE_C_FLAGS}")
-  endif ()
-else ()
-  set (CMAKE_C_STANDARD 99)
-endif ()
-
-# Note: can't use -D_FILE_OFFSET_BITS=64, see MAPREDUCE-4258
-string(REPLACE "-D_FILE_OFFSET_BITS=64" "" CMAKE_C_FLAGS "${CMAKE_C_FLAGS}")
-string(REPLACE "-D_FILE_OFFSET_BITS=64" "" CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS}")
+set (CMAKE_C_STANDARD 99)
 
 include(CheckIncludeFiles)
 check_include_files("sys/types.h;sys/sysctl.h" HAVE_SYS_SYSCTL_H)
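
The simplification above leans on CMake 3.1's built-in language-standard support. A minimal standalone sketch of how `CMAKE_C_STANDARD` behaves (project and file names are ours, for illustration only):

    cmake_minimum_required(VERSION 3.1 FATAL_ERROR)
    project(c99demo C)
    set(CMAKE_C_STANDARD 99)            # cmake injects -std=gnu99/-std=c99 as appropriate
    set(CMAKE_C_STANDARD_REQUIRED ON)   # fail instead of silently falling back
    add_executable(c99demo main.c)      # main.c assumed to exist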

http://git-wip-us.apache.org/repos/asf/hadoop/blob/71bbb86d/start-build-env.sh
----------------------------------------------------------------------
diff --git a/start-build-env.sh b/start-build-env.sh
index 94af7e4..5a18151 100755
--- a/start-build-env.sh
+++ b/start-build-env.sh
@@ -35,7 +35,9 @@ docker build -t "hadoop-build-${USER_ID}" - <<UserSpecificDocker
 FROM hadoop-build
 RUN groupadd --non-unique -g ${GROUP_ID} ${USER_NAME}
 RUN useradd -g ${GROUP_ID} -u ${USER_ID} -k /root -m ${USER_NAME}
+RUN echo "${USER_NAME}\tALL=NOPASSWD: ALL" > "/etc/sudoers.d/hadoop-build-${USER_ID}"
 ENV HOME /home/${USER_NAME}
+
 UserSpecificDocker
 
 # By mapping the .m2 directory you can do an mvn install from


---------------------------------------------------------------------
To unsubscribe, e-mail: common-commits-unsubscribe@hadoop.apache.org
For additional commands, e-mail: common-commits-help@hadoop.apache.org


[27/50] [abbrv] hadoop git commit: YARN-6047 Documentation updates for TimelineService v2 (Contributed by Rohith Sharma)

Posted by st...@apache.org.
YARN-6047 Documentation updates for TimelineService v2 (Contributed by Rohith Sharma)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/bea3e4df
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/bea3e4df
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/bea3e4df

Branch: refs/heads/HADOOP-13345
Commit: bea3e4df76dfcae7de804def7bd39b2b1d6639c9
Parents: 9b08f36
Author: Vrushali C <vr...@apache.org>
Authored: Mon Aug 21 20:29:05 2017 -0700
Committer: Varun Saxena <va...@apache.org>
Committed: Wed Aug 30 11:29:54 2017 +0530

----------------------------------------------------------------------
 hadoop-project/src/site/markdown/index.md.vm    |   8 +-
 .../src/site/markdown/TimelineServiceV2.md      | 333 ++++++++++++++++++-
 2 files changed, 324 insertions(+), 17 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/bea3e4df/hadoop-project/src/site/markdown/index.md.vm
----------------------------------------------------------------------
diff --git a/hadoop-project/src/site/markdown/index.md.vm b/hadoop-project/src/site/markdown/index.md.vm
index 62e21b2..bb7bda2 100644
--- a/hadoop-project/src/site/markdown/index.md.vm
+++ b/hadoop-project/src/site/markdown/index.md.vm
@@ -55,17 +55,15 @@ documentation.
 YARN Timeline Service v.2
 -------------------
 
-We are introducing an early preview (alpha 1) of a major revision of YARN
+We are introducing an early preview (alpha 2) of a major revision of YARN
 Timeline Service: v.2. YARN Timeline Service v.2 addresses two major
 challenges: improving scalability and reliability of Timeline Service, and
 enhancing usability by introducing flows and aggregation.
 
-YARN Timeline Service v.2 alpha 1 is provided so that users and developers
+YARN Timeline Service v.2 alpha 2 is provided so that users and developers
 can test it and provide feedback and suggestions for making it a ready
 replacement for Timeline Service v.1.x. It should be used only in a test
-capacity. Most importantly, security is not enabled. Do not set up or use
-Timeline Service v.2 until security is implemented if security is a
-critical requirement.
+capacity.
 
 More details are available in the
 [YARN Timeline Service v.2](./hadoop-yarn/hadoop-yarn-site/TimelineServiceV2.html)

http://git-wip-us.apache.org/repos/asf/hadoop/blob/bea3e4df/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/TimelineServiceV2.md
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/TimelineServiceV2.md b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/TimelineServiceV2.md
index 678c406..2de305d 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/TimelineServiceV2.md
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/TimelineServiceV2.md
@@ -73,12 +73,8 @@ The following diagram illustrates the design at a high level.
 
 ### <a name="Current_Status"></a>Current Status and Future Plans
 
-YARN Timeline Service v.2 is currently in alpha ("alpha 1"). It is very much work in progress, and
-many things can and will change rapidly. Users must enable Timeline Service v.2 only on a test or
-experimental cluster to test the feature.
-
-Most importantly, **security is not enabled**. Do not set up or use Timeline Service v.2 until
-security is implemented if security is a requirement.
+YARN Timeline Service v.2 is currently in alpha ("alpha 2"). It is a work in progress, and
+many things can and will change rapidly.
 
 A complete end-to-end flow of writes and reads is functional, with Apache HBase as the backend.
 You should be able to start generating data. When enabled, all YARN-generic events are
@@ -95,16 +91,19 @@ resource manager also has its dedicated in-process collector. The reader is curr
 instance. Currently, it is not possible to write to Timeline Service outside the context of a YARN
 application (i.e. no off-cluster client).
 
+Starting from alpha2, Timeline Service v.2 supports simple authorization in terms of a
+configurable whitelist of users and groups who can read timeline data. Cluster admins are
+allowed by default to read timeline data.
+
 When YARN Timeline Service v.2 is disabled, one can expect no functional or performance impact
 on any other existing functionality.
 
 The work to make it truly production-ready continues. Some key items include
 
 * More robust storage fault tolerance
-* Security
 * Support for off-cluster clients
-* More complete and integrated web UI
 * Better support for long-running apps
+* Support for ACLs
 * Offline (time-based periodic) aggregation for flows, users, and queues for reporting and
 analysis
 * Timeline collectors as separate instances from node managers
@@ -144,6 +143,27 @@ New configuration parameters that are introduced with v.2 are marked bold.
 | **`yarn.timeline-service.hbase.coprocessor.app-final-value-retention-milliseconds`** | The setting that controls how long the final value of a metric of a completed app is retained before merging into the flow sum. Defaults to `259200000` (3 days). This should be set in the HBase cluster. |
 | **`yarn.rm.system-metrics-publisher.emit-container-events`** | The setting that controls whether yarn container metrics is published to the timeline server or not by RM. This configuration setting is for ATS V2. Defaults to `false`. |
 
+#### Security Configuration
+
+
+Security can be enabled by setting `yarn.timeline-service.http-authentication.type`
+to `kerberos`, after which the following configuration options are available:
+
+
+| Configuration Property | Description |
+|:---- |:---- |
+| `yarn.timeline-service.http-authentication.type` | Defines the authentication used for the timeline server (collector/reader) HTTP endpoint. Supported values are: `simple` / `kerberos` / #AUTHENTICATION_HANDLER_CLASSNAME#. Defaults to `simple`. |
+| `yarn.timeline-service.http-authentication.simple.anonymous.allowed` | Indicates if anonymous requests are allowed by the timeline server when using 'simple' authentication. Defaults to `true`. |
+| `yarn.timeline-service.http-authentication.kerberos.principal` | The Kerberos principal to be used for the timeline server (collector/reader) HTTP endpoint. |
+| `yarn.timeline-service.http-authentication.kerberos.keytab` | The Kerberos keytab to be used for the timeline server (collector/reader) HTTP endpoint. |
+| `yarn.timeline-service.principal` | The Kerberos principal for the timeline reader. The NM principal is used for the timeline collector, as it runs as an auxiliary service inside the NM. |
+| `yarn.timeline-service.keytab` | The Kerberos keytab for the timeline reader. The NM keytab is used for the timeline collector, as it runs as an auxiliary service inside the NM. |
+| `yarn.timeline-service.delegation.key.update-interval` | Defaults to `86400000` (1 day). |
+| `yarn.timeline-service.delegation.token.renew-interval` | Defaults to `86400000` (1 day). |
+| `yarn.timeline-service.delegation.token.max-lifetime` | Defaults to `604800000` (7 days). |
+| `yarn.timeline-service.read.authentication.enabled` | Enables or disables authorization checks for reading timeline service v2 data. Defaults to `false` (disabled). |
+| `yarn.timeline-service.read.allowed.users` | A comma-separated list of users, followed by a space, followed by a comma-separated list of groups. The listed users and groups are allowed to read timeline data; everyone else is rejected. Defaults to none. If authorization is enabled, this configuration is mandatory. |
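
As a concrete illustration, a minimal, hypothetical `yarn-site.xml` fragment that switches the timeline reader/collector HTTP endpoints to Kerberos (principal and keytab values are placeholders):

```
<property>
  <name>yarn.timeline-service.http-authentication.type</name>
  <value>kerberos</value>
</property>
<property>
  <name>yarn.timeline-service.http-authentication.kerberos.principal</name>
  <value>HTTP/_HOST@EXAMPLE.COM</value>
</property>
<property>
  <name>yarn.timeline-service.http-authentication.kerberos.keytab</name>
  <value>/etc/security/keytabs/spnego.service.keytab</value>
</property>
```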
+
 #### Enabling CORS support
 To enable cross-origin support (CORS) for the Timeline Service v.2, please set the following configuration parameters:
 
@@ -170,7 +190,7 @@ Each step is explained in more detail below.
 
 ##### <a name="Set_up_the_HBase_cluster"> </a>Step 1) Set up the HBase cluster
 The first part is to set up or pick an Apache HBase cluster to use as the storage cluster. The
-version of Apache HBase that is supported with Timeline Service v.2 is 1.2.4. The 1.0.x versions
+version of Apache HBase that is supported with Timeline Service v.2 is 1.2.6. The 1.0.x versions
 do not work with Timeline Service v.2. Later versions of HBase have not been tested with
 Timeline Service.
 
@@ -333,6 +353,15 @@ To write MapReduce framework data to Timeline Service v.2, enable the following
 </property>
 ```
 
+### Upgrade from alpha 1 to alpha 2
+If you are currently running the Timeline Service v.2 alpha 1 version, we recommend the following:
+
+- Clear existing data in the tables (truncate the tables), since the row key for AppToFlow has changed.
+
+- The coprocessor is now a dynamically loaded, table-level coprocessor in alpha 2. We
+recommend dropping the table, replacing the coprocessor jar on HDFS with the alpha 2 one,
+restarting the RegionServers and recreating the `flowrun` table, as sketched below.
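
A rough, hypothetical HBase shell session for the steps above (table names are illustrative and may differ in your deployment; check the schema creator output for the exact names):

    $ hbase shell
    > truncate 'prod.timelineservice.app_flow'     # AppToFlow row key changed in alpha 2
    > disable 'prod.timelineservice.flowrun'
    > drop 'prod.timelineservice.flowrun'
    # replace the coprocessor jar on HDFS with the alpha 2 jar, restart the
    # RegionServers, then re-run the TimelineSchemaCreator to recreate flowrun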
+
 ### <a name="Publishing_of_application_specific_data"></a> Publishing application specific data
 
 This section is for YARN application developers that want to integrate with Timeline Service v.2.
@@ -517,6 +546,8 @@ predicates, an empty list will be returned.
   "daterange=20150711-20150714" returns flows active between these 2 dates.<br/>
   "daterange=20150711-" returns flows active on and after 20150711.<br/>
   "daterange=-20150711" returns flows active on and before 20150711.<br/>
+1. `fromid` - If specified, retrieve the next set of flows starting from the given fromid. The set of entities retrieved is inclusive of the specified fromid.
+   The fromid value should be taken from the FROM_ID info key of an entity in an earlier response, as shown in the sketch below.
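
A hypothetical curl exchange showing fromid-based pagination against the flows endpoint (host name is a placeholder; 8188 is the default reader port):

    $ curl 'http://timeline-reader.example.com:8188/ws/v2/timeline/clusters/test-cluster/flows?limit=2'
    # take the FROM_ID value of the last entity in the response, then:
    $ curl 'http://timeline-reader.example.com:8188/ws/v2/timeline/clusters/test-cluster/flows?limit=2&fromid=test-cluster!1460419200000!sjlee!ds-date'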
 
 #### Example JSON Response:
 
@@ -562,6 +593,7 @@ predicates, an empty list will be returned.
         "info": {
           "SYSTEM_INFO_CLUSTER": "test-cluster",
           "UID": "test-cluster!sjlee!ds-date",
+          "FROM_ID": "test-cluster!1460419200000!sjlee!ds-date",
           "SYSTEM_INFO_FLOW_NAME": "ds-date",
           "SYSTEM_INFO_DATE": 1460419200000,
           "SYSTEM_INFO_USER": "sjlee"
@@ -613,6 +645,8 @@ predicates, an empty list will be returned.
 1. `fields` - Specifies which fields to retrieve. For querying flow runs, only `ALL` or `METRICS` are valid fields.
   Other fields will lead to HTTP 400 (Bad Request) response. If not specified, in response, id, type, createdtime and info fields
   will be returned.
+1. `fromid` - If specified, retrieve the next set of flow run entities starting from the given fromid. The set of entities retrieved is inclusive of the specified fromid.
+   The fromid value should be taken from the FROM_ID info key of an entity in an earlier response.
 
 #### Example JSON Response:
 
@@ -625,6 +659,7 @@ predicates, an empty list will be returned.
         "createdtime": 1460420587974,
         "info": {
           "UID": "test-cluster!sjlee!ds-date!1460420587974",
+          "FROM_ID": "test-cluster!sjlee!ds-date!1460420587974",
           "SYSTEM_INFO_FLOW_RUN_ID": 1460420587974,
           "SYSTEM_INFO_FLOW_NAME": "ds-date",
           "SYSTEM_INFO_FLOW_RUN_END_TIME": 1460420595198,
@@ -641,6 +676,7 @@ predicates, an empty list will be returned.
         "createdtime": 1460420305659,
         "info": {
           "UID": "test-cluster!sjlee!ds-date!1460420305659",
+          "FROM_ID": "test-cluster!sjlee!ds-date!1460420305659",
           "SYSTEM_INFO_FLOW_RUN_ID": 1460420305659,
           "SYSTEM_INFO_FLOW_NAME": "ds-date",
           "SYSTEM_INFO_FLOW_RUN_END_TIME": 1460420311966,
@@ -712,6 +748,7 @@ while querying individual flow runs.
       "isrelatedto": {},
       "info": {
         "UID":"yarn-cluster!varun!QuasiMonteCarlo!1465246348599",
+        "FROM_ID":"yarn-cluster!varun!QuasiMonteCarlo!1465246348599",
         "SYSTEM_INFO_FLOW_RUN_END_TIME":1465246378051,
         "SYSTEM_INFO_FLOW_NAME":"QuasiMonteCarlo",
         "SYSTEM_INFO_USER":"varun",
@@ -816,6 +853,10 @@ none of the apps match the predicates, an empty list will be returned.
   or metricstoretrieve is specified. Ignored otherwise. The maximum possible value for metricslimit can be maximum value of
   Integer. If it is not specified or has a value less than 1, and metrics have to be retrieved, then metricslimit will be
   considered as 1 i.e. latest single value of metric(s) will be returned.
+1. `metricsTimeStart` - If specified, then metrics for the entity after this timestamp are returned.
+1. `metricsTimeEnd` - If specified, then metrics for the entity before this timestamp are returned.
+1. `fromid` - If specified, retrieve the next set of application entities starting from the given fromid. The set of entities retrieved is inclusive of the specified fromid.
+   The fromid value should be taken from the FROM_ID info key of an entity in an earlier response.
 
 #### Example JSON Response:
 
@@ -830,6 +871,7 @@ none of the apps match the predicates, an empty list will be returned.
         "configs": { },
         "info": {
           "UID": "yarn-cluster!application_1465246237936_0001"
+          "FROM_ID": "yarn-cluster!varun!QuasiMonteCarlo!1465246348599!application_1465246237936_0001",
         },
         "relatesto": { }
       },
@@ -843,6 +885,7 @@ none of the apps match the predicates, an empty list will be returned.
         "configs": { },
         "info": {
           "UID": "yarn-cluster!application_1464983628730_0005"
+          "FROM_ID": "yarn-cluster!varun!QuasiMonteCarlo!1465246348599!application_1464983628730_0005",
         },
         "relatesto": { }
       }
@@ -943,6 +986,10 @@ match the predicates, an empty list will be returned.
   or metricstoretrieve is specified. Ignored otherwise. The maximum possible value for metricslimit can be maximum value of
   Integer. If it is not specified or has a value less than 1, and metrics have to be retrieved, then metricslimit will be
   considered as 1 i.e. latest single value of metric(s) will be returned.
+1. `metricsTimeStart` - If specified, then metrics for the entity after this timestamp are returned.
+1. `metricsTimeEnd` - If specified, then metrics for the entity before this timestamp are returned.
+1. `fromid` - If specified, retrieve the next set of application entities starting from the given fromid. The set of entities retrieved is inclusive of the specified fromid.
+   The fromid value should be taken from the FROM_ID info key of an entity in an earlier response.
 
 #### Example JSON Response:
 
@@ -955,6 +1002,7 @@ match the predicates, an empty list will be returned.
         "createdtime": 1460419580171,
         "info": {
           "UID": "test-cluster!sjlee!ds-date!1460419580171!application_1460419579913_0002"
+          "FROM_ID": "test-cluster!sjlee!ds-date!1460419580171!application_1460419579913_0002",
         },
         "configs": {},
         "isrelatedto": {},
@@ -1019,6 +1067,8 @@ and app id.
   or metricstoretrieve is specified. Ignored otherwise. The maximum possible value for metricslimit can be maximum value of
   Integer. If it is not specified or has a value less than 1, and metrics have to be retrieved, then metricslimit will be
   considered as 1 i.e. latest single value of metric(s) will be returned.
+1. `metricsTimeStart` - If specified, then metrics for the entity after this timestamp are returned.
+1. `metricsTimeEnd` - If specified, then metrics for the entity before this timestamp are returned.
 
 #### Example JSON Response:
 
@@ -1043,7 +1093,7 @@ and app id.
 1. If flow context information cannot be retrieved or application for the given app id cannot be found, HTTP 404 (Not Found) is returned.
 1. For non-recoverable errors while retrieving data, HTTP 500 (Internal Server Error) is returned.
 
-### <a name="REST_API_LIST_ENTITIES"></a>Query generic entities
+### <a name="REST_API_LIST_ENTITIES_WITH_IN_SCOPE_OF_APPLICATION"></a>Query generic entities within the scope of an application
 
 With this API, you can query generic entities identified by cluster ID, application ID and
 per-framework entity type. If the REST endpoint without the cluster name is used, the cluster
@@ -1143,6 +1193,10 @@ If none of the entities match the predicates, an empty list will be returned.
   or metricstoretrieve is specified. Ignored otherwise. The maximum possible value for metricslimit can be maximum value of
   Integer. If it is not specified or has a value less than 1, and metrics have to be retrieved, then metricslimit will be
   considered as 1 i.e. latest single value of metric(s) will be returned.
+1. `metricsTimeStart` - If specified, then metrics for the entity after this timestamp are returned.
+1. `metricsTimeEnd` - If specified, then metrics for the entity before this timestamp are returned.
+1. `fromid` - If specified, retrieve the next set of generic entities starting from the given fromid. The set of entities retrieved is inclusive of the specified fromid.
+   The fromid value should be taken from the FROM_ID info key of an entity in an earlier response.
 
 #### Example JSON Response:
 
@@ -1157,6 +1211,7 @@ If none of the entities match the predicates, an empty list will be returned.
         "configs": { },
         "info": {
           "UID": "yarn-cluster!application_1465246237936_0001!YARN_APPLICATION_ATTEMPT!appattempt_1465246237936_0001_000001"
+          "FROM_ID": "yarn-cluster!sjlee!ds-date!1460419580171!application_1465246237936_0001!YARN_APPLICATION_ATTEMPT!0!appattempt_1465246237936_0001_000001"
         },
         "relatesto": { }
       },
@@ -1170,6 +1225,7 @@ If none of the entities match the predicates, an empty list will be returned.
         "configs": { },
         "info": {
           "UID": "yarn-cluster!application_1465246237936_0001!YARN_APPLICATION_ATTEMPT!appattempt_1465246237936_0001_000002"
+          "FROM_ID": "yarn-cluster!sjlee!ds-date!1460419580171!application_1465246237936_0001!YARN_APPLICATION_ATTEMPT!0!appattempt_1465246237936_0001_000002"
         },
         "relatesto": { }
       }
@@ -1182,7 +1238,142 @@ If none of the entities match the predicates, an empty list will be returned.
 1. If flow context information cannot be retrieved, HTTP 404 (Not Found) is returned.
 1. For non-recoverable errors while retrieving data, HTTP 500 (Internal Server Error) is returned.
 
-### <a name="REST_API_LIST_ENTITY"></a>Query generic entity
+### <a name="REST_API_LIST_ENTITIES"></a>Query generic entities
+
+With this API, you can query generic entities per user, identified by cluster ID, doAsUser and
+entity type. If the REST endpoint without the cluster name is used, the cluster
+specified by the configuration `yarn.resourcemanager.cluster-id` in `yarn-site.xml` is taken.
+If the number of matching entities is more than the limit, the most recent
+entities up to the limit will be returned. This endpoint can be used to query generic entities which
+clients put into the backend. For instance, we can query user entities by specifying the entity type as `TEZ_DAG_ID`.
+If none of the entities match the predicates, an empty list will be returned.
+**Note**: As of today, only those entities which are published with a doAsUser different from the application owner can be queried.
+
+#### HTTP request:
+
+    GET /ws/v2/timeline/clusters/{cluster name}/users/{userid}/entities/{entitytype}
+
+    or
+
+    GET /ws/v2/timeline/users/{userid}/entities/{entitytype}
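
For instance, a hypothetical query for the `TEZ_DAG_ID` entities shown in the example response below (host name is a placeholder):

    $ curl 'http://timeline-reader.example.com:8188/ws/v2/timeline/clusters/yarn-cluster/users/sjlee/entities/TEZ_DAG_ID?limit=10'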
+
+#### Query Parameters Supported:
+
+1. `limit` - If specified, defines the number of entities to return. The maximum possible value for limit is the maximum value of Long. If it is not specified
+  or has a value less than 0, then limit will be considered as 100.
+1. `createdtimestart` - If specified, then only entities created after this timestamp are returned.
+1. `createdtimeend` - If specified, then only entities created before this timestamp are returned.
+1. `relatesto` - If specified, matched entities must relate to or not relate to the given entities associated with an entity type.
+  relatesto is represented as an expression of the form:<br/>
+  "(&lt;entitytype&gt;:&lt;entityid&gt;:&lt;entityid&gt;...,&lt;entitytype&gt;:&lt;entityid&gt;:&lt;entityid&gt;...) &lt;op&gt; !(&lt;entitytype&gt;:&lt;entityid&gt;:&lt;entityid&gt;...,&lt;entitytype&gt;:&lt;entityid&gt;:&lt;entityid&gt;...)".<br/>
+  If the relatesto expression has entity type - entity id(s) relations specified within enclosing brackets preceded by "!", entities with
+  these relations in their relatesto field will not be returned. For expressions or subexpressions without "!", all entities which have the specified
+  relations in their relatesto field will be returned. "op" is a logical operator and can be either AND or OR. An entity type can be followed by any number
+  of entity id(s), and any number of ANDs and ORs can be combined to create complex expressions. Brackets can be used to group expressions together.<br/>
+  _For example_: relatesto can be "(((type1:id1:id2:id3,type3:id9) AND !(type2:id7:id8)) OR (type1:id4))".<br/>
+  Please note that URL-unsafe characters such as spaces will have to be suitably encoded.
+1. `isrelatedto` - If specified, matched entities must be related to or not related to the given entities associated with an entity type. isrelatedto is
+  represented in the same form as relatesto.
+1. `infofilters` - If specified, matched entities must have exact matches to the given info key and must be either equal or not equal to
+  the given value. The info key is a string but the value can be any object. infofilters are represented as an expression of the form:<br/>
+  "(&lt;key&gt; &lt;compareop&gt; &lt;value&gt;) &lt;op&gt; (&lt;key&gt; &lt;compareop&gt; &lt;value&gt;)".<br/>
+  Here op can be either AND or OR, and compareop can be one of "eq", "ne" or "ene".<br/>
+  "eq" means equals, "ne" means not equals (existence of the key is not required for a match) and "ene" means not equals but existence of the key is
+  required. We can combine any number of ANDs and ORs to create complex expressions. Brackets can be used to group expressions together.<br/>
+  _For example_: infofilters can be "(((infokey1 eq value1) AND (infokey2 ne value1)) OR (infokey1 ene value3))".<br/>
+  Note: if the value is an object then it can be given in JSON format without any spaces.<br/>
+  _For example_: infofilters can be (infokey1 eq {"&lt;key&gt;":"&lt;value&gt;","&lt;key&gt;":"&lt;value&gt;"...}).<br/>
+  Please note that URL-unsafe characters such as spaces will have to be suitably encoded.
+1. `conffilters` - If specified, matched entities must have exact matches to the given config name and must be either equal or not equal
+  to the given config value. Both the config name and value must be strings. conffilters are represented in the same form as infofilters.
+1. `metricfilters` - If specified, matched entities must have exact matches to the given metric and satisfy the specified relation with the
+  metric value. The metric id must be a string and the metric value must be an integral value. metricfilters are represented as an expression of the form:<br/>
+  "(&lt;metricid&gt; &lt;compareop&gt; &lt;metricvalue&gt;) &lt;op&gt; (&lt;metricid&gt; &lt;compareop&gt; &lt;metricvalue&gt;)"<br/>
+  Here op can be either AND or OR, and compareop can be one of "eq", "ne", "ene", "gt", "ge", "lt" and "le".<br/>
+  "eq" means equals, "ne" means not equals (existence of the metric is not required for a match), "ene" means not equals but existence of the metric is
+  required, "gt" means greater than, "ge" means greater than or equals, "lt" means less than and "le" means less than or equals. We can combine
+  any number of ANDs and ORs to create complex expressions. Brackets can be used to group expressions together.<br/>
+  _For example_: metricfilters can be "(((metric1 eq 50) AND (metric2 gt 40)) OR (metric1 lt 20))".<br/>
+  This is in essence an expression equivalent to "(metric1 == 50 AND metric2 &gt; 40) OR (metric1 &lt; 20)".<br/>
+  Please note that URL-unsafe characters such as spaces will have to be suitably encoded.
+1. `eventfilters` - If specified, matched entities must contain or not contain the given events depending on the expression. eventfilters is
+  represented as an expression of the form:<br/>
+  "(&lt;eventid&gt;,&lt;eventid&gt;) &lt;op&gt; !(&lt;eventid&gt;,&lt;eventid&gt;,&lt;eventid&gt;)".<br/>
+  Here, "!" means that none of the comma-separated events within the brackets following "!" may exist for a match to occur.
+  If "!" is not specified, the specified events within the enclosed brackets must exist. op is a logical operator and can be either AND or OR.
+  We can combine any number of ANDs and ORs to create complex expressions. Brackets can be used to group expressions together.<br/>
+  _For example_: eventfilters can be "(((event1,event2) AND !(event4)) OR (event3,event7,event5))".<br/>
+  Please note that URL-unsafe characters such as spaces will have to be suitably encoded.
+1. `metricstoretrieve` - If specified, defines which metrics to retrieve or which ones not to retrieve and send back in response.
+  metricstoretrieve can be an expression of the form :<br/>
+  (&lt;metricprefix&gt;,&lt;metricprefix&gt;,&lt;metricprefix&gt;,&lt;metricprefix&gt;...)<br/>
+  This specifies a comma separated list of metric id prefixes. Only metrics matching any of the prefixes will be retrieved. Brackets are optional for
+  the simple expression. Alternatively, expressions can be of the form:<br/>
+  !(&lt;metricprefix&gt;,&lt;metricprefix&gt;,&lt;metricprefix&gt;,&lt;metricprefix&gt;...)<br/>
+  This specifies a comma separated list of metric id prefixes. Only metrics not matching any of the prefixes will be retrieved.<br/>
+  If metricstoretrieve is specified, metrics will be retrieved irrespective of whether `METRICS` is specified in fields query param
+  or not. Please note that URL unsafe characters such as spaces will have to be suitably encoded.
+1. `confstoretrieve` - If specified, defines which configs to retrieve or which ones not to retrieve and send back in response.
+  confstoretrieve can be an expression of the form :<br/>
+  (&lt;config\_name\_prefix&gt;,&lt;config\_name\_prefix&gt;,&lt;config\_name\_prefix&gt;,&lt;config\_name\_prefix&gt;...)<br/>
+  This specifies a comma separated list of config name prefixes. Only configs matching any of the prefixes will be retrieved. Brackets are optional for
+  the simple expression. Alternatively, expressions can be of the form:<br/>
+  !(&lt;config\_name\_prefix&gt;,&lt;config\_name\_prefix&gt;,&lt;config\_name\_prefix&gt;,&lt;config\_name\_prefix&gt;...)<br/>
+  This specifies a comma separated list of config name prefixes. Only configs not matching any of the prefixes will be retrieved.<br/>
+  If confstoretrieve is specified, configs will be retrieved irrespective of whether `CONFIGS` is specified in fields query param
+  or not. Please note that URL unsafe characters such as spaces will have to be suitably encoded.
+1. `fields` - Specifies which fields to retrieve. Possible values for fields can be `EVENTS`, `INFO`, `CONFIGS`, `METRICS`, `RELATES_TO`,
+ `IS_RELATED_TO` and `ALL`. All fields will be retrieved if `ALL` is specified. Multiple fields can be specified as a comma-separated list.
+  If fields is not specified, in response, entity id, entity type, createdtime and UID in info field will be returned.
+1. `metricslimit` - If specified, defines the number of metrics to return. Considered only if fields contains METRICS/ALL
+  or metricstoretrieve is specified. Ignored otherwise. The maximum possible value for metricslimit can be maximum value of
+  Integer. If it is not specified or has a value less than 1, and metrics have to be retrieved, then metricslimit will be
+  considered as 1 i.e. latest single value of metric(s) will be returned.
+1. `metricsTimeStart` - If specified, then metrics for the entity after this timestamp are returned.
+1. `metricsTimeEnd` - If specified, then metrics for the entity before this timestamp are returned.
+1. `fromid` - If specified, retrieve the next set of generic entities starting from the given fromid. The set of entities retrieved is inclusive of the specified fromid.
+   The fromid value should be taken from the FROM_ID info key of an entity in an earlier response.
+
+#### Example JSON Response:
+
+    [
+      {
+        "metrics": [ ],
+        "events": [ ],
+        "type": "TEZ_DAG_ID",
+        "id": "dag_1465246237936_0001_000001",
+        "createdtime": 1465246358873,
+        "isrelatedto": { },
+        "configs": { },
+        "info": {
+          "UID": "yarn-cluster!sjlee!TEZ_DAG_ID!0!dag_1465246237936_0001_000001"
+          "FROM_ID": "sjlee!yarn-cluster!TEZ_DAG_ID!0!dag_1465246237936_0001_000001"
+        },
+        "relatesto": { }
+      },
+      {
+        "metrics": [ ],
+        "events": [ ],
+        "type": "TEZ_DAG_ID",
+        "id": "dag_1465246237936_0001_000002",
+        "createdtime": 1465246359045,
+        "isrelatedto": { },
+        "configs": { },
+        "info": {
+          "UID": "yarn-cluster!sjlee!TEZ_DAG_ID!0!dag_1465246237936_0001_000002!userX"
+          "FROM_ID": "sjlee!yarn-cluster!TEZ_DAG_ID!0!dag_1465246237936_0001_000002!userX"
+        },
+        "relatesto": { }
+      }
+    ]
+
+#### Response Codes
+
+1. If successful, an HTTP 200 (OK) response is returned.
+1. If any problem occurs in parsing the request, HTTP 400 (Bad Request) is returned.
+1. For non-recoverable errors while retrieving data, HTTP 500 (Internal Server Error) is returned.
+
+### <a name="REST_API_LIST_ENTITY"></a>Query generic entity within the scope of an application
 
 With this API, you can query a specific generic entity identified by cluster ID, application ID,
 per-framework entity type and entity ID. If the REST endpoint without the cluster name is used, the
@@ -1236,6 +1427,9 @@ container ID. Similarly, application attempt can be queried by specifying entity
   or metricstoretrieve is specified. Ignored otherwise. The maximum possible value for metricslimit can be maximum value of
   Integer. If it is not specified or has a value less than 1, and metrics have to be retrieved, then metricslimit will be
   considered as 1 i.e. latest single value of metric(s) will be returned.
+1. `metricsTimeStart` - If specified, then metrics for the entity after this timestamp are returned.
+1. `metricsTimeEnd` - If specified, then metrics for the entity before this timestamp are returned.
+1. `entityidprefix` - Defines the id prefix of the entity to be fetched. If specified, entity retrieval will be faster.
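
A hypothetical request for the application attempt entity shown in the example response below (host name is a placeholder; the endpoint is the apps-scoped generic entity URL):

    $ curl 'http://timeline-reader.example.com:8188/ws/v2/timeline/clusters/yarn-cluster/apps/application_1465246237936_0001/entities/YARN_APPLICATION_ATTEMPT/appattempt_1465246237936_0001_000001?fields=ALL'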
 
 #### Example JSON Response:
 
@@ -1248,7 +1442,8 @@ container ID. Similarly, application attempt can be queried by specifying entity
       "isrelatedto": { },
       "configs": { },
       "info": {
-        "UID": "yarn-cluster!application_1465246237936_0001!YARN_APPLICATION_ATTEMPT!appattempt_1465246237936_0001_000001"
+        "UID": "yarn-cluster!application_1465246237936_0001!YARN_APPLICATION_ATTEMPT!0!appattempt_1465246237936_0001_000001"
+        "FROM_ID": "yarn-cluster!sjlee!ds-date!1460419580171!application_1465246237936_0001!YARN_APPLICATION_ATTEMPT!0!appattempt_1465246237936_0001_000001"
       },
       "relatesto": { }
     }
@@ -1259,3 +1454,117 @@ container ID. Similarly, application attempt can be queried by specifying entity
 1. If any problem occurs in parsing request, HTTP 400 (Bad Request) is returned.
 1. If flow context information cannot be retrieved or entity for the given entity id cannot be found, HTTP 404 (Not Found) is returned.
 1. For non-recoverable errors while retrieving data, HTTP 500 (Internal Server Error) is returned.
+
+### <a name="REST_API_LIST_ENTITIES"></a>Query generic entity
+
+With this API, you can query a specific generic entity per user, identified by cluster ID, doAsUser,
+entity type and entity ID. If the REST endpoint without the cluster name is used, the cluster
+specified by the configuration `yarn.resourcemanager.cluster-id` in `yarn-site.xml` is taken.
+This endpoint can be used to query a generic entity which
+clients put into the backend. For instance, we can query a user entity by specifying the entity type as `TEZ_DAG_ID`.
+If no entity matches the predicates, an empty list will be returned.
+**Note**: As of today, only those entities which are published with a doAsUser different from the application owner can be queried.
+
+#### HTTP request:
+
+    GET /ws/v2/timeline/clusters/{cluster name}/users/{userid}/entities/{entitytype}/{entityid}
+
+    or
+
+    GET /ws/v2/timeline/users/{userid}/entities/{entitytype}/{entityid}
+
+#### Query Parameters Supported:
+
+1. `metricstoretrieve` - If specified, defines which metrics to retrieve or which ones not to retrieve and send back in response.
+  metricstoretrieve can be an expression of the form :<br/>
+  (&lt;metricprefix&gt;,&lt;metricprefix&gt;,&lt;metricprefix&gt;,&lt;metricprefix&gt;...)<br/>
+  This specifies a comma separated list of metric id prefixes. Only metrics matching any of the prefixes will be retrieved. Brackets are optional for
+  the simple expression. Alternatively, expressions can be of the form:<br/>
+  !(&lt;metricprefix&gt;,&lt;metricprefix&gt;,&lt;metricprefix&gt;,&lt;metricprefix&gt;...)<br/>
+  This specifies a comma separated list of metric id prefixes. Only metrics not matching any of the prefixes will be retrieved.<br/>
+  If metricstoretrieve is specified, metrics will be retrieved irrespective of whether `METRICS` is specified in fields query param
+  or not. Please note that URL unsafe characters such as spaces will have to be suitably encoded.
+1. `confstoretrieve` - If specified, defines which configs to retrieve or which ones not to retrieve and send back in response.
+  confstoretrieve can be an expression of the form :<br/>
+  (&lt;config\_name\_prefix&gt;,&lt;config\_name\_prefix&gt;,&lt;config\_name\_prefix&gt;,&lt;config\_name\_prefix&gt;...)<br/>
+  This specifies a comma separated list of config name prefixes. Only configs matching any of the prefixes will be retrieved. Brackets are optional for
+  the simple expression. Alternatively, expressions can be of the form:<br/>
+  !(&lt;config\_name\_prefix&gt;,&lt;config\_name\_prefix&gt;,&lt;config\_name\_prefix&gt;,&lt;config\_name\_prefix&gt;...)<br/>
+  This specifies a comma separated list of config name prefixes. Only configs not matching any of the prefixes will be retrieved.<br/>
+  If confstoretrieve is specified, configs will be retrieved irrespective of whether `CONFIGS` is specified in fields query param
+  or not. Please note that URL unsafe characters such as spaces will have to be suitably encoded.
+1. `fields` - Specifies which fields to retrieve. Possible values for fields can be `EVENTS`, `INFO`, `CONFIGS`, `METRICS`, `RELATES_TO`,
+ `IS_RELATED_TO` and `ALL`. All fields will be retrieved if `ALL` is specified. Multiple fields can be specified as a comma-separated list.
+  If fields is not specified, in response, entity id, entity type, createdtime and UID in info field will be returned.
+1. `metricslimit` - If specified, defines the number of metrics to return. Considered only if fields contains METRICS/ALL
+  or metricstoretrieve is specified. Ignored otherwise. The maximum possible value for metricslimit can be maximum value of
+  Integer. If it is not specified or has a value less than 1, and metrics have to be retrieved, then metricslimit will be
+  considered as 1 i.e. latest single value of metric(s) will be returned.
+1. `metricsTimeStart` - If specified, then metrics for the entity after this timestamp are returned.
+1. `metricsTimeEnd` - If specified, then metrics for the entity before this timestamp are returned.
+1. `fromid` - If specified, retrieve the next set of generic entities starting from the given fromid. The set of entities retrieved is inclusive of the specified fromid.
+   The fromid value should be taken from the FROM_ID info key of an entity in an earlier response.
+
+#### Example JSON Response:
+
+    [
+      {
+        "metrics": [ ],
+        "events": [ ],
+        "type": "TEZ_DAG_ID",
+        "id": "dag_1465246237936_0001_000001",
+        "createdtime": 1465246358873,
+        "isrelatedto": { },
+        "configs": { },
+        "info": {
+          "UID": "yarn-cluster!sjlee!TEZ_DAG_ID!0!dag_1465246237936_0001_000001!userX"
+          "FROM_ID": "sjlee!yarn-cluster!TEZ_DAG_ID!0!dag_1465246237936_0001_000001!userX"
+        },
+        "relatesto": { }
+      }
+    ]
+
+#### Response Codes
+
+1. If successful, an HTTP 200 (OK) response is returned.
+1. If any problem occurs in parsing the request, HTTP 400 (Bad Request) is returned.
+1. For non-recoverable errors while retrieving data, HTTP 500 (Internal Server Error) is returned.
+
+### <a name="REST_API_LIST_ENTITY_TYPES"></a>Query generic entity types
+
+With this API, you can query the set of available entity types for a given app id. If the REST endpoint without the cluster name is used, the cluster specified by the configuration `yarn.resourcemanager.cluster-id` in `yarn-site.xml` is taken. If the optional query parameters userid, flow name and flow run id are not specified, they will be looked up from the flow context information stored in the underlying storage implementation, based on app id and cluster id.
+
+#### HTTP request:
+
+    GET /ws/v2/timeline/apps/{appid}/entity-types
+
+    or
+
+    GET /ws/v2/timeline/clusters/{clusterid}/apps/{appid}/entity-types
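
For example, a hypothetical request listing the entity types for an application (host name is a placeholder):

    $ curl 'http://timeline-reader.example.com:8188/ws/v2/timeline/apps/application_1465246237936_0001/entity-types'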
+
+#### Query Parameters Supported:
+
+1. `userid` - If specified, the entity must belong to this user. This query param must be specified along with the flowname and flowrunid query params, otherwise it will be ignored.
+  If userid, flowname and flowrunid are not specified, the timeline reader will fetch flow context information based on cluster and appid while executing the query.
+1. `flowname` - If specified, the entity must belong to this flow name. This query param must be specified along with the userid and flowrunid query params, otherwise it will be ignored.
+  If userid, flowname and flowrunid are not specified, the timeline reader will fetch flow context information based on cluster and appid while executing the query.
+1. `flowrunid` - If specified, the entity must belong to this flow run id. This query param must be specified along with the userid and flowname query params, otherwise it will be ignored.
+  If userid, flowname and flowrunid are not specified, the timeline reader will fetch flow context information based on cluster and appid while executing the query.
+
+#### Example JSON Response:
+
+    [
+      "YARN_APPLICATION_ATTEMPT",
+      "YARN_CONTAINER",
+      "MAPREDUCE_JOB",
+      "MAPREDUCE_TASK",
+      "MAPREDUCE_TASK_ATTEMPT"
+    ]
+
+#### Response Codes
+
+1. If successful, an HTTP 200 (OK) response is returned.
+1. If any problem occurs in parsing the request, HTTP 400 (Bad Request) is returned.
+1. If flow context information cannot be retrieved or entity for the given entity id cannot be found, HTTP 404 (Not Found) is returned.
+1. For non-recoverable errors while retrieving data, HTTP 500 (Internal Server Error) is returned.


---------------------------------------------------------------------
To unsubscribe, e-mail: common-commits-unsubscribe@hadoop.apache.org
For additional commands, e-mail: common-commits-help@hadoop.apache.org


[33/50] [abbrv] hadoop git commit: YARN-6134. [ATSv2 Security] Regenerate delegation token for app just before token expires if app collector is active. Contributed by Varun Saxena

Posted by st...@apache.org.
YARN-6134. [ATSv2 Security] Regenerate delegation token for app just before token expires if app collector is active. Contributed by Varun Saxena


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/7fd6ae24
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/7fd6ae24
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/7fd6ae24

Branch: refs/heads/HADOOP-13345
Commit: 7fd6ae24798cd3fdd77dbb00089a922407026e02
Parents: e276c75
Author: Jian He <ji...@apache.org>
Authored: Fri Aug 18 23:20:44 2017 -0700
Committer: Varun Saxena <va...@apache.org>
Committed: Wed Aug 30 11:29:54 2017 +0530

----------------------------------------------------------------------
 .../security/TestTimelineAuthFilterForV2.java   | 104 ++++++++++++++--
 .../collector/AppLevelTimelineCollector.java    |  35 ++++--
 .../collector/NodeTimelineCollectorManager.java | 118 ++++++++++++++-----
 ...neV2DelegationTokenSecretManagerService.java |  11 ++
 4 files changed, 222 insertions(+), 46 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/7fd6ae24/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-tests/src/test/java/org/apache/hadoop/yarn/server/timelineservice/security/TestTimelineAuthFilterForV2.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-tests/src/test/java/org/apache/hadoop/yarn/server/timelineservice/security/TestTimelineAuthFilterForV2.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-tests/src/test/java/org/apache/hadoop/yarn/server/timelineservice/security/TestTimelineAuthFilterForV2.java
index f1d5185..bc1594c 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-tests/src/test/java/org/apache/hadoop/yarn/server/timelineservice/security/TestTimelineAuthFilterForV2.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-tests/src/test/java/org/apache/hadoop/yarn/server/timelineservice/security/TestTimelineAuthFilterForV2.java
@@ -19,6 +19,7 @@
 package org.apache.hadoop.yarn.server.timelineservice.security;
 
 import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertNotEquals;
 import static org.junit.Assert.assertNotNull;
 import static org.junit.Assert.assertTrue;
 import static org.junit.Assert.fail;
@@ -27,6 +28,7 @@ import static org.mockito.Matchers.eq;
 import static org.mockito.Mockito.atLeastOnce;
 import static org.mockito.Mockito.mock;
 import static org.mockito.Mockito.spy;
+import static org.mockito.Mockito.times;
 import static org.mockito.Mockito.verify;
 import static org.mockito.Mockito.when;
 
@@ -51,6 +53,8 @@ import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.security.authentication.KerberosTestUtils;
 import org.apache.hadoop.security.authentication.server.KerberosAuthenticationHandler;
 import org.apache.hadoop.security.ssl.KeyStoreTestUtil;
+import org.apache.hadoop.security.token.Token;
+import org.apache.hadoop.security.token.delegation.AbstractDelegationTokenSecretManager;
 import org.apache.hadoop.yarn.api.records.ApplicationId;
 import org.apache.hadoop.yarn.api.records.timelineservice.TimelineEntity;
 import org.apache.hadoop.yarn.client.api.TimelineV2Client;
@@ -190,6 +194,10 @@ public class TestTimelineAuthFilterForV2 {
       // renewed automatically if app is still alive.
       conf.setLong(
           YarnConfiguration.TIMELINE_DELEGATION_TOKEN_RENEW_INTERVAL, 100);
+      // Set token max lifetime to 4 seconds to test if timeline delegation
+      // token for the app is regenerated automatically if app is still alive.
+      conf.setLong(
+          YarnConfiguration.TIMELINE_DELEGATION_TOKEN_MAX_LIFETIME, 4000);
     }
     UserGroupInformation.setConfiguration(conf);
     collectorManager = new DummyNodeTimelineCollectorManager();
@@ -205,9 +213,8 @@ public class TestTimelineAuthFilterForV2 {
     if (!withKerberosLogin) {
       AppLevelTimelineCollector collector =
           (AppLevelTimelineCollector)collectorManager.get(appId);
-      org.apache.hadoop.security.token.Token
-          <TimelineDelegationTokenIdentifier> token =
-              collector.getDelegationTokenForApp();
+      Token<TimelineDelegationTokenIdentifier> token =
+          collector.getDelegationTokenForApp();
       token.setService(new Text("localhost" + token.getService().toString().
           substring(token.getService().toString().indexOf(":"))));
       UserGroupInformation.getCurrentUser().addToken(token);
@@ -304,6 +311,20 @@ public class TestTimelineAuthFilterForV2 {
     }
   }
 
+  private boolean publishWithRetries(ApplicationId appId, File entityTypeDir,
+      String entityType, int numEntities) throws Exception {
+    for (int i = 0; i < 10; i++) {
+      try {
+        publishAndVerifyEntity(appId, entityTypeDir, entityType, numEntities);
+      } catch (YarnException e) {
+        Thread.sleep(50);
+        continue;
+      }
+      return true;
+    }
+    return false;
+  }
+
   @Test
   public void testPutTimelineEntities() throws Exception {
     final String entityType = "dummy_type";
@@ -325,17 +346,63 @@ public class TestTimelineAuthFilterForV2 {
           }
         });
       } else {
-        publishAndVerifyEntity(appId, entityTypeDir, entityType, 1);
+        assertTrue("Entities should have been published successfully.",
+            publishWithRetries(appId, entityTypeDir, entityType, 1));
+
+        AppLevelTimelineCollector collector =
+            (AppLevelTimelineCollector) collectorManager.get(appId);
+        Token<TimelineDelegationTokenIdentifier> token =
+            collector.getDelegationTokenForApp();
+        assertNotNull(token);
+
         // Verify if token is renewed automatically and entities can still be
         // published.
         Thread.sleep(1000);
-        publishAndVerifyEntity(appId, entityTypeDir, entityType, 2);
-        AppLevelTimelineCollector collector =
-            (AppLevelTimelineCollector) collectorManager.get(appId);
+        // Entities should publish successfully after renewal.
+        assertTrue("Entities should have been published successfully.",
+            publishWithRetries(appId, entityTypeDir, entityType, 2));
         assertNotNull(collector);
         verify(collectorManager.getTokenManagerService(), atLeastOnce()).
             renewToken(eq(collector.getDelegationTokenForApp()),
                 any(String.class));
+
+        // Wait to ensure the lifetime of the token expires and ensure it is
+        // automatically.
+        Thread.sleep(3000);
+        for (int i = 0; i < 40; i++) {
+          if (!token.equals(collector.getDelegationTokenForApp())) {
+            break;
+          }
+          Thread.sleep(50);
+        }
+        assertNotEquals("Token should have been regenerated.", token,
+            collector.getDelegationTokenForApp());
+        Thread.sleep(1000);
+        // Try publishing with the old token in UGI. Publishing should fail due
+        // to invalid token.
+        try {
+          publishAndVerifyEntity(appId, entityTypeDir, entityType, 2);
+          fail("Exception should have been thrown due to Invalid Token.");
+        } catch (YarnException e) {
+          assertTrue("Exception thrown should have been due to Invalid Token.",
+              e.getCause().getMessage().contains("InvalidToken"));
+        }
+
+        // Update the regenerated token in UGI and retry publishing entities.
+        Token<TimelineDelegationTokenIdentifier> regeneratedToken =
+            collector.getDelegationTokenForApp();
+        regeneratedToken.setService(new Text("localhost" +
+            regeneratedToken.getService().toString().substring(
+            regeneratedToken.getService().toString().indexOf(":"))));
+        UserGroupInformation.getCurrentUser().addToken(regeneratedToken);
+        assertTrue("Entities should have been published successfully.",
+                 publishWithRetries(appId, entityTypeDir, entityType, 2));
+        // Token was generated twice, once when app collector was created and
+        // later after token lifetime expiry.
+        verify(collectorManager.getTokenManagerService(), times(2)).
+            generateToken(any(UserGroupInformation.class), any(String.class));
+        assertEquals(1, ((DummyNodeTimelineCollectorManager) collectorManager).
+            getTokenExpiredCnt());
       }
       // Wait for async entity to be published.
       for (int i = 0; i < 50; i++) {
@@ -359,14 +426,35 @@ public class TestTimelineAuthFilterForV2 {
 
   private static class DummyNodeTimelineCollectorManager extends
       NodeTimelineCollectorManager {
+    private volatile int tokenExpiredCnt = 0;
     DummyNodeTimelineCollectorManager() {
       super();
     }
 
+    private int getTokenExpiredCnt() {
+      return tokenExpiredCnt;
+    }
+
     @Override
     protected TimelineV2DelegationTokenSecretManagerService
         createTokenManagerService() {
-      return spy(new TimelineV2DelegationTokenSecretManagerService());
+      return spy(new TimelineV2DelegationTokenSecretManagerService() {
+        @Override
+        protected AbstractDelegationTokenSecretManager
+            <TimelineDelegationTokenIdentifier>
+            createTimelineDelegationTokenSecretManager(long secretKeyInterval,
+                long tokenMaxLifetime, long tokenRenewInterval,
+                long tokenRemovalScanInterval) {
+          return spy(new TimelineV2DelegationTokenSecretManager(
+              secretKeyInterval, tokenMaxLifetime, tokenRenewInterval, 2000L) {
+            @Override
+            protected void logExpireToken(
+                TimelineDelegationTokenIdentifier ident) throws IOException {
+              tokenExpiredCnt++;
+            }
+          });
+        }
+      });
     }
 
     @Override

http://git-wip-us.apache.org/repos/asf/hadoop/blob/7fd6ae24/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/collector/AppLevelTimelineCollector.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/collector/AppLevelTimelineCollector.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/collector/AppLevelTimelineCollector.java
index 8b8534e..38221fe 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/collector/AppLevelTimelineCollector.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/collector/AppLevelTimelineCollector.java
@@ -51,7 +51,9 @@ public class AppLevelTimelineCollector extends TimelineCollector {
   private final TimelineCollectorContext context;
   private UserGroupInformation currentUser;
   private Token<TimelineDelegationTokenIdentifier> delegationTokenForApp;
-  private Future<?> renewalFuture;
+  private long tokenMaxDate = 0;
+  private String tokenRenewer;
+  private Future<?> renewalOrRegenerationFuture;
 
   public AppLevelTimelineCollector(ApplicationId appId) {
     this(appId, null);
@@ -75,13 +77,32 @@ public class AppLevelTimelineCollector extends TimelineCollector {
 
   void setDelegationTokenAndFutureForApp(
       Token<TimelineDelegationTokenIdentifier> token,
-      Future<?> appRenewalFuture) {
+      Future<?> appRenewalOrRegenerationFuture, long tknMaxDate,
+      String renewer) {
     this.delegationTokenForApp = token;
-    this.renewalFuture = appRenewalFuture;
+    this.tokenMaxDate = tknMaxDate;
+    this.tokenRenewer = renewer;
+    this.renewalOrRegenerationFuture = appRenewalOrRegenerationFuture;
   }
 
-  void setRenewalFutureForApp(Future<?> appRenewalFuture) {
-    this.renewalFuture = appRenewalFuture;
+  void setRenewalOrRegenerationFutureForApp(
+      Future<?> appRenewalOrRegenerationFuture) {
+    this.renewalOrRegenerationFuture = appRenewalOrRegenerationFuture;
+  }
+
+  void cancelRenewalOrRegenerationFutureForApp() {
+    if (renewalOrRegenerationFuture != null &&
+        !renewalOrRegenerationFuture.isDone()) {
+      renewalOrRegenerationFuture.cancel(true);
+    }
+  }
+
+  long getAppDelegationTokenMaxDate() {
+    return tokenMaxDate;
+  }
+
+  String getAppDelegationTokenRenewer() {
+    return tokenRenewer;
   }
 
   @VisibleForTesting
@@ -109,9 +130,7 @@ public class AppLevelTimelineCollector extends TimelineCollector {
 
   @Override
   protected void serviceStop() throws Exception {
-    if (renewalFuture != null && !renewalFuture.isDone()) {
-      renewalFuture.cancel(true);
-    }
+    cancelRenewalOrRegenerationFutureForApp();
     super.serviceStop();
   }
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/7fd6ae24/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/collector/NodeTimelineCollectorManager.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/collector/NodeTimelineCollectorManager.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/collector/NodeTimelineCollectorManager.java
index 1b1b373..68a68f0 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/collector/NodeTimelineCollectorManager.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/collector/NodeTimelineCollectorManager.java
@@ -87,6 +87,8 @@ public class NodeTimelineCollectorManager extends TimelineCollectorManager {
 
   private static final long TIME_BEFORE_RENEW_DATE = 10 * 1000; // 10 seconds.
 
+  private static final long TIME_BEFORE_EXPIRY = 5 * 60 * 1000; // 5 minutes.
+
   static final String COLLECTOR_MANAGER_ATTR_KEY = "collector.manager";
 
   @VisibleForTesting
@@ -176,10 +178,8 @@ public class NodeTimelineCollectorManager extends TimelineCollectorManager {
   public long renewTokenForAppCollector(
       AppLevelTimelineCollector appCollector) throws IOException {
     if (appCollector.getDelegationTokenForApp() != null) {
-      TimelineDelegationTokenIdentifier identifier =
-          appCollector.getDelegationTokenForApp().decodeIdentifier();
       return tokenMgrService.renewToken(appCollector.getDelegationTokenForApp(),
-          identifier.getRenewer().toString());
+          appCollector.getAppDelegationTokenRenewer());
     } else {
       LOG.info("Delegation token not available for renewal for app " +
           appCollector.getTimelineEntityContext().getAppId());
@@ -196,6 +196,42 @@ public class NodeTimelineCollectorManager extends TimelineCollectorManager {
     }
   }
 
+  private long getRenewalDelay(long renewInterval) {
+    return ((renewInterval > TIME_BEFORE_RENEW_DATE) ?
+        renewInterval - TIME_BEFORE_RENEW_DATE : renewInterval);
+  }
+
+  private long getRegenerationDelay(long tokenMaxDate) {
+    long regenerateTime = tokenMaxDate - Time.now();
+    return ((regenerateTime > TIME_BEFORE_EXPIRY) ?
+        regenerateTime - TIME_BEFORE_EXPIRY : regenerateTime);
+  }
+
+  private org.apache.hadoop.yarn.api.records.Token generateTokenAndSetTimer(
+      ApplicationId appId, AppLevelTimelineCollector appCollector)
+      throws IOException {
+    Token<TimelineDelegationTokenIdentifier> timelineToken =
+        generateTokenForAppCollector(appCollector.getAppUser());
+    TimelineDelegationTokenIdentifier tokenId =
+        timelineToken.decodeIdentifier();
+    long renewalDelay = getRenewalDelay(tokenRenewInterval);
+    long regenerationDelay = getRegenerationDelay(tokenId.getMaxDate());
+    if (renewalDelay > 0 || regenerationDelay > 0) {
+      boolean isTimerForRenewal = renewalDelay < regenerationDelay;
+      Future<?> renewalOrRegenerationFuture = tokenRenewalExecutor.schedule(
+          new CollectorTokenRenewer(appId, isTimerForRenewal),
+          isTimerForRenewal? renewalDelay : regenerationDelay,
+          TimeUnit.MILLISECONDS);
+      appCollector.setDelegationTokenAndFutureForApp(timelineToken,
+          renewalOrRegenerationFuture, tokenId.getMaxDate(),
+          tokenId.getRenewer().toString());
+    }
+    LOG.info("Generated a new token " + timelineToken + " for app " + appId);
+    return org.apache.hadoop.yarn.api.records.Token.newInstance(
+        timelineToken.getIdentifier(), timelineToken.getKind().toString(),
+        timelineToken.getPassword(), timelineToken.getService().toString());
+  }
+
   @Override
   protected void doPostPut(ApplicationId appId, TimelineCollector collector) {
     try {
@@ -206,19 +242,8 @@ public class NodeTimelineCollectorManager extends TimelineCollectorManager {
       if (UserGroupInformation.isSecurityEnabled() &&
           collector instanceof AppLevelTimelineCollector) {
         AppLevelTimelineCollector appCollector =
-            (AppLevelTimelineCollector)collector;
-        Token<TimelineDelegationTokenIdentifier> timelineToken =
-            generateTokenForAppCollector(appCollector.getAppUser());
-        long renewalDelay = (tokenRenewInterval > TIME_BEFORE_RENEW_DATE) ?
-            tokenRenewInterval - TIME_BEFORE_RENEW_DATE : tokenRenewInterval;
-        Future<?> renewalFuture =
-            tokenRenewalExecutor.schedule(new CollectorTokenRenewer(appId),
-                renewalDelay, TimeUnit.MILLISECONDS);
-        appCollector.setDelegationTokenAndFutureForApp(timelineToken,
-            renewalFuture);
-        token = org.apache.hadoop.yarn.api.records.Token.newInstance(
-            timelineToken.getIdentifier(), timelineToken.getKind().toString(),
-            timelineToken.getPassword(), timelineToken.getService().toString());
+            (AppLevelTimelineCollector) collector;
+        token = generateTokenAndSetTimer(appId, appCollector);
       }
       // Report to NM if a new collector is added.
       reportNewCollectorInfoToNM(appId, token);
@@ -365,16 +390,54 @@ public class NodeTimelineCollectorManager extends TimelineCollectorManager {
 
   private final class CollectorTokenRenewer implements Runnable {
     private ApplicationId appId;
-    private CollectorTokenRenewer(ApplicationId applicationId) {
+    // Indicates whether timer is for renewal or regeneration of token.
+    private boolean timerForRenewal = true;
+    private CollectorTokenRenewer(ApplicationId applicationId,
+        boolean forRenewal) {
       appId = applicationId;
+      timerForRenewal = forRenewal;
+    }
+
+    private void renewToken(AppLevelTimelineCollector appCollector)
+        throws IOException {
+      long newExpirationTime = renewTokenForAppCollector(appCollector);
+      // Set renewal or regeneration timer based on delay.
+      long renewalDelay = 0;
+      if (newExpirationTime > 0) {
+        LOG.info("Renewed token for " + appId + " with new expiration " +
+            "timestamp = " + newExpirationTime);
+        renewalDelay = getRenewalDelay(newExpirationTime - Time.now());
+      }
+      long regenerationDelay =
+          getRegenerationDelay(appCollector.getAppDelegationTokenMaxDate());
+      if (renewalDelay > 0 || regenerationDelay > 0) {
+        this.timerForRenewal = renewalDelay < regenerationDelay;
+        Future<?> renewalOrRegenerationFuture = tokenRenewalExecutor.schedule(
+            this, timerForRenewal ? renewalDelay : regenerationDelay,
+            TimeUnit.MILLISECONDS);
+        appCollector.setRenewalOrRegenerationFutureForApp(
+            renewalOrRegenerationFuture);
+      }
+    }
+
+    private void regenerateToken(AppLevelTimelineCollector appCollector)
+        throws IOException {
+      org.apache.hadoop.yarn.api.records.Token token =
+          generateTokenAndSetTimer(appId, appCollector);
+      // Report to NM if a new collector is added.
+      try {
+        reportNewCollectorInfoToNM(appId, token);
+      } catch (YarnException e) {
+        LOG.warn("Unable to report regenerated token to NM for " + appId);
+      }
     }
 
     @Override
     public void run() {
       TimelineCollector collector = get(appId);
       if (collector == null) {
-        LOG.info("Cannot find active collector while renewing token for " +
-            appId);
+        LOG.info("Cannot find active collector while " + (timerForRenewal ?
+            "renewing" : "regenerating") + " token for " + appId);
         return;
       }
       AppLevelTimelineCollector appCollector =
@@ -383,19 +446,14 @@ public class NodeTimelineCollectorManager extends TimelineCollectorManager {
       synchronized (collector) {
         if (!collector.isStopped()) {
           try {
-            long newExpirationTime = renewTokenForAppCollector(appCollector);
-            if (newExpirationTime > 0) {
-              long renewInterval = newExpirationTime - Time.now();
-              long renewalDelay = (renewInterval > TIME_BEFORE_RENEW_DATE) ?
-                  renewInterval - TIME_BEFORE_RENEW_DATE : renewInterval;
-              LOG.info("Renewed token for " + appId + " with new expiration " +
-                  "timestamp = " + newExpirationTime);
-              Future<?> renewalFuture = tokenRenewalExecutor.schedule(
-                  this, renewalDelay, TimeUnit.MILLISECONDS);
-              appCollector.setRenewalFutureForApp(renewalFuture);
+            if (timerForRenewal) {
+              renewToken(appCollector);
+            } else {
+              regenerateToken(appCollector);
             }
           } catch (Exception e) {
-            LOG.warn("Unable to renew token for " + appId);
+            LOG.warn("Unable to " + (timerForRenewal ? "renew" : "regenerate") +
+                " token for " + appId, e);
           }
         }
       }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/7fd6ae24/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/security/TimelineV2DelegationTokenSecretManagerService.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/security/TimelineV2DelegationTokenSecretManagerService.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/security/TimelineV2DelegationTokenSecretManagerService.java
index 39f0fc6..de5ccdc 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/security/TimelineV2DelegationTokenSecretManagerService.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/security/TimelineV2DelegationTokenSecretManagerService.java
@@ -20,6 +20,8 @@ package org.apache.hadoop.yarn.server.timelineservice.security;
 
 import java.io.IOException;
 
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.classification.InterfaceAudience.Private;
 import org.apache.hadoop.classification.InterfaceStability.Unstable;
 import org.apache.hadoop.io.Text;
@@ -73,6 +75,9 @@ public class TimelineV2DelegationTokenSecretManagerService extends
   public static class TimelineV2DelegationTokenSecretManager extends
       AbstractDelegationTokenSecretManager<TimelineDelegationTokenIdentifier> {
 
+    private static final Log LOG =
+        LogFactory.getLog(TimelineV2DelegationTokenSecretManager.class);
+
     /**
      * Create a timeline v2 secret manager.
      * @param delegationKeyUpdateInterval the number of milliseconds for rolling
@@ -111,5 +116,11 @@ public class TimelineV2DelegationTokenSecretManagerService extends
     public TimelineDelegationTokenIdentifier createIdentifier() {
       return new TimelineDelegationTokenIdentifier();
     }
+
+    @Override
+    protected void logExpireToken(TimelineDelegationTokenIdentifier ident)
+        throws IOException {
+      LOG.info("Token " + ident + " expired.");
+    }
   }
 }

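The patch schedules a single timer per app collector and chooses between token renewal and full regeneration by whichever deadline comes first: renewal fires after the renew interval minus a 10 second buffer, regeneration fires at the token max date minus a 5 minute buffer. A self-contained sketch of that decision logic, with the buffer constants copied from the diff above (token and executor wiring elided):

    // Sketch of the delay math in NodeTimelineCollectorManager above.
    final class TokenTimerMath {
      private static final long TIME_BEFORE_RENEW_DATE = 10 * 1000L; // 10 seconds
      private static final long TIME_BEFORE_EXPIRY = 5 * 60 * 1000L; // 5 minutes

      // Delay until the next renewal attempt.
      static long renewalDelay(long renewInterval) {
        return renewInterval > TIME_BEFORE_RENEW_DATE
            ? renewInterval - TIME_BEFORE_RENEW_DATE : renewInterval;
      }

      // Delay until the token must be regenerated outright.
      static long regenerationDelay(long tokenMaxDate, long now) {
        long regenerateTime = tokenMaxDate - now;
        return regenerateTime > TIME_BEFORE_EXPIRY
            ? regenerateTime - TIME_BEFORE_EXPIRY : regenerateTime;
      }

      // The timer is a renewal timer if the renewal deadline comes first;
      // otherwise the token is regenerated and reported to the NM.
      static boolean timerForRenewal(long renewalDelay, long regenerationDelay) {
        return renewalDelay < regenerationDelay;
      }
    }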



[09/50] [abbrv] hadoop git commit: YARN-6318. timeline service schema creator fails if executed from a remote machine (Sangjin Lee via Varun Saxena)

YARN-6318. timeline service schema creator fails if executed from a remote machine (Sangjin Lee via Varun Saxena)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/44999aab
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/44999aab
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/44999aab

Branch: refs/heads/HADOOP-13345
Commit: 44999aabc27636706851f1c71f3ce500ee6eb027
Parents: 18b3a80
Author: Varun Saxena <va...@apache.org>
Authored: Tue Mar 14 02:05:01 2017 +0530
Committer: Varun Saxena <va...@apache.org>
Committed: Wed Aug 30 11:29:52 2017 +0530

----------------------------------------------------------------------
 .../storage/TimelineSchemaCreator.java          |  5 ++-
 .../common/HBaseTimelineStorageUtils.java       | 18 +++++------
 .../common/TestHBaseTimelineStorageUtils.java   | 33 ++++++++++++++++++++
 3 files changed, 46 insertions(+), 10 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/44999aab/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/TimelineSchemaCreator.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/TimelineSchemaCreator.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/TimelineSchemaCreator.java
index 89be4f6..15885ce 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/TimelineSchemaCreator.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/TimelineSchemaCreator.java
@@ -36,6 +36,7 @@ import org.apache.hadoop.hbase.client.Admin;
 import org.apache.hadoop.hbase.client.Connection;
 import org.apache.hadoop.hbase.client.ConnectionFactory;
 import org.apache.hadoop.util.GenericOptionsParser;
+import org.apache.hadoop.yarn.conf.YarnConfiguration;
 import org.apache.hadoop.yarn.server.timelineservice.storage.application.ApplicationTable;
 import org.apache.hadoop.yarn.server.timelineservice.storage.apptoflow.AppToFlowTable;
 import org.apache.hadoop.yarn.server.timelineservice.storage.common.HBaseTimelineStorageUtils;
@@ -71,8 +72,10 @@ public final class TimelineSchemaCreator {
 
   public static void main(String[] args) throws Exception {
 
+    LOG.info("Starting the schema creation");
     Configuration hbaseConf =
-        HBaseTimelineStorageUtils.getTimelineServiceHBaseConf(null);
+        HBaseTimelineStorageUtils.getTimelineServiceHBaseConf(
+            new YarnConfiguration());
     // Grab input args and allow for -Dxyz style arguments
     String[] otherArgs = new GenericOptionsParser(hbaseConf, args)
         .getRemainingArgs();

http://git-wip-us.apache.org/repos/asf/hadoop/blob/44999aab/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/common/HBaseTimelineStorageUtils.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/common/HBaseTimelineStorageUtils.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/common/HBaseTimelineStorageUtils.java
index f99637e..feef6af 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/common/HBaseTimelineStorageUtils.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/common/HBaseTimelineStorageUtils.java
@@ -17,14 +17,18 @@
 
 package org.apache.hadoop.yarn.server.timelineservice.storage.common;
 
+import java.io.IOException;
 import java.net.MalformedURLException;
 import java.net.URL;
+import java.util.Arrays;
+import java.util.List;
+import java.util.Map;
+
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.Cell;
 import org.apache.hadoop.hbase.CellUtil;
 import org.apache.hadoop.hbase.HBaseConfiguration;
 import org.apache.hadoop.hbase.HConstants;
-import org.apache.hadoop.hbase.HRegionInfo;
 import org.apache.hadoop.hbase.KeyValue;
 import org.apache.hadoop.hbase.Tag;
 import org.apache.hadoop.hbase.util.Bytes;
@@ -33,15 +37,10 @@ import org.apache.hadoop.yarn.conf.YarnConfiguration;
 import org.apache.hadoop.yarn.server.timelineservice.storage.flow.AggregationCompactionDimension;
 import org.apache.hadoop.yarn.server.timelineservice.storage.flow.AggregationOperation;
 import org.apache.hadoop.yarn.server.timelineservice.storage.flow.Attribute;
-import org.apache.hadoop.yarn.server.timelineservice.storage.flow.FlowRunTable;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
-import java.io.IOException;
 import java.text.NumberFormat;
-import java.util.Arrays;
-import java.util.List;
-import java.util.Map;
 
 /**
  * A bunch of utility functions used in HBase TimelineService backend.
@@ -274,16 +273,17 @@ public final class HBaseTimelineStorageUtils {
    */
   public static Configuration getTimelineServiceHBaseConf(Configuration conf)
       throws MalformedURLException {
-    Configuration hbaseConf;
-
     if (conf == null) {
-      return HBaseConfiguration.create();
+      throw new NullPointerException();
     }
 
+    Configuration hbaseConf;
     String timelineServiceHBaseConfFileURL =
         conf.get(YarnConfiguration.TIMELINE_SERVICE_HBASE_CONFIGURATION_FILE);
     if (timelineServiceHBaseConfFileURL != null
         && timelineServiceHBaseConfFileURL.length() > 0) {
+      LOG.info("Using hbase configuration at " +
+          timelineServiceHBaseConfFileURL);
       // create a clone so that we don't mess with our input one
       hbaseConf = new Configuration(conf);
       Configuration plainHBaseConf = new Configuration(false);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/44999aab/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/test/java/org/apache/hadoop/yarn/server/timelineservice/storage/common/TestHBaseTimelineStorageUtils.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/test/java/org/apache/hadoop/yarn/server/timelineservice/storage/common/TestHBaseTimelineStorageUtils.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/test/java/org/apache/hadoop/yarn/server/timelineservice/storage/common/TestHBaseTimelineStorageUtils.java
new file mode 100644
index 0000000..402a89b
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/test/java/org/apache/hadoop/yarn/server/timelineservice/storage/common/TestHBaseTimelineStorageUtils.java
@@ -0,0 +1,33 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.server.timelineservice.storage.common;
+
+import org.junit.Test;
+
+/**
+ * Unit tests for HBaseTimelineStorageUtils static methods.
+ */
+public class TestHBaseTimelineStorageUtils {
+
+  @Test(expected=NullPointerException.class)
+  public void testGetTimelineServiceHBaseConfNullArgument() throws Exception {
+    HBaseTimelineStorageUtils.getTimelineServiceHBaseConf(null);
+  }
+
+}

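After this change, getTimelineServiceHBaseConf() fails fast on a null configuration and the schema creator always starts from a YarnConfiguration, so the target HBase cluster can be selected purely through configuration. A sketch of running the tool against a remote HBase cluster (the file path is illustrative; the property name matches YarnConfiguration.TIMELINE_SERVICE_HBASE_CONFIGURATION_FILE):

    bin/hadoop org.apache.hadoop.yarn.server.timelineservice.storage.TimelineSchemaCreator \
      -Dyarn.timeline-service.hbase.configuration.file=file:/etc/hbase/conf/hbase-site.xml \
      -create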



[15/50] [abbrv] hadoop git commit: YARN-6130. [ATSv2 Security] Generate a delegation token for AM when app collector is created and pass it to AM via NM and RM. Contributed by Varun Saxena.

http://git-wip-us.apache.org/repos/asf/hadoop/blob/7594d1de/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-tests/src/test/java/org/apache/hadoop/yarn/server/timelineservice/TestTimelineServiceClientIntegration.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-tests/src/test/java/org/apache/hadoop/yarn/server/timelineservice/TestTimelineServiceClientIntegration.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-tests/src/test/java/org/apache/hadoop/yarn/server/timelineservice/TestTimelineServiceClientIntegration.java
index 07058f6..eb4381d 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-tests/src/test/java/org/apache/hadoop/yarn/server/timelineservice/TestTimelineServiceClientIntegration.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-tests/src/test/java/org/apache/hadoop/yarn/server/timelineservice/TestTimelineServiceClientIntegration.java
@@ -80,7 +80,7 @@ public class TestTimelineServiceClientIntegration {
       auxService =
           PerNodeTimelineCollectorsAuxService.launchServer(new String[0],
               collectorManager, conf);
-      auxService.addApplication(ApplicationId.newInstance(0, 1));
+      auxService.addApplication(ApplicationId.newInstance(0, 1), "user");
     } catch (ExitUtil.ExitException e) {
       fail();
     }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/7594d1de/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-tests/src/test/java/org/apache/hadoop/yarn/server/timelineservice/security/TestTimelineAuthFilterForV2.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-tests/src/test/java/org/apache/hadoop/yarn/server/timelineservice/security/TestTimelineAuthFilterForV2.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-tests/src/test/java/org/apache/hadoop/yarn/server/timelineservice/security/TestTimelineAuthFilterForV2.java
index 608ef67..0ddf287 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-tests/src/test/java/org/apache/hadoop/yarn/server/timelineservice/security/TestTimelineAuthFilterForV2.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-tests/src/test/java/org/apache/hadoop/yarn/server/timelineservice/security/TestTimelineAuthFilterForV2.java
@@ -23,7 +23,10 @@ import static org.junit.Assert.assertNotNull;
 import static org.junit.Assert.assertTrue;
 import static org.junit.Assert.fail;
 import static org.mockito.Matchers.any;
+import static org.mockito.Matchers.eq;
 import static org.mockito.Mockito.mock;
+import static org.mockito.Mockito.spy;
+import static org.mockito.Mockito.verify;
 import static org.mockito.Mockito.when;
 
 import java.io.BufferedReader;
@@ -40,6 +43,7 @@ import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
 import org.apache.hadoop.fs.FileUtil;
 import org.apache.hadoop.http.HttpConfig;
+import org.apache.hadoop.io.Text;
 import org.apache.hadoop.minikdc.MiniKdc;
 import org.apache.hadoop.security.SecurityUtil;
 import org.apache.hadoop.security.UserGroupInformation;
@@ -51,10 +55,12 @@ import org.apache.hadoop.yarn.api.records.timelineservice.TimelineEntity;
 import org.apache.hadoop.yarn.client.api.TimelineV2Client;
 import org.apache.hadoop.yarn.conf.YarnConfiguration;
 import org.apache.hadoop.yarn.exceptions.YarnException;
+import org.apache.hadoop.yarn.security.client.TimelineDelegationTokenIdentifier;
 import org.apache.hadoop.yarn.server.api.CollectorNodemanagerProtocol;
 import org.apache.hadoop.yarn.server.api.protocolrecords.GetTimelineCollectorContextRequest;
 import org.apache.hadoop.yarn.server.api.protocolrecords.GetTimelineCollectorContextResponse;
 import org.apache.hadoop.yarn.server.timeline.security.TimelineAuthenticationFilterInitializer;
+import org.apache.hadoop.yarn.server.timelineservice.collector.AppLevelTimelineCollector;
 import org.apache.hadoop.yarn.server.timelineservice.collector.NodeTimelineCollectorManager;
 import org.apache.hadoop.yarn.server.timelineservice.collector.PerNodeTimelineCollectorsAuxService;
 import org.apache.hadoop.yarn.server.timelineservice.storage.FileSystemTimelineReaderImpl;
@@ -76,7 +82,6 @@ public class TestTimelineAuthFilterForV2 {
 
   private static final String FOO_USER = "foo";
   private static final String HTTP_USER = "HTTP";
-
   private static final File TEST_ROOT_DIR = new File(
       System.getProperty("test.build.dir", "target" + File.separator +
           "test-dir"), UUID.randomUUID().toString());
@@ -88,21 +93,35 @@ public class TestTimelineAuthFilterForV2 {
   private static String httpSpnegoPrincipal = KerberosTestUtils.
       getServerPrincipal();
 
+  // First param indicates whether HTTPS access or HTTP access and second param
+  // indicates whether it is kerberos access or token based access.
   @Parameterized.Parameters
-  public static Collection<Object[]> withSsl() {
-    return Arrays.asList(new Object[][] {{false}, {true}});
+  public static Collection<Object[]> params() {
+    return Arrays.asList(new Object[][] {{false, true}, {false, false},
+        {true, false}, {true, true}});
   }
 
   private static MiniKdc testMiniKDC;
   private static String keystoresDir;
   private static String sslConfDir;
   private static Configuration conf;
+  private static UserGroupInformation nonKerberosUser;
+  static {
+    try {
+      nonKerberosUser = UserGroupInformation.getCurrentUser();
+    } catch (IOException e) {}
+  }
+  // Indicates whether HTTPS or HTTP access.
   private boolean withSsl;
+  // Indicates whether Kerberos based login is used or token based access is
+  // done.
+  private boolean withKerberosLogin;
   private NodeTimelineCollectorManager collectorManager;
   private PerNodeTimelineCollectorsAuxService auxService;
-
-  public TestTimelineAuthFilterForV2(boolean withSsl) {
+  public TestTimelineAuthFilterForV2(boolean withSsl,
+      boolean withKerberosLogin) {
     this.withSsl = withSsl;
+    this.withKerberosLogin = withKerberosLogin;
   }
 
   @BeforeClass
@@ -143,8 +162,6 @@ public class TestTimelineAuthFilterForV2 {
       conf.set("hadoop.proxyuser.HTTP.hosts", "*");
       conf.set("hadoop.proxyuser.HTTP.users", FOO_USER);
       UserGroupInformation.setConfiguration(conf);
-      SecurityUtil.login(conf, YarnConfiguration.TIMELINE_SERVICE_KEYTAB,
-          YarnConfiguration.TIMELINE_SERVICE_PRINCIPAL, "localhost");
     } catch (Exception e) {
       fail("Couldn't setup TimelineServer V2.");
     }
@@ -166,9 +183,27 @@ public class TestTimelineAuthFilterForV2 {
       conf.set(YarnConfiguration.YARN_HTTP_POLICY_KEY,
           HttpConfig.Policy.HTTP_ONLY.name());
     }
+    UserGroupInformation.setConfiguration(conf);
     collectorManager = new DummyNodeTimelineCollectorManager();
-    auxService = PerNodeTimelineCollectorsAuxService.launchServer(new String[0],
-        collectorManager, conf);
+    auxService = PerNodeTimelineCollectorsAuxService.launchServer(
+        new String[0], collectorManager, conf);
+    if (withKerberosLogin) {
+      SecurityUtil.login(conf, YarnConfiguration.TIMELINE_SERVICE_KEYTAB,
+          YarnConfiguration.TIMELINE_SERVICE_PRINCIPAL, "localhost");
+    }
+    ApplicationId appId = ApplicationId.newInstance(0, 1);
+    auxService.addApplication(
+        appId, UserGroupInformation.getCurrentUser().getUserName());
+    if (!withKerberosLogin) {
+      AppLevelTimelineCollector collector =
+          (AppLevelTimelineCollector)collectorManager.get(appId);
+      org.apache.hadoop.security.token.Token
+          <TimelineDelegationTokenIdentifier> token =
+              collector.getDelegationTokenForApp();
+      token.setService(new Text("localhost" + token.getService().toString().
+          substring(token.getService().toString().indexOf(":"))));
+      UserGroupInformation.getCurrentUser().addToken(token);
+    }
   }
 
   private TimelineV2Client createTimelineClientForUGI(ApplicationId appId) {
@@ -199,9 +234,14 @@ public class TestTimelineAuthFilterForV2 {
     }
     if (withSsl) {
       KeyStoreTestUtil.cleanupSSLConfig(keystoresDir, sslConfDir);
-      File base = new File(BASEDIR);
-      FileUtil.fullyDelete(base);
+      FileUtil.fullyDelete(new File(BASEDIR));
     }
+    if (withKerberosLogin) {
+      UserGroupInformation.getCurrentUser().logoutUserFromKeytab();
+    }
+    // Reset the user for next run.
+    UserGroupInformation.setLoginUser(
+        UserGroupInformation.createRemoteUser(nonKerberosUser.getUserName()));
   }
 
   private static TimelineEntity createEntity(String id, String type) {
@@ -241,35 +281,44 @@ public class TestTimelineAuthFilterForV2 {
     }
   }
 
+  private void publishAndVerifyEntity(ApplicationId appId, File entityTypeDir,
+      String entityType) throws Exception {
+    TimelineV2Client client = createTimelineClientForUGI(appId);
+    try {
+    // Sync call. Results available immediately.
+      client.putEntities(createEntity("entity1", entityType));
+      assertEquals(1, entityTypeDir.listFiles().length);
+      verifyEntity(entityTypeDir, "entity1", entityType);
+      // Async call.
+      client.putEntitiesAsync(createEntity("entity2", entityType));
+    } finally {
+      client.stop();
+    }
+  }
+
   @Test
   public void testPutTimelineEntities() throws Exception {
-    ApplicationId appId = ApplicationId.newInstance(0, 1);
-    auxService.addApplication(appId);
     final String entityType = "dummy_type";
+    ApplicationId appId = ApplicationId.newInstance(0, 1);
     File entityTypeDir = new File(TEST_ROOT_DIR.getAbsolutePath() +
         File.separator + "entities" + File.separator +
-        YarnConfiguration.DEFAULT_RM_CLUSTER_ID + File.separator + "test_user" +
+        YarnConfiguration.DEFAULT_RM_CLUSTER_ID + File.separator +
+        UserGroupInformation.getCurrentUser().getUserName() +
         File.separator + "test_flow_name" + File.separator +
         "test_flow_version" + File.separator + "1" + File.separator +
         appId.toString() + File.separator + entityType);
     try {
-      KerberosTestUtils.doAs(HTTP_USER + "/localhost", new Callable<Void>() {
-        @Override
-        public Void call() throws Exception {
-          TimelineV2Client client = createTimelineClientForUGI(appId);
-          try {
-            // Sync call. Results available immediately.
-            client.putEntities(createEntity("entity1", entityType));
-            assertEquals(1, entityTypeDir.listFiles().length);
-            verifyEntity(entityTypeDir, "entity1", entityType);
-            // Async call.
-            client.putEntitiesAsync(createEntity("entity2", entityType));
+      if (withKerberosLogin) {
+        KerberosTestUtils.doAs(HTTP_USER + "/localhost", new Callable<Void>() {
+          @Override
+          public Void call() throws Exception {
+            publishAndVerifyEntity(appId, entityTypeDir, entityType);
             return null;
-          } finally {
-            client.stop();
           }
-        }
-      });
+        });
+      } else {
+        publishAndVerifyEntity(appId, entityTypeDir, entityType);
+      }
       // Wait for async entity to be published.
       for (int i = 0; i < 50; i++) {
         if (entityTypeDir.listFiles().length == 2) {
@@ -279,6 +328,11 @@ public class TestTimelineAuthFilterForV2 {
       }
       assertEquals(2, entityTypeDir.listFiles().length);
       verifyEntity(entityTypeDir, "entity2", entityType);
+      AppLevelTimelineCollector collector =
+          (AppLevelTimelineCollector)collectorManager.get(appId);
+      auxService.removeApplication(appId);
+      verify(collectorManager.getTokenManagerService()).cancelToken(
+          eq(collector.getDelegationTokenForApp()), any(String.class));
     } finally {
       FileUtils.deleteQuietly(entityTypeDir);
     }
@@ -291,12 +345,19 @@ public class TestTimelineAuthFilterForV2 {
     }
 
     @Override
+    protected TimelineV2DelegationTokenSecretManagerService
+        createTokenManagerService() {
+      return spy(new TimelineV2DelegationTokenSecretManagerService());
+    }
+
+    @Override
     protected CollectorNodemanagerProtocol getNMCollectorService() {
       CollectorNodemanagerProtocol protocol =
           mock(CollectorNodemanagerProtocol.class);
       try {
         GetTimelineCollectorContextResponse response =
-            GetTimelineCollectorContextResponse.newInstance("test_user",
+            GetTimelineCollectorContextResponse.newInstance(
+                UserGroupInformation.getCurrentUser().getUserName(),
                 "test_flow_name", "test_flow_version", 1L);
         when(protocol.getTimelineCollectorContext(any(
             GetTimelineCollectorContextRequest.class))).thenReturn(response);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/7594d1de/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/collector/AppLevelTimelineCollector.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/collector/AppLevelTimelineCollector.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/collector/AppLevelTimelineCollector.java
index 10d68bb..08ac894 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/collector/AppLevelTimelineCollector.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/collector/AppLevelTimelineCollector.java
@@ -22,9 +22,12 @@ import org.apache.hadoop.classification.InterfaceAudience.Private;
 import org.apache.hadoop.classification.InterfaceStability.Unstable;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.security.UserGroupInformation;
+import org.apache.hadoop.security.token.Token;
 import org.apache.hadoop.yarn.api.records.ApplicationId;
 import org.apache.hadoop.yarn.conf.YarnConfiguration;
+import org.apache.hadoop.yarn.security.client.TimelineDelegationTokenIdentifier;
 
+import com.google.common.annotations.VisibleForTesting;
 import com.google.common.base.Preconditions;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
@@ -42,13 +45,20 @@ public class AppLevelTimelineCollector extends TimelineCollector {
       LoggerFactory.getLogger(TimelineCollector.class);
 
   private final ApplicationId appId;
+  private final String appUser;
   private final TimelineCollectorContext context;
   private UserGroupInformation currentUser;
+  private Token<TimelineDelegationTokenIdentifier> delegationTokenForApp;
 
   public AppLevelTimelineCollector(ApplicationId appId) {
+    this(appId, null);
+  }
+
+  public AppLevelTimelineCollector(ApplicationId appId, String user) {
     super(AppLevelTimelineCollector.class.getName() + " - " + appId.toString());
     Preconditions.checkNotNull(appId, "AppId shouldn't be null");
     this.appId = appId;
+    this.appUser = user;
     context = new TimelineCollectorContext();
   }
 
@@ -56,6 +66,20 @@ public class AppLevelTimelineCollector extends TimelineCollector {
     return currentUser;
   }
 
+  public String getAppUser() {
+    return appUser;
+  }
+
+  void setDelegationTokenForApp(
+      Token<TimelineDelegationTokenIdentifier> token) {
+    this.delegationTokenForApp = token;
+  }
+
+  @VisibleForTesting
+  public Token<TimelineDelegationTokenIdentifier> getDelegationTokenForApp() {
+    return this.delegationTokenForApp;
+  }
+
   @Override
   protected void serviceInit(Configuration conf) throws Exception {
     context.setClusterId(conf.get(YarnConfiguration.RM_CLUSTER_ID,

http://git-wip-us.apache.org/repos/asf/hadoop/blob/7594d1de/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/collector/AppLevelTimelineCollectorWithAgg.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/collector/AppLevelTimelineCollectorWithAgg.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/collector/AppLevelTimelineCollectorWithAgg.java
index ac91275..6c0d693 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/collector/AppLevelTimelineCollectorWithAgg.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/collector/AppLevelTimelineCollectorWithAgg.java
@@ -56,8 +56,8 @@ public class AppLevelTimelineCollectorWithAgg
   private ScheduledThreadPoolExecutor appAggregationExecutor;
   private AppLevelAggregator appAggregator;
 
-  public AppLevelTimelineCollectorWithAgg(ApplicationId appId) {
-    super(appId);
+  public AppLevelTimelineCollectorWithAgg(ApplicationId appId, String user) {
+    super(appId, user);
   }
 
   private static Set<String> initializeSkipSet() {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/7594d1de/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/collector/NodeTimelineCollectorManager.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/collector/NodeTimelineCollectorManager.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/collector/NodeTimelineCollectorManager.java
index bb51734..02362b2 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/collector/NodeTimelineCollectorManager.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/collector/NodeTimelineCollectorManager.java
@@ -28,14 +28,17 @@ import org.apache.hadoop.classification.InterfaceAudience.Private;
 import org.apache.hadoop.classification.InterfaceStability.Unstable;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.http.HttpServer2;
+import org.apache.hadoop.io.Text;
 import org.apache.hadoop.net.NetUtils;
 import org.apache.hadoop.security.SecurityUtil;
+import org.apache.hadoop.security.token.Token;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.yarn.api.records.ApplicationId;
 import org.apache.hadoop.yarn.conf.YarnConfiguration;
 import org.apache.hadoop.yarn.exceptions.YarnException;
 import org.apache.hadoop.yarn.exceptions.YarnRuntimeException;
 import org.apache.hadoop.yarn.ipc.YarnRPC;
+import org.apache.hadoop.yarn.security.client.TimelineDelegationTokenIdentifier;
 import org.apache.hadoop.yarn.server.api.CollectorNodemanagerProtocol;
 import org.apache.hadoop.yarn.server.api.protocolrecords.GetTimelineCollectorContextRequest;
 import org.apache.hadoop.yarn.server.api.protocolrecords.GetTimelineCollectorContextResponse;
@@ -71,6 +74,8 @@ public class NodeTimelineCollectorManager extends TimelineCollectorManager {
 
   private final boolean runningAsAuxService;
 
+  private UserGroupInformation loginUGI;
+
   static final String COLLECTOR_MANAGER_ATTR_KEY = "collector.manager";
 
   @VisibleForTesting
@@ -85,25 +90,40 @@ public class NodeTimelineCollectorManager extends TimelineCollectorManager {
 
   @Override
   protected void serviceInit(Configuration conf) throws Exception {
-    tokenMgrService = new TimelineV2DelegationTokenSecretManagerService();
+    tokenMgrService = createTokenManagerService();
     addService(tokenMgrService);
+    this.loginUGI = UserGroupInformation.getCurrentUser();
     super.serviceInit(conf);
   }
 
   @Override
   protected void serviceStart() throws Exception {
-    if (UserGroupInformation.isSecurityEnabled() && !runningAsAuxService) {
+    if (UserGroupInformation.isSecurityEnabled()) {
       // Do security login for cases where collector is running outside NM.
-      try {
-        doSecureLogin();
-      } catch(IOException ie) {
-        throw new YarnRuntimeException("Failed to login", ie);
+      if (!runningAsAuxService) {
+        try {
+          doSecureLogin();
+        } catch(IOException ie) {
+          throw new YarnRuntimeException("Failed to login", ie);
+        }
       }
+      this.loginUGI = UserGroupInformation.getLoginUser();
     }
     super.serviceStart();
     startWebApp();
   }
 
+  protected TimelineV2DelegationTokenSecretManagerService
+      createTokenManagerService() {
+    return new TimelineV2DelegationTokenSecretManagerService();
+  }
+
+  @VisibleForTesting
+  public TimelineV2DelegationTokenSecretManagerService
+      getTokenManagerService() {
+    return tokenMgrService;
+  }
+
   private void doSecureLogin() throws IOException {
     Configuration conf = getConfig();
     InetSocketAddress addr = NetUtils.createSocketAddr(conf.getTrimmed(
@@ -122,13 +142,45 @@ public class NodeTimelineCollectorManager extends TimelineCollectorManager {
     super.serviceStop();
   }
 
+  @VisibleForTesting
+  public Token<TimelineDelegationTokenIdentifier> generateTokenForAppCollector(
+      String user) {
+    Token<TimelineDelegationTokenIdentifier> token  = tokenMgrService.
+        generateToken(UserGroupInformation.createRemoteUser(user),
+            loginUGI.getShortUserName());
+    token.setService(new Text(timelineRestServerBindAddress));
+    return token;
+  }
+
+  @VisibleForTesting
+  public void cancelTokenForAppCollector(
+      AppLevelTimelineCollector appCollector) throws IOException {
+    if (appCollector.getDelegationTokenForApp() != null) {
+      tokenMgrService.cancelToken(appCollector.getDelegationTokenForApp(),
+          appCollector.getAppUser());
+    }
+  }
+
   @Override
   protected void doPostPut(ApplicationId appId, TimelineCollector collector) {
     try {
       // Get context info from NM
       updateTimelineCollectorContext(appId, collector);
+      // Generate token for app collector.
+      org.apache.hadoop.yarn.api.records.Token token = null;
+      if (UserGroupInformation.isSecurityEnabled() &&
+          collector instanceof AppLevelTimelineCollector) {
+        AppLevelTimelineCollector appCollector =
+            (AppLevelTimelineCollector)collector;
+        Token<TimelineDelegationTokenIdentifier> timelineToken =
+            generateTokenForAppCollector(appCollector.getAppUser());
+        appCollector.setDelegationTokenForApp(timelineToken);
+        token = org.apache.hadoop.yarn.api.records.Token.newInstance(
+            timelineToken.getIdentifier(), timelineToken.getKind().toString(),
+            timelineToken.getPassword(), timelineToken.getService().toString());
+      }
       // Report to NM if a new collector is added.
-      reportNewCollectorToNM(appId);
+      reportNewCollectorToNM(appId, token);
     } catch (YarnException | IOException e) {
      // throw an exception here as the collector cannot be used if we
      // failed to communicate with the NM
       LOG.error("Failed to communicate with NM Collector Service for " + appId);
@@ -136,6 +188,18 @@ public class NodeTimelineCollectorManager extends TimelineCollectorManager {
     }
   }
 
+  @Override
+  protected void postRemove(ApplicationId appId, TimelineCollector collector) {
+    if (collector instanceof AppLevelTimelineCollector) {
+      try {
+        cancelTokenForAppCollector((AppLevelTimelineCollector)collector);
+      } catch (IOException e) {
+        LOG.warn("Failed to cancel token for app collector with appId " +
+            appId, e);
+      }
+    }
+  }
+
   /**
    * Launch the REST web server for this collector manager.
    */
@@ -180,11 +244,12 @@ public class NodeTimelineCollectorManager extends TimelineCollectorManager {
         timelineRestServerBindAddress);
   }
 
-  private void reportNewCollectorToNM(ApplicationId appId)
+  private void reportNewCollectorToNM(ApplicationId appId,
+      org.apache.hadoop.yarn.api.records.Token token)
       throws YarnException, IOException {
     ReportNewCollectorInfoRequest request =
         ReportNewCollectorInfoRequest.newInstance(appId,
-            this.timelineRestServerBindAddress);
+            this.timelineRestServerBindAddress, token);
     LOG.info("Report a new collector for application: " + appId +
         " to the NM Collector Service.");
     getNMCollectorService().reportNewCollectorInfo(request);
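
A minimal sketch of the token lifecycle the two @VisibleForTesting methods
above expose (the manager instance name "mgr", the user string, and
"appCollector" are illustrative, not from the patch):

    // Issue a delegation token on behalf of the app user; its service
    // field is set to the collector's REST server bind address.
    Token<TimelineDelegationTokenIdentifier> t =
        mgr.generateTokenForAppCollector("appUser");
    // The token travels to the NM inside ReportNewCollectorInfoRequest
    // (see reportNewCollectorToNM above) and is cancelled again when the
    // app-level collector is removed:
    mgr.cancelTokenForAppCollector(appCollector);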

http://git-wip-us.apache.org/repos/asf/hadoop/blob/7594d1de/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/collector/PerNodeTimelineCollectorsAuxService.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/collector/PerNodeTimelineCollectorsAuxService.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/collector/PerNodeTimelineCollectorsAuxService.java
index d4cde64..66f9aab 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/collector/PerNodeTimelineCollectorsAuxService.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/collector/PerNodeTimelineCollectorsAuxService.java
@@ -114,11 +114,12 @@ public class PerNodeTimelineCollectorsAuxService extends AuxiliaryService {
    * exists, no new service is created.
    *
    * @param appId Application Id to be added.
+   * @param user Application Master container user.
    * @return whether it was added successfully
    */
-  public boolean addApplication(ApplicationId appId) {
+  public boolean addApplication(ApplicationId appId, String user) {
     AppLevelTimelineCollector collector =
-        new AppLevelTimelineCollectorWithAgg(appId);
+        new AppLevelTimelineCollectorWithAgg(appId, user);
     return (collectorManager.putIfAbsent(appId, collector)
         == collector);
   }
@@ -147,7 +148,7 @@ public class PerNodeTimelineCollectorsAuxService extends AuxiliaryService {
     if (context.getContainerType() == ContainerType.APPLICATION_MASTER) {
       ApplicationId appId = context.getContainerId().
           getApplicationAttemptId().getApplicationId();
-      addApplication(appId);
+      addApplication(appId, context.getUser());
     }
   }
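
A short sketch of the call path this change wires up, assuming an aux
service instance "auxService" and a started-container context "context"
(both names illustrative):

    // When an AM container starts, register a per-app collector owned by
    // the container's user.
    ApplicationId appId = context.getContainerId()
        .getApplicationAttemptId().getApplicationId();
    auxService.addApplication(appId, context.getUser());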
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/7594d1de/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/security/TimelineV2DelegationTokenSecretManagerService.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/security/TimelineV2DelegationTokenSecretManagerService.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/security/TimelineV2DelegationTokenSecretManagerService.java
index eef8436..de7db58 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/security/TimelineV2DelegationTokenSecretManagerService.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/security/TimelineV2DelegationTokenSecretManagerService.java
@@ -18,8 +18,13 @@
 
 package org.apache.hadoop.yarn.server.timelineservice.security;
 
+import java.io.IOException;
+
 import org.apache.hadoop.classification.InterfaceAudience.Private;
 import org.apache.hadoop.classification.InterfaceStability.Unstable;
+import org.apache.hadoop.io.Text;
+import org.apache.hadoop.security.UserGroupInformation;
+import org.apache.hadoop.security.token.Token;
 import org.apache.hadoop.security.token.delegation.AbstractDelegationTokenSecretManager;
 import org.apache.hadoop.yarn.security.client.TimelineDelegationTokenIdentifier;
 import org.apache.hadoop.yarn.server.timeline.security.TimelineDelgationTokenSecretManagerService;
@@ -43,6 +48,17 @@ public class TimelineV2DelegationTokenSecretManagerService extends
         tokenMaxLifetime, tokenRenewInterval, tokenRemovalScanInterval);
   }
 
+  public Token<TimelineDelegationTokenIdentifier> generateToken(
+      UserGroupInformation ugi, String renewer) {
+    return ((TimelineV2DelegationTokenSecretManager)
+        getTimelineDelegationTokenSecretManager()).generateToken(ugi, renewer);
+  }
+
+  public void cancelToken(Token<TimelineDelegationTokenIdentifier> token,
+      String canceller) throws IOException {
+    getTimelineDelegationTokenSecretManager().cancelToken(token, canceller);
+  }
+
   /**
    * Delegation token secret manager for ATSv2.
    */
@@ -70,6 +86,21 @@ public class TimelineV2DelegationTokenSecretManagerService extends
           delegationTokenRenewInterval, delegationTokenRemoverScanInterval);
     }
 
+    public Token<TimelineDelegationTokenIdentifier> generateToken(
+        UserGroupInformation ugi, String renewer) {
+      Text realUser = null;
+      if (ugi.getRealUser() != null) {
+        realUser = new Text(ugi.getRealUser().getUserName());
+      }
+      TimelineDelegationTokenIdentifier identifier = createIdentifier();
+      identifier.setOwner(new Text(ugi.getUserName()));
+      identifier.setRenewer(new Text(renewer));
+      identifier.setRealUser(realUser);
+      byte[] password = createPassword(identifier);
+      return new Token<TimelineDelegationTokenIdentifier>(identifier.getBytes(),
+          password, identifier.getKind(), null);
+    }
+
     @Override
     public TimelineDelegationTokenIdentifier createIdentifier() {
       return new TimelineDelegationTokenIdentifier();
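
A minimal usage sketch of the two service methods added above (the
instance name "tokenMgr" and the renewer/canceller strings are
illustrative):

    // Issue a token owned by the remote application user.
    Token<TimelineDelegationTokenIdentifier> token =
        tokenMgr.generateToken(
            UserGroupInformation.createRemoteUser("appUser"), "yarn");
    // Later, cancel it on behalf of that user.
    tokenMgr.cancelToken(token, "appUser");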

http://git-wip-us.apache.org/repos/asf/hadoop/blob/7594d1de/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/test/java/org/apache/hadoop/yarn/server/timelineservice/collector/TestNMTimelineCollectorManager.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/test/java/org/apache/hadoop/yarn/server/timelineservice/collector/TestNMTimelineCollectorManager.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/test/java/org/apache/hadoop/yarn/server/timelineservice/collector/TestNMTimelineCollectorManager.java
index a59f8c1..af9acce 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/test/java/org/apache/hadoop/yarn/server/timelineservice/collector/TestNMTimelineCollectorManager.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/test/java/org/apache/hadoop/yarn/server/timelineservice/collector/TestNMTimelineCollectorManager.java
@@ -95,7 +95,7 @@ public class TestNMTimelineCollectorManager {
       Callable<Boolean> task = new Callable<Boolean>() {
         public Boolean call() {
           AppLevelTimelineCollector collector =
-              new AppLevelTimelineCollectorWithAgg(appId);
+              new AppLevelTimelineCollectorWithAgg(appId, "user");
           return (collectorManager.putIfAbsent(appId, collector) == collector);
         }
       };
@@ -126,7 +126,7 @@ public class TestNMTimelineCollectorManager {
       Callable<Boolean> task = new Callable<Boolean>() {
         public Boolean call() {
           AppLevelTimelineCollector collector =
-              new AppLevelTimelineCollectorWithAgg(appId);
+              new AppLevelTimelineCollectorWithAgg(appId, "user");
           boolean successPut =
               (collectorManager.putIfAbsent(appId, collector) == collector);
           return successPut && collectorManager.remove(appId);




[02/50] [abbrv] hadoop git commit: YARN-6027. Support fromid(offset) filter for /flows API (Rohith Sharma K S via Varun Saxena)

Posted by st...@apache.org.
http://git-wip-us.apache.org/repos/asf/hadoop/blob/8bb26465/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/reader/TimelineReaderServer.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/reader/TimelineReaderServer.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/reader/TimelineReaderServer.java
index d7eff32..6cdf937 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/reader/TimelineReaderServer.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/reader/TimelineReaderServer.java
@@ -185,7 +185,7 @@ public class TimelineReaderServer extends CompositeService {
   }
 
   @VisibleForTesting
-  int getWebServerPort() {
+  public int getWebServerPort() {
     return readerWebServer.getConnectorAddress(0).getPort();
   }
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/8bb26465/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/reader/TimelineReaderUtils.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/reader/TimelineReaderUtils.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/reader/TimelineReaderUtils.java
index c93c631..8f92433 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/reader/TimelineReaderUtils.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/reader/TimelineReaderUtils.java
@@ -24,14 +24,30 @@ import java.util.List;
 
 import org.apache.commons.lang.StringUtils;
 
+import com.google.common.annotations.VisibleForTesting;
+
 /**
  * Set of utility methods to be used across timeline reader.
  */
-final class TimelineReaderUtils {
+public final class TimelineReaderUtils {
   private TimelineReaderUtils() {
   }
 
   /**
+   * Default delimiter for joining strings.
+   */
+  @VisibleForTesting
+  public static final char DEFAULT_DELIMITER_CHAR = '!';
+
+  /**
+   * Default escape character used for joining strings.
+   */
+  @VisibleForTesting
+  public static final char DEFAULT_ESCAPE_CHAR = '*';
+
+  public static final String FROMID_KEY = "FROM_ID";
+
+  /**
    * Split the passed string along the passed delimiter character while looking
    * for the escape char to interpret the split parts correctly. For delimiter or
    * escape character to be interpreted as part of the string, they have to be
@@ -168,4 +184,14 @@ final class TimelineReaderUtils {
     // Join the strings after they have been escaped.
     return StringUtils.join(strs, delimiterChar);
   }
+
+  public static List<String> split(final String str)
+      throws IllegalArgumentException {
+    return split(str, DEFAULT_DELIMITER_CHAR, DEFAULT_ESCAPE_CHAR);
+  }
+
+  public static String joinAndEscapeStrings(final String[] strs) {
+    return joinAndEscapeStrings(strs, DEFAULT_DELIMITER_CHAR,
+        DEFAULT_ESCAPE_CHAR);
+  }
 }
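
A round-trip sketch of the new convenience overloads, assuming escaping
behaves as the javadoc above describes (the expected values shown in
comments are illustrative):

    // '!' is the default delimiter and '*' the default escape character,
    // so a literal '!' inside a part is escaped when joining.
    String joined = TimelineReaderUtils.joinAndEscapeStrings(
        new String[] {"cluster1", "user1", "flow!name"});
    // expected: "cluster1!user1!flow*!name"
    List<String> parts = TimelineReaderUtils.split(joined);
    // expected: ["cluster1", "user1", "flow!name"]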

http://git-wip-us.apache.org/repos/asf/hadoop/blob/8bb26465/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/reader/TimelineReaderWebServices.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/reader/TimelineReaderWebServices.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/reader/TimelineReaderWebServices.java
index 7133528..290c255 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/reader/TimelineReaderWebServices.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/reader/TimelineReaderWebServices.java
@@ -1334,6 +1334,10 @@ public class TimelineReaderWebServices {
    *     2 dates.
    *     "daterange=20150711-" returns flows active on and after 20150711.
    *     "daterange=-20150711" returns flows active on and before 20150711.
+   * @param fromId If specified, retrieve the next set of flows starting from
+   *     the given fromId, inclusive of the entity with that id. The fromId
+   *     value should be taken from the FROM_ID info key of a flow entity in
+   *     an earlier response.
    *
   * @return If successful, an HTTP 200 (OK) response containing a JSON
   *     representation of a set of <cite>FlowActivityEntity</cite> instances
   *     is returned.<br>
@@ -1350,8 +1354,9 @@ public class TimelineReaderWebServices {
       @Context HttpServletRequest req,
       @Context HttpServletResponse res,
       @QueryParam("limit") String limit,
-      @QueryParam("daterange") String dateRange) {
-    return getFlows(req, res, null, limit, dateRange);
+      @QueryParam("daterange") String dateRange,
+      @QueryParam("fromid") String fromId) {
+    return getFlows(req, res, null, limit, dateRange, fromId);
   }
 
   /**
@@ -1380,6 +1385,10 @@ public class TimelineReaderWebServices {
    *     2 dates.
    *     "daterange=20150711-" returns flows active on and after 20150711.
    *     "daterange=-20150711" returns flows active on and before 20150711.
+   * @param fromId If specified, retrieve the next set of flows starting from
+   *     the given fromId, inclusive of the entity with that id. The fromId
+   *     value should be taken from the FROM_ID info key of a flow entity in
+   *     an earlier response.
    *
   * @return If successful, an HTTP 200 (OK) response containing a JSON
   *     representation of a set of <cite>FlowActivityEntity</cite> instances
   *     is returned.<br>
@@ -1397,7 +1406,8 @@ public class TimelineReaderWebServices {
       @Context HttpServletResponse res,
       @PathParam("clusterid") String clusterId,
       @QueryParam("limit") String limit,
-      @QueryParam("daterange") String dateRange) {
+      @QueryParam("daterange") String dateRange,
+      @QueryParam("fromid") String fromId) {
     String url = req.getRequestURI() +
         (req.getQueryString() == null ? "" :
             QUERY_STRING_SEP + req.getQueryString());
@@ -1414,7 +1424,7 @@ public class TimelineReaderWebServices {
       TimelineEntityFilters entityFilters =
           TimelineReaderWebServicesUtils.createTimelineEntityFilters(
               limit, null, null, null, null, null, null, null, null, null,
-              null);
+              fromId);
       entityFilters.setCreatedTimeBegin(range.dateStart);
       entityFilters.setCreatedTimeEnd(range.dateEnd);
       entities = timelineReaderManager.getEntities(
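
A pagination sketch using the new query parameter (the fromid value is
illustrative; in practice it is read from the FROM_ID info key of the
last entity on the previous page):

    GET /ws/v2/timeline/clusters/cluster1/flows?limit=2
        -> returns two flows; read info["FROM_ID"] of the last one
    GET /ws/v2/timeline/clusters/cluster1/flows?limit=2&fromid=<that value>
        -> returns the next page, starting at (and including) that flow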

http://git-wip-us.apache.org/repos/asf/hadoop/blob/8bb26465/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/reader/TimelineUIDConverter.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/reader/TimelineUIDConverter.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/reader/TimelineUIDConverter.java
index 780cfd0..52e24e1 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/reader/TimelineUIDConverter.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/reader/TimelineUIDConverter.java
@@ -195,39 +195,29 @@ enum TimelineUIDConverter {
   };
 
   /**
-   * Delimiter used for UID.
-   */
-  public static final char UID_DELIMITER_CHAR = '!';
-
-  /**
-   * Escape Character used if delimiter or escape character itself is part of
-   * different components of UID.
-   */
-  public static final char UID_ESCAPE_CHAR = '*';
-
-  /**
-   * Split UID using {@link #UID_DELIMITER_CHAR} and {@link #UID_ESCAPE_CHAR}.
+   * Split UID using {@link TimelineReaderUtils#DEFAULT_DELIMITER_CHAR} and
+   * {@link TimelineReaderUtils#DEFAULT_ESCAPE_CHAR}.
    * @param uid UID to be split.
    * @return a list of different parts of UID split across delimiter.
    * @throws IllegalArgumentException if UID is not properly escaped.
    */
   private static List<String> splitUID(String uid)
       throws IllegalArgumentException {
-    return TimelineReaderUtils.split(uid, UID_DELIMITER_CHAR, UID_ESCAPE_CHAR);
+    return TimelineReaderUtils.split(uid);
   }
 
   /**
-   * Join different parts of UID delimited by {@link #UID_DELIMITER_CHAR} with
-   * delimiter and escape character escaped using {@link #UID_ESCAPE_CHAR} if
-   * UID parts contain them.
+   * Join different parts of UID delimited by
+   * {@link TimelineReaderUtils#DEFAULT_DELIMITER_CHAR} with delimiter and
+   * escape character escaped using
+   * {@link TimelineReaderUtils#DEFAULT_ESCAPE_CHAR} if UID parts contain them.
    * @param parts an array of UID parts to be joined.
    * @return a string joined using the delimiter with escape and delimiter
-   *     characters escaped if they are part of the string parts to be joined.
-   *     Returns null if one of the parts is null.
+   *         characters escaped if they are part of the string parts to be
+   *         joined. Returns null if one of the parts is null.
    */
   private static String joinAndEscapeUIDParts(String[] parts) {
-    return TimelineReaderUtils.joinAndEscapeStrings(parts, UID_DELIMITER_CHAR,
-        UID_ESCAPE_CHAR);
+    return TimelineReaderUtils.joinAndEscapeStrings(parts);
   }
 
   /**
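
For reference, a decode sketch using the defaults now shared through
TimelineReaderUtils (the UID literal is illustrative):

    // A flow-run UID is cluster!user!flow name!run id, '!'-delimited,
    // with '*' escaping handled by TimelineReaderUtils.split().
    TimelineReaderContext context =
        TimelineUIDConverter.FLOWRUN_UID.decodeUID(
            "cluster1!user1!flow_name!1002345678919");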




[03/50] [abbrv] hadoop git commit: YARN-6027. Support fromid(offset) filter for /flows API (Rohith Sharma K S via Varun Saxena)

Posted by st...@apache.org.
YARN-6027. Support fromid(offset) filter for /flows API (Rohith Sharma K S via Varun Saxena)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/8bb26465
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/8bb26465
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/8bb26465

Branch: refs/heads/HADOOP-13345
Commit: 8bb26465956a37d7398818bc0919772e12953725
Parents: cf30b3b
Author: Varun Saxena <va...@apache.org>
Authored: Thu Mar 2 01:49:34 2017 +0530
Committer: Varun Saxena <va...@apache.org>
Committed: Wed Aug 30 11:29:52 2017 +0530

----------------------------------------------------------------------
 .../AbstractTimelineReaderHBaseTestBase.java    | 176 ++++++++
 ...stTimelineReaderWebServicesHBaseStorage.java | 424 ++++++++-----------
 .../storage/common/KeyConverterToString.java    |  38 ++
 .../storage/flow/FlowActivityRowKey.java        |  59 ++-
 .../reader/FlowActivityEntityReader.java        |  28 +-
 .../storage/common/TestRowKeys.java             |  21 +
 .../reader/TimelineReaderServer.java            |   2 +-
 .../reader/TimelineReaderUtils.java             |  28 +-
 .../reader/TimelineReaderWebServices.java       |  18 +-
 .../reader/TimelineUIDConverter.java            |  30 +-
 10 files changed, 549 insertions(+), 275 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/8bb26465/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase-tests/src/test/java/org/apache/hadoop/yarn/server/timelineservice/reader/AbstractTimelineReaderHBaseTestBase.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase-tests/src/test/java/org/apache/hadoop/yarn/server/timelineservice/reader/AbstractTimelineReaderHBaseTestBase.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase-tests/src/test/java/org/apache/hadoop/yarn/server/timelineservice/reader/AbstractTimelineReaderHBaseTestBase.java
new file mode 100644
index 0000000..7853c94
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase-tests/src/test/java/org/apache/hadoop/yarn/server/timelineservice/reader/AbstractTimelineReaderHBaseTestBase.java
@@ -0,0 +1,176 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.server.timelineservice.reader;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertNotNull;
+import static org.junit.Assert.assertTrue;
+
+import java.io.IOException;
+import java.lang.reflect.UndeclaredThrowableException;
+import java.net.HttpURLConnection;
+import java.net.URI;
+import java.net.URL;
+import java.util.List;
+
+import javax.ws.rs.core.MediaType;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hbase.HBaseTestingUtility;
+import org.apache.hadoop.yarn.api.records.timelineservice.FlowActivityEntity;
+import org.apache.hadoop.yarn.conf.YarnConfiguration;
+import org.apache.hadoop.yarn.server.timelineservice.storage.DataGeneratorForTest;
+import org.apache.hadoop.yarn.webapp.YarnJacksonJaxbJsonProvider;
+import org.junit.Assert;
+
+import com.sun.jersey.api.client.Client;
+import com.sun.jersey.api.client.ClientResponse;
+import com.sun.jersey.api.client.ClientResponse.Status;
+import com.sun.jersey.api.client.GenericType;
+import com.sun.jersey.api.client.config.ClientConfig;
+import com.sun.jersey.api.client.config.DefaultClientConfig;
+import com.sun.jersey.client.urlconnection.HttpURLConnectionFactory;
+import com.sun.jersey.client.urlconnection.URLConnectionClientHandler;
+
+/**
+ * Test Base for TimelineReaderServer HBase tests.
+ */
+public abstract class AbstractTimelineReaderHBaseTestBase {
+  private static int serverPort;
+  private static TimelineReaderServer server;
+  private static HBaseTestingUtility util;
+
+  public static void setup() throws Exception {
+    util = new HBaseTestingUtility();
+    Configuration conf = util.getConfiguration();
+    conf.setInt("hfile.format.version", 3);
+    util.startMiniCluster();
+    DataGeneratorForTest.createSchema(util.getConfiguration());
+  }
+
+  public static void tearDown() throws Exception {
+    if (server != null) {
+      server.stop();
+      server = null;
+    }
+    if (util != null) {
+      util.shutdownMiniCluster();
+    }
+  }
+
+  protected static void initialize() throws Exception {
+    try {
+      Configuration config = util.getConfiguration();
+      config.setBoolean(YarnConfiguration.TIMELINE_SERVICE_ENABLED, true);
+      config.setFloat(YarnConfiguration.TIMELINE_SERVICE_VERSION, 2.0f);
+      config.set(YarnConfiguration.TIMELINE_SERVICE_WEBAPP_ADDRESS,
+          "localhost:0");
+      config.set(YarnConfiguration.RM_CLUSTER_ID, "cluster1");
+      config.set(YarnConfiguration.TIMELINE_SERVICE_READER_CLASS,
+          "org.apache.hadoop.yarn.server.timelineservice.storage."
+              + "HBaseTimelineReaderImpl");
+      config.setInt("hfile.format.version", 3);
+      server = new TimelineReaderServer() {
+        @Override
+        protected void setupOptions(Configuration conf) {
+          // The parent code tries to use HttpServer2 from this version of
+          // Hadoop, but the tests are loading in HttpServer2 from
+          // ${hbase-compatible-hadoop.version}. This version uses Jetty 9
+          // while ${hbase-compatible-hadoop.version} uses Jetty 6, and there
+          // are many differences, including classnames and packages.
+          // We do nothing here, so that we don't cause a NoSuchMethodError.
+          // Once ${hbase-compatible-hadoop.version} is changed to Hadoop 3,
+          // we should be able to remove this @Override.
+        }
+      };
+      server.init(config);
+      server.start();
+      serverPort = server.getWebServerPort();
+    } catch (Exception e) {
+      Assert.fail("Web server failed to start");
+    }
+  }
+
+  protected Client createClient() {
+    ClientConfig cfg = new DefaultClientConfig();
+    cfg.getClasses().add(YarnJacksonJaxbJsonProvider.class);
+    return new Client(
+        new URLConnectionClientHandler(new DummyURLConnectionFactory()), cfg);
+  }
+
+  protected ClientResponse getResponse(Client client, URI uri)
+      throws Exception {
+    ClientResponse resp =
+        client.resource(uri).accept(MediaType.APPLICATION_JSON)
+            .type(MediaType.APPLICATION_JSON).get(ClientResponse.class);
+    if (resp == null || resp.getStatusInfo()
+        .getStatusCode() != ClientResponse.Status.OK.getStatusCode()) {
+      String msg = "";
+      if (resp != null) {
+        msg = String.valueOf(resp.getStatusInfo().getStatusCode());
+      }
+      throw new IOException(
+          "Incorrect response from timeline reader. " + "Status=" + msg);
+    }
+    return resp;
+  }
+
+  protected void verifyHttpResponse(Client client, URI uri, Status status) {
+    ClientResponse resp =
+        client.resource(uri).accept(MediaType.APPLICATION_JSON)
+            .type(MediaType.APPLICATION_JSON).get(ClientResponse.class);
+    assertNotNull(resp);
+    assertTrue("Response from server should have been " + status,
+        resp.getStatusInfo().getStatusCode() == status.getStatusCode());
+    System.out.println("Response is: " + resp.getEntity(String.class));
+  }
+
+  protected List<FlowActivityEntity> verifyFlowEntites(Client client, URI uri,
+      int noOfEntities) throws Exception {
+    ClientResponse resp = getResponse(client, uri);
+    List<FlowActivityEntity> entities =
+        resp.getEntity(new GenericType<List<FlowActivityEntity>>() {
+        });
+    assertNotNull(entities);
+    assertEquals(noOfEntities, entities.size());
+    return entities;
+  }
+
+  protected static class DummyURLConnectionFactory
+      implements HttpURLConnectionFactory {
+
+    @Override
+    public HttpURLConnection getHttpURLConnection(final URL url)
+        throws IOException {
+      try {
+        return (HttpURLConnection) url.openConnection();
+      } catch (UndeclaredThrowableException e) {
+        throw new IOException(e.getCause());
+      }
+    }
+  }
+
+  protected static HBaseTestingUtility getHBaseTestingUtility() {
+    return util;
+  }
+
+  public static int getServerPort() {
+    return serverPort;
+  }
+}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/8bb26465/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase-tests/src/test/java/org/apache/hadoop/yarn/server/timelineservice/reader/TestTimelineReaderWebServicesHBaseStorage.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase-tests/src/test/java/org/apache/hadoop/yarn/server/timelineservice/reader/TestTimelineReaderWebServicesHBaseStorage.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase-tests/src/test/java/org/apache/hadoop/yarn/server/timelineservice/reader/TestTimelineReaderWebServicesHBaseStorage.java
index 9d4aa95..d0f674f 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase-tests/src/test/java/org/apache/hadoop/yarn/server/timelineservice/reader/TestTimelineReaderWebServicesHBaseStorage.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase-tests/src/test/java/org/apache/hadoop/yarn/server/timelineservice/reader/TestTimelineReaderWebServicesHBaseStorage.java
@@ -24,10 +24,7 @@ import static org.junit.Assert.assertTrue;
 import static org.junit.Assert.fail;
 
 import java.io.IOException;
-import java.lang.reflect.UndeclaredThrowableException;
-import java.net.HttpURLConnection;
 import java.net.URI;
-import java.net.URL;
 import java.text.DateFormat;
 import java.util.ArrayList;
 import java.util.HashMap;
@@ -39,7 +36,6 @@ import java.util.Set;
 import javax.ws.rs.core.MediaType;
 
 import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hbase.HBaseTestingUtility;
 import org.apache.hadoop.yarn.api.records.ApplicationId;
 import org.apache.hadoop.yarn.api.records.timelineservice.ApplicationEntity;
 import org.apache.hadoop.yarn.api.records.timelineservice.FlowActivityEntity;
@@ -50,17 +46,11 @@ import org.apache.hadoop.yarn.api.records.timelineservice.TimelineEntityType;
 import org.apache.hadoop.yarn.api.records.timelineservice.TimelineEvent;
 import org.apache.hadoop.yarn.api.records.timelineservice.TimelineMetric;
 import org.apache.hadoop.yarn.api.records.timelineservice.TimelineMetric.Type;
-import org.apache.hadoop.yarn.conf.YarnConfiguration;
 import org.apache.hadoop.yarn.server.metrics.ApplicationMetricsConstants;
-import org.apache.hadoop.yarn.server.timelineservice.storage.DataGeneratorForTest;
 import org.apache.hadoop.yarn.server.timelineservice.storage.HBaseTimelineWriterImpl;
 import org.apache.hadoop.yarn.server.timelineservice.storage.common.HBaseTimelineStorageUtils;
 import org.apache.hadoop.yarn.server.utils.BuilderUtils;
-import org.apache.hadoop.yarn.webapp.YarnJacksonJaxbJsonProvider;
-import org.junit.After;
 import org.junit.AfterClass;
-import org.junit.Assert;
-import org.junit.Before;
 import org.junit.BeforeClass;
 import org.junit.Test;
 
@@ -70,27 +60,26 @@ import com.sun.jersey.api.client.Client;
 import com.sun.jersey.api.client.ClientResponse;
 import com.sun.jersey.api.client.ClientResponse.Status;
 import com.sun.jersey.api.client.GenericType;
-import com.sun.jersey.api.client.config.ClientConfig;
-import com.sun.jersey.api.client.config.DefaultClientConfig;
-import com.sun.jersey.client.urlconnection.HttpURLConnectionFactory;
-import com.sun.jersey.client.urlconnection.URLConnectionClientHandler;
-
-public class TestTimelineReaderWebServicesHBaseStorage {
-  private int serverPort;
-  private TimelineReaderServer server;
-  private static HBaseTestingUtility util;
+
+/**
+ * Tests the TimelineReader web service REST APIs.
+ */
+public class TestTimelineReaderWebServicesHBaseStorage
+    extends AbstractTimelineReaderHBaseTestBase {
   private static long ts = System.currentTimeMillis();
   private static long dayTs =
       HBaseTimelineStorageUtils.getTopOfTheDayTimestamp(ts);
 
   @BeforeClass
-  public static void setup() throws Exception {
-    util = new HBaseTestingUtility();
-    Configuration conf = util.getConfiguration();
-    conf.setInt("hfile.format.version", 3);
-    util.startMiniCluster();
-    DataGeneratorForTest.createSchema(conf);
+  public static void setupBeforeClass() throws Exception {
+    setup();
     loadData();
+    initialize();
+  }
+
+  @AfterClass
+  public static void tearDownAfterClass() throws Exception {
+    tearDown();
   }
 
   private static void loadData() throws Exception {
@@ -344,7 +333,7 @@ public class TestTimelineReaderWebServicesHBaseStorage {
     }
 
     HBaseTimelineWriterImpl hbi = null;
-    Configuration c1 = util.getConfiguration();
+    Configuration c1 = getHBaseTestingUtility().getConfiguration();
     try {
       hbi = new HBaseTimelineWriterImpl();
       hbi.init(c1);
@@ -393,84 +382,6 @@ public class TestTimelineReaderWebServicesHBaseStorage {
     }
   }
 
-  @AfterClass
-  public static void tearDown() throws Exception {
-    util.shutdownMiniCluster();
-  }
-
-  @Before
-  public void init() throws Exception {
-    try {
-      Configuration config = util.getConfiguration();
-      config.setBoolean(YarnConfiguration.TIMELINE_SERVICE_ENABLED, true);
-      config.setFloat(YarnConfiguration.TIMELINE_SERVICE_VERSION, 2.0f);
-      config.set(YarnConfiguration.TIMELINE_SERVICE_WEBAPP_ADDRESS,
-          "localhost:0");
-      config.set(YarnConfiguration.RM_CLUSTER_ID, "cluster1");
-      config.set(YarnConfiguration.TIMELINE_SERVICE_READER_CLASS,
-          "org.apache.hadoop.yarn.server.timelineservice.storage." +
-              "HBaseTimelineReaderImpl");
-      config.setInt("hfile.format.version", 3);
-      server = new TimelineReaderServer() {
-        @Override
-        protected void setupOptions(Configuration conf) {
-          // The parent code tries to use HttpServer2 from this version of
-          // Hadoop, but the tests are loading in HttpServer2 from
-          // ${hbase-compatible-hadoop.version}.  This version uses Jetty 9
-          // while ${hbase-compatible-hadoop.version} uses Jetty 6, and there
-          // are many differences, including classnames and packages.
-          // We do nothing here, so that we don't cause a NoSuchMethodError.
-          // Once ${hbase-compatible-hadoop.version} is changed to Hadoop 3,
-          // we should be able to remove this @Override.
-        }
-      };
-      server.init(config);
-      server.start();
-      serverPort = server.getWebServerPort();
-    } catch (Exception e) {
-      Assert.fail("Web server failed to start");
-    }
-  }
-
-  private static Client createClient() {
-    ClientConfig cfg = new DefaultClientConfig();
-    cfg.getClasses().add(YarnJacksonJaxbJsonProvider.class);
-    return new Client(new URLConnectionClientHandler(
-        new DummyURLConnectionFactory()), cfg);
-  }
-
-  private static ClientResponse getResponse(Client client, URI uri)
-      throws Exception {
-    ClientResponse resp =
-        client.resource(uri).accept(MediaType.APPLICATION_JSON)
-        .type(MediaType.APPLICATION_JSON).get(ClientResponse.class);
-    if (resp == null ||
-        resp.getStatusInfo().getStatusCode() !=
-            ClientResponse.Status.OK.getStatusCode()) {
-      String msg = "";
-      if (resp != null) {
-        msg = String.valueOf(resp.getStatusInfo().getStatusCode());
-      }
-      throw new IOException("Incorrect response from timeline reader. " +
-          "Status=" + msg);
-    }
-    return resp;
-  }
-
-  private static class DummyURLConnectionFactory
-      implements HttpURLConnectionFactory {
-
-    @Override
-    public HttpURLConnection getHttpURLConnection(final URL url)
-        throws IOException {
-      try {
-        return (HttpURLConnection)url.openConnection();
-      } catch (UndeclaredThrowableException e) {
-        throw new IOException(e.getCause());
-      }
-    }
-  }
-
   private static TimelineEntity newEntity(String type, String id) {
     TimelineEntity entity = new TimelineEntity();
     entity.setIdentifier(new TimelineEntity.Identifier(type, id));
@@ -512,22 +423,11 @@ public class TestTimelineReaderWebServicesHBaseStorage {
     return false;
   }
 
-  private static void verifyHttpResponse(Client client, URI uri,
-      Status status) {
-    ClientResponse resp =
-        client.resource(uri).accept(MediaType.APPLICATION_JSON)
-        .type(MediaType.APPLICATION_JSON).get(ClientResponse.class);
-    assertNotNull(resp);
-    assertTrue("Response from server should have been " + status,
-        resp.getStatusInfo().getStatusCode() == status.getStatusCode());
-    System.out.println("Response is: " + resp.getEntity(String.class));
-  }
-
   @Test
   public void testGetFlowRun() throws Exception {
     Client client = createClient();
     try {
-      URI uri = URI.create("http://localhost:" + serverPort + "/ws/v2/" +
+      URI uri = URI.create("http://localhost:" + getServerPort() + "/ws/v2/" +
           "timeline/clusters/cluster1/users/user1/flows/flow_name/runs/" +
           "1002345678919");
       ClientResponse resp = getResponse(client, uri);
@@ -548,7 +448,7 @@ public class TestTimelineReaderWebServicesHBaseStorage {
       }
 
       // Query without specifying cluster ID.
-      uri = URI.create("http://localhost:" + serverPort + "/ws/v2/" +
+      uri = URI.create("http://localhost:" + getServerPort() + "/ws/v2/" +
           "timeline/users/user1/flows/flow_name/runs/1002345678919");
       resp = getResponse(client, uri);
       entity = resp.getEntity(FlowRunEntity.class);
@@ -573,7 +473,7 @@ public class TestTimelineReaderWebServicesHBaseStorage {
   public void testGetFlowRuns() throws Exception {
     Client client = createClient();
     try {
-      URI uri = URI.create("http://localhost:" + serverPort + "/ws/v2/" +
+      URI uri = URI.create("http://localhost:" + getServerPort() + "/ws/v2/" +
           "timeline/clusters/cluster1/users/user1/flows/flow_name/runs");
       ClientResponse resp = getResponse(client, uri);
       Set<FlowRunEntity> entities =
@@ -593,8 +493,9 @@ public class TestTimelineReaderWebServicesHBaseStorage {
         assertEquals(0, entity.getMetrics().size());
       }
 
-      uri = URI.create("http://localhost:" + serverPort + "/ws/v2/timeline/" +
-          "clusters/cluster1/users/user1/flows/flow_name/runs?limit=1");
+      uri =
+          URI.create("http://localhost:" + getServerPort() + "/ws/v2/timeline/"
+              + "clusters/cluster1/users/user1/flows/flow_name/runs?limit=1");
       resp = getResponse(client, uri);
       entities = resp.getEntity(new GenericType<Set<FlowRunEntity>>(){});
       assertEquals(MediaType.APPLICATION_JSON_TYPE + "; charset=utf-8",
@@ -609,7 +510,7 @@ public class TestTimelineReaderWebServicesHBaseStorage {
         assertEquals(0, entity.getMetrics().size());
       }
 
-      uri = URI.create("http://localhost:" + serverPort + "/ws/v2/" +
+      uri = URI.create("http://localhost:" + getServerPort() + "/ws/v2/" +
           "timeline/clusters/cluster1/users/user1/flows/flow_name/runs?" +
           "createdtimestart=1425016501030");
       resp = getResponse(client, uri);
@@ -626,7 +527,7 @@ public class TestTimelineReaderWebServicesHBaseStorage {
         assertEquals(0, entity.getMetrics().size());
       }
 
-      uri = URI.create("http://localhost:" + serverPort + "/ws/v2/" +
+      uri = URI.create("http://localhost:" + getServerPort() + "/ws/v2/" +
           "timeline/clusters/cluster1/users/user1/flows/flow_name/runs?" +
           "createdtimestart=1425016500999&createdtimeend=1425016501035");
       resp = getResponse(client, uri);
@@ -646,7 +547,7 @@ public class TestTimelineReaderWebServicesHBaseStorage {
         assertEquals(0, entity.getMetrics().size());
       }
 
-      uri = URI.create("http://localhost:" + serverPort + "/ws/v2/" +
+      uri = URI.create("http://localhost:" + getServerPort() + "/ws/v2/" +
           "timeline/clusters/cluster1/users/user1/flows/flow_name/runs?" +
           "createdtimeend=1425016501030");
       resp = getResponse(client, uri);
@@ -663,7 +564,7 @@ public class TestTimelineReaderWebServicesHBaseStorage {
         assertEquals(0, entity.getMetrics().size());
       }
 
-      uri = URI.create("http://localhost:" + serverPort + "/ws/v2/" +
+      uri = URI.create("http://localhost:" + getServerPort() + "/ws/v2/" +
           "timeline/clusters/cluster1/users/user1/flows/flow_name/runs?" +
           "fields=metrics");
       resp = getResponse(client, uri);
@@ -686,7 +587,7 @@ public class TestTimelineReaderWebServicesHBaseStorage {
 
       // fields as CONFIGS will lead to a HTTP 400 as it makes no sense for
       // flow runs.
-      uri = URI.create("http://localhost:" + serverPort + "/ws/v2/" +
+      uri = URI.create("http://localhost:" + getServerPort() + "/ws/v2/" +
           "timeline/clusters/cluster1/users/user1/flows/flow_name/runs?" +
           "fields=CONFIGS");
       verifyHttpResponse(client, uri, Status.BAD_REQUEST);
@@ -699,7 +600,7 @@ public class TestTimelineReaderWebServicesHBaseStorage {
   public void testGetFlowRunsMetricsToRetrieve() throws Exception {
     Client client = createClient();
     try {
-      URI uri = URI.create("http://localhost:" + serverPort + "/ws/v2/" +
+      URI uri = URI.create("http://localhost:" + getServerPort() + "/ws/v2/" +
           "timeline/clusters/cluster1/users/user1/flows/flow_name/runs?" +
           "metricstoretrieve=MAP_,HDFS_");
       ClientResponse resp = getResponse(client, uri);
@@ -719,7 +620,7 @@ public class TestTimelineReaderWebServicesHBaseStorage {
       }
       assertEquals(3, metricCnt);
 
-      uri = URI.create("http://localhost:" + serverPort + "/ws/v2/" +
+      uri = URI.create("http://localhost:" + getServerPort() + "/ws/v2/" +
           "timeline/clusters/cluster1/users/user1/flows/flow_name/runs?" +
           "metricstoretrieve=!(MAP_,HDFS_)");
       resp = getResponse(client, uri);
@@ -746,7 +647,7 @@ public class TestTimelineReaderWebServicesHBaseStorage {
     Client client = createClient();
     try {
       // Query all flows.
-      URI uri = URI.create("http://localhost:" + serverPort + "/ws/v2/" +
+      URI uri = URI.create("http://localhost:" + getServerPort() + "/ws/v2/" +
           "timeline/flows");
       ClientResponse resp = getResponse(client, uri);
       Set<FlowActivityEntity> flowEntities =
@@ -772,7 +673,7 @@ public class TestTimelineReaderWebServicesHBaseStorage {
       // Query flowruns based on UID returned in query above.
       List<String> listFlowRunUIDs = new ArrayList<String>();
       for (String flowUID : listFlowUIDs) {
-        uri = URI.create("http://localhost:" + serverPort + "/ws/v2/" +
+        uri = URI.create("http://localhost:" + getServerPort() + "/ws/v2/" +
             "timeline/flow-uid/" + flowUID + "/runs");
         resp = getResponse(client, uri);
         Set<FlowRunEntity> frEntities =
@@ -792,7 +693,7 @@ public class TestTimelineReaderWebServicesHBaseStorage {
 
       // Query single flowrun based on UIDs' returned in query to get flowruns.
       for (String flowRunUID : listFlowRunUIDs) {
-        uri = URI.create("http://localhost:" + serverPort + "/ws/v2/" +
+        uri = URI.create("http://localhost:" + getServerPort() + "/ws/v2/" +
             "timeline/run-uid/" + flowRunUID);
         resp = getResponse(client, uri);
         FlowRunEntity entity = resp.getEntity(FlowRunEntity.class);
@@ -804,7 +705,7 @@ public class TestTimelineReaderWebServicesHBaseStorage {
       for (String flowRunUID : listFlowRunUIDs) {
         TimelineReaderContext context =
             TimelineUIDConverter.FLOWRUN_UID.decodeUID(flowRunUID);
-        uri = URI.create("http://localhost:" + serverPort + "/ws/v2/" +
+        uri = URI.create("http://localhost:" + getServerPort() + "/ws/v2/" +
             "timeline/run-uid/" + flowRunUID + "/apps");
         resp = getResponse(client, uri);
         Set<TimelineEntity> appEntities =
@@ -824,7 +725,7 @@ public class TestTimelineReaderWebServicesHBaseStorage {
 
       // Query single app based on UIDs' returned in query to get apps.
       for (String appUID : listAppUIDs) {
-        uri = URI.create("http://localhost:" + serverPort + "/ws/v2/" +
+        uri = URI.create("http://localhost:" + getServerPort() + "/ws/v2/" +
             "timeline/app-uid/" + appUID);
         resp = getResponse(client, uri);
         TimelineEntity entity = resp.getEntity(TimelineEntity.class);
@@ -837,7 +738,7 @@ public class TestTimelineReaderWebServicesHBaseStorage {
       for (String appUID : listAppUIDs) {
         TimelineReaderContext context =
             TimelineUIDConverter.APPLICATION_UID.decodeUID(appUID);
-        uri = URI.create("http://localhost:" + serverPort + "/ws/v2/" +
+        uri = URI.create("http://localhost:" + getServerPort() + "/ws/v2/" +
             "timeline/app-uid/" + appUID + "/entities/type1");
         resp = getResponse(client, uri);
         Set<TimelineEntity> entities =
@@ -859,39 +760,39 @@ public class TestTimelineReaderWebServicesHBaseStorage {
 
       // Query single entity based on UIDs' returned in query to get entities.
       for (String entityUID : listEntityUIDs) {
-        uri = URI.create("http://localhost:" + serverPort + "/ws/v2/" +
+        uri = URI.create("http://localhost:" + getServerPort() + "/ws/v2/" +
             "timeline/entity-uid/" + entityUID);
         resp = getResponse(client, uri);
         TimelineEntity entity = resp.getEntity(TimelineEntity.class);
         assertNotNull(entity);
       }
 
-      uri = URI.create("http://localhost:" + serverPort + "/ws/v2/" +
+      uri = URI.create("http://localhost:" + getServerPort() + "/ws/v2/" +
           "timeline/flow-uid/dummy:flow/runs");
       verifyHttpResponse(client, uri, Status.BAD_REQUEST);
 
-      uri = URI.create("http://localhost:" + serverPort + "/ws/v2/" +
+      uri = URI.create("http://localhost:" + getServerPort() + "/ws/v2/" +
           "timeline/run-uid/dummy:flowrun");
       verifyHttpResponse(client, uri, Status.BAD_REQUEST);
 
       // Run Id is not a numerical value.
-      uri = URI.create("http://localhost:" + serverPort + "/ws/v2/" +
+      uri = URI.create("http://localhost:" + getServerPort() + "/ws/v2/" +
           "timeline/run-uid/some:dummy:flow:123v456");
       verifyHttpResponse(client, uri, Status.BAD_REQUEST);
 
-      uri = URI.create("http://localhost:" + serverPort + "/ws/v2/" +
+      uri = URI.create("http://localhost:" + getServerPort() + "/ws/v2/" +
           "timeline/run-uid/dummy:flowrun/apps");
       verifyHttpResponse(client, uri, Status.BAD_REQUEST);
 
-      uri = URI.create("http://localhost:" + serverPort + "/ws/v2/" +
+      uri = URI.create("http://localhost:" + getServerPort() + "/ws/v2/" +
           "timeline/app-uid/dummy:app");
       verifyHttpResponse(client, uri, Status.BAD_REQUEST);
 
-      uri = URI.create("http://localhost:" + serverPort + "/ws/v2/" +
+      uri = URI.create("http://localhost:" + getServerPort() + "/ws/v2/" +
           "timeline/app-uid/dummy:app/entities/type1");
       verifyHttpResponse(client, uri, Status.BAD_REQUEST);
 
-      uri = URI.create("http://localhost:" + serverPort + "/ws/v2/" +
+      uri = URI.create("http://localhost:" + getServerPort() + "/ws/v2/" +
           "timeline/entity-uid/dummy:entity");
       verifyHttpResponse(client, uri, Status.BAD_REQUEST);
     } finally {
@@ -905,7 +806,7 @@ public class TestTimelineReaderWebServicesHBaseStorage {
     try {
       String appUIDWithFlowInfo =
           "cluster1!user1!flow_name!1002345678919!application_1111111111_1111";
-      URI uri = URI.create("http://localhost:" + serverPort + "/ws/v2/"+
+      URI uri = URI.create("http://localhost:" + getServerPort() + "/ws/v2/"+
           "timeline/app-uid/" + appUIDWithFlowInfo);
       ClientResponse resp = getResponse(client, uri);
       TimelineEntity appEntity1 = resp.getEntity(TimelineEntity.class);
@@ -914,8 +815,9 @@ public class TestTimelineReaderWebServicesHBaseStorage {
           TimelineEntityType.YARN_APPLICATION.toString(), appEntity1.getType());
       assertEquals("application_1111111111_1111", appEntity1.getId());
 
-      uri = URI.create("http://localhost:" + serverPort + "/ws/v2/timeline/" +
-          "app-uid/" + appUIDWithFlowInfo + "/entities/type1");
+      uri =
+          URI.create("http://localhost:" + getServerPort() + "/ws/v2/timeline/"
+              + "app-uid/" + appUIDWithFlowInfo + "/entities/type1");
       resp = getResponse(client, uri);
       Set<TimelineEntity> entities1 =
           resp.getEntity(new GenericType<Set<TimelineEntity>>(){});
@@ -932,8 +834,8 @@ public class TestTimelineReaderWebServicesHBaseStorage {
       }
 
       String appUIDWithoutFlowInfo = "cluster1!application_1111111111_1111";
-      uri = URI.create("http://localhost:" + serverPort + "/ws/v2/timeline/"+
-          "app-uid/" + appUIDWithoutFlowInfo);
+      uri = URI.create("http://localhost:" + getServerPort()
+          + "/ws/v2/timeline/" + "app-uid/" + appUIDWithoutFlowInfo);
       resp = getResponse(client, uri);
       TimelineEntity appEntity2 = resp.getEntity(TimelineEntity.class);
       assertNotNull(appEntity2);
@@ -941,8 +843,9 @@ public class TestTimelineReaderWebServicesHBaseStorage {
           TimelineEntityType.YARN_APPLICATION.toString(), appEntity2.getType());
       assertEquals("application_1111111111_1111", appEntity2.getId());
 
-      uri = URI.create("http://localhost:" + serverPort + "/ws/v2/timeline/" +
-          "app-uid/" + appUIDWithoutFlowInfo + "/entities/type1");
+      uri =
+          URI.create("http://localhost:" + getServerPort() + "/ws/v2/timeline/"
+              + "app-uid/" + appUIDWithoutFlowInfo + "/entities/type1");
       resp = getResponse(client, uri);
       Set<TimelineEntity> entities2 =
           resp.getEntity(new GenericType<Set<TimelineEntity>>(){});
@@ -959,8 +862,8 @@ public class TestTimelineReaderWebServicesHBaseStorage {
       }
 
       String entityUIDWithFlowInfo = appUIDWithFlowInfo + "!type1!0!entity1";
-      uri = URI.create("http://localhost:" + serverPort + "/ws/v2/timeline/"+
-          "entity-uid/" + entityUIDWithFlowInfo);
+      uri = URI.create("http://localhost:" + getServerPort()
+          + "/ws/v2/timeline/" + "entity-uid/" + entityUIDWithFlowInfo);
       resp = getResponse(client, uri);
       TimelineEntity singleEntity1 = resp.getEntity(TimelineEntity.class);
       assertNotNull(singleEntity1);
@@ -969,8 +872,8 @@ public class TestTimelineReaderWebServicesHBaseStorage {
 
       String entityUIDWithoutFlowInfo =
           appUIDWithoutFlowInfo + "!type1!0!entity1";
-      uri = URI.create("http://localhost:" + serverPort + "/ws/v2/timeline/"+
-          "entity-uid/" + entityUIDWithoutFlowInfo);
+      uri = URI.create("http://localhost:" + getServerPort()
+          + "/ws/v2/timeline/" + "entity-uid/" + entityUIDWithoutFlowInfo);
       resp = getResponse(client, uri);
       TimelineEntity singleEntity2 = resp.getEntity(TimelineEntity.class);
       assertNotNull(singleEntity2);
@@ -987,7 +890,7 @@ public class TestTimelineReaderWebServicesHBaseStorage {
     try {
       String appUID =
           "cluster1!user*1!flow_name!1002345678919!application_1111111111_1111";
-      URI uri = URI.create("http://localhost:" + serverPort + "/ws/v2/"+
+      URI uri = URI.create("http://localhost:" + getServerPort() + "/ws/v2/"+
           "timeline/app-uid/" + appUID);
       verifyHttpResponse(client, uri, Status.BAD_REQUEST);
     } finally {
@@ -999,19 +902,19 @@ public class TestTimelineReaderWebServicesHBaseStorage {
   public void testGetFlows() throws Exception {
     Client client = createClient();
     try {
-      URI uri = URI.create("http://localhost:" + serverPort + "/ws/v2/" +
+      URI uri = URI.create("http://localhost:" + getServerPort() + "/ws/v2/" +
           "timeline/clusters/cluster1/flows");
 
       verifyFlowEntites(client, uri, 3, new int[] {3, 2, 1},
           new String[] {"flow1", "flow_name", "flow_name2"});
 
       // Query without specifying cluster ID.
-      uri = URI.create("http://localhost:" + serverPort + "/ws/v2/" +
+      uri = URI.create("http://localhost:" + getServerPort() + "/ws/v2/" +
           "timeline/flows/");
       verifyFlowEntites(client, uri, 3, new int[] {3, 2, 1},
           new String[] {"flow1", "flow_name", "flow_name2"});
 
-      uri = URI.create("http://localhost:" + serverPort + "/ws/v2/" +
+      uri = URI.create("http://localhost:" + getServerPort() + "/ws/v2/" +
               "timeline/clusters/cluster1/flows?limit=1");
       verifyFlowEntites(client, uri, 1, new int[] {3},
           new String[] {"flow1"});
@@ -1020,43 +923,43 @@ public class TestTimelineReaderWebServicesHBaseStorage {
           HBaseTimelineStorageUtils.getTopOfTheDayTimestamp(1425016501000L);
 
       DateFormat fmt = TimelineReaderWebServices.DATE_FORMAT.get();
-      uri = URI.create("http://localhost:" + serverPort + "/ws/v2/" +
+      uri = URI.create("http://localhost:" + getServerPort() + "/ws/v2/" +
           "timeline/clusters/cluster1/flows?daterange="
           + fmt.format(firstFlowActivity) + "-"
           + fmt.format(dayTs));
       verifyFlowEntites(client, uri, 3, new int[] {3, 2, 1},
           new String[] {"flow1", "flow_name", "flow_name2"});
 
-      uri = URI.create("http://localhost:" + serverPort + "/ws/v2/" +
+      uri = URI.create("http://localhost:" + getServerPort() + "/ws/v2/" +
           "timeline/clusters/cluster1/flows?daterange=" +
           fmt.format(dayTs + (4*86400000L)));
       verifyFlowEntites(client, uri, 0, new int[] {}, new String[] {});
 
-      uri = URI.create("http://localhost:" + serverPort + "/ws/v2/" +
+      uri = URI.create("http://localhost:" + getServerPort() + "/ws/v2/" +
           "timeline/clusters/cluster1/flows?daterange=-" +
           fmt.format(dayTs));
       verifyFlowEntites(client, uri, 3, new int[] {3, 2, 1},
           new String[] {"flow1", "flow_name", "flow_name2"});
 
-      uri = URI.create("http://localhost:" + serverPort + "/ws/v2/" +
+      uri = URI.create("http://localhost:" + getServerPort() + "/ws/v2/" +
           "timeline/clusters/cluster1/flows?daterange=" +
            fmt.format(firstFlowActivity) + "-");
       verifyFlowEntites(client, uri, 3, new int[] {3, 2, 1},
           new String[] {"flow1", "flow_name", "flow_name2"});
 
-      uri = URI.create("http://localhost:" + serverPort + "/ws/v2/" +
+      uri = URI.create("http://localhost:" + getServerPort() + "/ws/v2/" +
           "timeline/clusters/cluster1/flows?daterange=20150711:20150714");
       verifyHttpResponse(client, uri, Status.BAD_REQUEST);
 
-      uri = URI.create("http://localhost:" + serverPort + "/ws/v2/" +
+      uri = URI.create("http://localhost:" + getServerPort() + "/ws/v2/" +
           "timeline/clusters/cluster1/flows?daterange=20150714-20150711");
       verifyHttpResponse(client, uri, Status.BAD_REQUEST);
 
-      uri = URI.create("http://localhost:" + serverPort + "/ws/v2/" +
+      uri = URI.create("http://localhost:" + getServerPort() + "/ws/v2/" +
           "timeline/clusters/cluster1/flows?daterange=2015071129-20150712");
       verifyHttpResponse(client, uri, Status.BAD_REQUEST);
 
-      uri = URI.create("http://localhost:" + serverPort + "/ws/v2/" +
+      uri = URI.create("http://localhost:" + getServerPort() + "/ws/v2/" +
           "timeline/clusters/cluster1/flows?daterange=20150711-2015071243");
       verifyHttpResponse(client, uri, Status.BAD_REQUEST);
     } finally {
@@ -1065,10 +968,47 @@ public class TestTimelineReaderWebServicesHBaseStorage {
   }
 
   @Test
+  public void testGetFlowsForPagination() throws Exception {
+    Client client = createClient();
+    int noOfEntities = 3;
+    int limit = 2;
+    try {
+      String flowURI = "http://localhost:" + getServerPort() + "/ws/v2/"
+          + "timeline/clusters/cluster1/flows";
+      URI uri = URI.create(flowURI);
+      List<FlowActivityEntity> flowEntites =
+          verifyFlowEntites(client, uri, 3, new int[] {3, 2, 1},
+              new String[] {"flow1", "flow_name", "flow_name2"});
+      FlowActivityEntity fEntity1 = flowEntites.get(0);
+      FlowActivityEntity fEntity3 = flowEntites.get(noOfEntities - 1);
+
+      uri = URI.create(flowURI + "?limit=" + limit);
+      flowEntites = verifyFlowEntites(client, uri, limit);
+      assertEquals(fEntity1, flowEntites.get(0));
+      FlowActivityEntity fEntity2 = flowEntites.get(limit - 1);
+
+      uri = URI
+          .create(flowURI + "?limit=" + limit + "&fromid="
+              + fEntity2.getInfo().get(TimelineReaderUtils.FROMID_KEY));
+      flowEntites = verifyFlowEntites(client, uri, noOfEntities - limit + 1);
+      assertEquals(fEntity2, flowEntites.get(0));
+      assertEquals(fEntity3, flowEntites.get(noOfEntities - limit));
+
+      uri = URI
+          .create(flowURI + "?limit=" + limit + "&fromid="
+              + fEntity3.getInfo().get(TimelineReaderUtils.FROMID_KEY));
+      flowEntites = verifyFlowEntites(client, uri, 1);
+      assertEquals(fEntity3, flowEntites.get(0));
+    } finally {
+      client.destroy();
+    }
+  }
+
+  @Test
   public void testGetApp() throws Exception {
     Client client = createClient();
     try {
-      URI uri = URI.create("http://localhost:" + serverPort + "/ws/v2/" +
+      URI uri = URI.create("http://localhost:" + getServerPort() + "/ws/v2/" +
           "timeline/clusters/cluster1/apps/application_1111111111_1111?" +
           "userid=user1&fields=ALL&flowname=flow_name&flowrunid=1002345678919");
       ClientResponse resp = getResponse(client, uri);
@@ -1086,7 +1026,7 @@ public class TestTimelineReaderWebServicesHBaseStorage {
         assertTrue(verifyMetrics(metric, m1, m2, m3));
       }
 
-      uri = URI.create("http://localhost:" + serverPort + "/ws/v2/" +
+      uri = URI.create("http://localhost:" + getServerPort() + "/ws/v2/" +
               "timeline/apps/application_1111111111_2222?userid=user1" +
               "&fields=metrics&flowname=flow_name&flowrunid=1002345678919");
       resp = getResponse(client, uri);
@@ -1108,7 +1048,7 @@ public class TestTimelineReaderWebServicesHBaseStorage {
   public void testGetAppWithoutFlowInfo() throws Exception {
     Client client = createClient();
     try {
-      URI uri = URI.create("http://localhost:" + serverPort + "/ws/v2/" +
+      URI uri = URI.create("http://localhost:" + getServerPort() + "/ws/v2/" +
           "timeline/clusters/cluster1/apps/application_1111111111_1111?" +
           "fields=ALL");
       ClientResponse resp = getResponse(client, uri);
@@ -1127,7 +1067,7 @@ public class TestTimelineReaderWebServicesHBaseStorage {
         assertTrue(verifyMetrics(metric, m1, m2, m3));
       }
 
-      uri = URI.create("http://localhost:" + serverPort + "/ws/v2/" +
+      uri = URI.create("http://localhost:" + getServerPort() + "/ws/v2/" +
           "timeline/clusters/cluster1/apps/application_1111111111_1111?" +
           "fields=ALL&metricslimit=10");
       resp = getResponse(client, uri);
@@ -1157,7 +1097,7 @@ public class TestTimelineReaderWebServicesHBaseStorage {
   public void testGetEntityWithoutFlowInfo() throws Exception {
     Client client = createClient();
     try {
-      URI uri = URI.create("http://localhost:" + serverPort + "/ws/v2/" +
+      URI uri = URI.create("http://localhost:" + getServerPort() + "/ws/v2/" +
           "timeline/clusters/cluster1/apps/application_1111111111_1111/" +
           "entities/type1/entity1");
       ClientResponse resp = getResponse(client, uri);
@@ -1174,7 +1114,7 @@ public class TestTimelineReaderWebServicesHBaseStorage {
   public void testGetEntitiesWithoutFlowInfo() throws Exception {
     Client client = createClient();
     try {
-      URI uri = URI.create("http://localhost:" + serverPort + "/ws/v2/" +
+      URI uri = URI.create("http://localhost:" + getServerPort() + "/ws/v2/" +
           "timeline/clusters/cluster1/apps/application_1111111111_1111/" +
           "entities/type1");
       ClientResponse resp = getResponse(client, uri);
@@ -1198,7 +1138,7 @@ public class TestTimelineReaderWebServicesHBaseStorage {
   public void testGetEntitiesDataToRetrieve() throws Exception {
     Client client = createClient();
     try {
-      URI uri = URI.create("http://localhost:" + serverPort + "/ws/v2/" +
+      URI uri = URI.create("http://localhost:" + getServerPort() + "/ws/v2/" +
           "timeline/clusters/cluster1/apps/application_1111111111_1111/" +
           "entities/type1?confstoretrieve=cfg_");
       ClientResponse resp = getResponse(client, uri);
@@ -1215,7 +1155,7 @@ public class TestTimelineReaderWebServicesHBaseStorage {
       }
       assertEquals(2, cfgCnt);
 
-      uri = URI.create("http://localhost:" + serverPort + "/ws/v2/" +
+      uri = URI.create("http://localhost:" + getServerPort() + "/ws/v2/" +
           "timeline/clusters/cluster1/apps/application_1111111111_1111/" +
           "entities/type1?confstoretrieve=cfg_,config_");
       resp = getResponse(client, uri);
@@ -1232,7 +1172,7 @@ public class TestTimelineReaderWebServicesHBaseStorage {
       }
       assertEquals(5, cfgCnt);
 
-      uri = URI.create("http://localhost:" + serverPort + "/ws/v2/" +
+      uri = URI.create("http://localhost:" + getServerPort() + "/ws/v2/" +
           "timeline/clusters/cluster1/apps/application_1111111111_1111/" +
           "entities/type1?confstoretrieve=!(cfg_,config_)");
       resp = getResponse(client, uri);
@@ -1248,7 +1188,7 @@ public class TestTimelineReaderWebServicesHBaseStorage {
       }
       assertEquals(1, cfgCnt);
 
-      uri = URI.create("http://localhost:" + serverPort + "/ws/v2/" +
+      uri = URI.create("http://localhost:" + getServerPort() + "/ws/v2/" +
           "timeline/clusters/cluster1/apps/application_1111111111_1111/" +
           "entities/type1?metricstoretrieve=MAP_");
       resp = getResponse(client, uri);
@@ -1264,7 +1204,7 @@ public class TestTimelineReaderWebServicesHBaseStorage {
       }
       assertEquals(1, metricCnt);
 
-      uri = URI.create("http://localhost:" + serverPort + "/ws/v2/" +
+      uri = URI.create("http://localhost:" + getServerPort() + "/ws/v2/" +
           "timeline/clusters/cluster1/apps/application_1111111111_1111/" +
           "entities/type1?metricstoretrieve=MAP1_,HDFS_");
       resp = getResponse(client, uri);
@@ -1281,7 +1221,7 @@ public class TestTimelineReaderWebServicesHBaseStorage {
       }
       assertEquals(3, metricCnt);
 
-      uri = URI.create("http://localhost:" + serverPort + "/ws/v2/" +
+      uri = URI.create("http://localhost:" + getServerPort() + "/ws/v2/" +
           "timeline/clusters/cluster1/apps/application_1111111111_1111/" +
           "entities/type1?metricstoretrieve=!(MAP1_,HDFS_)");
       resp = getResponse(client, uri);
@@ -1306,7 +1246,7 @@ public class TestTimelineReaderWebServicesHBaseStorage {
   public void testGetEntitiesConfigFilters() throws Exception {
     Client client = createClient();
     try {
-      URI uri = URI.create("http://localhost:" + serverPort + "/ws/v2/" +
+      URI uri = URI.create("http://localhost:" + getServerPort() + "/ws/v2/" +
           "timeline/clusters/cluster1/apps/application_1111111111_1111/" +
           "entities/type1?conffilters=config_param1%20eq%20value1%20OR%20" +
           "config_param1%20eq%20value3");
@@ -1320,7 +1260,7 @@ public class TestTimelineReaderWebServicesHBaseStorage {
             entity.getId().equals("entity2"));
       }
 
-      uri = URI.create("http://localhost:" + serverPort + "/ws/v2/" +
+      uri = URI.create("http://localhost:" + getServerPort() + "/ws/v2/" +
           "timeline/clusters/cluster1/apps/application_1111111111_1111/" +
           "entities/type1?conffilters=config_param1%20eq%20value1%20AND" +
           "%20configuration_param2%20eq%20value2");
@@ -1331,7 +1271,7 @@ public class TestTimelineReaderWebServicesHBaseStorage {
 
       // conffilters=(config_param1 eq value1 AND configuration_param2 eq
       // value2) OR (config_param1 eq value3 AND cfg_param3 eq value1)
-      uri = URI.create("http://localhost:" + serverPort + "/ws/v2/" +
+      uri = URI.create("http://localhost:" + getServerPort() + "/ws/v2/" +
           "timeline/clusters/cluster1/apps/application_1111111111_1111/" +
           "entities/type1?conffilters=(config_param1%20eq%20value1%20AND" +
           "%20configuration_param2%20eq%20value2)%20OR%20(config_param1%20eq" +
@@ -1349,7 +1289,7 @@ public class TestTimelineReaderWebServicesHBaseStorage {
 
       // conffilters=(config_param1 eq value1 AND configuration_param2 eq
       // value2) OR (config_param1 eq value3 AND cfg_param3 eq value1)
-      uri = URI.create("http://localhost:" + serverPort + "/ws/v2/" +
+      uri = URI.create("http://localhost:" + getServerPort() + "/ws/v2/" +
           "timeline/clusters/cluster1/apps/application_1111111111_1111/" +
           "entities/type1?conffilters=(config_param1%20eq%20value1%20AND" +
           "%20configuration_param2%20eq%20value2)%20OR%20(config_param1%20eq" +
@@ -1365,7 +1305,7 @@ public class TestTimelineReaderWebServicesHBaseStorage {
       }
       assertEquals(3, cfgCnt);
 
-      uri = URI.create("http://localhost:" + serverPort + "/ws/v2/" +
+      uri = URI.create("http://localhost:" + getServerPort() + "/ws/v2/" +
           "timeline/clusters/cluster1/apps/application_1111111111_1111/" +
           "entities/type1?conffilters=(config_param1%20eq%20value1%20AND" +
           "%20configuration_param2%20eq%20value2)%20OR%20(config_param1%20eq" +
@@ -1391,7 +1331,7 @@ public class TestTimelineReaderWebServicesHBaseStorage {
       // entity1. For ne, both entity1 and entity2 will be returned. For ene,
       // only entity2 will be returned as we are checking for existence too.
       // conffilters=configuration_param2 ne value3
-      uri = URI.create("http://localhost:" + serverPort + "/ws/v2/" +
+      uri = URI.create("http://localhost:" + getServerPort() + "/ws/v2/" +
           "timeline/clusters/cluster1/apps/application_1111111111_1111/" +
           "entities/type1?conffilters=configuration_param2%20ne%20value3");
       resp = getResponse(client, uri);
@@ -1403,7 +1343,7 @@ public class TestTimelineReaderWebServicesHBaseStorage {
             entity.getId().equals("entity2"));
       }
       // conffilters=configuration_param2 ene value3
-      uri = URI.create("http://localhost:" + serverPort + "/ws/v2/" +
+      uri = URI.create("http://localhost:" + getServerPort() + "/ws/v2/" +
           "timeline/clusters/cluster1/apps/application_1111111111_1111/" +
           "entities/type1?conffilters=configuration_param2%20ene%20value3");
       resp = getResponse(client, uri);
@@ -1423,7 +1363,7 @@ public class TestTimelineReaderWebServicesHBaseStorage {
     Client client = createClient();
     try {
       // infofilters=info1 eq cluster1 OR info1 eq cluster2
-      URI uri = URI.create("http://localhost:" + serverPort + "/ws/v2/" +
+      URI uri = URI.create("http://localhost:" + getServerPort() + "/ws/v2/" +
           "timeline/clusters/cluster1/apps/application_1111111111_1111/" +
           "entities/type1?infofilters=info1%20eq%20cluster1%20OR%20info1%20eq" +
           "%20cluster2");
@@ -1438,7 +1378,7 @@ public class TestTimelineReaderWebServicesHBaseStorage {
       }
 
       // infofilters=info1 eq cluster1 AND info4 eq 35000
-      uri = URI.create("http://localhost:" + serverPort + "/ws/v2/" +
+      uri = URI.create("http://localhost:" + getServerPort() + "/ws/v2/" +
           "timeline/clusters/cluster1/apps/application_1111111111_1111/" +
           "entities/type1?infofilters=info1%20eq%20cluster1%20AND%20info4%20" +
           "eq%2035000");
@@ -1448,7 +1388,7 @@ public class TestTimelineReaderWebServicesHBaseStorage {
       assertEquals(0, entities.size());
 
       // infofilters=info4 eq 35000 OR info4 eq 36000
-      uri = URI.create("http://localhost:" + serverPort + "/ws/v2/" +
+      uri = URI.create("http://localhost:" + getServerPort() + "/ws/v2/" +
           "timeline/clusters/cluster1/apps/application_1111111111_1111/" +
           "entities/type1?infofilters=info4%20eq%2035000%20OR%20info4%20eq" +
           "%2036000");
@@ -1463,7 +1403,7 @@ public class TestTimelineReaderWebServicesHBaseStorage {
 
       // infofilters=(info1 eq cluster1 AND info4 eq 35000) OR
       // (info1 eq cluster2 AND info2 eq 2.0)
-      uri = URI.create("http://localhost:" + serverPort + "/ws/v2/" +
+      uri = URI.create("http://localhost:" + getServerPort() + "/ws/v2/" +
           "timeline/clusters/cluster1/apps/application_1111111111_1111/" +
           "entities/type1?infofilters=(info1%20eq%20cluster1%20AND%20info4%20" +
           "eq%2035000)%20OR%20(info1%20eq%20cluster2%20AND%20info2%20eq%202.0" +
@@ -1482,7 +1422,7 @@ public class TestTimelineReaderWebServicesHBaseStorage {
 
       // infofilters=(info1 eq cluster1 AND info4 eq 35000) OR
       // (info1 eq cluster2 AND info2 eq 2.0)
-      uri = URI.create("http://localhost:" + serverPort + "/ws/v2/" +
+      uri = URI.create("http://localhost:" + getServerPort() + "/ws/v2/" +
           "timeline/clusters/cluster1/apps/application_1111111111_1111/" +
           "entities/type1?infofilters=(info1%20eq%20cluster1%20AND%20info4%20" +
           "eq%2035000)%20OR%20(info1%20eq%20cluster2%20AND%20info2%20eq%20" +
@@ -1504,7 +1444,7 @@ public class TestTimelineReaderWebServicesHBaseStorage {
       // both entity1 and entity2 will be returned. For ene, only entity2 will
       // be returned as we are checking for existence too.
       // infofilters=info3 ne 39000
-      uri = URI.create("http://localhost:" + serverPort + "/ws/v2/" +
+      uri = URI.create("http://localhost:" + getServerPort() + "/ws/v2/" +
           "timeline/clusters/cluster1/apps/application_1111111111_1111/" +
           "entities/type1?infofilters=info3%20ne%2039000");
       resp = getResponse(client, uri);
@@ -1516,7 +1456,7 @@ public class TestTimelineReaderWebServicesHBaseStorage {
             entity.getId().equals("entity2"));
       }
       // infofilters=info3 ene 39000
-      uri = URI.create("http://localhost:" + serverPort + "/ws/v2/" +
+      uri = URI.create("http://localhost:" + getServerPort() + "/ws/v2/" +
           "timeline/clusters/cluster1/apps/application_1111111111_1111/" +
           "entities/type1?infofilters=info3%20ene%2039000");
       resp = getResponse(client, uri);
@@ -1536,7 +1476,7 @@ public class TestTimelineReaderWebServicesHBaseStorage {
     Client client = createClient();
     try {
       // metricfilters=HDFS_BYTES_READ lt 60 OR HDFS_BYTES_READ eq 157
-      URI uri = URI.create("http://localhost:" + serverPort + "/ws/v2/" +
+      URI uri = URI.create("http://localhost:" + getServerPort() + "/ws/v2/" +
           "timeline/clusters/cluster1/apps/application_1111111111_1111/" +
           "entities/type1?metricfilters=HDFS_BYTES_READ%20lt%2060%20OR%20" +
           "HDFS_BYTES_READ%20eq%20157");
@@ -1551,7 +1491,7 @@ public class TestTimelineReaderWebServicesHBaseStorage {
       }
 
       // metricfilters=HDFS_BYTES_READ lt 60 AND MAP_SLOT_MILLIS gt 40
-      uri = URI.create("http://localhost:" + serverPort + "/ws/v2/" +
+      uri = URI.create("http://localhost:" + getServerPort() + "/ws/v2/" +
           "timeline/clusters/cluster1/apps/application_1111111111_1111/" +
           "entities/type1?metricfilters=HDFS_BYTES_READ%20lt%2060%20AND%20" +
           "MAP_SLOT_MILLIS%20gt%2040");
@@ -1562,7 +1502,7 @@ public class TestTimelineReaderWebServicesHBaseStorage {
 
       // metricfilters=(HDFS_BYTES_READ lt 60 AND MAP_SLOT_MILLIS gt 40) OR
       // (MAP1_SLOT_MILLIS ge 140 AND MAP11_SLOT_MILLIS le 122)
-      uri = URI.create("http://localhost:" + serverPort + "/ws/v2/" +
+      uri = URI.create("http://localhost:" + getServerPort() + "/ws/v2/" +
           "timeline/clusters/cluster1/apps/application_1111111111_1111/" +
           "entities/type1?metricfilters=(HDFS_BYTES_READ%20lt%2060%20AND%20" +
           "MAP_SLOT_MILLIS%20gt%2040)%20OR%20(MAP1_SLOT_MILLIS%20ge" +
@@ -1580,7 +1520,7 @@ public class TestTimelineReaderWebServicesHBaseStorage {
 
       // metricfilters=(HDFS_BYTES_READ lt 60 AND MAP_SLOT_MILLIS gt 40) OR
       // (MAP1_SLOT_MILLIS ge 140 AND MAP11_SLOT_MILLIS le 122)
-      uri = URI.create("http://localhost:" + serverPort + "/ws/v2/" +
+      uri = URI.create("http://localhost:" + getServerPort() + "/ws/v2/" +
           "timeline/clusters/cluster1/apps/application_1111111111_1111/" +
           "entities/type1?metricfilters=(HDFS_BYTES_READ%20lt%2060%20AND%20" +
           "MAP_SLOT_MILLIS%20gt%2040)%20OR%20(MAP1_SLOT_MILLIS%20ge" +
@@ -1598,7 +1538,7 @@ public class TestTimelineReaderWebServicesHBaseStorage {
 
       // metricfilters=(HDFS_BYTES_READ lt 60 AND MAP_SLOT_MILLIS gt 40) OR
       // (MAP1_SLOT_MILLIS ge 140 AND MAP11_SLOT_MILLIS le 122)
-      uri = URI.create("http://localhost:" + serverPort + "/ws/v2/" +
+      uri = URI.create("http://localhost:" + getServerPort() + "/ws/v2/" +
           "timeline/clusters/cluster1/apps/application_1111111111_1111/" +
           "entities/type1?metricfilters=(HDFS_BYTES_READ%20lt%2060%20AND%20" +
           "MAP_SLOT_MILLIS%20gt%2040)%20OR%20(MAP1_SLOT_MILLIS%20ge" +
@@ -1619,7 +1559,7 @@ public class TestTimelineReaderWebServicesHBaseStorage {
       }
       assertEquals(2, metricCnt);
 
-      uri = URI.create("http://localhost:" + serverPort + "/ws/v2/" +
+      uri = URI.create("http://localhost:" + getServerPort() + "/ws/v2/" +
           "timeline/clusters/cluster1/apps/application_1111111111_1111/" +
           "entities/type1?metricfilters=(HDFS_BYTES_READ%20lt%2060%20AND%20" +
           "MAP_SLOT_MILLIS%20gt%2040)%20OR%20(MAP1_SLOT_MILLIS%20ge" +
@@ -1652,7 +1592,7 @@ public class TestTimelineReaderWebServicesHBaseStorage {
       // entity1. For ne, both entity1 and entity2 will be returned. For ene,
       // only entity2 will be returned as we are checking for existence too.
       // metricfilters=MAP11_SLOT_MILLIS ne 100
-      uri = URI.create("http://localhost:" + serverPort + "/ws/v2/" +
+      uri = URI.create("http://localhost:" + getServerPort() + "/ws/v2/" +
           "timeline/clusters/cluster1/apps/application_1111111111_1111/" +
           "entities/type1?metricfilters=MAP11_SLOT_MILLIS%20ne%20100");
       resp = getResponse(client, uri);
@@ -1664,7 +1604,7 @@ public class TestTimelineReaderWebServicesHBaseStorage {
             entity.getId().equals("entity2"));
       }
       // metricfilters=MAP11_SLOT_MILLIS ene 100
-      uri = URI.create("http://localhost:" + serverPort + "/ws/v2/" +
+      uri = URI.create("http://localhost:" + getServerPort() + "/ws/v2/" +
           "timeline/clusters/cluster1/apps/application_1111111111_1111/" +
           "entities/type1?metricfilters=MAP11_SLOT_MILLIS%20ene%20100");
       resp = getResponse(client, uri);
@@ -1683,7 +1623,7 @@ public class TestTimelineReaderWebServicesHBaseStorage {
   public void testGetEntitiesEventFilters() throws Exception {
     Client client = createClient();
     try {
-      URI uri = URI.create("http://localhost:" + serverPort + "/ws/v2/" +
+      URI uri = URI.create("http://localhost:" + getServerPort() + "/ws/v2/" +
           "timeline/clusters/cluster1/apps/application_1111111111_1111/" +
           "entities/type1?eventfilters=event1,event3");
       ClientResponse resp = getResponse(client, uri);
@@ -1696,7 +1636,7 @@ public class TestTimelineReaderWebServicesHBaseStorage {
             entity.getId().equals("entity2"));
       }
 
-      uri = URI.create("http://localhost:" + serverPort + "/ws/v2/" +
+      uri = URI.create("http://localhost:" + getServerPort() + "/ws/v2/" +
           "timeline/clusters/cluster1/apps/application_1111111111_1111/" +
           "entities/type1?eventfilters=!(event1,event3)");
       resp = getResponse(client, uri);
@@ -1705,7 +1645,7 @@ public class TestTimelineReaderWebServicesHBaseStorage {
       assertEquals(0, entities.size());
 
       // eventfilters=!(event1,event3) OR event5,event6
-      uri = URI.create("http://localhost:" + serverPort + "/ws/v2/" +
+      uri = URI.create("http://localhost:" + getServerPort() + "/ws/v2/" +
           "timeline/clusters/cluster1/apps/application_1111111111_1111/" +
           "entities/type1?eventfilters=!(event1,event3)%20OR%20event5,event6");
       resp = getResponse(client, uri);
@@ -1718,7 +1658,7 @@ public class TestTimelineReaderWebServicesHBaseStorage {
 
       //  eventfilters=(!(event1,event3) OR event5,event6) OR
       // (event1,event2 AND (event3,event4))
-      uri = URI.create("http://localhost:" + serverPort + "/ws/v2/" +
+      uri = URI.create("http://localhost:" + getServerPort() + "/ws/v2/" +
           "timeline/clusters/cluster1/apps/application_1111111111_1111/" +
           "entities/type1?eventfilters=(!(event1,event3)%20OR%20event5," +
           "event6)%20OR%20(event1,event2%20AND%20(event3,event4))");
@@ -1739,7 +1679,7 @@ public class TestTimelineReaderWebServicesHBaseStorage {
   public void testGetEntitiesRelationFilters() throws Exception {
     Client client = createClient();
     try {
-      URI uri = URI.create("http://localhost:" + serverPort + "/ws/v2/" +
+      URI uri = URI.create("http://localhost:" + getServerPort() + "/ws/v2/" +
           "timeline/clusters/cluster1/apps/application_1111111111_1111/" +
           "entities/type1?isrelatedto=type3:entity31,type2:entity21:entity22");
       ClientResponse resp = getResponse(client, uri);
@@ -1752,7 +1692,8 @@ public class TestTimelineReaderWebServicesHBaseStorage {
             entity.getId().equals("entity2"));
       }
 
-      uri = URI.create("http://localhost:" + serverPort + "/ws/v2/timeline/" +
+      uri = URI.create("http://localhost:" + getServerPort() +
+          "/ws/v2/timeline/" +
           "clusters/cluster1/apps/application_1111111111_1111/entities/type1" +
           "?isrelatedto=!(type3:entity31,type2:entity21:entity22)");
       resp = getResponse(client, uri);
@@ -1762,7 +1703,8 @@ public class TestTimelineReaderWebServicesHBaseStorage {
 
       // isrelatedto=!(type3:entity31,type2:entity21:entity22)OR type5:entity51,
       // type6:entity61:entity66
-      uri = URI.create("http://localhost:" + serverPort + "/ws/v2/timeline/" +
+      uri = URI.create("http://localhost:" + getServerPort() +
+          "/ws/v2/timeline/" +
           "clusters/cluster1/apps/application_1111111111_1111/entities/type1" +
           "?isrelatedto=!(type3:entity31,type2:entity21:entity22)%20OR%20" +
           "type5:entity51,type6:entity61:entity66");
@@ -1777,7 +1719,8 @@ public class TestTimelineReaderWebServicesHBaseStorage {
       // isrelatedto=(!(type3:entity31,type2:entity21:entity22)OR type5:
       // entity51,type6:entity61:entity66) OR (type1:entity14,type2:entity21:
       // entity22 AND (type3:entity32:entity35,type4:entity42))
-      uri = URI.create("http://localhost:" + serverPort + "/ws/v2/timeline/" +
+      uri = URI.create("http://localhost:" + getServerPort() +
+          "/ws/v2/timeline/" +
           "clusters/cluster1/apps/application_1111111111_1111/entities/type1" +
           "?isrelatedto=(!(type3:entity31,type2:entity21:entity22)%20OR%20" +
           "type5:entity51,type6:entity61:entity66)%20OR%20(type1:entity14," +
@@ -1794,7 +1737,8 @@ public class TestTimelineReaderWebServicesHBaseStorage {
 
       // relatesto=!(type3:entity31,type2:entity21:entity22)OR type5:entity51,
       // type6:entity61:entity66
-      uri = URI.create("http://localhost:" + serverPort + "/ws/v2/timeline/" +
+      uri = URI.create("http://localhost:" + getServerPort() +
+          "/ws/v2/timeline/" +
           "clusters/cluster1/apps/application_1111111111_1111/entities/type1" +
           "?relatesto=!%20(type3:entity31,type2:entity21:entity22%20)%20OR%20" +
           "type5:entity51,type6:entity61:entity66");
@@ -1809,7 +1753,9 @@ public class TestTimelineReaderWebServicesHBaseStorage {
       // relatesto=(!(type3:entity31,type2:entity21:entity22)OR type5:entity51,
       // type6:entity61:entity66) OR (type1:entity14,type2:entity21:entity22 AND
       // (type3:entity32:entity35 , type4:entity42))
-      uri = URI.create("http://localhost:" + serverPort + "/ws/v2/timeline/" +
+      uri =
+          URI.create("http://localhost:" + getServerPort() +
+          "/ws/v2/timeline/" +
           "clusters/cluster1/apps/application_1111111111_1111/entities/type1" +
           "?relatesto=(!(%20type3:entity31,type2:entity21:entity22)%20OR%20" +
           "type5:entity51,type6:entity61:entity66%20)%20OR%20(type1:entity14," +
@@ -1835,7 +1781,7 @@ public class TestTimelineReaderWebServicesHBaseStorage {
   public void testGetEntityDataToRetrieve() throws Exception {
     Client client = createClient();
     try {
-      URI uri = URI.create("http://localhost:" + serverPort + "/ws/v2/" +
+      URI uri = URI.create("http://localhost:" + getServerPort() + "/ws/v2/" +
           "timeline/clusters/cluster1/apps/application_1111111111_1111/" +
           "entities/type1/entity2?confstoretrieve=cfg_,configuration_");
       ClientResponse resp = getResponse(client, uri);
@@ -1849,7 +1795,7 @@ public class TestTimelineReaderWebServicesHBaseStorage {
             configKey.startsWith("cfg_"));
       }
 
-      uri = URI.create("http://localhost:" + serverPort + "/ws/v2/" +
+      uri = URI.create("http://localhost:" + getServerPort() + "/ws/v2/" +
           "timeline/clusters/cluster1/apps/application_1111111111_1111/" +
           "entities/type1/entity2?confstoretrieve=!(cfg_,configuration_)");
       resp = getResponse(client, uri);
@@ -1862,7 +1808,7 @@ public class TestTimelineReaderWebServicesHBaseStorage {
         assertTrue(configKey.startsWith("config_"));
       }
 
-      uri = URI.create("http://localhost:" + serverPort + "/ws/v2/" +
+      uri = URI.create("http://localhost:" + getServerPort() + "/ws/v2/" +
           "timeline/clusters/cluster1/apps/application_1111111111_1111/" +
           "entities/type1/entity2?metricstoretrieve=MAP1_,HDFS_");
       resp = getResponse(client, uri);
@@ -1876,7 +1822,7 @@ public class TestTimelineReaderWebServicesHBaseStorage {
             metric.getId().startsWith("HDFS_"));
       }
 
-      uri = URI.create("http://localhost:" + serverPort + "/ws/v2/" +
+      uri = URI.create("http://localhost:" + getServerPort() + "/ws/v2/" +
           "timeline/clusters/cluster1/apps/application_1111111111_1111/" +
           "entities/type1/entity2?metricstoretrieve=!(MAP1_,HDFS_)");
       resp = getResponse(client, uri);
@@ -1891,7 +1837,7 @@ public class TestTimelineReaderWebServicesHBaseStorage {
         assertEquals(1, metric.getValues().size());
       }
 
-      uri = URI.create("http://localhost:" + serverPort + "/ws/v2/" +
+      uri = URI.create("http://localhost:" + getServerPort() + "/ws/v2/" +
           "timeline/clusters/cluster1/apps/application_1111111111_1111/" +
           "entities/type1/entity2?metricstoretrieve=!(MAP1_,HDFS_)&" +
           "metricslimit=5");
@@ -1914,7 +1860,7 @@ public class TestTimelineReaderWebServicesHBaseStorage {
   public void testGetFlowRunApps() throws Exception {
     Client client = createClient();
     try {
-      URI uri = URI.create("http://localhost:" + serverPort + "/ws/v2/" +
+      URI uri = URI.create("http://localhost:" + getServerPort() + "/ws/v2/" +
           "timeline/clusters/cluster1/users/user1/flows/flow_name/runs/" +
           "1002345678919/apps?fields=ALL");
       ClientResponse resp = getResponse(client, uri);
@@ -1934,7 +1880,7 @@ public class TestTimelineReaderWebServicesHBaseStorage {
         }
       }
 
-      uri = URI.create("http://localhost:" + serverPort + "/ws/v2/" +
+      uri = URI.create("http://localhost:" + getServerPort() + "/ws/v2/" +
               "timeline/clusters/cluster1/users/user1/flows/flow_name/runs/" +
               "1002345678919/apps?fields=ALL&metricslimit=2");
       resp = getResponse(client, uri);
@@ -1954,14 +1900,14 @@ public class TestTimelineReaderWebServicesHBaseStorage {
       }
 
       // Query without specifying cluster ID.
-      uri = URI.create("http://localhost:" + serverPort + "/ws/v2/" +
+      uri = URI.create("http://localhost:" + getServerPort() + "/ws/v2/" +
           "timeline/users/user1/flows/flow_name/runs/1002345678919/apps");
       resp = getResponse(client, uri);
       entities = resp.getEntity(new GenericType<Set<TimelineEntity>>(){});
       assertNotNull(entities);
       assertEquals(2, entities.size());
 
-      uri = URI.create("http://localhost:" + serverPort + "/ws/v2/" +
+      uri = URI.create("http://localhost:" + getServerPort() + "/ws/v2/" +
           "timeline/users/user1/flows/flow_name/runs/1002345678919/" +
           "apps?limit=1");
       resp = getResponse(client, uri);
@@ -1977,7 +1923,7 @@ public class TestTimelineReaderWebServicesHBaseStorage {
   public void testGetFlowApps() throws Exception {
     Client client = createClient();
     try {
-      URI uri = URI.create("http://localhost:" + serverPort + "/ws/v2/" +
+      URI uri = URI.create("http://localhost:" + getServerPort() + "/ws/v2/" +
           "timeline/clusters/cluster1/users/user1/flows/flow_name/apps?" +
           "fields=ALL");
       ClientResponse resp = getResponse(client, uri);
@@ -2016,7 +1962,7 @@ public class TestTimelineReaderWebServicesHBaseStorage {
         }
       }
 
-      uri = URI.create("http://localhost:" + serverPort + "/ws/v2/" +
+      uri = URI.create("http://localhost:" + getServerPort() + "/ws/v2/" +
           "timeline/clusters/cluster1/users/user1/flows/flow_name/apps?" +
           "fields=ALL&metricslimit=6");
       resp = getResponse(client, uri);
@@ -2060,14 +2006,14 @@ public class TestTimelineReaderWebServicesHBaseStorage {
       }
 
       // Query without specifying cluster ID.
-      uri = URI.create("http://localhost:" + serverPort + "/ws/v2/" +
+      uri = URI.create("http://localhost:" + getServerPort() + "/ws/v2/" +
           "timeline/users/user1/flows/flow_name/apps");
       resp = getResponse(client, uri);
       entities = resp.getEntity(new GenericType<Set<TimelineEntity>>(){});
       assertNotNull(entities);
       assertEquals(3, entities.size());
 
-      uri = URI.create("http://localhost:" + serverPort + "/ws/v2/" +
+      uri = URI.create("http://localhost:" + getServerPort() + "/ws/v2/" +
           "timeline/users/user1/flows/flow_name/apps?limit=1");
       resp = getResponse(client, uri);
       entities = resp.getEntity(new GenericType<Set<TimelineEntity>>(){});
@@ -2083,7 +2029,7 @@ public class TestTimelineReaderWebServicesHBaseStorage {
     Client client = createClient();
     try {
       String entityType = TimelineEntityType.YARN_APPLICATION.toString();
-      URI uri = URI.create("http://localhost:" + serverPort + "/ws/v2/" +
+      URI uri = URI.create("http://localhost:" + getServerPort() + "/ws/v2/" +
           "timeline/clusters/cluster1/users/user1/flows/flow_name/apps?" +
           "eventfilters=" + ApplicationMetricsConstants.FINISHED_EVENT_TYPE);
       ClientResponse resp = getResponse(client, uri);
@@ -2094,7 +2040,7 @@ public class TestTimelineReaderWebServicesHBaseStorage {
       assertTrue("Unexpected app in result", entities.contains(
           newEntity(entityType, "application_1111111111_1111")));
 
-      uri = URI.create("http://localhost:" + serverPort + "/ws/v2/" +
+      uri = URI.create("http://localhost:" + getServerPort() + "/ws/v2/" +
           "timeline/clusters/cluster1/users/user1/flows/flow_name/apps?" +
           "metricfilters=HDFS_BYTES_READ%20ge%200");
       resp = getResponse(client, uri);
@@ -2104,7 +2050,7 @@ public class TestTimelineReaderWebServicesHBaseStorage {
       assertTrue("Unexpected app in result", entities.contains(
           newEntity(entityType, "application_1111111111_1111")));
 
-      uri = URI.create("http://localhost:" + serverPort + "/ws/v2/" +
+      uri = URI.create("http://localhost:" + getServerPort() + "/ws/v2/" +
           "timeline/clusters/cluster1/users/user1/flows/flow_name/apps?" +
           "conffilters=cfg1%20eq%20value1");
       resp = getResponse(client, uri);
@@ -2122,7 +2068,7 @@ public class TestTimelineReaderWebServicesHBaseStorage {
   public void testGetFlowRunNotPresent() throws Exception {
     Client client = createClient();
     try {
-      URI uri = URI.create("http://localhost:" + serverPort + "/ws/v2/" +
+      URI uri = URI.create("http://localhost:" + getServerPort() + "/ws/v2/" +
           "timeline/clusters/cluster1/users/user1/flows/flow_name/runs/" +
           "1002345678929");
       verifyHttpResponse(client, uri, Status.NOT_FOUND);
@@ -2135,7 +2081,7 @@ public class TestTimelineReaderWebServicesHBaseStorage {
   public void testGetFlowsNotPresent() throws Exception {
     Client client = createClient();
     try {
-      URI uri = URI.create("http://localhost:" + serverPort + "/ws/v2/" +
+      URI uri = URI.create("http://localhost:" + getServerPort() + "/ws/v2/" +
           "timeline/clusters/cluster2/flows");
       ClientResponse resp = getResponse(client, uri);
       Set<FlowActivityEntity> entities =
@@ -2153,7 +2099,7 @@ public class TestTimelineReaderWebServicesHBaseStorage {
   public void testGetAppNotPresent() throws Exception {
     Client client = createClient();
     try {
-      URI uri = URI.create("http://localhost:" + serverPort + "/ws/v2/" +
+      URI uri = URI.create("http://localhost:" + getServerPort() + "/ws/v2/" +
           "timeline/clusters/cluster1/apps/application_1111111111_1378");
       verifyHttpResponse(client, uri, Status.NOT_FOUND);
     } finally {
@@ -2165,7 +2111,7 @@ public class TestTimelineReaderWebServicesHBaseStorage {
   public void testGetFlowRunAppsNotPresent() throws Exception {
     Client client = createClient();
     try {
-      URI uri = URI.create("http://localhost:" + serverPort + "/ws/v2/" +
+      URI uri = URI.create("http://localhost:" + getServerPort() + "/ws/v2/" +
           "timeline/clusters/cluster2/users/user1/flows/flow_name/runs/" +
           "1002345678919/apps");
       ClientResponse resp = getResponse(client, uri);
@@ -2184,7 +2130,7 @@ public class TestTimelineReaderWebServicesHBaseStorage {
   public void testGetFlowAppsNotPresent() throws Exception {
     Client client = createClient();
     try {
-      URI uri = URI.create("http://localhost:" + serverPort + "/ws/v2/" +
+      URI uri = URI.create("http://localhost:" + getServerPort() + "/ws/v2/" +
           "timeline/clusters/cluster2/users/user1/flows/flow_name55/apps");
       ClientResponse resp = getResponse(client, uri);
       Set<TimelineEntity> entities =
@@ -2198,21 +2144,13 @@ public class TestTimelineReaderWebServicesHBaseStorage {
     }
   }
 
-  @After
-  public void stop() throws Exception {
-    if (server != null) {
-      server.stop();
-      server = null;
-    }
-  }
-
   @Test
   public void testGenericEntitiesForPagination() throws Exception {
     Client client = createClient();
     try {
       int limit = 10;
       String queryParam = "?limit=" + limit;
-      String resourceUri = "http://localhost:" + serverPort + "/ws/v2/"
+      String resourceUri = "http://localhost:" + getServerPort() + "/ws/v2/"
           + "timeline/clusters/cluster1/apps/application_1111111111_1111/"
           + "entities/entitytype";
       URI uri = URI.create(resourceUri + queryParam);
@@ -2276,7 +2214,8 @@ public class TestTimelineReaderWebServicesHBaseStorage {
     return entity;
   }
 
-  private void verifyFlowEntites(Client client, URI uri, int noOfEntities,
+  private List<FlowActivityEntity> verifyFlowEntites(Client client, URI uri,
+      int noOfEntities,
       int[] a, String[] flowsInSequence) throws Exception {
     ClientResponse resp = getResponse(client, uri);
     List<FlowActivityEntity> entities =
@@ -2292,6 +2231,7 @@ public class TestTimelineReaderWebServicesHBaseStorage {
           timelineEntity.getInfo().get("SYSTEM_INFO_FLOW_NAME"));
       assertEquals(a[count++], timelineEntity.getFlowRuns().size());
     }
+    return entities;
   }
 
   @Test
@@ -2300,7 +2240,7 @@ public class TestTimelineReaderWebServicesHBaseStorage {
     try {
      // 15 app entities are stored during initialization.
       int totalAppEntities = 15;
-      String resourceUri = "http://localhost:" + serverPort + "/ws/v2/"
+      String resourceUri = "http://localhost:" + getServerPort() + "/ws/v2/"
           + "timeline/clusters/cluster1/users/user1/flows/flow1/apps";
       URI uri = URI.create(resourceUri);
       ClientResponse resp = getResponse(client, uri);
@@ -2344,7 +2284,7 @@ public class TestTimelineReaderWebServicesHBaseStorage {
     try {
      // 5 app entities are stored under flow run 1 during initialization.
       int totalAppEntities = 5;
-      String resourceUri = "http://localhost:" + serverPort + "/ws/v2/"
+      String resourceUri = "http://localhost:" + getServerPort() + "/ws/v2/"
           + "timeline/clusters/cluster1/users/user1/flows/flow1/runs/1/apps";
       URI uri = URI.create(resourceUri);
       ClientResponse resp = getResponse(client, uri);
@@ -2388,7 +2328,7 @@ public class TestTimelineReaderWebServicesHBaseStorage {
     try {
      // 3 flow runs are stored during initialization.
       int totalRuns = 3;
-      String resourceUri = "http://localhost:" + serverPort + "/ws/v2/"
+      String resourceUri = "http://localhost:" + getServerPort() + "/ws/v2/"
           + "timeline/clusters/cluster1/users/user1/flows/flow1/runs";
       URI uri = URI.create(resourceUri);
       ClientResponse resp = getResponse(client, uri);
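
For illustration: the pagination tests added above lean on the new FROMID info key. A page is fetched, and the next request passes the last entity's FROM_ID value as the fromid parameter; the server resumes from that row inclusively, so each resumed page repeats one boundary entity. A minimal client-side sketch of that loop follows; the fetchEntities helper, the port, and the literal "FROM_ID" key name (the value of TimelineReaderUtils.FROMID_KEY) are assumptions for illustration, not part of the patch.

import java.util.List;
import java.util.Map;

public class FlowPagingSketch {
  // Assumed base URL; the test itself binds an ephemeral port via
  // getServerPort(), so 8188 here is purely illustrative.
  private static final String BASE =
      "http://localhost:8188/ws/v2/timeline/clusters/cluster1/flows";

  // Hypothetical helper: performs the GET and returns each entity's info
  // map. Transport and JSON parsing are omitted from this sketch.
  static List<Map<String, Object>> fetchEntities(String uri) {
    throw new UnsupportedOperationException("sketch only");
  }

  public static void main(String[] args) {
    int limit = 2;
    String uri = BASE + "?limit=" + limit;
    boolean first = true;
    while (true) {
      List<Map<String, Object>> page = fetchEntities(uri);
      // A resumed page starts with the fromid entity itself, so skip the
      // overlapping first element on every page after the first.
      for (int i = first ? 0 : 1; i < page.size(); i++) {
        System.out.println(page.get(i).get("SYSTEM_INFO_FLOW_NAME"));
      }
      if (page.size() < limit) {
        break; // short page: no more rows to fetch
      }
      uri = BASE + "?limit=" + limit + "&fromid="
          + page.get(page.size() - 1).get("FROM_ID");
      first = false;
    }
  }
}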

http://git-wip-us.apache.org/repos/asf/hadoop/blob/8bb26465/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/common/KeyConverterToString.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/common/KeyConverterToString.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/common/KeyConverterToString.java
new file mode 100644
index 0000000..1f52a7b
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/common/KeyConverterToString.java
@@ -0,0 +1,38 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.yarn.server.timelineservice.storage.common;
+
+/**
+ * Interface to be implemented for encoding and decoding row keys or
+ * column qualifiers as strings.
+ */
+public interface KeyConverterToString<T> {
+  /**
+   * Encode key as string.
+   * @param key of type T to be encoded as a string.
+   * @return encoded value as a string.
+   */
+  String encodeAsString(T key);
+
+  /**
+   * Decode row key from string to a key of type T.
+   * @param encodedKey string representation of row key
+   * @return the key of type T constructed by decoding the string.
+   */
+  T decodeFromString(String encodedKey);
+}
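
The interface above gives row keys a string codec alongside the existing byte[]-oriented KeyConverter, so a key can travel in a REST response (as the fromid does). Below is a toy implementation for a two-part key, sketched only to illustrate the contract; the real implementations escape the delimiter via TimelineReaderUtils instead of splitting naively.

import org.apache.hadoop.yarn.server.timelineservice.storage.common.KeyConverterToString;

/** Toy codec for a cluster!user key pair; illustration only. */
public final class ClusterUserKeyConverter
    implements KeyConverterToString<String[]> {

  private static final String DELIMITER = "!";

  @Override
  public String encodeAsString(String[] key) {
    // key[0] is the cluster id, key[1] the user id. No escaping is done
    // here, so components must not contain '!'.
    return key[0] + DELIMITER + key[1];
  }

  @Override
  public String[] decodeFromString(String encodedKey) {
    String[] parts = encodedKey.split(DELIMITER);
    if (parts.length != 2) {
      throw new IllegalArgumentException("Invalid key: " + encodedKey);
    }
    return parts;
  }
}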

http://git-wip-us.apache.org/repos/asf/hadoop/blob/8bb26465/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/flow/FlowActivityRowKey.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/flow/FlowActivityRowKey.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/flow/FlowActivityRowKey.java
index bb77e36..b8a5dba 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/flow/FlowActivityRowKey.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/flow/FlowActivityRowKey.java
@@ -17,10 +17,14 @@
  */
 package org.apache.hadoop.yarn.server.timelineservice.storage.flow;
 
+import java.util.List;
+
 import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.yarn.server.timelineservice.reader.TimelineReaderUtils;
 import org.apache.hadoop.yarn.server.timelineservice.storage.common.HBaseTimelineStorageUtils;
 import org.apache.hadoop.yarn.server.timelineservice.storage.common.KeyConverter;
 import org.apache.hadoop.yarn.server.timelineservice.storage.common.LongConverter;
+import org.apache.hadoop.yarn.server.timelineservice.storage.common.KeyConverterToString;
 import org.apache.hadoop.yarn.server.timelineservice.storage.common.Separator;
 
 /**
@@ -32,8 +36,8 @@ public class FlowActivityRowKey {
   private final Long dayTs;
   private final String userId;
   private final String flowName;
-  private final KeyConverter<FlowActivityRowKey> flowActivityRowKeyConverter =
-      new FlowActivityRowKeyConverter();
+  private final FlowActivityRowKeyConverter
+      flowActivityRowKeyConverter = new FlowActivityRowKeyConverter();
 
   /**
    * @param clusterId identifying the cluster
@@ -104,13 +108,32 @@ public class FlowActivityRowKey {
   }
 
   /**
+   * Returns the row key for the flow activity table as a string of the form
+   * {@code clusterId!dayTimestamp!user!flowName}.
+   * @return String representation of the row key.
+   */
+  public String getRowKeyAsString() {
+    return flowActivityRowKeyConverter.encodeAsString(this);
+  }
+
+  /**
+   * Given the raw row key as a string, returns the row key as an object.
+   * @param encodedRowKey String representation of row key.
+   * @return A <cite>FlowActivityRowKey</cite> object.
+   */
+  public static FlowActivityRowKey parseRowKeyFromString(String encodedRowKey) {
+    return new FlowActivityRowKeyConverter().decodeFromString(encodedRowKey);
+  }
+
+  /**
    * Encodes and decodes row key for flow activity table. The row key is of the
    * form : clusterId!dayTimestamp!user!flowName. dayTimestamp(top of the day
    * timestamp) is a long and rest are strings.
    * <p>
    */
-  final private static class FlowActivityRowKeyConverter implements
-      KeyConverter<FlowActivityRowKey> {
+  final private static class FlowActivityRowKeyConverter
+      implements KeyConverter<FlowActivityRowKey>,
+      KeyConverterToString<FlowActivityRowKey> {
 
     private FlowActivityRowKeyConverter() {
     }
@@ -192,5 +215,33 @@ public class FlowActivityRowKey {
               Separator.QUALIFIERS, Separator.TAB, Separator.SPACE);
       return new FlowActivityRowKey(clusterId, dayTs, userId, flowName);
     }
+
+    @Override
+    public String encodeAsString(FlowActivityRowKey key) {
+      if (key.getDayTimestamp() == null) {
+        return TimelineReaderUtils
+            .joinAndEscapeStrings(new String[] {key.clusterId});
+      } else if (key.getUserId() == null) {
+        return TimelineReaderUtils.joinAndEscapeStrings(
+            new String[] {key.clusterId, key.dayTs.toString()});
+      } else if (key.getFlowName() == null) {
+        return TimelineReaderUtils.joinAndEscapeStrings(
+            new String[] {key.clusterId, key.dayTs.toString(), key.userId});
+      }
+      return TimelineReaderUtils.joinAndEscapeStrings(new String[] {
+          key.clusterId, key.dayTs.toString(), key.userId, key.flowName});
+    }
+
+    @Override
+    public FlowActivityRowKey decodeFromString(String encodedRowKey) {
+      List<String> split = TimelineReaderUtils.split(encodedRowKey);
+      if (split == null || split.size() != 4) {
+        throw new IllegalArgumentException(
+            "Invalid row key for flow activity.");
+      }
+      Long dayTs = Long.valueOf(split.get(1));
+      return new FlowActivityRowKey(split.get(0), dayTs, split.get(2),
+          split.get(3));
+    }
   }
 }
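
Two details in the converter above are easy to miss: encodeAsString emits progressively shorter prefixes as trailing components are null, which suits prefix-style queries, while decodeFromString insists on all four parts because a fromid must name one concrete row. Also, the four-argument constructor rolls the supplied timestamp back to the top of the day, as the new test further below verifies. A small round-trip sketch, assuming components that contain the delimiter and escape characters:

import org.apache.hadoop.yarn.server.timelineservice.storage.flow.FlowActivityRowKey;

public class RowKeyRoundTripSketch {
  public static void main(String[] args) {
    // Components deliberately contain '!' and '*'; escaping keeps the
    // encoded string unambiguous, so the round trip is lossless.
    FlowActivityRowKey key = new FlowActivityRowKey(
        "clus!ter", 1459900830000L, "us*er", "flow!name");
    String encoded = key.getRowKeyAsString();
    FlowActivityRowKey decoded =
        FlowActivityRowKey.parseRowKeyFromString(encoded);
    System.out.println(decoded.getClusterId());    // clus!ter
    System.out.println(decoded.getDayTimestamp()); // top of the input day
    System.out.println(decoded.getUserId());       // us*er
    System.out.println(decoded.getFlowName());     // flow!name
  }
}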

http://git-wip-us.apache.org/repos/asf/hadoop/blob/8bb26465/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/reader/FlowActivityEntityReader.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/reader/FlowActivityEntityReader.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/reader/FlowActivityEntityReader.java
index c741d0e..a1cdb29 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/reader/FlowActivityEntityReader.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/reader/FlowActivityEntityReader.java
@@ -34,6 +34,7 @@ import org.apache.hadoop.yarn.api.records.timelineservice.TimelineEntity;
 import org.apache.hadoop.yarn.server.timelineservice.reader.TimelineDataToRetrieve;
 import org.apache.hadoop.yarn.server.timelineservice.reader.TimelineEntityFilters;
 import org.apache.hadoop.yarn.server.timelineservice.reader.TimelineReaderContext;
+import org.apache.hadoop.yarn.server.timelineservice.reader.TimelineReaderUtils;
 import org.apache.hadoop.yarn.server.timelineservice.storage.common.BaseTable;
 import org.apache.hadoop.yarn.server.timelineservice.storage.common.KeyConverter;
 import org.apache.hadoop.yarn.server.timelineservice.storage.common.LongKeyConverter;
@@ -41,6 +42,7 @@ import org.apache.hadoop.yarn.server.timelineservice.storage.flow.FlowActivityCo
 import org.apache.hadoop.yarn.server.timelineservice.storage.flow.FlowActivityRowKey;
 import org.apache.hadoop.yarn.server.timelineservice.storage.flow.FlowActivityRowKeyPrefix;
 import org.apache.hadoop.yarn.server.timelineservice.storage.flow.FlowActivityTable;
+import org.apache.hadoop.yarn.webapp.BadRequestException;
 
 import com.google.common.base.Preconditions;
 
@@ -110,11 +112,30 @@ class FlowActivityEntityReader extends TimelineEntityReader {
       Connection conn, FilterList filterList) throws IOException {
     Scan scan = new Scan();
     String clusterId = getContext().getClusterId();
-    if (getFilters().getCreatedTimeBegin() == 0L &&
-        getFilters().getCreatedTimeEnd() == Long.MAX_VALUE) {
+    if (getFilters().getFromId() == null
+        && getFilters().getCreatedTimeBegin() == 0L
+        && getFilters().getCreatedTimeEnd() == Long.MAX_VALUE) {
        // All records have to be chosen.
       scan.setRowPrefixFilter(new FlowActivityRowKeyPrefix(clusterId)
           .getRowKeyPrefix());
+    } else if (getFilters().getFromId() != null) {
+      FlowActivityRowKey key = null;
+      try {
+        key =
+            FlowActivityRowKey.parseRowKeyFromString(getFilters().getFromId());
+      } catch (IllegalArgumentException e) {
+        throw new BadRequestException("Invalid fromid filter is provided.");
+      }
+      if (!clusterId.equals(key.getClusterId())) {
+        throw new BadRequestException(
+            "fromid doesn't belong to clusterId=" + clusterId);
+      }
+      scan.setStartRow(key.getRowKey());
+      scan.setStopRow(
+          new FlowActivityRowKeyPrefix(clusterId,
+              (getFilters().getCreatedTimeBegin() <= 0 ? 0
+                  : (getFilters().getCreatedTimeBegin() - 1)))
+                      .getRowKeyPrefix());
     } else {
       scan.setStartRow(new FlowActivityRowKeyPrefix(clusterId, getFilters()
           .getCreatedTimeEnd()).getRowKeyPrefix());
@@ -157,7 +178,8 @@ class FlowActivityEntityReader extends TimelineEntityReader {
       flowRun.setId(flowRun.getId());
       flowActivity.addFlowRun(flowRun);
     }
-
+    flowActivity.getInfo().put(TimelineReaderUtils.FROMID_KEY,
+        rowKey.getRowKeyAsString());
     return flowActivity;
   }
 }
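
The reader change above composes paging with the existing time-range scan: a fromid becomes the inclusive start row (so the resumed page repeats that entity), the stop row is still derived from createdTimeBegin so daterange and fromid work together, and a malformed or foreign-cluster fromid fails fast with a 400. The same scan-window pattern in isolation, as a sketch with stand-in byte arrays:

import org.apache.hadoop.hbase.client.Scan;

public final class ScanWindowSketch {
  /**
   * Builds a resumable scan window: startRow is inclusive (the fromid row
   * is returned again), stopRow is exclusive (derived from the time range).
   */
  public static Scan resumeWindow(byte[] startRow, byte[] stopRow) {
    Scan scan = new Scan();
    scan.setStartRow(startRow);
    scan.setStopRow(stopRow);
    return scan;
  }
}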

http://git-wip-us.apache.org/repos/asf/hadoop/blob/8bb26465/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/test/java/org/apache/hadoop/yarn/server/timelineservice/storage/common/TestRowKeys.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/test/java/org/apache/hadoop/yarn/server/timelineservice/storage/common/TestRowKeys.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/test/java/org/apache/hadoop/yarn/server/timelineservice/storage/common/TestRowKeys.java
index cbd2273..bac5f85 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/test/java/org/apache/hadoop/yarn/server/timelineservice/storage/common/TestRowKeys.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/test/java/org/apache/hadoop/yarn/server/timelineservice/storage/common/TestRowKeys.java
@@ -23,6 +23,7 @@ import static org.junit.Assert.assertTrue;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.yarn.api.records.ApplicationId;
 import org.apache.hadoop.yarn.api.records.timelineservice.TimelineEntity;
+import org.apache.hadoop.yarn.server.timelineservice.reader.TimelineReaderUtils;
 import org.apache.hadoop.yarn.server.timelineservice.storage.application.ApplicationRowKey;
 import org.apache.hadoop.yarn.server.timelineservice.storage.application.ApplicationRowKeyPrefix;
 import org.apache.hadoop.yarn.server.timelineservice.storage.apptoflow.AppToFlowRowKey;
@@ -225,6 +226,26 @@ public class TestRowKeys {
   }
 
   @Test
+  public void testFlowActivityRowKeyAsString() {
+    String cluster = "cl" + TimelineReaderUtils.DEFAULT_DELIMITER_CHAR + "uster"
+        + TimelineReaderUtils.DEFAULT_ESCAPE_CHAR;
+    String user = TimelineReaderUtils.DEFAULT_ESCAPE_CHAR + "user";
+    String fName = "dummy_" + TimelineReaderUtils.DEFAULT_DELIMITER_CHAR
+        + TimelineReaderUtils.DEFAULT_ESCAPE_CHAR + "flow"
+        + TimelineReaderUtils.DEFAULT_DELIMITER_CHAR;
+    Long ts = 1459900830000L;
+    Long dayTimestamp = HBaseTimelineStorageUtils.getTopOfTheDayTimestamp(ts);
+    String rowKeyAsString =
+        new FlowActivityRowKey(cluster, ts, user, fName).getRowKeyAsString();
+    FlowActivityRowKey rowKey =
+        FlowActivityRowKey.parseRowKeyFromString(rowKeyAsString);
+    assertEquals(cluster, rowKey.getClusterId());
+    assertEquals(dayTimestamp, rowKey.getDayTimestamp());
+    assertEquals(user, rowKey.getUserId());
+    assertEquals(fName, rowKey.getFlowName());
+  }
+
+  @Test
   public void testFlowRunRowKey() {
     byte[] byteRowKey =
         new FlowRunRowKey(CLUSTER, USER, FLOW_NAME, FLOW_RUN_ID).getRowKey();




[17/50] [abbrv] hadoop git commit: YARN-6820. Restrict read access to timelineservice v2 data. Contributed by Vrushali C

Posted by st...@apache.org.
YARN-6820. Restrict read access to timelineservice v2 data. Contributed by Vrushali C


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/d5ff965f
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/d5ff965f
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/d5ff965f

Branch: refs/heads/HADOOP-13345
Commit: d5ff965fee41fed28d3b94e11e546c1eb4c78a35
Parents: 60765af
Author: Jason Lowe <jl...@yahoo-inc.com>
Authored: Fri Aug 11 13:05:05 2017 -0500
Committer: Varun Saxena <va...@apache.org>
Committed: Wed Aug 30 11:29:53 2017 +0530

----------------------------------------------------------------------
 .../hadoop/yarn/conf/YarnConfiguration.java     |  31 ++
 .../reader/TimelineReaderServer.java            |   5 +
 .../reader/TimelineReaderWebServicesUtils.java  |  29 +-
 ...elineReaderWhitelistAuthorizationFilter.java | 123 ++++++
 ...WhitelistAuthorizationFilterInitializer.java |  66 ++++
 ...elineReaderWhitelistAuthorizationFilter.java | 380 +++++++++++++++++++
 6 files changed, 630 insertions(+), 4 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/d5ff965f/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
index de90c69..ff9632a 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
@@ -2167,6 +2167,37 @@ public class YarnConfiguration extends Configuration {
       + "hbase.configuration.file";
 
   /**
+   * The name of the setting that enables or disables authentication checks
+   * for reading timeline service v2 data.
+   */
+  public static final String TIMELINE_SERVICE_READ_AUTH_ENABLED =
+      TIMELINE_SERVICE_PREFIX + "read.authentication.enabled";
+
+  /**
+   * The default setting for authentication checks for reading timeline
+   * service v2 data.
+   */
+  public static final Boolean DEFAULT_TIMELINE_SERVICE_READ_AUTH_ENABLED =
+      false;
+
+  /**
+   * The name of the setting that lists the users and groups who are allowed
+   * to read timeline service v2 data. It is a comma-separated list of
+   * users, followed by a space, then a comma-separated list of groups.
+   * Only the listed users and groups may read the data; everyone else
+   * is rejected.
+   */
+  public static final String TIMELINE_SERVICE_READ_ALLOWED_USERS =
+      TIMELINE_SERVICE_PREFIX + "read.allowed.users";
+
+  /**
+   * The default value for the list of users who are allowed to read
+   * timeline service v2 data.
+   */
+  public static final String DEFAULT_TIMELINE_SERVICE_READ_ALLOWED_USERS =
+      "";
+
+  /**
    * The setting that controls how long the final value of a metric of a
    * completed app is retained before merging into the flow sum. Up to this time
    * after an application is completed out-of-order values that arrive can be

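As a hedged usage sketch (not part of the patch), the two new keys could be set programmatically like this; the user and group names are invented:

import org.apache.hadoop.yarn.conf.YarnConfiguration;

public class TimelineReadAuthConfigSketch {
  public static void main(String[] args) {
    YarnConfiguration conf = new YarnConfiguration();
    // Turn on authorization checks for timeline service v2 reads.
    conf.setBoolean(
        YarnConfiguration.TIMELINE_SERVICE_READ_AUTH_ENABLED, true);
    // AccessControlList syntax: comma-separated users, one space,
    // then comma-separated groups.
    conf.set(YarnConfiguration.TIMELINE_SERVICE_READ_ALLOWED_USERS,
        "user1,user2 group1,group2");
    System.out.println(conf.get(
        YarnConfiguration.TIMELINE_SERVICE_READ_ALLOWED_USERS));
  }
}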
http://git-wip-us.apache.org/repos/asf/hadoop/blob/d5ff965f/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/reader/TimelineReaderServer.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/reader/TimelineReaderServer.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/reader/TimelineReaderServer.java
index 61f2425..5c049ea 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/reader/TimelineReaderServer.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/reader/TimelineReaderServer.java
@@ -40,6 +40,7 @@ import org.apache.hadoop.yarn.conf.YarnConfiguration;
 import org.apache.hadoop.yarn.exceptions.YarnException;
 import org.apache.hadoop.yarn.exceptions.YarnRuntimeException;
 import org.apache.hadoop.yarn.server.timelineservice.reader.security.TimelineReaderAuthenticationFilterInitializer;
+import org.apache.hadoop.yarn.server.timelineservice.reader.security.TimelineReaderWhitelistAuthorizationFilterInitializer;
 import org.apache.hadoop.yarn.server.timelineservice.storage.TimelineReader;
 import org.apache.hadoop.yarn.server.util.timeline.TimelineServerUtils;
 import org.apache.hadoop.yarn.webapp.GenericExceptionHandler;
@@ -158,6 +159,10 @@ public class TimelineReaderServer extends CompositeService {
       defaultInitializers.add(
           TimelineReaderAuthenticationFilterInitializer.class.getName());
     }
+
+    defaultInitializers.add(
+        TimelineReaderWhitelistAuthorizationFilterInitializer.class.getName());
+
     TimelineServerUtils.setTimelineFilters(
         conf, initializers, defaultInitializers);
   }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d5ff965f/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/reader/TimelineReaderWebServicesUtils.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/reader/TimelineReaderWebServicesUtils.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/reader/TimelineReaderWebServicesUtils.java
index cded3a1..d613eab 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/reader/TimelineReaderWebServicesUtils.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/reader/TimelineReaderWebServicesUtils.java
@@ -18,6 +18,7 @@
 
 package org.apache.hadoop.yarn.server.timelineservice.reader;
 
+import java.security.Principal;
 import java.util.EnumSet;
 
 import javax.servlet.http.HttpServletRequest;
@@ -31,7 +32,7 @@ import org.apache.hadoop.yarn.server.timelineservice.storage.TimelineReader.Fiel
 /**
  * Set of utility methods to be used by timeline reader web services.
  */
-final class TimelineReaderWebServicesUtils {
+public final class TimelineReaderWebServicesUtils {
 
   private TimelineReaderWebServicesUtils() {
   }
@@ -248,16 +249,36 @@ final class TimelineReaderWebServicesUtils {
   }
 
   /**
-   * Get UGI from HTTP request.
+   * Get UGI based on the remote user in the HTTP request.
+   *
    * @param req HTTP request.
    * @return UGI.
    */
-  static UserGroupInformation getUser(HttpServletRequest req) {
-    String remoteUser = req.getRemoteUser();
+  public static UserGroupInformation getUser(HttpServletRequest req) {
+    return getCallerUserGroupInformation(req, false);
+  }
+
+  /**
+   * Get UGI from the HTTP request.
+   *
+   * @param hsr HTTP request.
+   * @param usePrincipal if true, use the principal name; else the remote user name
+   * @return UGI.
+   */
+  public static UserGroupInformation getCallerUserGroupInformation(
+      HttpServletRequest hsr, boolean usePrincipal) {
+
+    String remoteUser = hsr.getRemoteUser();
+    if (usePrincipal) {
+      Principal princ = hsr.getUserPrincipal();
+      remoteUser = princ == null ? null : princ.getName();
+    }
+
     UserGroupInformation callerUGI = null;
     if (remoteUser != null) {
       callerUGI = UserGroupInformation.createRemoteUser(remoteUser);
     }
+
     return callerUGI;
   }
 

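A small hedged sketch, using Mockito as the new unit tests below do, of the remote-user versus principal distinction that getCallerUserGroupInformation() switches on; the user names are invented:

import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.when;

import java.security.Principal;
import javax.servlet.http.HttpServletRequest;

public class CallerUgiSketch {
  public static void main(String[] args) {
    HttpServletRequest req = mock(HttpServletRequest.class);
    when(req.getRemoteUser()).thenReturn("remoteUser");
    // Principal has a single abstract method, getName(), so a lambda
    // suffices on Java 8+.
    Principal principal = () -> "principalUser";
    when(req.getUserPrincipal()).thenReturn(principal);
    // usePrincipal == false reads the first; usePrincipal == true the second.
    System.out.println(req.getRemoteUser());              // remoteUser
    System.out.println(req.getUserPrincipal().getName()); // principalUser
  }
}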
http://git-wip-us.apache.org/repos/asf/hadoop/blob/d5ff965f/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/reader/security/TimelineReaderWhitelistAuthorizationFilter.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/reader/security/TimelineReaderWhitelistAuthorizationFilter.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/reader/security/TimelineReaderWhitelistAuthorizationFilter.java
new file mode 100644
index 0000000..b22ea3f
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/reader/security/TimelineReaderWhitelistAuthorizationFilter.java
@@ -0,0 +1,123 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.server.timelineservice.reader.security;
+
+import java.io.IOException;
+
+import javax.servlet.Filter;
+import javax.servlet.FilterChain;
+import javax.servlet.FilterConfig;
+import javax.servlet.ServletException;
+import javax.servlet.ServletRequest;
+import javax.servlet.ServletResponse;
+import javax.servlet.http.HttpServletRequest;
+import javax.ws.rs.core.Response;
+import javax.ws.rs.core.Response.Status;
+
+import org.apache.commons.lang3.StringUtils;
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.security.UserGroupInformation;
+import org.apache.hadoop.security.authorize.AccessControlList;
+import org.apache.hadoop.security.authorize.AuthorizationException;
+import org.apache.hadoop.yarn.conf.YarnConfiguration;
+import org.apache.hadoop.yarn.webapp.ForbiddenException;
+import org.apache.hadoop.yarn.server.timelineservice.reader.TimelineReaderWebServicesUtils;
+
+/**
+ * Filter to check if a particular user is allowed to read ATSv2 data.
+ */
+
+public class TimelineReaderWhitelistAuthorizationFilter implements Filter {
+
+  public static final String EMPTY_STRING = "";
+
+  private static final Log LOG =
+      LogFactory.getLog(TimelineReaderWhitelistAuthorizationFilter.class);
+
+  private boolean isWhitelistReadAuthEnabled = false;
+
+  private AccessControlList allowedUsersAclList;
+  private AccessControlList adminAclList;
+
+  @Override
+  public void destroy() {
+    // NOTHING
+  }
+
+  @Override
+  public void doFilter(ServletRequest request, ServletResponse response,
+      FilterChain chain) throws IOException, ServletException {
+    if (isWhitelistReadAuthEnabled) {
+      UserGroupInformation callerUGI = TimelineReaderWebServicesUtils
+          .getCallerUserGroupInformation((HttpServletRequest) request, true);
+      if (callerUGI == null) {
+        String msg = "Unable to obtain user name, user not authenticated";
+        throw new AuthorizationException(msg);
+      }
+      if (!(adminAclList.isUserAllowed(callerUGI)
+          || allowedUsersAclList.isUserAllowed(callerUGI))) {
+        String userName = callerUGI.getShortUserName();
+        String msg = "User " + userName
+            + " is not allowed to read TimelineService V2 data.";
+        Response.status(Status.FORBIDDEN).entity(msg).build();
+        throw new ForbiddenException("user " + userName
+            + " is not allowed to read TimelineService V2 data");
+      }
+    }
+    if (chain != null) {
+      chain.doFilter(request, response);
+    }
+  }
+
+  @Override
+  public void init(FilterConfig conf) throws ServletException {
+    String isWhitelistReadAuthEnabledStr = conf
+        .getInitParameter(YarnConfiguration.TIMELINE_SERVICE_READ_AUTH_ENABLED);
+    if (isWhitelistReadAuthEnabledStr == null) {
+      isWhitelistReadAuthEnabled =
+          YarnConfiguration.DEFAULT_TIMELINE_SERVICE_READ_AUTH_ENABLED;
+    } else {
+      isWhitelistReadAuthEnabled =
+          Boolean.valueOf(isWhitelistReadAuthEnabledStr);
+    }
+
+    if (isWhitelistReadAuthEnabled) {
+      String listAllowedUsers = conf.getInitParameter(
+          YarnConfiguration.TIMELINE_SERVICE_READ_ALLOWED_USERS);
+      if (StringUtils.isEmpty(listAllowedUsers)) {
+        listAllowedUsers =
+            YarnConfiguration.DEFAULT_TIMELINE_SERVICE_READ_ALLOWED_USERS;
+      }
+      LOG.info("listAllowedUsers=" + listAllowedUsers);
+      allowedUsersAclList = new AccessControlList(listAllowedUsers);
+      LOG.info("allowedUsersAclList=" + allowedUsersAclList.getUsers());
+      // also allow admins
+      String adminAclListStr =
+          conf.getInitParameter(YarnConfiguration.YARN_ADMIN_ACL);
+      if (StringUtils.isEmpty(adminAclListStr)) {
+        adminAclListStr =
+            TimelineReaderWhitelistAuthorizationFilter.EMPTY_STRING;
+        LOG.info("adminAclList not set, hence setting it to \"\"");
+      }
+      adminAclList = new AccessControlList(adminAclListStr);
+      LOG.info("adminAclList=" + adminAclList.getUsers());
+    }
+  }
+}

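To illustrate the AccessControlList semantics the filter leans on, a minimal hedged sketch with invented names (group membership, if any, is resolved through Hadoop's configured group mapping):

import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.security.authorize.AccessControlList;

public class WhitelistAclSketch {
  public static void main(String[] args) {
    // ACL string syntax: comma-separated users, one space, then groups.
    AccessControlList acl = new AccessControlList("user1,user2 group1");
    UserGroupInformation listed =
        UserGroupInformation.createRemoteUser("user1");
    UserGroupInformation unlisted =
        UserGroupInformation.createRemoteUser("someoneElse");
    System.out.println(acl.isUserAllowed(listed));   // true
    System.out.println(acl.isUserAllowed(unlisted)); // false unless in group1
  }
}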
http://git-wip-us.apache.org/repos/asf/hadoop/blob/d5ff965f/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/reader/security/TimelineReaderWhitelistAuthorizationFilterInitializer.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/reader/security/TimelineReaderWhitelistAuthorizationFilterInitializer.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/reader/security/TimelineReaderWhitelistAuthorizationFilterInitializer.java
new file mode 100644
index 0000000..a970731
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/reader/security/TimelineReaderWhitelistAuthorizationFilterInitializer.java
@@ -0,0 +1,66 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.server.timelineservice.reader.security;
+
+import java.util.HashMap;
+import java.util.Map;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.http.FilterContainer;
+import org.apache.hadoop.http.FilterInitializer;
+import org.apache.hadoop.yarn.conf.YarnConfiguration;
+
+/**
+ * Filter initializer to initialize
+ * {@link TimelineReaderWhitelistAuthorizationFilter} for ATSv2 timeline reader
+ * with timeline service specific configurations.
+ */
+public class TimelineReaderWhitelistAuthorizationFilterInitializer
+    extends FilterInitializer {
+
+  /**
+   * Initializes {@link TimelineReaderWhitelistAuthorizationFilter}.
+   *
+   * @param container The filter container
+   * @param conf Configuration for run-time parameters
+   */
+  @Override
+  public void initFilter(FilterContainer container, Configuration conf) {
+    Map<String, String> params = new HashMap<String, String>();
+    String isWhitelistReadAuthEnabled = Boolean.toString(
+        conf.getBoolean(YarnConfiguration.TIMELINE_SERVICE_READ_AUTH_ENABLED,
+            YarnConfiguration.DEFAULT_TIMELINE_SERVICE_READ_AUTH_ENABLED));
+    params.put(YarnConfiguration.TIMELINE_SERVICE_READ_AUTH_ENABLED,
+        isWhitelistReadAuthEnabled);
+    params.put(YarnConfiguration.TIMELINE_SERVICE_READ_ALLOWED_USERS,
+        conf.get(YarnConfiguration.TIMELINE_SERVICE_READ_ALLOWED_USERS,
+            YarnConfiguration.DEFAULT_TIMELINE_SERVICE_READ_ALLOWED_USERS));
+
+    params.put(YarnConfiguration.YARN_ADMIN_ACL,
+        conf.get(YarnConfiguration.YARN_ADMIN_ACL,
+            // using a default of ""
+            // instead of DEFAULT_YARN_ADMIN_ACL
+            // The reason being, DEFAULT_YARN_ADMIN_ACL is set to all users
+            // and we do not wish to allow everyone by default if
+            // read auth is enabled and YARN_ADMIN_ACL is unset
+            TimelineReaderWhitelistAuthorizationFilter.EMPTY_STRING));
+    container.addGlobalFilter("Timeline Reader Whitelist Authorization Filter",
+        TimelineReaderWhitelistAuthorizationFilter.class.getName(), params);
+  }
+}

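For context, a hedged sketch of the conventional way FilterInitializer implementations are activated, via the hadoop.http.filter.initializers key that the Hadoop HTTP server reads; as shown earlier in this commit, TimelineReaderServer instead appends the class to its filter-initializer list programmatically:

import org.apache.hadoop.conf.Configuration;

public class FilterInitializerWiringSketch {
  public static void main(String[] args) {
    Configuration conf = new Configuration();
    // Each class named here is instantiated and its initFilter() called.
    conf.set("hadoop.http.filter.initializers",
        "org.apache.hadoop.yarn.server.timelineservice.reader.security."
            + "TimelineReaderWhitelistAuthorizationFilterInitializer");
    System.out.println(conf.get("hadoop.http.filter.initializers"));
  }
}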
http://git-wip-us.apache.org/repos/asf/hadoop/blob/d5ff965f/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/test/java/org/apache/hadoop/yarn/server/timelineservice/reader/TestTimelineReaderWhitelistAuthorizationFilter.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/test/java/org/apache/hadoop/yarn/server/timelineservice/reader/TestTimelineReaderWhitelistAuthorizationFilter.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/test/java/org/apache/hadoop/yarn/server/timelineservice/reader/TestTimelineReaderWhitelistAuthorizationFilter.java
new file mode 100644
index 0000000..bd4f0c2
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/test/java/org/apache/hadoop/yarn/server/timelineservice/reader/TestTimelineReaderWhitelistAuthorizationFilter.java
@@ -0,0 +1,380 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.server.timelineservice.reader;
+
+import static org.mockito.Mockito.mock;
+import static org.mockito.Mockito.when;
+
+import java.io.IOException;
+import java.security.Principal;
+import java.security.PrivilegedExceptionAction;
+import java.util.Collections;
+import java.util.Enumeration;
+import java.util.HashMap;
+import java.util.Map;
+
+import javax.servlet.FilterConfig;
+import javax.servlet.ServletContext;
+import javax.servlet.ServletException;
+import javax.servlet.ServletResponse;
+import javax.servlet.http.HttpServletRequest;
+
+import org.apache.hadoop.security.UserGroupInformation;
+import org.apache.hadoop.yarn.conf.YarnConfiguration;
+import org.apache.hadoop.yarn.server.timelineservice.reader.security.TimelineReaderWhitelistAuthorizationFilter;
+import org.apache.hadoop.yarn.webapp.ForbiddenException;
+import org.junit.Test;
+import org.mockito.Mockito;
+
+/**
+ * Unit tests for {@link TimelineReaderWhitelistAuthorizationFilter}.
+ *
+ */
+public class TestTimelineReaderWhitelistAuthorizationFilter {
+
+  final private static String GROUP1_NAME = "group1";
+  final private static String GROUP2_NAME = "group2";
+  final private static String GROUP3_NAME = "group3";
+  final private static String[] GROUP_NAMES =
+      new String[] {GROUP1_NAME, GROUP2_NAME, GROUP3_NAME};
+
+  private static class DummyFilterConfig implements FilterConfig {
+    final private Map<String, String> map;
+
+    DummyFilterConfig(Map<String, String> map) {
+      this.map = map;
+    }
+
+    @Override
+    public String getFilterName() {
+      return "dummy";
+    }
+
+    @Override
+    public String getInitParameter(String arg0) {
+      return map.get(arg0);
+    }
+
+    @Override
+    public Enumeration<String> getInitParameterNames() {
+      return Collections.enumeration(map.keySet());
+    }
+
+    @Override
+    public ServletContext getServletContext() {
+      return null;
+    }
+  }
+
+  @Test
+  public void checkFilterAllowedUser() throws ServletException, IOException {
+    Map<String, String> map = new HashMap<String, String>();
+    map.put(YarnConfiguration.TIMELINE_SERVICE_READ_AUTH_ENABLED, "true");
+    map.put(YarnConfiguration.TIMELINE_SERVICE_READ_ALLOWED_USERS,
+        "user1,user2");
+    TimelineReaderWhitelistAuthorizationFilter f =
+        new TimelineReaderWhitelistAuthorizationFilter();
+    FilterConfig fc = new DummyFilterConfig(map);
+    f.init(fc);
+    HttpServletRequest mockHsr = Mockito.mock(HttpServletRequest.class);
+    Mockito.when(mockHsr.getUserPrincipal()).thenReturn(new Principal() {
+      @Override
+      public String getName() {
+        return "user1";
+      }
+    });
+
+    ServletResponse r = Mockito.mock(ServletResponse.class);
+    f.doFilter(mockHsr, r, null);
+  }
+
+  @Test(expected = ForbiddenException.class)
+  public void checkFilterNotAllowedUser() throws ServletException, IOException {
+    Map<String, String> map = new HashMap<String, String>();
+    map.put(YarnConfiguration.TIMELINE_SERVICE_READ_AUTH_ENABLED, "true");
+    map.put(YarnConfiguration.TIMELINE_SERVICE_READ_ALLOWED_USERS,
+        "user1,user2");
+    TimelineReaderWhitelistAuthorizationFilter f =
+        new TimelineReaderWhitelistAuthorizationFilter();
+    FilterConfig fc = new DummyFilterConfig(map);
+    f.init(fc);
+    HttpServletRequest mockHsr = Mockito.mock(HttpServletRequest.class);
+    Mockito.when(mockHsr.getUserPrincipal()).thenReturn(new Principal() {
+      @Override
+      public String getName() {
+        return "testuser1";
+      }
+    });
+    ServletResponse r = Mockito.mock(ServletResponse.class);
+    f.doFilter(mockHsr, r, null);
+  }
+
+  @Test
+  public void checkFilterAllowedUserGroup()
+      throws ServletException, IOException, InterruptedException {
+    Map<String, String> map = new HashMap<String, String>();
+    map.put(YarnConfiguration.TIMELINE_SERVICE_READ_AUTH_ENABLED, "true");
+    map.put(YarnConfiguration.TIMELINE_SERVICE_READ_ALLOWED_USERS,
+        "user2 group1,group2");
+    TimelineReaderWhitelistAuthorizationFilter f =
+        new TimelineReaderWhitelistAuthorizationFilter();
+    FilterConfig fc = new DummyFilterConfig(map);
+    f.init(fc);
+    HttpServletRequest mockHsr = Mockito.mock(HttpServletRequest.class);
+    Mockito.when(mockHsr.getUserPrincipal()).thenReturn(new Principal() {
+      @Override
+      public String getName() {
+        return "user1";
+      }
+    });
+    ServletResponse r = Mockito.mock(ServletResponse.class);
+    UserGroupInformation user1 =
+        UserGroupInformation.createUserForTesting("user1", GROUP_NAMES);
+    user1.doAs(new PrivilegedExceptionAction<Object>() {
+      @Override
+      public Object run() throws Exception {
+        f.doFilter(mockHsr, r, null);
+        return null;
+      }
+    });
+  }
+
+  @Test(expected = ForbiddenException.class)
+  public void checkFilterNotAllowedGroup()
+      throws ServletException, IOException, InterruptedException {
+    Map<String, String> map = new HashMap<String, String>();
+    map.put(YarnConfiguration.TIMELINE_SERVICE_READ_AUTH_ENABLED, "true");
+    map.put(YarnConfiguration.TIMELINE_SERVICE_READ_ALLOWED_USERS,
+        " group5,group6");
+    TimelineReaderWhitelistAuthorizationFilter f =
+        new TimelineReaderWhitelistAuthorizationFilter();
+    FilterConfig fc = new DummyFilterConfig(map);
+    f.init(fc);
+    HttpServletRequest mockHsr = Mockito.mock(HttpServletRequest.class);
+    Mockito.when(mockHsr.getUserPrincipal()).thenReturn(new Principal() {
+      @Override
+      public String getName() {
+        return "user200";
+      }
+    });
+    ServletResponse r = Mockito.mock(ServletResponse.class);
+    UserGroupInformation user1 =
+        UserGroupInformation.createUserForTesting("user200", GROUP_NAMES);
+    user1.doAs(new PrivilegedExceptionAction<Object>() {
+      @Override
+      public Object run() throws Exception {
+        f.doFilter(mockHsr, r, null);
+        return null;
+      }
+    });
+  }
+
+  @Test
+  public void checkFilterAllowAdmins()
+      throws ServletException, IOException, InterruptedException {
+    // check that users in admin acl list are allowed to read
+    Map<String, String> map = new HashMap<String, String>();
+    map.put(YarnConfiguration.TIMELINE_SERVICE_READ_AUTH_ENABLED, "true");
+    map.put(YarnConfiguration.TIMELINE_SERVICE_READ_ALLOWED_USERS,
+        "user3 group5,group6");
+    map.put(YarnConfiguration.YARN_ADMIN_ACL, " group1,group2");
+    TimelineReaderWhitelistAuthorizationFilter f =
+        new TimelineReaderWhitelistAuthorizationFilter();
+    FilterConfig fc = new DummyFilterConfig(map);
+    f.init(fc);
+    HttpServletRequest mockHsr = Mockito.mock(HttpServletRequest.class);
+    Mockito.when(mockHsr.getUserPrincipal()).thenReturn(new Principal() {
+      @Override
+      public String getName() {
+        return "user90";
+      }
+    });
+    ServletResponse r = Mockito.mock(ServletResponse.class);
+    UserGroupInformation user1 =
+        UserGroupInformation.createUserForTesting("user90", GROUP_NAMES);
+    user1.doAs(new PrivilegedExceptionAction<Object>() {
+      @Override
+      public Object run() throws Exception {
+        f.doFilter(mockHsr, r, null);
+        return null;
+      }
+    });
+  }
+
+  @Test
+  public void checkFilterAllowAdminsWhenNoUsersSet()
+      throws ServletException, IOException, InterruptedException {
+    // check that users in admin acl list are allowed to read
+    Map<String, String> map = new HashMap<String, String>();
+    map.put(YarnConfiguration.TIMELINE_SERVICE_READ_AUTH_ENABLED, "true");
+    map.put(YarnConfiguration.YARN_ADMIN_ACL, " group1,group2");
+    TimelineReaderWhitelistAuthorizationFilter f =
+        new TimelineReaderWhitelistAuthorizationFilter();
+    FilterConfig fc = new DummyFilterConfig(map);
+    f.init(fc);
+    HttpServletRequest mockHsr = Mockito.mock(HttpServletRequest.class);
+    Mockito.when(mockHsr.getUserPrincipal()).thenReturn(new Principal() {
+      @Override
+      public String getName() {
+        return "user90";
+      }
+    });
+    ServletResponse r = Mockito.mock(ServletResponse.class);
+    UserGroupInformation user1 =
+        UserGroupInformation.createUserForTesting("user90", GROUP_NAMES);
+    user1.doAs(new PrivilegedExceptionAction<Object>() {
+      @Override
+      public Object run() throws Exception {
+        f.doFilter(mockHsr, r, null);
+        return null;
+      }
+    });
+  }
+
+  @Test(expected = ForbiddenException.class)
+  public void checkFilterAllowNoOneWhenAdminAclsEmptyAndUserAclsEmpty()
+      throws ServletException, IOException, InterruptedException {
+    // check that no one is allowed to read when both admin acls and the
+    // allowed read users list are empty
+    Map<String, String> map = new HashMap<String, String>();
+    map.put(YarnConfiguration.TIMELINE_SERVICE_READ_AUTH_ENABLED, "true");
+    TimelineReaderWhitelistAuthorizationFilter f =
+        new TimelineReaderWhitelistAuthorizationFilter();
+    FilterConfig fc = new DummyFilterConfig(map);
+    f.init(fc);
+    HttpServletRequest mockHsr = Mockito.mock(HttpServletRequest.class);
+    Mockito.when(mockHsr.getUserPrincipal()).thenReturn(new Principal() {
+      @Override
+      public String getName() {
+        return "user88";
+      }
+    });
+    ServletResponse r = Mockito.mock(ServletResponse.class);
+    UserGroupInformation user1 =
+        UserGroupInformation.createUserForTesting("user88", GROUP_NAMES);
+    user1.doAs(new PrivilegedExceptionAction<Object>() {
+      @Override
+      public Object run() throws Exception {
+        f.doFilter(mockHsr, r, null);
+        return null;
+      }
+    });
+  }
+
+  @Test
+  public void checkFilterReadAuthDisabledNoAclSettings()
+      throws ServletException, IOException, InterruptedException {
+    // Default settings for Read Auth Enabled (false)
+    // No values in admin acls or allowed read user list
+    Map<String, String> map = new HashMap<String, String>();
+    TimelineReaderWhitelistAuthorizationFilter f =
+        new TimelineReaderWhitelistAuthorizationFilter();
+    FilterConfig fc = new DummyFilterConfig(map);
+    f.init(fc);
+    HttpServletRequest mockHsr = Mockito.mock(HttpServletRequest.class);
+    Mockito.when(mockHsr.getUserPrincipal()).thenReturn(new Principal() {
+      @Override
+      public String getName() {
+        return "user437";
+      }
+    });
+    ServletResponse r = Mockito.mock(ServletResponse.class);
+    UserGroupInformation user1 =
+        UserGroupInformation.createUserForTesting("user437", GROUP_NAMES);
+    user1.doAs(new PrivilegedExceptionAction<Object>() {
+      @Override
+      public Object run() throws Exception {
+        f.doFilter(mockHsr, r, null);
+        return null;
+      }
+    });
+  }
+
+  @Test
+  public void checkFilterReadAuthDisabledButAclSettingsPopulated()
+      throws ServletException, IOException, InterruptedException {
+    Map<String, String> map = new HashMap<String, String>();
+    // Default settings for Read Auth Enabled (false)
+    // But some values in admin acls and allowed read user list
+    map.put(YarnConfiguration.YARN_ADMIN_ACL, "user1,user2 group9,group21");
+    map.put(YarnConfiguration.TIMELINE_SERVICE_READ_ALLOWED_USERS,
+        "user27,user36 group5,group6");
+    TimelineReaderWhitelistAuthorizationFilter f =
+        new TimelineReaderWhitelistAuthorizationFilter();
+    FilterConfig fc = new DummyFilterConfig(map);
+    f.init(fc);
+
+    HttpServletRequest mockHsr = mock(HttpServletRequest.class);
+    when(mockHsr.getUserPrincipal()).thenReturn(new Principal() {
+      @Override
+      public String getName() {
+        return "user37";
+      }
+    });
+
+    ServletResponse r = Mockito.mock(ServletResponse.class);
+    UserGroupInformation user1 =
+        // both username and group name are not part of admin and
+        // read allowed users
+        // but read auth is turned off
+        UserGroupInformation.createUserForTesting("user37", GROUP_NAMES);
+    user1.doAs(new PrivilegedExceptionAction<Object>() {
+      @Override
+      public Object run() throws Exception {
+        f.doFilter(mockHsr, r, null);
+        return null;
+      }
+    });
+
+    // test with username in read allowed users
+    Mockito.when(mockHsr.getUserPrincipal()).thenReturn(new Principal() {
+      @Override
+      public String getName() {
+        return "user27";
+      }
+    });
+    ServletResponse r2 = Mockito.mock(ServletResponse.class);
+    UserGroupInformation user2 =
+        UserGroupInformation.createUserForTesting("user27", GROUP_NAMES);
+    user2.doAs(new PrivilegedExceptionAction<Object>() {
+      @Override
+      public Object run() throws Exception {
+        f.doFilter(mockHsr, r2, null);
+        return null;
+      }
+    });
+
+    // test with username in admin users
+    Mockito.when(mockHsr.getUserPrincipal()).thenReturn(new Principal() {
+      @Override
+      public String getName() {
+        return "user2";
+      }
+    });
+    ServletResponse r3 = Mockito.mock(ServletResponse.class);
+    UserGroupInformation user3 =
+        UserGroupInformation.createUserForTesting("user2", GROUP_NAMES);
+    user3.doAs(new PrivilegedExceptionAction<Object>() {
+      @Override
+      public Object run() throws Exception {
+        f.doFilter(mockHsr, r3, null);
+        return null;
+      }
+    });
+  }
+}




[12/50] [abbrv] hadoop git commit: YARN-6888. Refactor AppLevelTimelineCollector such that RM does not have aggregator threads created. Contributed by Vrushali C.

Posted by st...@apache.org.
YARN-6888. Refactor AppLevelTimelineCollector such that RM does not have aggregator threads created. Contributed by Vrushali C.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/3fb71b13
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/3fb71b13
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/3fb71b13

Branch: refs/heads/HADOOP-13345
Commit: 3fb71b1393018e1001da55b794dda7d26491cf35
Parents: a990ff7
Author: Rohith Sharma K S <ro...@apache.org>
Authored: Fri Jul 28 11:47:16 2017 +0530
Committer: Varun Saxena <va...@apache.org>
Committed: Wed Aug 30 11:29:53 2017 +0530

----------------------------------------------------------------------
 .../collector/AppLevelTimelineCollector.java    |  87 +----------
 .../AppLevelTimelineCollectorWithAgg.java       | 150 +++++++++++++++++++
 .../PerNodeTimelineCollectorsAuxService.java    |   2 +-
 .../TestNMTimelineCollectorManager.java         |   4 +-
 4 files changed, 155 insertions(+), 88 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/3fb71b13/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/collector/AppLevelTimelineCollector.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/collector/AppLevelTimelineCollector.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/collector/AppLevelTimelineCollector.java
index 56f7b2b..10d68bb 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/collector/AppLevelTimelineCollector.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/collector/AppLevelTimelineCollector.java
@@ -18,27 +18,17 @@
 
 package org.apache.hadoop.yarn.server.timelineservice.collector;
 
-import com.google.common.util.concurrent.ThreadFactoryBuilder;
 import org.apache.hadoop.classification.InterfaceAudience.Private;
 import org.apache.hadoop.classification.InterfaceStability.Unstable;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.yarn.api.records.ApplicationId;
-import org.apache.hadoop.yarn.api.records.timelineservice.TimelineEntities;
-import org.apache.hadoop.yarn.api.records.timelineservice.TimelineEntity;
-import org.apache.hadoop.yarn.api.records.timelineservice.TimelineEntityType;
 import org.apache.hadoop.yarn.conf.YarnConfiguration;
 
 import com.google.common.base.Preconditions;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
-import java.util.HashSet;
-import java.util.Map;
-import java.util.Set;
-import java.util.concurrent.ScheduledThreadPoolExecutor;
-import java.util.concurrent.TimeUnit;
-
 /**
  * Service that handles writes to the timeline service and writes them to the
  * backing storage for a given YARN application.
@@ -51,15 +41,8 @@ public class AppLevelTimelineCollector extends TimelineCollector {
   private static final Logger LOG =
       LoggerFactory.getLogger(TimelineCollector.class);
 
-  private final static int AGGREGATION_EXECUTOR_NUM_THREADS = 1;
-  private final static int AGGREGATION_EXECUTOR_EXEC_INTERVAL_SECS = 15;
-  private static Set<String> entityTypesSkipAggregation
-      = initializeSkipSet();
-
   private final ApplicationId appId;
   private final TimelineCollectorContext context;
-  private ScheduledThreadPoolExecutor appAggregationExecutor;
-  private AppLevelAggregator appAggregator;
   private UserGroupInformation currentUser;
 
   public AppLevelTimelineCollector(ApplicationId appId) {
@@ -69,12 +52,8 @@ public class AppLevelTimelineCollector extends TimelineCollector {
     context = new TimelineCollectorContext();
   }
 
-  private static Set<String> initializeSkipSet() {
-    Set<String> result = new HashSet<>();
-    result.add(TimelineEntityType.YARN_APPLICATION.toString());
-    result.add(TimelineEntityType.YARN_FLOW_RUN.toString());
-    result.add(TimelineEntityType.YARN_FLOW_ACTIVITY.toString());
-    return result;
+  public UserGroupInformation getCurrentUser() {
+    return currentUser;
   }
 
   @Override
@@ -92,29 +71,11 @@ public class AppLevelTimelineCollector extends TimelineCollector {
 
   @Override
   protected void serviceStart() throws Exception {
-    // Launch the aggregation thread
-    appAggregationExecutor = new ScheduledThreadPoolExecutor(
-        AppLevelTimelineCollector.AGGREGATION_EXECUTOR_NUM_THREADS,
-        new ThreadFactoryBuilder()
-            .setNameFormat("TimelineCollector Aggregation thread #%d")
-            .build());
-    appAggregator = new AppLevelAggregator();
-    appAggregationExecutor.scheduleAtFixedRate(appAggregator,
-        AppLevelTimelineCollector.AGGREGATION_EXECUTOR_EXEC_INTERVAL_SECS,
-        AppLevelTimelineCollector.AGGREGATION_EXECUTOR_EXEC_INTERVAL_SECS,
-        TimeUnit.SECONDS);
     super.serviceStart();
   }
 
   @Override
   protected void serviceStop() throws Exception {
-    appAggregationExecutor.shutdown();
-    if (!appAggregationExecutor.awaitTermination(10, TimeUnit.SECONDS)) {
-      LOG.info("App-level aggregator shutdown timed out, shutdown now. ");
-      appAggregationExecutor.shutdownNow();
-    }
-    // Perform one round of aggregation after the aggregation executor is done.
-    appAggregator.aggregate();
     super.serviceStop();
   }
 
@@ -123,48 +84,4 @@ public class AppLevelTimelineCollector extends TimelineCollector {
     return context;
   }
 
-  @Override
-  protected Set<String> getEntityTypesSkipAggregation() {
-    return entityTypesSkipAggregation;
-  }
-
-  private class AppLevelAggregator implements Runnable {
-
-    private void aggregate() {
-      if (LOG.isDebugEnabled()) {
-        LOG.debug("App-level real-time aggregating");
-      }
-      if (!isReadyToAggregate()) {
-        LOG.warn("App-level collector is not ready, skip aggregation. ");
-        return;
-      }
-      try {
-        TimelineCollectorContext currContext = getTimelineEntityContext();
-        Map<String, AggregationStatusTable> aggregationGroups
-            = getAggregationGroups();
-        if (aggregationGroups == null
-            || aggregationGroups.isEmpty()) {
-          LOG.debug("App-level collector is empty, skip aggregation. ");
-          return;
-        }
-        TimelineEntity resultEntity = TimelineCollector.aggregateWithoutGroupId(
-            aggregationGroups, currContext.getAppId(),
-            TimelineEntityType.YARN_APPLICATION.toString());
-        TimelineEntities entities = new TimelineEntities();
-        entities.addEntity(resultEntity);
-        putEntitiesAsync(entities, currentUser);
-      } catch (Exception e) {
-        LOG.error("Error aggregating timeline metrics", e);
-      }
-      if (LOG.isDebugEnabled()) {
-        LOG.debug("App-level real-time aggregation complete");
-      }
-    }
-
-    @Override
-    public void run() {
-      aggregate();
-    }
-  }
-
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/3fb71b13/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/collector/AppLevelTimelineCollectorWithAgg.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/collector/AppLevelTimelineCollectorWithAgg.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/collector/AppLevelTimelineCollectorWithAgg.java
new file mode 100644
index 0000000..ac91275
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/collector/AppLevelTimelineCollectorWithAgg.java
@@ -0,0 +1,150 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.server.timelineservice.collector;
+
+import com.google.common.util.concurrent.ThreadFactoryBuilder;
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.classification.InterfaceAudience.Private;
+import org.apache.hadoop.classification.InterfaceStability.Unstable;
+import org.apache.hadoop.yarn.api.records.ApplicationId;
+import org.apache.hadoop.yarn.api.records.timelineservice.TimelineEntities;
+import org.apache.hadoop.yarn.api.records.timelineservice.TimelineEntity;
+import org.apache.hadoop.yarn.api.records.timelineservice.TimelineEntityType;
+import org.apache.hadoop.conf.Configuration;
+
+import java.util.HashSet;
+import java.util.Map;
+import java.util.Set;
+import java.util.concurrent.ScheduledThreadPoolExecutor;
+import java.util.concurrent.TimeUnit;
+
+/**
+ * Service that handles aggregations for applications
+ * and makes use of the {@link AppLevelTimelineCollector} class for
+ * writes to the Timeline Service.
+ *
+ * App-related lifecycle management is handled by this service.
+ */
+@Private
+@Unstable
+public class AppLevelTimelineCollectorWithAgg
+    extends AppLevelTimelineCollector {
+  private static final Log LOG = LogFactory.getLog(TimelineCollector.class);
+
+  private final static int AGGREGATION_EXECUTOR_NUM_THREADS = 1;
+  private final static int AGGREGATION_EXECUTOR_EXEC_INTERVAL_SECS = 15;
+  private static Set<String> entityTypesSkipAggregation
+      = initializeSkipSet();
+
+  private ScheduledThreadPoolExecutor appAggregationExecutor;
+  private AppLevelAggregator appAggregator;
+
+  public AppLevelTimelineCollectorWithAgg(ApplicationId appId) {
+    super(appId);
+  }
+
+  private static Set<String> initializeSkipSet() {
+    Set<String> result = new HashSet<>();
+    result.add(TimelineEntityType.YARN_APPLICATION.toString());
+    result.add(TimelineEntityType.YARN_FLOW_RUN.toString());
+    result.add(TimelineEntityType.YARN_FLOW_ACTIVITY.toString());
+    return result;
+  }
+
+  @Override
+  protected void serviceInit(Configuration conf) throws Exception {
+    super.serviceInit(conf);
+  }
+
+  @Override
+  protected void serviceStart() throws Exception {
+    // Launch the aggregation thread
+    appAggregationExecutor = new ScheduledThreadPoolExecutor(
+        AppLevelTimelineCollectorWithAgg.AGGREGATION_EXECUTOR_NUM_THREADS,
+        new ThreadFactoryBuilder()
+            .setNameFormat("TimelineCollector Aggregation thread #%d")
+            .build());
+    appAggregator = new AppLevelAggregator();
+    appAggregationExecutor.scheduleAtFixedRate(appAggregator,
+        AppLevelTimelineCollectorWithAgg.
+          AGGREGATION_EXECUTOR_EXEC_INTERVAL_SECS,
+        AppLevelTimelineCollectorWithAgg.
+          AGGREGATION_EXECUTOR_EXEC_INTERVAL_SECS,
+        TimeUnit.SECONDS);
+    super.serviceStart();
+  }
+
+  @Override
+  protected void serviceStop() throws Exception {
+    appAggregationExecutor.shutdown();
+    if (!appAggregationExecutor.awaitTermination(10, TimeUnit.SECONDS)) {
+      LOG.info("App-level aggregator shutdown timed out, shutdown now. ");
+      appAggregationExecutor.shutdownNow();
+    }
+    // Perform one round of aggregation after the aggregation executor is done.
+    appAggregator.aggregate();
+    super.serviceStop();
+  }
+
+  @Override
+  protected Set<String> getEntityTypesSkipAggregation() {
+    return entityTypesSkipAggregation;
+  }
+
+  private class AppLevelAggregator implements Runnable {
+
+    private void aggregate() {
+      if (LOG.isDebugEnabled()) {
+        LOG.debug("App-level real-time aggregating");
+      }
+      if (!isReadyToAggregate()) {
+        LOG.warn("App-level collector is not ready, skip aggregation. ");
+        return;
+      }
+      try {
+        TimelineCollectorContext currContext = getTimelineEntityContext();
+        Map<String, AggregationStatusTable> aggregationGroups
+            = getAggregationGroups();
+        if (aggregationGroups == null
+            || aggregationGroups.isEmpty()) {
+          LOG.debug("App-level collector is empty, skip aggregation. ");
+          return;
+        }
+        TimelineEntity resultEntity = TimelineCollector.aggregateWithoutGroupId(
+            aggregationGroups, currContext.getAppId(),
+            TimelineEntityType.YARN_APPLICATION.toString());
+        TimelineEntities entities = new TimelineEntities();
+        entities.addEntity(resultEntity);
+        putEntitiesAsync(entities, getCurrentUser());
+      } catch (Exception e) {
+        LOG.error("Error aggregating timeline metrics", e);
+      }
+      if (LOG.isDebugEnabled()) {
+        LOG.debug("App-level real-time aggregation complete");
+      }
+    }
+
+    @Override
+    public void run() {
+      aggregate();
+    }
+  }
+
+}

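A self-contained hedged sketch of the schedule-then-drain pattern the new subclass follows; intervals are shortened for demonstration, and plain java.util.concurrent is used instead of Guava's ThreadFactoryBuilder:

import java.util.concurrent.Executors;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.TimeUnit;

public class PeriodicAggregationSketch {
  public static void main(String[] args) throws InterruptedException {
    ScheduledExecutorService exec =
        Executors.newSingleThreadScheduledExecutor();
    Runnable aggregate = () -> System.out.println("aggregating...");
    // First run after 2s, then every 2s (the collector uses 15s for both).
    exec.scheduleAtFixedRate(aggregate, 2, 2, TimeUnit.SECONDS);
    Thread.sleep(5_000);
    exec.shutdown();
    if (!exec.awaitTermination(10, TimeUnit.SECONDS)) {
      exec.shutdownNow(); // force shutdown if draining times out
    }
    aggregate.run(); // one final aggregation round, as serviceStop() does
  }
}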
http://git-wip-us.apache.org/repos/asf/hadoop/blob/3fb71b13/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/collector/PerNodeTimelineCollectorsAuxService.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/collector/PerNodeTimelineCollectorsAuxService.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/collector/PerNodeTimelineCollectorsAuxService.java
index 725e441..d4cde64 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/collector/PerNodeTimelineCollectorsAuxService.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/collector/PerNodeTimelineCollectorsAuxService.java
@@ -118,7 +118,7 @@ public class PerNodeTimelineCollectorsAuxService extends AuxiliaryService {
    */
   public boolean addApplication(ApplicationId appId) {
     AppLevelTimelineCollector collector =
-        new AppLevelTimelineCollector(appId);
+        new AppLevelTimelineCollectorWithAgg(appId);
     return (collectorManager.putIfAbsent(appId, collector)
         == collector);
   }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/3fb71b13/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/test/java/org/apache/hadoop/yarn/server/timelineservice/collector/TestNMTimelineCollectorManager.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/test/java/org/apache/hadoop/yarn/server/timelineservice/collector/TestNMTimelineCollectorManager.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/test/java/org/apache/hadoop/yarn/server/timelineservice/collector/TestNMTimelineCollectorManager.java
index 7bc89c5..a59f8c1 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/test/java/org/apache/hadoop/yarn/server/timelineservice/collector/TestNMTimelineCollectorManager.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/test/java/org/apache/hadoop/yarn/server/timelineservice/collector/TestNMTimelineCollectorManager.java
@@ -95,7 +95,7 @@ public class TestNMTimelineCollectorManager {
       Callable<Boolean> task = new Callable<Boolean>() {
         public Boolean call() {
           AppLevelTimelineCollector collector =
-              new AppLevelTimelineCollector(appId);
+              new AppLevelTimelineCollectorWithAgg(appId);
           return (collectorManager.putIfAbsent(appId, collector) == collector);
         }
       };
@@ -126,7 +126,7 @@ public class TestNMTimelineCollectorManager {
       Callable<Boolean> task = new Callable<Boolean>() {
         public Boolean call() {
           AppLevelTimelineCollector collector =
-              new AppLevelTimelineCollector(appId);
+              new AppLevelTimelineCollectorWithAgg(appId);
           boolean successPut =
               (collectorManager.putIfAbsent(appId, collector) == collector);
           return successPut && collectorManager.remove(appId);




[34/50] [abbrv] hadoop git commit: YARN-6323. Rolling upgrade/config change is broken on timeline v2. (Vrushali C via Haibo Chen)

Posted by st...@apache.org.
YARN-6323. Rolling upgrade/config change is broken on timeline v2. (Vrushali C via Haibo Chen)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/9b08f365
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/9b08f365
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/9b08f365

Branch: refs/heads/HADOOP-13345
Commit: 9b08f365d749185d7ed8e34dc379b2e415a29e99
Parents: b2efebd
Author: Haibo Chen <ha...@apache.org>
Authored: Mon Aug 21 10:45:10 2017 -0700
Committer: Varun Saxena <va...@apache.org>
Committed: Wed Aug 30 11:29:54 2017 +0530

----------------------------------------------------------------------
 .../org/apache/hadoop/yarn/conf/YarnConfiguration.java    |  2 ++
 .../containermanager/ContainerManagerImpl.java            | 10 ++++++++++
 2 files changed, 12 insertions(+)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/9b08f365/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
index 8a513ac..be45ddf 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
@@ -2221,6 +2221,8 @@ public class YarnConfiguration extends Configuration {
 
   public static final int DEFAULT_NUMBER_OF_ASYNC_ENTITIES_TO_MERGE = 10;
 
+  /** default version for any flow. */
+  public static final String DEFAULT_FLOW_VERSION = "1";
 
   /**
    * The time period for which timeline v2 client will wait for draining

http://git-wip-us.apache.org/repos/asf/hadoop/blob/9b08f365/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/ContainerManagerImpl.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/ContainerManagerImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/ContainerManagerImpl.java
index c7880d5..e497f62 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/ContainerManagerImpl.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/ContainerManagerImpl.java
@@ -402,6 +402,16 @@ public class ContainerManagerImpl extends CompositeService implements
         LOG.debug(
             "Recovering Flow context: " + fc + " for an application " + appId);
       }
+    } else {
+      // in upgrade situations, where there is no prior existing flow
+      // context, the default flow context is used.
+      fc = new FlowContext(TimelineUtils.generateDefaultFlowName(null, appId),
+          YarnConfiguration.DEFAULT_FLOW_VERSION, appId.getClusterTimestamp());
+      if (LOG.isDebugEnabled()) {
+        LOG.debug(
+            "No prior existing flow context found. Using default Flow context: "
+                + fc + " for an application " + appId);
+      }
     }
 
     LOG.info("Recovering application " + appId);




[37/50] [abbrv] hadoop git commit: YARN-5355: YARN Timeline Service v.2: alpha 2 (varunsaxena)

Posted by st...@apache.org.
YARN-5355: YARN Timeline Service v.2: alpha 2 (varunsaxena)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/32cba6c3
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/32cba6c3
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/32cba6c3

Branch: refs/heads/HADOOP-13345
Commit: 32cba6c3036dfb1dcb0a4fd77a68ddad17dd4082
Parents: 4cae120 3d00c8f
Author: Varun Saxena <va...@apache.org>
Authored: Wed Aug 30 11:41:10 2017 +0530
Committer: Varun Saxena <va...@apache.org>
Committed: Wed Aug 30 11:41:10 2017 +0530

----------------------------------------------------------------------
 .../src/main/conf/hadoop-policy.xml             |   11 +
 .../jobhistory/JobHistoryEventHandler.java      |   27 +-
 .../v2/app/job/impl/TaskAttemptImpl.java        |   58 +-
 .../mapreduce/v2/app/job/impl/TaskImpl.java     |   19 +-
 .../v2/app/rm/RMContainerAllocator.java         |   14 +-
 .../hadoop/mapreduce/jobhistory/TestEvents.java |    4 +-
 .../jobhistory/TestJobHistoryEventHandler.java  |    8 +-
 .../v2/app/rm/TestRMContainerAllocator.java     |  136 +++
 .../jobhistory/MapAttemptFinishedEvent.java     |   87 +-
 .../jobhistory/ReduceAttemptFinishedEvent.java  |   83 +-
 .../jobhistory/TaskAttemptFinishedEvent.java    |   47 +-
 .../TaskAttemptUnsuccessfulCompletionEvent.java |   48 +-
 .../mapreduce/jobhistory/TaskFailedEvent.java   |   51 +-
 .../mapreduce/jobhistory/TaskFinishedEvent.java |   42 +-
 .../mapred/TestMRTimelineEventHandling.java     |   30 +-
 .../mapreduce/JobHistoryFileReplayMapperV1.java |    3 +-
 .../hadoop/mapreduce/SimpleEntityWriterV1.java  |    3 +-
 hadoop-project/src/site/markdown/index.md.vm    |    8 +-
 .../api/protocolrecords/AllocateResponse.java   |   32 +-
 .../hadoop/yarn/api/records/CollectorInfo.java  |   59 +
 .../timelineservice/ApplicationEntity.java      |   28 +
 .../records/timelineservice/TimelineEntity.java |   52 +-
 .../hadoop/yarn/conf/YarnConfiguration.java     |   75 +-
 .../hadoop/yarn/util/TimelineServiceHelper.java |    8 +
 .../src/main/proto/yarn_protos.proto            |    5 +
 .../src/main/proto/yarn_service_protos.proto    |    2 +-
 .../timelineservice/TestApplicationEntity.java  |   71 ++
 .../yarn/conf/TestYarnConfigurationFields.java  |    2 +
 .../distributedshell/ApplicationMaster.java     |   42 +-
 .../distributedshell/TestDistributedShell.java  |  173 +--
 .../api/async/impl/AMRMClientAsyncImpl.java     |   19 +-
 .../ApplicationMasterServiceProtoTestBase.java  |   72 ++
 .../hadoop/yarn/client/ProtocolHATestBase.java  |   20 +-
 ...ationMasterServiceProtocolForTimelineV2.java |   71 ++
 ...estApplicationMasterServiceProtocolOnHA.java |   46 +-
 .../api/async/impl/TestAMRMClientAsync.java     |    2 +-
 .../impl/pb/AllocateResponsePBImpl.java         |   37 +-
 .../records/impl/pb/CollectorInfoPBImpl.java    |  152 +++
 .../yarn/client/api/TimelineV2Client.java       |   10 +-
 .../client/api/impl/TimelineV2ClientImpl.java   |  117 +-
 .../src/main/resources/yarn-default.xml         |   44 +
 .../hadoop/yarn/api/TestPBImplRecords.java      |    2 +
 .../api/impl/TestTimelineClientV2Impl.java      |   56 +-
 .../ApplicationHistoryServer.java               |   79 +-
 .../security/TimelineAuthenticationFilter.java  |   49 -
 ...TimelineAuthenticationFilterInitializer.java |  129 ---
 ...lineDelegationTokenSecretManagerService.java |  240 -----
 ...neV1DelegationTokenSecretManagerService.java |  225 ++++
 .../TestTimelineAuthenticationFilter.java       |  323 ------
 .../TestTimelineAuthenticationFilterForV1.java  |  332 ++++++
 ...TimelineAuthenticationFilterInitializer.java |   76 --
 .../protocolrecords/NodeHeartbeatRequest.java   |   13 +-
 .../protocolrecords/NodeHeartbeatResponse.java  |    8 +-
 .../ReportNewCollectorInfoRequest.java          |   13 +-
 .../impl/pb/NodeHeartbeatRequestPBImpl.java     |   74 +-
 .../impl/pb/NodeHeartbeatResponsePBImpl.java    |   61 +-
 .../pb/ReportNewCollectorInfoRequestPBImpl.java |   36 +-
 .../server/api/records/AppCollectorData.java    |  125 +++
 .../server/api/records/AppCollectorsMap.java    |   46 -
 .../records/impl/pb/AppCollectorDataPBImpl.java |  227 ++++
 .../records/impl/pb/AppCollectorsMapPBImpl.java |  152 ---
 .../api/records/impl/pb/package-info.java       |   19 +
 .../security/TimelineAuthenticationFilter.java  |   55 +
 ...TimelineAuthenticationFilterInitializer.java |  137 +++
 ...elineDelgationTokenSecretManagerService.java |   83 ++
 .../server/timeline/security/package-info.java  |   26 +
 .../util/timeline/TimelineServerUtils.java      |   92 ++
 .../yarn/server/util/timeline/package-info.java |   25 +
 .../yarn_server_common_service_protos.proto     |   16 +-
 .../java/org/apache/hadoop/yarn/TestRPC.java    |   36 +-
 .../hadoop/yarn/TestYarnServerApiClasses.java   |   48 +-
 ...TimelineAuthenticationFilterInitializer.java |   78 ++
 .../hadoop/yarn/server/nodemanager/Context.java |   14 +-
 .../yarn/server/nodemanager/NodeManager.java    |   64 +-
 .../nodemanager/NodeStatusUpdaterImpl.java      |   57 +-
 .../collectormanager/NMCollectorService.java    |   43 +-
 .../containermanager/AuxServices.java           |    3 +-
 .../containermanager/ContainerManagerImpl.java  |   16 +-
 .../ApplicationContainerFinishedEvent.java      |    9 +-
 .../application/ApplicationImpl.java            |   33 +-
 .../containermanager/container/Container.java   |    2 +
 .../container/ContainerImpl.java                |   22 +-
 .../recovery/NMLeveldbStateStoreService.java    |   21 +-
 .../recovery/NMNullStateStoreService.java       |    2 +-
 .../recovery/NMStateStoreService.java           |   13 +-
 .../security/authorize/NMPolicyProvider.java    |   22 +-
 .../timelineservice/NMTimelinePublisher.java    |   52 +-
 .../amrmproxy/BaseAMRMProxyTest.java            |    9 +-
 .../application/TestApplication.java            |    2 +-
 .../recovery/NMMemoryStateStoreService.java     |    4 +-
 .../TestNMLeveldbStateStoreService.java         |    6 +-
 .../nodemanager/webapp/MockContainer.java       |    4 +
 .../nodemanager/webapp/TestNMWebServer.java     |    4 +-
 .../ApplicationMasterService.java               |    5 +-
 .../resourcemanager/DefaultAMSProcessor.java    |    8 +-
 .../resourcemanager/ResourceTrackerService.java |   88 +-
 .../metrics/TimelineServiceV2Publisher.java     |   12 +-
 .../server/resourcemanager/rmapp/RMApp.java     |   35 +-
 .../resourcemanager/rmapp/RMAppEventType.java   |    3 -
 .../server/resourcemanager/rmapp/RMAppImpl.java |   27 +-
 .../yarn/server/resourcemanager/MockNM.java     |   16 +
 .../TestRMHATimelineCollectors.java             |  126 +++
 .../TestResourceTrackerService.java             |   32 +-
 .../applicationsmanager/MockAsm.java            |   17 +-
 .../TestSystemMetricsPublisherForV2.java        |   13 +-
 .../server/resourcemanager/rmapp/MockRMApp.java |   18 +-
 .../hadoop-yarn-server-tests/pom.xml            |   11 +
 .../hadoop/yarn/server/TestRMNMSecretKeys.java  |   34 +-
 .../TestTimelineServiceClientIntegration.java   |   15 +-
 .../security/TestTimelineAuthFilterForV2.java   |  478 +++++++++
 .../src/test/resources/krb5.conf                |   28 -
 .../AbstractTimelineReaderHBaseTestBase.java    |  177 +++
 ...stTimelineReaderWebServicesHBaseStorage.java | 1007 ++++++++++++------
 .../storage/DataGeneratorForTest.java           |  423 +++++---
 .../storage/TestHBaseTimelineStorageApps.java   |  442 +++++---
 .../TestHBaseTimelineStorageEntities.java       |  544 +++++++---
 .../storage/TestHBaseTimelineStorageSchema.java |  135 +++
 .../storage/flow/TestFlowDataGenerator.java     |   28 +-
 .../flow/TestHBaseStorageFlowActivity.java      |   72 +-
 .../storage/flow/TestHBaseStorageFlowRun.java   |  237 +++--
 .../flow/TestHBaseStorageFlowRunCompaction.java |   67 +-
 .../reader/filter/TimelineFilterUtils.java      |   17 +
 .../storage/HBaseTimelineReaderImpl.java        |   12 +-
 .../storage/HBaseTimelineWriterImpl.java        |  294 ++---
 .../storage/TimelineSchemaCreator.java          |   52 +-
 .../storage/application/ApplicationColumn.java  |   48 -
 .../application/ApplicationColumnPrefix.java    |   52 -
 .../storage/application/ApplicationRowKey.java  |   49 +-
 .../storage/application/ApplicationTable.java   |    2 +-
 .../storage/apptoflow/AppToFlowColumn.java      |   47 -
 .../apptoflow/AppToFlowColumnPrefix.java        |  206 ++++
 .../storage/apptoflow/AppToFlowRowKey.java      |  101 +-
 .../storage/apptoflow/AppToFlowTable.java       |   21 +-
 .../storage/common/BaseTable.java               |   37 +-
 .../storage/common/ColumnHelper.java            |   50 +-
 .../common/HBaseTimelineStorageUtils.java       |  124 ++-
 .../storage/common/KeyConverterToString.java    |   38 +
 .../storage/common/LongConverter.java           |    2 +-
 .../storage/entity/EntityColumn.java            |   48 -
 .../storage/entity/EntityColumnPrefix.java      |   51 -
 .../storage/entity/EntityRowKey.java            |  100 +-
 .../storage/entity/EntityRowKeyPrefix.java      |   11 +-
 .../storage/entity/EntityTable.java             |    4 +-
 .../storage/flow/FlowActivityColumnPrefix.java  |   58 +-
 .../storage/flow/FlowActivityRowKey.java        |   59 +-
 .../storage/flow/FlowRunColumn.java             |   53 +-
 .../storage/flow/FlowRunColumnPrefix.java       |   53 +-
 .../storage/flow/FlowRunCoprocessor.java        |   36 +-
 .../storage/flow/FlowRunRowKey.java             |   47 +-
 .../storage/flow/FlowRunTable.java              |   13 +-
 .../timelineservice/storage/package-info.java   |    6 +-
 .../reader/AbstractTimelineStorageReader.java   |  158 +++
 .../storage/reader/ApplicationEntityReader.java |   77 +-
 .../storage/reader/EntityTypeReader.java        |  179 ++++
 .../reader/FlowActivityEntityReader.java        |   30 +-
 .../storage/reader/FlowRunEntityReader.java     |   53 +-
 .../storage/reader/GenericEntityReader.java     |  201 ++--
 .../reader/SubApplicationEntityReader.java      |  488 +++++++++
 .../storage/reader/TimelineEntityReader.java    |   60 +-
 .../reader/TimelineEntityReaderFactory.java     |   18 +-
 .../subapplication/SubApplicationColumn.java    |  108 ++
 .../SubApplicationColumnFamily.java             |   68 ++
 .../SubApplicationColumnPrefix.java             |  250 +++++
 .../subapplication/SubApplicationRowKey.java    |  290 +++++
 .../SubApplicationRowKeyPrefix.java             |   69 ++
 .../subapplication/SubApplicationTable.java     |  174 +++
 .../storage/subapplication/package-info.java    |   28 +
 .../common/TestHBaseTimelineStorageUtils.java   |   33 +
 .../storage/common/TestKeyConverters.java       |    4 +
 .../storage/common/TestRowKeys.java             |   54 +-
 .../storage/common/TestRowKeysAsString.java     |  144 +++
 .../collector/AppLevelTimelineCollector.java    |  143 +--
 .../AppLevelTimelineCollectorWithAgg.java       |  150 +++
 .../collector/NodeTimelineCollectorManager.java |  275 ++++-
 .../PerNodeTimelineCollectorsAuxService.java    |   12 +-
 .../collector/TimelineCollector.java            |   18 +-
 .../collector/TimelineCollectorManager.java     |   14 +-
 .../reader/TimelineDataToRetrieve.java          |   35 +-
 .../reader/TimelineEntityFilters.java           |  160 +--
 .../reader/TimelineReaderContext.java           |   37 +-
 .../reader/TimelineReaderManager.java           |   40 +-
 .../reader/TimelineReaderServer.java            |   77 +-
 .../reader/TimelineReaderUtils.java             |   31 +-
 .../reader/TimelineReaderWebServices.java       |  693 ++++++++++--
 .../reader/TimelineReaderWebServicesUtils.java  |   98 +-
 .../reader/TimelineUIDConverter.java            |   84 +-
 ...neReaderAuthenticationFilterInitializer.java |   53 +
 ...elineReaderWhitelistAuthorizationFilter.java |  123 +++
 ...WhitelistAuthorizationFilterInitializer.java |   66 ++
 .../reader/security/package-info.java           |   25 +
 .../CollectorNodemanagerSecurityInfo.java       |   69 ++
 ...neV2DelegationTokenSecretManagerService.java |  126 +++
 .../timelineservice/security/package-info.java  |   25 +
 .../storage/FileSystemTimelineReaderImpl.java   |   21 +
 .../storage/FileSystemTimelineWriterImpl.java   |   15 +-
 .../timelineservice/storage/TimelineReader.java |   23 +-
 .../timelineservice/storage/TimelineWriter.java |   28 +-
 .../storage/common/TimelineStorageUtils.java    |    1 -
 .../org.apache.hadoop.security.SecurityInfo     |   14 +
 .../TestNMTimelineCollectorManager.java         |    4 +-
 .../collector/TestTimelineCollector.java        |   12 +-
 .../reader/TestTimelineReaderWebServices.java   |    4 +-
 ...elineReaderWhitelistAuthorizationFilter.java |  380 +++++++
 .../reader/TestTimelineUIDConverter.java        |   17 +-
 .../TestFileSystemTimelineReaderImpl.java       |  115 +-
 .../TestFileSystemTimelineWriterImpl.java       |    8 +-
 .../src/site/markdown/TimelineServiceV2.md      |  433 +++++++-
 207 files changed, 12631 insertions(+), 4174 deletions(-)
----------------------------------------------------------------------





[32/50] [abbrv] hadoop git commit: YARN-7041. Nodemanager NPE running jobs with security off. Contributed by Varun Saxena.

Posted by st...@apache.org.
YARN-7041. Nodemanager NPE running jobs with security off. Contributed by Varun Saxena.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/e276c75e
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/e276c75e
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/e276c75e

Branch: refs/heads/HADOOP-13345
Commit: e276c75ec17634fc3b521fdb15b6ac141b001274
Parents: 32188d3
Author: Rohith Sharma K S <ro...@apache.org>
Authored: Sat Aug 19 11:37:17 2017 +0530
Committer: Varun Saxena <va...@apache.org>
Committed: Wed Aug 30 11:29:54 2017 +0530

----------------------------------------------------------------------
 .../records/impl/pb/CollectorInfoPBImpl.java    |  8 +++--
 .../impl/pb/NodeHeartbeatRequestPBImpl.java     | 22 +++++++++----
 .../impl/pb/NodeHeartbeatResponsePBImpl.java    | 23 ++++++++-----
 .../records/impl/pb/AppCollectorDataPBImpl.java |  8 +++--
 .../hadoop/yarn/TestYarnServerApiClasses.java   | 34 +++++++++++++++++---
 5 files changed, 71 insertions(+), 24 deletions(-)
----------------------------------------------------------------------
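
All five files apply the same two guards around the optional collector token,
which is absent when security is off. A condensed sketch of the pattern before
the hunks (method context is hypothetical; the proto accessors are the ones
used below):

    // Writer side: never feed a null token into convertToProtoFormat(),
    // which is what raised the NPE with security disabled.
    if (data.getCollectorToken() != null) {
      builder.setAppCollectorToken(
          convertToProtoFormat(data.getCollectorToken()));
    }

    // Reader side: probe the optional field before reading it, since a
    // proto getter returns a default instance rather than null.
    Token token = p.hasAppCollectorToken()
        ? convertFromProtoFormat(p.getAppCollectorToken())
        : null;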


http://git-wip-us.apache.org/repos/asf/hadoop/blob/e276c75e/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/CollectorInfoPBImpl.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/CollectorInfoPBImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/CollectorInfoPBImpl.java
index bb54133..5835d1a 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/CollectorInfoPBImpl.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/CollectorInfoPBImpl.java
@@ -114,9 +114,13 @@ public class CollectorInfoPBImpl extends CollectorInfo {
   @Override
   public Token getCollectorToken() {
     CollectorInfoProtoOrBuilder p = viaProto ? proto : builder;
-    if (this.collectorToken == null && p.hasCollectorToken()) {
-      this.collectorToken = convertFromProtoFormat(p.getCollectorToken());
+    if (this.collectorToken != null) {
+      return this.collectorToken;
+    }
+    if (!p.hasCollectorToken()) {
+      return null;
     }
+    this.collectorToken = convertFromProtoFormat(p.getCollectorToken());
     return this.collectorToken;
   }
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/e276c75e/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/api/protocolrecords/impl/pb/NodeHeartbeatRequestPBImpl.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/api/protocolrecords/impl/pb/NodeHeartbeatRequestPBImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/api/protocolrecords/impl/pb/NodeHeartbeatRequestPBImpl.java
index c07a6eb..1ffd223 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/api/protocolrecords/impl/pb/NodeHeartbeatRequestPBImpl.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/api/protocolrecords/impl/pb/NodeHeartbeatRequestPBImpl.java
@@ -168,12 +168,17 @@ public class NodeHeartbeatRequestPBImpl extends NodeHeartbeatRequest {
     for (Map.Entry<ApplicationId, AppCollectorData> entry :
         registeringCollectors.entrySet()) {
       AppCollectorData data = entry.getValue();
-      builder.addRegisteringCollectors(AppCollectorDataProto.newBuilder()
-          .setAppId(convertToProtoFormat(entry.getKey()))
-          .setAppCollectorAddr(data.getCollectorAddr())
-          .setAppCollectorToken(convertToProtoFormat(data.getCollectorToken()))
-          .setRmIdentifier(data.getRMIdentifier())
-          .setVersion(data.getVersion()));
+      AppCollectorDataProto.Builder appCollectorDataBuilder =
+          AppCollectorDataProto.newBuilder()
+              .setAppId(convertToProtoFormat(entry.getKey()))
+              .setAppCollectorAddr(data.getCollectorAddr())
+              .setRmIdentifier(data.getRMIdentifier())
+              .setVersion(data.getVersion());
+      if (data.getCollectorToken() != null) {
+        appCollectorDataBuilder.setAppCollectorToken(
+            convertToProtoFormat(data.getCollectorToken()));
+      }
+      builder.addRegisteringCollectors(appCollectorDataBuilder);
     }
   }
 
@@ -274,7 +279,10 @@ public class NodeHeartbeatRequestPBImpl extends NodeHeartbeatRequest {
       this.registeringCollectors = new HashMap<>();
       for (AppCollectorDataProto c : list) {
         ApplicationId appId = convertFromProtoFormat(c.getAppId());
-        Token collectorToken = convertFromProtoFormat(c.getAppCollectorToken());
+        Token collectorToken = null;
+        if (c.hasAppCollectorToken()){
+          collectorToken = convertFromProtoFormat(c.getAppCollectorToken());
+        }
         AppCollectorData data = AppCollectorData.newInstance(appId,
             c.getAppCollectorAddr(), c.getRmIdentifier(), c.getVersion(),
             collectorToken);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/e276c75e/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/api/protocolrecords/impl/pb/NodeHeartbeatResponsePBImpl.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/api/protocolrecords/impl/pb/NodeHeartbeatResponsePBImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/api/protocolrecords/impl/pb/NodeHeartbeatResponsePBImpl.java
index a1e6a21..11f5f61 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/api/protocolrecords/impl/pb/NodeHeartbeatResponsePBImpl.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/api/protocolrecords/impl/pb/NodeHeartbeatResponsePBImpl.java
@@ -154,13 +154,17 @@ public class NodeHeartbeatResponsePBImpl extends NodeHeartbeatResponse {
     for (Map.Entry<ApplicationId, AppCollectorData> entry
         : appCollectorsMap.entrySet()) {
       AppCollectorData data = entry.getValue();
-      builder.addAppCollectors(AppCollectorDataProto.newBuilder()
-          .setAppId(convertToProtoFormat(entry.getKey()))
-          .setAppCollectorAddr(data.getCollectorAddr())
-          .setAppCollectorToken(
-              convertToProtoFormat(entry.getValue().getCollectorToken()))
-          .setRmIdentifier(data.getRMIdentifier())
-          .setVersion(data.getVersion()));
+      AppCollectorDataProto.Builder appCollectorDataBuilder =
+          AppCollectorDataProto.newBuilder()
+              .setAppId(convertToProtoFormat(entry.getKey()))
+              .setAppCollectorAddr(data.getCollectorAddr())
+              .setRmIdentifier(data.getRMIdentifier())
+              .setVersion(data.getVersion());
+      if (data.getCollectorToken() != null) {
+        appCollectorDataBuilder.setAppCollectorToken(
+            convertToProtoFormat(data.getCollectorToken()));
+      }
+      builder.addAppCollectors(appCollectorDataBuilder);
     }
   }
 
@@ -604,7 +608,10 @@ public class NodeHeartbeatResponsePBImpl extends NodeHeartbeatResponse {
       this.appCollectorsMap = new HashMap<>();
       for (AppCollectorDataProto c : list) {
         ApplicationId appId = convertFromProtoFormat(c.getAppId());
-        Token collectorToken = convertFromProtoFormat(c.getAppCollectorToken());
+        Token collectorToken = null;
+        if (c.hasAppCollectorToken()){
+          collectorToken = convertFromProtoFormat(c.getAppCollectorToken());
+        }
         AppCollectorData data = AppCollectorData.newInstance(appId,
             c.getAppCollectorAddr(), c.getRmIdentifier(), c.getVersion(),
             collectorToken);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/e276c75e/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/api/records/impl/pb/AppCollectorDataPBImpl.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/api/records/impl/pb/AppCollectorDataPBImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/api/records/impl/pb/AppCollectorDataPBImpl.java
index 7144f51..c08e9ca 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/api/records/impl/pb/AppCollectorDataPBImpl.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/api/records/impl/pb/AppCollectorDataPBImpl.java
@@ -163,9 +163,13 @@ public class AppCollectorDataPBImpl extends AppCollectorData {
   @Override
   public Token getCollectorToken() {
     AppCollectorDataProtoOrBuilder p = viaProto ? proto : builder;
-    if (this.collectorToken == null && p.hasAppCollectorToken()) {
-      this.collectorToken = new TokenPBImpl(p.getAppCollectorToken());
+    if (this.collectorToken != null) {
+      return this.collectorToken;
+    }
+    if (!p.hasAppCollectorToken()) {
+      return null;
     }
+    this.collectorToken = new TokenPBImpl(p.getAppCollectorToken());
     return this.collectorToken;
   }
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/e276c75e/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/test/java/org/apache/hadoop/yarn/TestYarnServerApiClasses.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/test/java/org/apache/hadoop/yarn/TestYarnServerApiClasses.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/test/java/org/apache/hadoop/yarn/TestYarnServerApiClasses.java
index a1890dd..8b1d0bb 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/test/java/org/apache/hadoop/yarn/TestYarnServerApiClasses.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/test/java/org/apache/hadoop/yarn/TestYarnServerApiClasses.java
@@ -111,7 +111,7 @@ public class TestYarnServerApiClasses {
     original.setLastKnownNMTokenMasterKey(getMasterKey());
     original.setNodeStatus(getNodeStatus());
     original.setNodeLabels(getValidNodeLabels());
-    Map<ApplicationId, AppCollectorData> collectors = getCollectors();
+    Map<ApplicationId, AppCollectorData> collectors = getCollectors(false);
     original.setRegisteringCollectors(collectors);
     NodeHeartbeatRequestPBImpl copy = new NodeHeartbeatRequestPBImpl(
         original.getProto());
@@ -130,6 +130,16 @@ public class TestYarnServerApiClasses {
     Assert.assertEquals(0, copy.getNodeLabels().size());
   }
 
+  @Test
+  public void testNodeHBRequestPBImplWithNullCollectorToken() {
+    NodeHeartbeatRequestPBImpl original = new NodeHeartbeatRequestPBImpl();
+    Map<ApplicationId, AppCollectorData> collectors = getCollectors(true);
+    original.setRegisteringCollectors(collectors);
+    NodeHeartbeatRequestPBImpl copy = new NodeHeartbeatRequestPBImpl(
+        original.getProto());
+    assertEquals(collectors, copy.getRegisteringCollectors());
+  }
+
   /**
    * Test NodeHeartbeatRequestPBImpl.
    */
@@ -155,7 +165,7 @@ public class TestYarnServerApiClasses {
     original.setNextHeartBeatInterval(1000);
     original.setNodeAction(NodeAction.NORMAL);
     original.setResponseId(100);
-    Map<ApplicationId, AppCollectorData> collectors = getCollectors();
+    Map<ApplicationId, AppCollectorData> collectors = getCollectors(false);
     original.setAppCollectors(collectors);
 
     NodeHeartbeatResponsePBImpl copy = new NodeHeartbeatResponsePBImpl(
@@ -180,6 +190,16 @@ public class TestYarnServerApiClasses {
   }
 
   @Test
+  public void testNodeHBResponsePBImplWithNullCollectorToken() {
+    NodeHeartbeatResponsePBImpl original = new NodeHeartbeatResponsePBImpl();
+    Map<ApplicationId, AppCollectorData> collectors = getCollectors(true);
+    original.setAppCollectors(collectors);
+    NodeHeartbeatResponsePBImpl copy = new NodeHeartbeatResponsePBImpl(
+        original.getProto());
+    assertEquals(collectors, copy.getAppCollectors());
+  }
+
+  @Test
   public void testNodeHeartbeatResponsePBImplWithDecreasedContainers() {
     NodeHeartbeatResponsePBImpl original = new NodeHeartbeatResponsePBImpl();
     original.addAllContainersToUpdate(
@@ -349,11 +369,15 @@ public class TestYarnServerApiClasses {
     return nodeLabels;
   }
 
-  private Map<ApplicationId, AppCollectorData> getCollectors() {
+  private Map<ApplicationId, AppCollectorData> getCollectors(
+      boolean hasNullCollectorToken) {
     ApplicationId appID = ApplicationId.newInstance(1L, 1);
     String collectorAddr = "localhost:0";
-    AppCollectorData data = AppCollectorData.newInstance(appID, collectorAddr,
-        Token.newInstance(new byte[0], "kind", new byte[0], "s"));
+    AppCollectorData data = AppCollectorData.newInstance(appID, collectorAddr);
+    if (!hasNullCollectorToken) {
+      data.setCollectorToken(
+          Token.newInstance(new byte[0], "kind", new byte[0], "s"));
+    }
     Map<ApplicationId, AppCollectorData> collectorMap =
         new HashMap<>();
     collectorMap.put(appID, data);




[13/50] [abbrv] hadoop git commit: YARN-5647. [ATSv2 Security] Collector side changes for loading auth filters and principals. Contributed by Varun Saxena

Posted by st...@apache.org.
http://git-wip-us.apache.org/repos/asf/hadoop/blob/879de512/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/util/timeline/TimelineServerUtils.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/util/timeline/TimelineServerUtils.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/util/timeline/TimelineServerUtils.java
new file mode 100644
index 0000000..78bf20f
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/util/timeline/TimelineServerUtils.java
@@ -0,0 +1,92 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.server.util.timeline;
+
+import java.util.LinkedHashSet;
+import java.util.Set;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.security.AuthenticationFilterInitializer;
+import org.apache.hadoop.yarn.server.timeline.security.TimelineAuthenticationFilter;
+import org.apache.hadoop.yarn.server.timeline.security.TimelineAuthenticationFilterInitializer;
+import org.apache.hadoop.yarn.server.timeline.security.TimelineDelgationTokenSecretManagerService;
+
+/**
+ * Set of utility methods to be used across timeline reader and collector.
+ */
+public final class TimelineServerUtils {
+  private static final Log LOG = LogFactory.getLog(TimelineServerUtils.class);
+
+  private TimelineServerUtils() {
+  }
+
+  /**
+   * Sets filter initializers configuration based on existing configuration and
+   * default filters added by timeline service(such as timeline auth filter and
+   * CORS filter).
+   * @param conf Configuration object.
+   * @param configuredInitializers Comma separated list of filter initializers.
+   * @param defaultInitializers Set of initializers added by default by timeline
+   *     service.
+   */
+  public static void setTimelineFilters(Configuration conf,
+      String configuredInitializers, Set<String> defaultInitializers) {
+    String[] parts = configuredInitializers.split(",");
+    Set<String> target = new LinkedHashSet<String>();
+    for (String filterInitializer : parts) {
+      filterInitializer = filterInitializer.trim();
+      if (filterInitializer.equals(
+          AuthenticationFilterInitializer.class.getName()) ||
+          filterInitializer.isEmpty()) {
+        continue;
+      }
+      target.add(filterInitializer);
+    }
+    target.addAll(defaultInitializers);
+    String actualInitializers =
+        org.apache.commons.lang.StringUtils.join(target, ",");
+    LOG.info("Filter initializers set for timeline service: " +
+        actualInitializers);
+    conf.set("hadoop.http.filter.initializers", actualInitializers);
+  }
+
+  /**
+   * Adds timeline authentication filter to the set of default filter
+   * initializers and assigns the delegation token manager service to it.
+   * @param initializers Comma separated list of filter initializers.
+   * @param defaultInitializers Set of initializers added by default by timeline
+   *     service.
+   * @param delegationTokenMgrService Delegation token manager service.
+   *     This will be used by timeline authentication filter to assign
+   *     delegation tokens.
+   */
+  public static void addTimelineAuthFilter(String initializers,
+      Set<String> defaultInitializers,
+      TimelineDelgationTokenSecretManagerService delegationTokenMgrService) {
+    TimelineAuthenticationFilter.setTimelineDelegationTokenSecretManager(
+        delegationTokenMgrService.getTimelineDelegationTokenSecretManager());
+    if (!initializers.contains(
+        TimelineAuthenticationFilterInitializer.class.getName())) {
+      defaultInitializers.add(
+          TimelineAuthenticationFilterInitializer.class.getName());
+    }
+  }
+}
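
The two utilities are designed to be called back to back while a timeline
daemon sets up its web app: addTimelineAuthFilter() registers the secret
manager and queues the auth filter, then setTimelineFilters() merges it with
whatever initializers the cluster already configured. A usage sketch,
mirroring the collector wiring that appears later in this commit
(tokenMgrService is the collector's delegation token service, created in the
hunks below):

    // Merge the timeline auth filter into the existing initializer list
    // before the HttpServer2 for the collector/reader is built.
    String initializers = conf.get("hadoop.http.filter.initializers", "");
    Set<String> defaults = new LinkedHashSet<String>();
    TimelineServerUtils.addTimelineAuthFilter(
        initializers, defaults, tokenMgrService);
    TimelineServerUtils.setTimelineFilters(conf, initializers, defaults);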

http://git-wip-us.apache.org/repos/asf/hadoop/blob/879de512/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/util/timeline/package-info.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/util/timeline/package-info.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/util/timeline/package-info.java
new file mode 100644
index 0000000..75c6973
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/util/timeline/package-info.java
@@ -0,0 +1,25 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/**
+ * Package org.apache.hadoop.server.util.timeline contains utility classes used
+ * by ATSv1 and ATSv2 on the server side.
+ */
+@InterfaceAudience.Private
+package org.apache.hadoop.yarn.server.util.timeline;
+import org.apache.hadoop.classification.InterfaceAudience;
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hadoop/blob/879de512/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/test/java/org/apache/hadoop/yarn/server/timeline/security/TestTimelineAuthenticationFilterInitializer.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/test/java/org/apache/hadoop/yarn/server/timeline/security/TestTimelineAuthenticationFilterInitializer.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/test/java/org/apache/hadoop/yarn/server/timeline/security/TestTimelineAuthenticationFilterInitializer.java
new file mode 100644
index 0000000..430911e
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/test/java/org/apache/hadoop/yarn/server/timeline/security/TestTimelineAuthenticationFilterInitializer.java
@@ -0,0 +1,76 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.server.timeline.security;
+
+import org.junit.Assert;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.http.FilterContainer;
+import org.apache.hadoop.yarn.conf.YarnConfiguration;
+import static org.apache.hadoop.yarn.server.timeline.security.TimelineAuthenticationFilterInitializer.PREFIX;
+import org.junit.Test;
+import org.mockito.Mockito;
+
+
+public class TestTimelineAuthenticationFilterInitializer {
+
+  @Test
+  public void testProxyUserConfiguration() {
+    FilterContainer container = Mockito.mock(FilterContainer.class);
+    for (int i = 0; i < 3; ++i) {
+      Configuration conf = new YarnConfiguration();
+      switch (i) {
+      case 0:
+        // hadoop.proxyuser prefix
+        conf.set("hadoop.proxyuser.foo.hosts", "*");
+        conf.set("hadoop.proxyuser.foo.users", "*");
+        conf.set("hadoop.proxyuser.foo.groups", "*");
+        break;
+      case 1:
+        // yarn.timeline-service.http-authentication.proxyuser prefix
+        conf.set(PREFIX + "proxyuser.foo.hosts", "*");
+        conf.set(PREFIX + "proxyuser.foo.users", "*");
+        conf.set(PREFIX + "proxyuser.foo.groups", "*");
+        break;
+      case 2:
+        // hadoop.proxyuser prefix has been overwritten by
+        // yarn.timeline-service.http-authentication.proxyuser prefix
+        conf.set("hadoop.proxyuser.foo.hosts", "bar");
+        conf.set("hadoop.proxyuser.foo.users", "bar");
+        conf.set("hadoop.proxyuser.foo.groups", "bar");
+        conf.set(PREFIX + "proxyuser.foo.hosts", "*");
+        conf.set(PREFIX + "proxyuser.foo.users", "*");
+        conf.set(PREFIX + "proxyuser.foo.groups", "*");
+        break;
+      default:
+        break;
+      }
+
+      TimelineAuthenticationFilterInitializer initializer =
+          new TimelineAuthenticationFilterInitializer();
+      initializer.initFilter(container, conf);
+      Assert.assertEquals(
+          "*", initializer.filterConfig.get("proxyuser.foo.hosts"));
+      Assert.assertEquals(
+          "*", initializer.filterConfig.get("proxyuser.foo.users"));
+      Assert.assertEquals(
+          "*", initializer.filterConfig.get("proxyuser.foo.groups"));
+    }
+  }
+}
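
Case 2 is the interesting one: it pins down precedence, i.e. a proxyuser
setting under the yarn.timeline-service.http-authentication prefix wins over
the generic hadoop.proxyuser one. A condensed sketch of that assertion, using
only the keys exercised by the test:

    // The timeline-scoped key overrides the generic hadoop.proxyuser key
    // once initFilter() has merged both into the filter config.
    conf.set("hadoop.proxyuser.foo.hosts", "bar");   // generic value
    conf.set(PREFIX + "proxyuser.foo.hosts", "*");   // timeline-scoped
    initializer.initFilter(container, conf);
    Assert.assertEquals(
        "*", initializer.filterConfig.get("proxyuser.foo.hosts"));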

http://git-wip-us.apache.org/repos/asf/hadoop/blob/879de512/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/collector/NodeTimelineCollectorManager.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/collector/NodeTimelineCollectorManager.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/collector/NodeTimelineCollectorManager.java
index 1719782..bb51734 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/collector/NodeTimelineCollectorManager.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/collector/NodeTimelineCollectorManager.java
@@ -18,19 +18,19 @@
 
 package org.apache.hadoop.yarn.server.timelineservice.collector;
 
-import static org.apache.hadoop.fs.CommonConfigurationKeys.DEFAULT_HADOOP_HTTP_STATIC_USER;
-import static org.apache.hadoop.fs.CommonConfigurationKeys.HADOOP_HTTP_STATIC_USER;
-
 import java.io.IOException;
 import java.net.InetSocketAddress;
 import java.net.URI;
-import java.util.HashMap;
+import java.util.LinkedHashSet;
+import java.util.Set;
 
 import org.apache.hadoop.classification.InterfaceAudience.Private;
 import org.apache.hadoop.classification.InterfaceStability.Unstable;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.http.HttpServer2;
-import org.apache.hadoop.http.lib.StaticUserWebFilter;
+import org.apache.hadoop.net.NetUtils;
+import org.apache.hadoop.security.SecurityUtil;
+import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.yarn.api.records.ApplicationId;
 import org.apache.hadoop.yarn.conf.YarnConfiguration;
 import org.apache.hadoop.yarn.exceptions.YarnException;
@@ -40,6 +40,8 @@ import org.apache.hadoop.yarn.server.api.CollectorNodemanagerProtocol;
 import org.apache.hadoop.yarn.server.api.protocolrecords.GetTimelineCollectorContextRequest;
 import org.apache.hadoop.yarn.server.api.protocolrecords.GetTimelineCollectorContextResponse;
 import org.apache.hadoop.yarn.server.api.protocolrecords.ReportNewCollectorInfoRequest;
+import org.apache.hadoop.yarn.server.timelineservice.security.TimelineV2DelegationTokenSecretManagerService;
+import org.apache.hadoop.yarn.server.util.timeline.TimelineServerUtils;
 import org.apache.hadoop.yarn.webapp.GenericExceptionHandler;
 import org.apache.hadoop.yarn.webapp.YarnJacksonJaxbJsonProvider;
 import org.apache.hadoop.yarn.webapp.util.WebAppUtils;
@@ -65,17 +67,51 @@ public class NodeTimelineCollectorManager extends TimelineCollectorManager {
 
   private volatile CollectorNodemanagerProtocol nmCollectorService;
 
+  private TimelineV2DelegationTokenSecretManagerService tokenMgrService;
+
+  private final boolean runningAsAuxService;
+
   static final String COLLECTOR_MANAGER_ATTR_KEY = "collector.manager";
 
   @VisibleForTesting
   protected NodeTimelineCollectorManager() {
+    this(true);
+  }
+
+  protected NodeTimelineCollectorManager(boolean asAuxService) {
     super(NodeTimelineCollectorManager.class.getName());
+    this.runningAsAuxService = asAuxService;
+  }
+
+  @Override
+  protected void serviceInit(Configuration conf) throws Exception {
+    tokenMgrService = new TimelineV2DelegationTokenSecretManagerService();
+    addService(tokenMgrService);
+    super.serviceInit(conf);
   }
 
   @Override
   protected void serviceStart() throws Exception {
-    startWebApp();
+    if (UserGroupInformation.isSecurityEnabled() && !runningAsAuxService) {
+      // Do security login for cases where collector is running outside NM.
+      try {
+        doSecureLogin();
+      } catch(IOException ie) {
+        throw new YarnRuntimeException("Failed to login", ie);
+      }
+    }
     super.serviceStart();
+    startWebApp();
+  }
+
+  private void doSecureLogin() throws IOException {
+    Configuration conf = getConfig();
+    InetSocketAddress addr = NetUtils.createSocketAddr(conf.getTrimmed(
+        YarnConfiguration.TIMELINE_SERVICE_BIND_HOST,
+            YarnConfiguration.DEFAULT_TIMELINE_SERVICE_BIND_HOST), 0,
+                YarnConfiguration.TIMELINE_SERVICE_BIND_HOST);
+    SecurityUtil.login(conf, YarnConfiguration.TIMELINE_SERVICE_KEYTAB,
+        YarnConfiguration.TIMELINE_SERVICE_PRINCIPAL, addr.getHostName());
   }
 
   @Override
@@ -105,6 +141,12 @@ public class NodeTimelineCollectorManager extends TimelineCollectorManager {
    */
   private void startWebApp() {
     Configuration conf = getConfig();
+    String initializers = conf.get("hadoop.http.filter.initializers", "");
+    Set<String> defaultInitializers = new LinkedHashSet<String>();
+    TimelineServerUtils.addTimelineAuthFilter(
+        initializers, defaultInitializers, tokenMgrService);
+    TimelineServerUtils.setTimelineFilters(
+        conf, initializers, defaultInitializers);
     String bindAddress = conf.get(YarnConfiguration.TIMELINE_SERVICE_BIND_HOST,
         YarnConfiguration.DEFAULT_TIMELINE_SERVICE_BIND_HOST) + ":0";
     try {
@@ -114,16 +156,10 @@ public class NodeTimelineCollectorManager extends TimelineCollectorManager {
           .addEndpoint(URI.create(
               (YarnConfiguration.useHttps(conf) ? "https://" : "http://") +
                   bindAddress));
+      if (YarnConfiguration.useHttps(conf)) {
+        builder = WebAppUtils.loadSslConfiguration(builder, conf);
+      }
       timelineRestServer = builder.build();
-      // TODO: replace this by an authentication filter in future.
-      HashMap<String, String> options = new HashMap<>();
-      String username = conf.get(HADOOP_HTTP_STATIC_USER,
-          DEFAULT_HADOOP_HTTP_STATIC_USER);
-      options.put(HADOOP_HTTP_STATIC_USER, username);
-      HttpServer2.defineFilter(timelineRestServer.getWebAppContext(),
-          "static_user_filter_timeline",
-          StaticUserWebFilter.StaticUserFilter.class.getName(),
-          options, new String[] {"/*"});
 
       timelineRestServer.addJerseyResourcePackage(
           TimelineCollectorWebService.class.getPackage().getName() + ";"
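
Two things change in startWebApp(): the placeholder static-user filter is gone
(real authentication filters are now injected through the initializer list set
up at the top of the method), and HTTPS endpoints load the SSL server
configuration instead of being built bare. A minimal sketch of the endpoint
setup, assuming the HttpServer2.Builder calls not visible in the hunk:

    HttpServer2.Builder builder = new HttpServer2.Builder()
        .setName("timeline")          // name is an assumption
        .addEndpoint(URI.create(
            (YarnConfiguration.useHttps(conf) ? "https://" : "http://")
                + bindAddress));
    if (YarnConfiguration.useHttps(conf)) {
      // Pulls keystore/truststore settings for the HTTPS endpoint.
      builder = WebAppUtils.loadSslConfiguration(builder, conf);
    }
    timelineRestServer = builder.build();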

http://git-wip-us.apache.org/repos/asf/hadoop/blob/879de512/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/collector/PerNodeTimelineCollectorsAuxService.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/collector/PerNodeTimelineCollectorsAuxService.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/collector/PerNodeTimelineCollectorsAuxService.java
index e4e6421..725e441 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/collector/PerNodeTimelineCollectorsAuxService.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/collector/PerNodeTimelineCollectorsAuxService.java
@@ -61,7 +61,7 @@ public class PerNodeTimelineCollectorsAuxService extends AuxiliaryService {
   private ScheduledExecutorService scheduler;
 
   public PerNodeTimelineCollectorsAuxService() {
-    this(new NodeTimelineCollectorManager());
+    this(new NodeTimelineCollectorManager(true));
   }
 
   @VisibleForTesting PerNodeTimelineCollectorsAuxService(
@@ -202,7 +202,8 @@ public class PerNodeTimelineCollectorsAuxService extends AuxiliaryService {
     PerNodeTimelineCollectorsAuxService auxService = null;
     try {
       auxService = collectorManager == null ?
-          new PerNodeTimelineCollectorsAuxService() :
+          new PerNodeTimelineCollectorsAuxService(
+              new NodeTimelineCollectorManager(false)) :
           new PerNodeTimelineCollectorsAuxService(collectorManager);
       ShutdownHookManager.get().addShutdownHook(new ShutdownHook(auxService),
           SHUTDOWN_HOOK_PRIORITY);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/879de512/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/collector/TimelineCollectorManager.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/collector/TimelineCollectorManager.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/collector/TimelineCollectorManager.java
index 94b95ad..972bc01 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/collector/TimelineCollectorManager.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/collector/TimelineCollectorManager.java
@@ -29,7 +29,7 @@ import java.util.concurrent.TimeUnit;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.service.AbstractService;
+import org.apache.hadoop.service.CompositeService;
 import org.apache.hadoop.util.ReflectionUtils;
 import org.apache.hadoop.yarn.api.records.ApplicationId;
 import org.apache.hadoop.yarn.conf.YarnConfiguration;
@@ -47,7 +47,7 @@ import org.slf4j.LoggerFactory;
  */
 @InterfaceAudience.Private
 @InterfaceStability.Unstable
-public class TimelineCollectorManager extends AbstractService {
+public class TimelineCollectorManager extends CompositeService {
   private static final Logger LOG =
       LoggerFactory.getLogger(TimelineCollectorManager.class);
 
@@ -57,7 +57,7 @@ public class TimelineCollectorManager extends AbstractService {
   private boolean writerFlusherRunning;
 
   @Override
-  public void serviceInit(Configuration conf) throws Exception {
+  protected void serviceInit(Configuration conf) throws Exception {
     writer = createTimelineWriter(conf);
     writer.init(conf);
     // create a single dedicated thread for flushing the writer on a periodic
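
The base-class switch above is what makes the security wiring possible: a
CompositeService forwards init/start/stop to registered children, so the
delegation token manager added by NodeTimelineCollectorManager.serviceInit()
rides the collector manager's lifecycle for free. A minimal sketch of the
pattern:

    // Children added before super.serviceInit() are inited, started and
    // stopped along with the parent CompositeService.
    @Override
    protected void serviceInit(Configuration conf) throws Exception {
      addService(tokenMgrService);
      super.serviceInit(conf);
    }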

http://git-wip-us.apache.org/repos/asf/hadoop/blob/879de512/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/security/TimelineV2DelegationTokenSecretManagerService.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/security/TimelineV2DelegationTokenSecretManagerService.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/security/TimelineV2DelegationTokenSecretManagerService.java
new file mode 100644
index 0000000..eef8436
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/security/TimelineV2DelegationTokenSecretManagerService.java
@@ -0,0 +1,78 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.server.timelineservice.security;
+
+import org.apache.hadoop.classification.InterfaceAudience.Private;
+import org.apache.hadoop.classification.InterfaceStability.Unstable;
+import org.apache.hadoop.security.token.delegation.AbstractDelegationTokenSecretManager;
+import org.apache.hadoop.yarn.security.client.TimelineDelegationTokenIdentifier;
+import org.apache.hadoop.yarn.server.timeline.security.TimelineDelgationTokenSecretManagerService;
+
+/**
+ * The service wrapper of {@link TimelineV2DelegationTokenSecretManager}.
+ */
+public class TimelineV2DelegationTokenSecretManagerService extends
+    TimelineDelgationTokenSecretManagerService {
+  public TimelineV2DelegationTokenSecretManagerService() {
+    super(TimelineV2DelegationTokenSecretManagerService.class.getName());
+  }
+
+  @Override
+  protected AbstractDelegationTokenSecretManager
+      <TimelineDelegationTokenIdentifier>
+      createTimelineDelegationTokenSecretManager(long secretKeyInterval,
+          long tokenMaxLifetime, long tokenRenewInterval,
+          long tokenRemovalScanInterval) {
+    return new TimelineV2DelegationTokenSecretManager(secretKeyInterval,
+        tokenMaxLifetime, tokenRenewInterval, tokenRemovalScanInterval);
+  }
+
+  /**
+   * Delegation token secret manager for ATSv2.
+   */
+  @Private
+  @Unstable
+  public static class TimelineV2DelegationTokenSecretManager extends
+      AbstractDelegationTokenSecretManager<TimelineDelegationTokenIdentifier> {
+
+    /**
+     * Create a timeline v2 secret manager.
+     * @param delegationKeyUpdateInterval the number of milliseconds for rolling
+     *        new secret keys.
+     * @param delegationTokenMaxLifetime the maximum lifetime of the delegation
+     *        tokens in milliseconds
+     * @param delegationTokenRenewInterval how often the tokens must be renewed
+     *        in milliseconds
+     * @param delegationTokenRemoverScanInterval how often the tokens are
+     *        scanned for expired tokens in milliseconds
+     */
+    public TimelineV2DelegationTokenSecretManager(
+        long delegationKeyUpdateInterval, long delegationTokenMaxLifetime,
+        long delegationTokenRenewInterval,
+        long delegationTokenRemoverScanInterval) {
+      super(delegationKeyUpdateInterval, delegationTokenMaxLifetime,
+          delegationTokenRenewInterval, delegationTokenRemoverScanInterval);
+    }
+
+    @Override
+    public TimelineDelegationTokenIdentifier createIdentifier() {
+      return new TimelineDelegationTokenIdentifier();
+    }
+  }
+}
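
The wrapper stays deliberately thin: the base service owns the four
renewal/removal intervals and the lifecycle, while this class only supplies
the concrete v2 secret manager. A hedged wiring sketch, matching how the
collector and the auth filter pick it up elsewhere in this commit:

    // Create the v2 token service, let the owning CompositeService run
    // it, and hand its secret manager to the timeline auth filter.
    tokenMgrService = new TimelineV2DelegationTokenSecretManagerService();
    addService(tokenMgrService);
    TimelineAuthenticationFilter.setTimelineDelegationTokenSecretManager(
        tokenMgrService.getTimelineDelegationTokenSecretManager());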

http://git-wip-us.apache.org/repos/asf/hadoop/blob/879de512/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/security/package-info.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/security/package-info.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/security/package-info.java
new file mode 100644
index 0000000..8250092
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/security/package-info.java
@@ -0,0 +1,25 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/**
+ * Package org.apache.hadoop.server.timelineservice.security contains classes
+ * to be used to generate delegation tokens for ATSv2.
+ */
+@InterfaceAudience.Private
+package org.apache.hadoop.yarn.server.timelineservice.security;
+import org.apache.hadoop.classification.InterfaceAudience;




[44/50] [abbrv] hadoop git commit: MAPREDUCE-6931. Remove TestDFSIO "Total Throughput" calculation. Contributed by Dennis Huo.

Posted by st...@apache.org.
MAPREDUCE-6931. Remove TestDFSIO "Total Throughput" calculation. Contributed by Dennis Huo.

Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/3e0e2033
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/3e0e2033
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/3e0e2033

Branch: refs/heads/HADOOP-13345
Commit: 3e0e2033cc28d48a7909822416478aff7487bfe6
Parents: 4148023
Author: Konstantin V Shvachko <sh...@apache.org>
Authored: Tue Aug 22 11:05:47 2017 -0700
Committer: Konstantin V Shvachko <sh...@apache.org>
Committed: Wed Aug 30 14:07:23 2017 -0700

----------------------------------------------------------------------
 .../src/test/java/org/apache/hadoop/fs/TestDFSIO.java    | 11 +++++++----
 1 file changed, 7 insertions(+), 4 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/3e0e2033/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/fs/TestDFSIO.java
----------------------------------------------------------------------
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/fs/TestDFSIO.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/fs/TestDFSIO.java
index 12fbdad..1d495c4 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/fs/TestDFSIO.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/fs/TestDFSIO.java
@@ -855,7 +855,7 @@ public class TestDFSIO implements Tool {
       long tStart = System.currentTimeMillis();
       sequentialTest(fs, testType, nrBytes, nrFiles);
       long execTime = System.currentTimeMillis() - tStart;
-      String resultLine = "Seq Test exec time sec: " + (float)execTime / 1000;
+      String resultLine = "Seq Test exec time sec: " + msToSecs(execTime);
       LOG.info(resultLine);
       return 0;
     }
@@ -919,6 +919,10 @@ public class TestDFSIO implements Tool {
     return ((float)bytes)/MEGA;
   }
 
+  static float msToSecs(long timeMillis) {
+    return timeMillis / 1000.0f;
+  }
+
   private boolean checkErasureCodePolicy(String erasureCodePolicyName,
       FileSystem fs, TestType testType) throws IOException {
     Collection<ErasureCodingPolicy> list =
@@ -1051,11 +1055,10 @@ public class TestDFSIO implements Tool {
         "            Date & time: " + new Date(System.currentTimeMillis()),
         "        Number of files: " + tasks,
         " Total MBytes processed: " + df.format(toMB(size)),
-        "      Throughput mb/sec: " + df.format(size * 1000.0 / (time * MEGA)),
-        "Total Throughput mb/sec: " + df.format(toMB(size) / ((float)execTime)),
+        "      Throughput mb/sec: " + df.format(toMB(size) / msToSecs(time)),
         " Average IO rate mb/sec: " + df.format(med),
         "  IO rate std deviation: " + df.format(stdDev),
-        "     Test exec time sec: " + df.format((float)execTime / 1000),
+        "     Test exec time sec: " + df.format(msToSecs(execTime)),
         "" };
 
     PrintStream res = null;
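
For reference, here is a minimal, self-contained sketch of the arithmetic this
patch settles on. The msToSecs and toMB helpers mirror the hunks above; the
wrapper class and the sample values are hypothetical, added only to make the
snippet runnable on its own:

  public class ThroughputExample {
    private static final int MEGA = 0x100000;   // 1 MB, as in TestDFSIO

    // Same conversion the patch adds to TestDFSIO.
    static float msToSecs(long timeMillis) {
      return timeMillis / 1000.0f;
    }

    static float toMB(long bytes) {
      return ((float) bytes) / MEGA;
    }

    public static void main(String[] args) {
      long size = 256L * MEGA;   // hypothetical bytes processed by the maps
      long time = 4235L;         // hypothetical cumulative IO time, in ms
      // Reported exactly as in the result lines after this change:
      System.out.println("Throughput mb/sec: " + (toMB(size) / msToSecs(time)));
      System.out.println("Test exec time sec: " + msToSecs(time));
    }
  }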




[24/50] [abbrv] hadoop git commit: YARN-4455. Support fetching metrics by time range. Contributed by Varun Saxena.

Posted by st...@apache.org.
http://git-wip-us.apache.org/repos/asf/hadoop/blob/70078e91/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/reader/TimelineReaderWebServices.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/reader/TimelineReaderWebServices.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/reader/TimelineReaderWebServices.java
index 360ac20..d67de71 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/reader/TimelineReaderWebServices.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/reader/TimelineReaderWebServices.java
@@ -265,6 +265,11 @@ public class TimelineReaderWebServices {
    *     or has a value less than 1, and metrics have to be retrieved, then
    *     metricsLimit will be considered as 1 i.e. latest single value of
    *     metric(s) will be returned. (Optional query param).
+   * @param metricsTimeStart If specified, returned metrics for the entities
+   *     would not contain metric values before this timestamp(Optional query
+   *     param).
+   * @param metricsTimeEnd If specified, returned metrics for the entities would
+   *     not contain metric values after this timestamp(Optional query param).
    * @param fromId If specified, retrieve the next set of entities from the
    *     given fromId. The set of entities retrieved is inclusive of specified
    *     fromId. fromId should be taken from the value associated with FROM_ID
@@ -300,6 +305,8 @@ public class TimelineReaderWebServices {
       @QueryParam("metricstoretrieve") String metricsToRetrieve,
       @QueryParam("fields") String fields,
       @QueryParam("metricslimit") String metricsLimit,
+      @QueryParam("metricstimestart") String metricsTimeStart,
+      @QueryParam("metricstimeend") String metricsTimeEnd,
       @QueryParam("fromid") String fromId) {
     String url = req.getRequestURI() +
         (req.getQueryString() == null ? "" :
@@ -326,7 +333,8 @@ public class TimelineReaderWebServices {
               infofilters, conffilters, metricfilters, eventfilters,
               fromId),
           TimelineReaderWebServicesUtils.createTimelineDataToRetrieve(
-          confsToRetrieve, metricsToRetrieve, fields, metricsLimit));
+          confsToRetrieve, metricsToRetrieve, fields, metricsLimit,
+          metricsTimeStart, metricsTimeEnd));
     } catch (Exception e) {
       handleException(e, url, startTime,
           "createdTime start/end or limit or flowrunid");
@@ -407,6 +415,11 @@ public class TimelineReaderWebServices {
    *     or has a value less than 1, and metrics have to be retrieved, then
    *     metricsLimit will be considered as 1 i.e. latest single value of
    *     metric(s) will be returned. (Optional query param).
+   * @param metricsTimeStart If specified, returned metrics for the entities
+   *     would not contain metric values before this timestamp(Optional query
+   *     param).
+   * @param metricsTimeEnd If specified, returned metrics for the entities would
+   *     not contain metric values after this timestamp(Optional query param).
    * @param fromId If specified, retrieve the next set of entities from the
    *     given fromId. The set of entities retrieved is inclusive of specified
    *     fromId. fromId should be taken from the value associated with FROM_ID
@@ -447,12 +460,14 @@ public class TimelineReaderWebServices {
       @QueryParam("metricstoretrieve") String metricsToRetrieve,
       @QueryParam("fields") String fields,
       @QueryParam("metricslimit") String metricsLimit,
+      @QueryParam("metricstimestart") String metricsTimeStart,
+      @QueryParam("metricstimeend") String metricsTimeEnd,
       @QueryParam("fromid") String fromId) {
     return getEntities(req, res, null, appId, entityType, userId, flowName,
         flowRunId, limit, createdTimeStart, createdTimeEnd, relatesTo,
         isRelatedTo, infofilters, conffilters, metricfilters, eventfilters,
         confsToRetrieve, metricsToRetrieve, fields, metricsLimit,
-        fromId);
+        metricsTimeStart, metricsTimeEnd, fromId);
   }
 
   /**
@@ -523,6 +538,11 @@ public class TimelineReaderWebServices {
    *     or has a value less than 1, and metrics have to be retrieved, then
    *     metricsLimit will be considered as 1 i.e. latest single value of
    *     metric(s) will be returned. (Optional query param).
+   * @param metricsTimeStart If specified, returned metrics for the entities
+   *     would not contain metric values before this timestamp(Optional query
+   *     param).
+   * @param metricsTimeEnd If specified, returned metrics for the entities would
+   *     not contain metric values after this timestamp(Optional query param).
    * @param fromId If specified, retrieve the next set of entities from the
    *     given fromId. The set of entities retrieved is inclusive of specified
    *     fromId. fromId should be taken from the value associated with FROM_ID
@@ -564,6 +584,8 @@ public class TimelineReaderWebServices {
       @QueryParam("metricstoretrieve") String metricsToRetrieve,
       @QueryParam("fields") String fields,
       @QueryParam("metricslimit") String metricsLimit,
+      @QueryParam("metricstimestart") String metricsTimeStart,
+      @QueryParam("metricstimeend") String metricsTimeEnd,
       @QueryParam("fromid") String fromId) {
     String url = req.getRequestURI() +
         (req.getQueryString() == null ? "" :
@@ -586,7 +608,8 @@ public class TimelineReaderWebServices {
               infofilters, conffilters, metricfilters, eventfilters,
               fromId),
           TimelineReaderWebServicesUtils.createTimelineDataToRetrieve(
-          confsToRetrieve, metricsToRetrieve, fields, metricsLimit));
+          confsToRetrieve, metricsToRetrieve, fields, metricsLimit,
+          metricsTimeStart, metricsTimeEnd));
     } catch (Exception e) {
       handleException(e, url, startTime,
           "createdTime start/end or limit or flowrunid");
@@ -628,6 +651,10 @@ public class TimelineReaderWebServices {
    *     or has a value less than 1, and metrics have to be retrieved, then
    *     metricsLimit will be considered as 1 i.e. latest single value of
    *     metric(s) will be returned. (Optional query param).
+   * @param metricsTimeStart If specified, returned metrics for the entity would
+   *     not contain metric values before this timestamp(Optional query param).
+   * @param metricsTimeEnd If specified, returned metrics for the entity would
+   *     not contain metric values after this timestamp(Optional query param).
    *
    * @return If successful, a HTTP 200(OK) response having a JSON representing a
    *     <cite>TimelineEntity</cite> instance is returned.<br>
@@ -649,7 +676,9 @@ public class TimelineReaderWebServices {
       @QueryParam("confstoretrieve") String confsToRetrieve,
       @QueryParam("metricstoretrieve") String metricsToRetrieve,
       @QueryParam("fields") String fields,
-      @QueryParam("metricslimit") String metricsLimit) {
+      @QueryParam("metricslimit") String metricsLimit,
+      @QueryParam("metricstimestart") String metricsTimeStart,
+      @QueryParam("metricstimeend") String metricsTimeEnd) {
     String url = req.getRequestURI() +
         (req.getQueryString() == null ? "" :
             QUERY_STRING_SEP + req.getQueryString());
@@ -669,7 +698,8 @@ public class TimelineReaderWebServices {
       }
       entity = timelineReaderManager.getEntity(context,
           TimelineReaderWebServicesUtils.createTimelineDataToRetrieve(
-          confsToRetrieve, metricsToRetrieve, fields, metricsLimit));
+          confsToRetrieve, metricsToRetrieve, fields, metricsLimit,
+          metricsTimeStart, metricsTimeEnd));
     } catch (Exception e) {
       handleException(e, url, startTime, "flowrunid");
     }
@@ -723,6 +753,10 @@ public class TimelineReaderWebServices {
    *     or has a value less than 1, and metrics have to be retrieved, then
    *     metricsLimit will be considered as 1 i.e. latest single value of
    *     metric(s) will be returned. (Optional query param).
+   * @param metricsTimeStart If specified, returned metrics for the entity would
+   *     not contain metric values before this timestamp(Optional query param).
+   * @param metricsTimeEnd If specified, returned metrics for the entity would
+   *     not contain metric values after this timestamp(Optional query param).
    * @param entityIdPrefix Defines the id prefix for the entity to be fetched.
    *     If specified, then entity retrieval will be faster.
    *
@@ -752,10 +786,12 @@ public class TimelineReaderWebServices {
       @QueryParam("metricstoretrieve") String metricsToRetrieve,
       @QueryParam("fields") String fields,
       @QueryParam("metricslimit") String metricsLimit,
+      @QueryParam("metricstimestart") String metricsTimeStart,
+      @QueryParam("metricstimeend") String metricsTimeEnd,
       @QueryParam("entityidprefix") String entityIdPrefix) {
     return getEntity(req, res, null, appId, entityType, entityId, userId,
         flowName, flowRunId, confsToRetrieve, metricsToRetrieve, fields,
-        metricsLimit, entityIdPrefix);
+        metricsLimit, metricsTimeStart, metricsTimeEnd, entityIdPrefix);
   }
 
   /**
@@ -797,6 +833,10 @@ public class TimelineReaderWebServices {
    *     or has a value less than 1, and metrics have to be retrieved, then
    *     metricsLimit will be considered as 1 i.e. latest single value of
    *     metric(s) will be returned. (Optional query param).
+   * @param metricsTimeStart If specified, returned metrics for the entity would
+   *     not contain metric values before this timestamp(Optional query param).
+   * @param metricsTimeEnd If specified, returned metrics for the entity would
+   *     not contain metric values after this timestamp(Optional query param).
    * @param entityIdPrefix Defines the id prefix for the entity to be fetched.
    *     If specified, then entity retrieval will be faster.
    *
@@ -827,6 +867,8 @@ public class TimelineReaderWebServices {
       @QueryParam("metricstoretrieve") String metricsToRetrieve,
       @QueryParam("fields") String fields,
       @QueryParam("metricslimit") String metricsLimit,
+      @QueryParam("metricstimestart") String metricsTimeStart,
+      @QueryParam("metricstimeend") String metricsTimeEnd,
       @QueryParam("entityidprefix") String entityIdPrefix) {
     String url = req.getRequestURI() +
         (req.getQueryString() == null ? "" :
@@ -845,7 +887,8 @@ public class TimelineReaderWebServices {
               clusterId, userId, flowName, flowRunId, appId, entityType,
               entityIdPrefix, entityId),
           TimelineReaderWebServicesUtils.createTimelineDataToRetrieve(
-          confsToRetrieve, metricsToRetrieve, fields, metricsLimit));
+          confsToRetrieve, metricsToRetrieve, fields, metricsLimit,
+          metricsTimeStart, metricsTimeEnd));
     } catch (Exception e) {
       handleException(e, url, startTime, "flowrunid");
     }
@@ -912,7 +955,7 @@ public class TimelineReaderWebServices {
       context.setEntityType(TimelineEntityType.YARN_FLOW_RUN.toString());
       entity = timelineReaderManager.getEntity(context,
           TimelineReaderWebServicesUtils.createTimelineDataToRetrieve(
-          null, metricsToRetrieve, null, null));
+          null, metricsToRetrieve, null, null, null, null));
     } catch (Exception e) {
       handleException(e, url, startTime, "flowrunid");
     }
@@ -1021,7 +1064,7 @@ public class TimelineReaderWebServices {
           clusterId, userId, flowName, flowRunId, null,
               TimelineEntityType.YARN_FLOW_RUN.toString(), null, null),
           TimelineReaderWebServicesUtils.createTimelineDataToRetrieve(
-          null, metricsToRetrieve, null, null));
+          null, metricsToRetrieve, null, null, null, null));
     } catch (Exception e) {
       handleException(e, url, startTime, "flowrunid");
     }
@@ -1115,7 +1158,7 @@ public class TimelineReaderWebServices {
           limit, createdTimeStart, createdTimeEnd, null, null, null,
               null, null, null, fromId),
           TimelineReaderWebServicesUtils.createTimelineDataToRetrieve(
-          null, metricsToRetrieve, fields, null));
+          null, metricsToRetrieve, fields, null, null, null));
     } catch (Exception e) {
       handleException(e, url, startTime,
           "createdTime start/end or limit or fromId");
@@ -1265,7 +1308,7 @@ public class TimelineReaderWebServices {
           limit, createdTimeStart, createdTimeEnd, null, null, null,
               null, null, null, fromId),
           TimelineReaderWebServicesUtils.createTimelineDataToRetrieve(
-          null, metricsToRetrieve, fields, null));
+          null, metricsToRetrieve, fields, null, null, null));
     } catch (Exception e) {
       handleException(e, url, startTime,
           "createdTime start/end or limit or fromId");
@@ -1400,7 +1443,7 @@ public class TimelineReaderWebServices {
           clusterId, null, null, null, null,
               TimelineEntityType.YARN_FLOW_ACTIVITY.toString(), null, null),
           entityFilters, TimelineReaderWebServicesUtils.
-          createTimelineDataToRetrieve(null, null, null, null));
+              createTimelineDataToRetrieve(null, null, null, null, null, null));
     } catch (Exception e) {
       handleException(e, url, startTime, "limit");
     }
@@ -1441,6 +1484,10 @@ public class TimelineReaderWebServices {
    *     or has a value less than 1, and metrics have to be retrieved, then
    *     metricsLimit will be considered as 1 i.e. latest single value of
    *     metric(s) will be returned. (Optional query param).
+   * @param metricsTimeStart If specified, returned metrics for the apps would
+   *     not contain metric values before this timestamp(Optional query param).
+   * @param metricsTimeEnd If specified, returned metrics for the apps would
+   *     not contain metric values after this timestamp(Optional query param).
    *
    * @return If successful, a HTTP 200(OK) response having a JSON representing a
    *     <cite>TimelineEntity</cite> instance is returned.<br>
@@ -1462,7 +1509,9 @@ public class TimelineReaderWebServices {
       @QueryParam("confstoretrieve") String confsToRetrieve,
       @QueryParam("metricstoretrieve") String metricsToRetrieve,
       @QueryParam("fields") String fields,
-      @QueryParam("metricslimit") String metricsLimit) {
+      @QueryParam("metricslimit") String metricsLimit,
+      @QueryParam("metricstimestart") String metricsTimeStart,
+      @QueryParam("metricstimeend") String metricsTimeEnd) {
     String url = req.getRequestURI() +
         (req.getQueryString() == null ? "" :
             QUERY_STRING_SEP + req.getQueryString());
@@ -1483,7 +1532,8 @@ public class TimelineReaderWebServices {
       context.setEntityType(TimelineEntityType.YARN_APPLICATION.toString());
       entity = timelineReaderManager.getEntity(context,
           TimelineReaderWebServicesUtils.createTimelineDataToRetrieve(
-          confsToRetrieve, metricsToRetrieve, fields, metricsLimit));
+          confsToRetrieve, metricsToRetrieve, fields, metricsLimit,
+          metricsTimeStart, metricsTimeEnd));
     } catch (Exception e) {
       handleException(e, url, startTime, "flowrunid");
     }
@@ -1532,6 +1582,10 @@ public class TimelineReaderWebServices {
    *     or has a value less than 1, and metrics have to be retrieved, then
    *     metricsLimit will be considered as 1 i.e. latest single value of
    *     metric(s) will be returned. (Optional query param).
+   * @param metricsTimeStart If specified, returned metrics for the app would
+   *     not contain metric values before this timestamp(Optional query param).
+   * @param metricsTimeEnd If specified, returned metrics for the app would
+   *     not contain metric values after this timestamp(Optional query param).
    *
    * @return If successful, a HTTP 200(OK) response having a JSON representing a
    *     <cite>TimelineEntity</cite> instance is returned.<br>
@@ -1556,9 +1610,12 @@ public class TimelineReaderWebServices {
       @QueryParam("confstoretrieve") String confsToRetrieve,
       @QueryParam("metricstoretrieve") String metricsToRetrieve,
       @QueryParam("fields") String fields,
-      @QueryParam("metricslimit") String metricsLimit) {
+      @QueryParam("metricslimit") String metricsLimit,
+      @QueryParam("metricstimestart") String metricsTimeStart,
+      @QueryParam("metricstimeend") String metricsTimeEnd) {
     return getApp(req, res, null, appId, flowName, flowRunId, userId,
-        confsToRetrieve, metricsToRetrieve, fields, metricsLimit);
+        confsToRetrieve, metricsToRetrieve, fields, metricsLimit,
+        metricsTimeStart, metricsTimeEnd);
   }
 
   /**
@@ -1596,6 +1653,10 @@ public class TimelineReaderWebServices {
    *     or has a value less than 1, and metrics have to be retrieved, then
    *     metricsLimit will be considered as 1 i.e. latest single value of
    *     metric(s) will be returned. (Optional query param).
+   * @param metricsTimeStart If specified, returned metrics for the app would
+   *     not contain metric values before this timestamp(Optional query param).
+   * @param metricsTimeEnd If specified, returned metrics for the app would
+   *     not contain metric values after this timestamp(Optional query param).
    *
    * @return If successful, a HTTP 200(OK) response having a JSON representing a
    *     <cite>TimelineEntity</cite> instance is returned.<br>
@@ -1621,7 +1682,9 @@ public class TimelineReaderWebServices {
       @QueryParam("confstoretrieve") String confsToRetrieve,
       @QueryParam("metricstoretrieve") String metricsToRetrieve,
       @QueryParam("fields") String fields,
-      @QueryParam("metricslimit") String metricsLimit) {
+      @QueryParam("metricslimit") String metricsLimit,
+      @QueryParam("metricstimestart") String metricsTimeStart,
+      @QueryParam("metricstimeend") String metricsTimeEnd) {
     String url = req.getRequestURI() +
         (req.getQueryString() == null ? "" :
             QUERY_STRING_SEP + req.getQueryString());
@@ -1639,7 +1702,8 @@ public class TimelineReaderWebServices {
           clusterId, userId, flowName, flowRunId, appId,
               TimelineEntityType.YARN_APPLICATION.toString(), null, null),
           TimelineReaderWebServicesUtils.createTimelineDataToRetrieve(
-          confsToRetrieve, metricsToRetrieve, fields, metricsLimit));
+          confsToRetrieve, metricsToRetrieve, fields, metricsLimit,
+          metricsTimeStart, metricsTimeEnd));
     } catch (Exception e) {
       handleException(e, url, startTime, "flowrunid");
     }
@@ -1712,6 +1776,10 @@ public class TimelineReaderWebServices {
    *     or has a value less than 1, and metrics have to be retrieved, then
    *     metricsLimit will be considered as 1 i.e. latest single value of
    *     metric(s) will be returned. (Optional query param).
+   * @param metricsTimeStart If specified, returned metrics for the apps would
+   *     not contain metric values before this timestamp(Optional query param).
+   * @param metricsTimeEnd If specified, returned metrics for the apps would
+   *     not contain metric values after this timestamp(Optional query param).
    * @param fromId If specified, retrieve the next set of applications
    *     from the given fromId. The set of applications retrieved is inclusive
    *     of specified fromId. fromId should be taken from the value associated
@@ -1746,6 +1814,8 @@ public class TimelineReaderWebServices {
       @QueryParam("metricstoretrieve") String metricsToRetrieve,
       @QueryParam("fields") String fields,
       @QueryParam("metricslimit") String metricsLimit,
+      @QueryParam("metricstimestart") String metricsTimeStart,
+      @QueryParam("metricstimeend") String metricsTimeEnd,
       @QueryParam("fromid") String fromId) {
     String url = req.getRequestURI() +
         (req.getQueryString() == null ? "" :
@@ -1771,7 +1841,8 @@ public class TimelineReaderWebServices {
               infofilters, conffilters, metricfilters, eventfilters,
               fromId),
           TimelineReaderWebServicesUtils.createTimelineDataToRetrieve(
-          confsToRetrieve, metricsToRetrieve, fields, metricsLimit));
+          confsToRetrieve, metricsToRetrieve, fields, metricsLimit,
+          metricsTimeStart, metricsTimeEnd));
     } catch (Exception e) {
       handleException(e, url, startTime,
           "createdTime start/end or limit or flowrunid");
@@ -1845,6 +1916,10 @@ public class TimelineReaderWebServices {
    *     or has a value less than 1, and metrics have to be retrieved, then
    *     metricsLimit will be considered as 1 i.e. latest single value of
    *     metric(s) will be returned. (Optional query param).
+   * @param metricsTimeStart If specified, returned metrics for the apps would
+   *     not contain metric values before this timestamp(Optional query param).
+   * @param metricsTimeEnd If specified, returned metrics for the apps would
+   *     not contain metric values after this timestamp(Optional query param).
    * @param fromId If specified, retrieve the next set of applications
    *     from the given fromId. The set of applications retrieved is inclusive
    *     of specified fromId. fromId should be taken from the value associated
@@ -1881,12 +1956,15 @@ public class TimelineReaderWebServices {
       @QueryParam("metricstoretrieve") String metricsToRetrieve,
       @QueryParam("fields") String fields,
       @QueryParam("metricslimit") String metricsLimit,
+      @QueryParam("metricstimestart") String metricsTimeStart,
+      @QueryParam("metricstimeend") String metricsTimeEnd,
       @QueryParam("fromid") String fromId) {
     return getEntities(req, res, null, null,
         TimelineEntityType.YARN_APPLICATION.toString(), userId, flowName,
         flowRunId, limit, createdTimeStart, createdTimeEnd, relatesTo,
         isRelatedTo, infofilters, conffilters, metricfilters, eventfilters,
-        confsToRetrieve, metricsToRetrieve, fields, metricsLimit, fromId);
+        confsToRetrieve, metricsToRetrieve, fields, metricsLimit,
+        metricsTimeStart, metricsTimeEnd, fromId);
   }
 
   /**
@@ -1950,6 +2028,10 @@ public class TimelineReaderWebServices {
    *     or has a value less than 1, and metrics have to be retrieved, then
    *     metricsLimit will be considered as 1 i.e. latest single value of
    *     metric(s) will be returned. (Optional query param).
+   * @param metricsTimeStart If specified, returned metrics for the apps would
+   *     not contain metric values before this timestamp(Optional query param).
+   * @param metricsTimeEnd If specified, returned metrics for the apps would
+   *     not contain metric values after this timestamp(Optional query param).
    * @param fromId If specified, retrieve the next set of applications
    *     from the given fromId. The set of applications retrieved is inclusive
    *     of specified fromId. fromId should be taken from the value associated
@@ -1988,12 +2070,15 @@ public class TimelineReaderWebServices {
       @QueryParam("metricstoretrieve") String metricsToRetrieve,
       @QueryParam("fields") String fields,
       @QueryParam("metricslimit") String metricsLimit,
+      @QueryParam("metricstimestart") String metricsTimeStart,
+      @QueryParam("metricstimeend") String metricsTimeEnd,
       @QueryParam("fromid") String fromId) {
     return getEntities(req, res, clusterId, null,
         TimelineEntityType.YARN_APPLICATION.toString(), userId, flowName,
         flowRunId, limit, createdTimeStart, createdTimeEnd, relatesTo,
         isRelatedTo, infofilters, conffilters, metricfilters, eventfilters,
-        confsToRetrieve, metricsToRetrieve, fields, metricsLimit, fromId);
+        confsToRetrieve, metricsToRetrieve, fields, metricsLimit,
+        metricsTimeStart, metricsTimeEnd, fromId);
   }
 
   /**
@@ -2054,6 +2139,10 @@ public class TimelineReaderWebServices {
    *     or has a value less than 1, and metrics have to be retrieved, then
    *     metricsLimit will be considered as 1 i.e. latest single value of
    *     metric(s) will be returned. (Optional query param).
+   * @param metricsTimeStart If specified, returned metrics for the apps would
+   *     not contain metric values before this timestamp(Optional query param).
+   * @param metricsTimeEnd If specified, returned metrics for the apps would
+   *     not contain metric values after this timestamp(Optional query param).
    * @param fromId If specified, retrieve the next set of applications
    *     from the given fromId. The set of applications retrieved is inclusive
    *     of specified fromId. fromId should be taken from the value associated
@@ -2089,12 +2178,15 @@ public class TimelineReaderWebServices {
       @QueryParam("metricstoretrieve") String metricsToRetrieve,
       @QueryParam("fields") String fields,
       @QueryParam("metricslimit") String metricsLimit,
+      @QueryParam("metricstimestart") String metricsTimeStart,
+      @QueryParam("metricstimeend") String metricsTimeEnd,
       @QueryParam("fromid") String fromId) {
     return getEntities(req, res, null, null,
         TimelineEntityType.YARN_APPLICATION.toString(), userId, flowName,
         null, limit, createdTimeStart, createdTimeEnd, relatesTo, isRelatedTo,
         infofilters, conffilters, metricfilters, eventfilters,
-        confsToRetrieve, metricsToRetrieve, fields, metricsLimit, fromId);
+        confsToRetrieve, metricsToRetrieve, fields, metricsLimit,
+        metricsTimeStart, metricsTimeEnd, fromId);
   }
 
   /**
@@ -2156,6 +2248,10 @@ public class TimelineReaderWebServices {
    *     or has a value less than 1, and metrics have to be retrieved, then
    *     metricsLimit will be considered as 1 i.e. latest single value of
    *     metric(s) will be returned. (Optional query param).
+   * @param metricsTimeStart If specified, returned metrics for the apps would
+   *     not contain metric values before this timestamp(Optional query param).
+   * @param metricsTimeEnd If specified, returned metrics for the apps would
+   *     not contain metric values after this timestamp(Optional query param).
    * @param fromId If specified, retrieve the next set of applications
    *     from the given fromId. The set of applications retrieved is inclusive
    *     of specified fromId. fromId should be taken from the value associated
@@ -2192,12 +2288,15 @@ public class TimelineReaderWebServices {
       @QueryParam("metricstoretrieve") String metricsToRetrieve,
       @QueryParam("fields") String fields,
       @QueryParam("metricslimit") String metricsLimit,
+      @QueryParam("metricstimestart") String metricsTimeStart,
+      @QueryParam("metricstimeend") String metricsTimeEnd,
       @QueryParam("fromid") String fromId) {
     return getEntities(req, res, clusterId, null,
         TimelineEntityType.YARN_APPLICATION.toString(), userId, flowName,
         null, limit, createdTimeStart, createdTimeEnd, relatesTo, isRelatedTo,
         infofilters, conffilters, metricfilters, eventfilters,
-        confsToRetrieve, metricsToRetrieve, fields, metricsLimit, fromId);
+        confsToRetrieve, metricsToRetrieve, fields, metricsLimit,
+        metricsTimeStart, metricsTimeEnd, fromId);
   }
 
   /**
@@ -2268,6 +2367,12 @@ public class TimelineReaderWebServices {
    *          have to be retrieved, then metricsLimit will be considered as 1
    *          i.e. latest single value of metric(s) will be returned. (Optional
    *          query param).
+   * @param metricsTimeStart If specified, returned metrics for the app attempts
+   *          would not contain metric values before this timestamp(Optional
+   *          query param).
+   * @param metricsTimeEnd If specified, returned metrics for the app attempts
+   *          would not contain metric values after this timestamp(Optional
+   *          query param).
    * @param fromId If specified, retrieve the next set of application-attempt
    *         entities from the given fromId. The set of application-attempt
    *         entities retrieved is inclusive of specified fromId. fromId should
@@ -2306,12 +2411,15 @@ public class TimelineReaderWebServices {
       @QueryParam("metricstoretrieve") String metricsToRetrieve,
       @QueryParam("fields") String fields,
       @QueryParam("metricslimit") String metricsLimit,
+      @QueryParam("metricstimestart") String metricsTimeStart,
+      @QueryParam("metricstimeend") String metricsTimeEnd,
       @QueryParam("fromid") String fromId) {
 
     return getAppAttempts(req, res, null, appId, userId, flowName, flowRunId,
         limit, createdTimeStart, createdTimeEnd, relatesTo, isRelatedTo,
         infofilters, conffilters, metricfilters, eventfilters, confsToRetrieve,
-        metricsToRetrieve, fields, metricsLimit, fromId);
+        metricsToRetrieve, fields, metricsLimit, metricsTimeStart,
+        metricsTimeEnd, fromId);
   }
 
   /**
@@ -2383,6 +2491,12 @@ public class TimelineReaderWebServices {
    *          have to be retrieved, then metricsLimit will be considered as 1
    *          i.e. latest single value of metric(s) will be returned. (Optional
    *          query param).
+   * @param metricsTimeStart If specified, returned metrics for the app attempts
+   *          would not contain metric values before this timestamp(Optional
+   *          query param).
+   * @param metricsTimeEnd If specified, returned metrics for the app attempts
+   *          would not contain metric values after this timestamp(Optional
+   *          query param).
    * @param fromId If specified, retrieve the next set of application-attempt
    *         entities from the given fromId. The set of application-attempt
    *         entities retrieved is inclusive of specified fromId. fromId should
@@ -2422,6 +2536,8 @@ public class TimelineReaderWebServices {
       @QueryParam("metricstoretrieve") String metricsToRetrieve,
       @QueryParam("fields") String fields,
       @QueryParam("metricslimit") String metricsLimit,
+      @QueryParam("metricstimestart") String metricsTimeStart,
+      @QueryParam("metricstimeend") String metricsTimeEnd,
       @QueryParam("fromid") String fromId) {
 
     return getEntities(req, res, clusterId, appId,
@@ -2429,7 +2545,7 @@ public class TimelineReaderWebServices {
         flowName, flowRunId, limit, createdTimeStart, createdTimeEnd, relatesTo,
         isRelatedTo, infofilters, conffilters, metricfilters, eventfilters,
         confsToRetrieve, metricsToRetrieve, fields, metricsLimit,
-        fromId);
+        metricsTimeStart, metricsTimeEnd, fromId);
   }
 
   /**
@@ -2472,6 +2588,12 @@ public class TimelineReaderWebServices {
    *          have to be retrieved, then metricsLimit will be considered as 1
    *          i.e. latest single value of metric(s) will be returned. (Optional
    *          query param).
+   * @param metricsTimeStart If specified, returned metrics for the app attempt
+   *          would not contain metric values before this timestamp(Optional
+   *          query param).
+   * @param metricsTimeEnd If specified, returned metrics for the app attempt
+   *          would not contain metric values after this timestamp(Optional
+   *          query param).
    * @param entityIdPrefix Defines the id prefix for the entity to be fetched.
    *          If specified, then entity retrieval will be faster.
    *
@@ -2499,10 +2621,12 @@ public class TimelineReaderWebServices {
       @QueryParam("metricstoretrieve") String metricsToRetrieve,
       @QueryParam("fields") String fields,
       @QueryParam("metricslimit") String metricsLimit,
+      @QueryParam("metricstimestart") String metricsTimeStart,
+      @QueryParam("metricstimeend") String metricsTimeEnd,
       @QueryParam("entityidprefix") String entityIdPrefix) {
     return getAppAttempt(req, res, null, appId, appAttemptId, userId, flowName,
         flowRunId, confsToRetrieve, metricsToRetrieve, fields, metricsLimit,
-        entityIdPrefix);
+        metricsTimeStart, metricsTimeEnd, entityIdPrefix);
   }
 
   /**
@@ -2545,6 +2669,12 @@ public class TimelineReaderWebServices {
    *          have to be retrieved, then metricsLimit will be considered as 1
    *          i.e. latest single value of metric(s) will be returned. (Optional
    *          query param).
+   * @param metricsTimeStart If specified, returned metrics for the app attempt
+   *          would not contain metric values before this timestamp(Optional
+   *          query param).
+   * @param metricsTimeEnd If specified, returned metrics for the app attempt
+   *          would not contain metric values after this timestamp(Optional
+   *          query param).
    * @param entityIdPrefix Defines the id prefix for the entity to be fetched.
    *          If specified, then entity retrieval will be faster.
    *
@@ -2574,11 +2704,13 @@ public class TimelineReaderWebServices {
       @QueryParam("metricstoretrieve") String metricsToRetrieve,
       @QueryParam("fields") String fields,
       @QueryParam("metricslimit") String metricsLimit,
+      @QueryParam("metricstimestart") String metricsTimeStart,
+      @QueryParam("metricstimeend") String metricsTimeEnd,
       @QueryParam("entityidprefix") String entityIdPrefix) {
     return getEntity(req, res, clusterId, appId,
         TimelineEntityType.YARN_APPLICATION_ATTEMPT.toString(), appAttemptId,
         userId, flowName, flowRunId, confsToRetrieve, metricsToRetrieve, fields,
-        metricsLimit, entityIdPrefix);
+        metricsLimit, metricsTimeStart, metricsTimeEnd, entityIdPrefix);
   }
 
   /**
@@ -2651,6 +2783,12 @@ public class TimelineReaderWebServices {
    *          have to be retrieved, then metricsLimit will be considered as 1
    *          i.e. latest single value of metric(s) will be returned. (Optional
    *          query param).
+   * @param metricsTimeStart If specified, returned metrics for the containers
+   *          would not contain metric values before this timestamp(Optional
+   *          query param).
+   * @param metricsTimeEnd If specified, returned metrics for the containers
+   *          would not contain metric values after this timestamp(Optional
+   *          query param).
    * @param fromId If specified, retrieve the next set of container
    *         entities from the given fromId. The set of container
    *         entities retrieved is inclusive of specified fromId. fromId should
@@ -2690,12 +2828,14 @@ public class TimelineReaderWebServices {
       @QueryParam("metricstoretrieve") String metricsToRetrieve,
       @QueryParam("fields") String fields,
       @QueryParam("metricslimit") String metricsLimit,
+      @QueryParam("metricstimestart") String metricsTimeStart,
+      @QueryParam("metricstimeend") String metricsTimeEnd,
       @QueryParam("fromid") String fromId) {
     return getContainers(req, res, null, appId, appattemptId, userId, flowName,
         flowRunId, limit, createdTimeStart, createdTimeEnd, relatesTo,
         isRelatedTo, infofilters, conffilters, metricfilters, eventfilters,
         confsToRetrieve, metricsToRetrieve, fields, metricsLimit,
-        fromId);
+        metricsTimeStart, metricsTimeEnd, fromId);
   }
 
   /**
@@ -2769,6 +2909,12 @@ public class TimelineReaderWebServices {
    *          have to be retrieved, then metricsLimit will be considered as 1
    *          i.e. latest single value of metric(s) will be returned. (Optional
    *          query param).
+   * @param metricsTimeStart If specified, returned metrics for the containers
+   *          would not contain metric values before this timestamp(Optional
+   *          query param).
+   * @param metricsTimeEnd If specified, returned metrics for the containers
+   *          would not contain metric values after this timestamp(Optional
+   *          query param).
    * @param fromId If specified, retrieve the next set of container
    *         entities from the given fromId. The set of container
    *         entities retrieved is inclusive of specified fromId. fromId should
@@ -2810,6 +2956,8 @@ public class TimelineReaderWebServices {
       @QueryParam("metricstoretrieve") String metricsToRetrieve,
       @QueryParam("fields") String fields,
       @QueryParam("metricslimit") String metricsLimit,
+      @QueryParam("metricstimestart") String metricsTimeStart,
+      @QueryParam("metricstimeend") String metricsTimeEnd,
       @QueryParam("fromid") String fromId) {
 
     String entityType = TimelineEntityType.YARN_CONTAINER.toString();
@@ -2829,7 +2977,7 @@ public class TimelineReaderWebServices {
         flowRunId, limit, createdTimeStart, createdTimeEnd, relatesTo,
         isRelatedTo, infofilter, conffilters, metricfilters, eventfilters,
         confsToRetrieve, metricsToRetrieve, fields, metricsLimit,
-        fromId);
+        metricsTimeStart, metricsTimeEnd, fromId);
   }
 
   /**
@@ -2871,6 +3019,12 @@ public class TimelineReaderWebServices {
    *          have to be retrieved, then metricsLimit will be considered as 1
    *          i.e. latest single value of metric(s) will be returned. (Optional
    *          query param).
+   * @param metricsTimeStart If specified, returned metrics for the container
+   *          would not contain metric values before this timestamp(Optional
+   *          query param).
+   * @param metricsTimeEnd If specified, returned metrics for the container
+   *          would not contain metric values after this timestamp(Optional
+   *          query param).
    * @param entityIdPrefix Defines the id prefix for the entity to be fetched.
    *          If specified, then entity retrieval will be faster.
    *
@@ -2898,10 +3052,12 @@ public class TimelineReaderWebServices {
       @QueryParam("metricstoretrieve") String metricsToRetrieve,
       @QueryParam("fields") String fields,
       @QueryParam("metricslimit") String metricsLimit,
+      @QueryParam("metricstimestart") String metricsTimeStart,
+      @QueryParam("metricstimeend") String metricsTimeEnd,
       @QueryParam("entityidprefix") String entityIdPrefix) {
     return getContainer(req, res, null, appId, containerId, userId, flowName,
         flowRunId, confsToRetrieve, metricsToRetrieve, fields, metricsLimit,
-        entityIdPrefix);
+        entityIdPrefix, metricsTimeStart, metricsTimeEnd);
   }
 
   /**
@@ -2944,6 +3100,12 @@ public class TimelineReaderWebServices {
    *          have to be retrieved, then metricsLimit will be considered as 1
    *          i.e. latest single value of metric(s) will be returned. (Optional
    *          query param).
+   * @param metricsTimeStart If specified, returned metrics for the container
+   *          would not contain metric values before this timestamp(Optional
+   *          query param).
+   * @param metricsTimeEnd If specified, returned metrics for the container
+   *          would not contain metric values after this timestamp(Optional
+   *          query param).
    * @param entityIdPrefix Defines the id prefix for the entity to be fetched.
    *          If specified, then entity retrieval will be faster.
    *
@@ -2973,11 +3135,13 @@ public class TimelineReaderWebServices {
       @QueryParam("metricstoretrieve") String metricsToRetrieve,
       @QueryParam("fields") String fields,
       @QueryParam("metricslimit") String metricsLimit,
+      @QueryParam("metricstimestart") String metricsTimeStart,
+      @QueryParam("metricstimeend") String metricsTimeEnd,
       @QueryParam("entityidprefix") String entityIdPrefix) {
     return getEntity(req, res, clusterId, appId,
         TimelineEntityType.YARN_CONTAINER.toString(), containerId, userId,
         flowName, flowRunId, confsToRetrieve, metricsToRetrieve, fields,
-        metricsLimit, entityIdPrefix);
+        metricsLimit, metricsTimeStart, metricsTimeEnd, entityIdPrefix);
   }
 
   /**

http://git-wip-us.apache.org/repos/asf/hadoop/blob/70078e91/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/reader/TimelineReaderWebServicesUtils.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/reader/TimelineReaderWebServicesUtils.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/reader/TimelineReaderWebServicesUtils.java
index 4d3e769..cded3a1 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/reader/TimelineReaderWebServicesUtils.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/reader/TimelineReaderWebServicesUtils.java
@@ -127,11 +127,13 @@ final class TimelineReaderWebServicesUtils {
    * @throws TimelineParseException if any problem occurs during parsing.
    */
   static TimelineDataToRetrieve createTimelineDataToRetrieve(String confs,
-      String metrics, String fields, String metricsLimit)
+      String metrics, String fields, String metricsLimit,
+      String metricsTimeBegin, String metricsTimeEnd)
       throws TimelineParseException {
     return new TimelineDataToRetrieve(parseDataToRetrieve(confs),
         parseDataToRetrieve(metrics), parseFieldsStr(fields,
-        TimelineParseConstants.COMMA_DELIMITER), parseIntStr(metricsLimit));
+        TimelineParseConstants.COMMA_DELIMITER), parseIntStr(metricsLimit),
+        parseLongStr(metricsTimeBegin), parseLongStr(metricsTimeEnd));
   }
 
   /**

http://git-wip-us.apache.org/repos/asf/hadoop/blob/70078e91/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/test/java/org/apache/hadoop/yarn/server/timelineservice/storage/TestFileSystemTimelineReaderImpl.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/test/java/org/apache/hadoop/yarn/server/timelineservice/storage/TestFileSystemTimelineReaderImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/test/java/org/apache/hadoop/yarn/server/timelineservice/storage/TestFileSystemTimelineReaderImpl.java
index 1bc66db..46873ab 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/test/java/org/apache/hadoop/yarn/server/timelineservice/storage/TestFileSystemTimelineReaderImpl.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/test/java/org/apache/hadoop/yarn/server/timelineservice/storage/TestFileSystemTimelineReaderImpl.java
@@ -319,7 +319,7 @@ public class TestFileSystemTimelineReaderImpl {
     TimelineEntity result = reader.getEntity(
         new TimelineReaderContext("cluster1", "user1", "flow1", 1L, "app1",
         "app", "id_1"),
-        new TimelineDataToRetrieve(null, null, null, null));
+        new TimelineDataToRetrieve(null, null, null, null, null, null));
     Assert.assertEquals(
         (new TimelineEntity.Identifier("app", "id_1")).toString(),
         result.getIdentifier().toString());
@@ -334,7 +334,7 @@ public class TestFileSystemTimelineReaderImpl {
     TimelineEntity result = reader.getEntity(
         new TimelineReaderContext("cluster1", null, null, null, "app1", "app",
         "id_1"),
-        new TimelineDataToRetrieve(null, null, null, null));
+        new TimelineDataToRetrieve(null, null, null, null, null, null));
     Assert.assertEquals(
         (new TimelineEntity.Identifier("app", "id_1")).toString(),
         result.getIdentifier().toString());
@@ -351,7 +351,7 @@ public class TestFileSystemTimelineReaderImpl {
     TimelineEntity result = reader.getEntity(
         new TimelineReaderContext("cluster1", null, null, null, "app2",
         "app", "id_5"),
-        new TimelineDataToRetrieve(null, null, null, null));
+        new TimelineDataToRetrieve(null, null, null, null, null, null));
     Assert.assertEquals(
         (new TimelineEntity.Identifier("app", "id_5")).toString(),
         result.getIdentifier().toString());
@@ -365,7 +365,8 @@ public class TestFileSystemTimelineReaderImpl {
         new TimelineReaderContext("cluster1", "user1", "flow1", 1L, "app1",
         "app", "id_1"),
         new TimelineDataToRetrieve(null, null,
-        EnumSet.of(Field.INFO, Field.CONFIGS, Field.METRICS), null));
+        EnumSet.of(Field.INFO, Field.CONFIGS, Field.METRICS), null, null,
+        null));
     Assert.assertEquals(
         (new TimelineEntity.Identifier("app", "id_1")).toString(),
         result.getIdentifier().toString());
@@ -383,7 +384,8 @@ public class TestFileSystemTimelineReaderImpl {
     TimelineEntity result = reader.getEntity(
         new TimelineReaderContext("cluster1", "user1", "flow1", 1L, "app1",
         "app", "id_1"),
-        new TimelineDataToRetrieve(null, null, EnumSet.of(Field.ALL), null));
+        new TimelineDataToRetrieve(null, null, EnumSet.of(Field.ALL), null,
+        null, null));
     Assert.assertEquals(
         (new TimelineEntity.Identifier("app", "id_1")).toString(),
         result.getIdentifier().toString());
@@ -399,7 +401,8 @@ public class TestFileSystemTimelineReaderImpl {
     Set<TimelineEntity> result = reader.getEntities(
         new TimelineReaderContext("cluster1", "user1", "flow1", 1L, "app1",
         "app", null), new TimelineEntityFilters.Builder().build(),
-        new TimelineDataToRetrieve(null, null, EnumSet.of(Field.ALL), null));
+        new TimelineDataToRetrieve(null, null, EnumSet.of(Field.ALL), null,
+        null, null));
     // All 4 entities will be returned
     Assert.assertEquals(4, result.size());
   }
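
Putting the new query parameters to use: a hedged client-side sketch. The
metricstimestart/metricstimeend parameter names and their epoch-millisecond
semantics come from the annotations and javadoc above; the reader address,
cluster id, and application id below are hypothetical placeholders:

  import java.net.HttpURLConnection;
  import java.net.URL;

  public class MetricsTimeRangeQuery {
    public static void main(String[] args) throws Exception {
      // Assumed timeline reader address; substitute your own deployment.
      String base = "http://localhost:8188/ws/v2/timeline";
      String query = "/clusters/cluster1/apps/application_1111111111_2222"
          + "?fields=METRICS"
          + "&metricstimestart=1503000000000"  // drop values before this ms timestamp
          + "&metricstimeend=1503999999999";   // drop values after this ms timestamp
      HttpURLConnection conn =
          (HttpURLConnection) new URL(base + query).openConnection();
      System.out.println("HTTP " + conn.getResponseCode());
      conn.disconnect();
    }
  }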




[11/50] [abbrv] hadoop git commit: YARN-6734. Ensure sub-application user is extracted & sent to timeline service (Rohith Sharma K S via Varun Saxena)

Posted by st...@apache.org.
YARN-6734. Ensure sub-application user is extracted & sent to timeline service (Rohith Sharma K S via Varun Saxena)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/9f654053
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/9f654053
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/9f654053

Branch: refs/heads/HADOOP-13345
Commit: 9f6540535d9148abbea836d54a9e94d25319c5d5
Parents: 3fb71b1
Author: Varun Saxena <va...@apache.org>
Authored: Fri Jul 28 22:02:19 2017 +0530
Committer: Varun Saxena <va...@apache.org>
Committed: Wed Aug 30 11:29:53 2017 +0530

----------------------------------------------------------------------
 ...stTimelineReaderWebServicesHBaseStorage.java |  26 ++-
 .../storage/DataGeneratorForTest.java           |  49 ++--
 .../storage/TestHBaseTimelineStorageApps.java   |  22 +-
 .../TestHBaseTimelineStorageEntities.java       | 128 +++++++++-
 .../flow/TestHBaseStorageFlowActivity.java      |  33 ++-
 .../storage/flow/TestHBaseStorageFlowRun.java   |  84 +++++--
 .../flow/TestHBaseStorageFlowRunCompaction.java |  14 +-
 .../storage/HBaseTimelineWriterImpl.java        | 232 +++++++++++--------
 .../SubApplicationRowKeyPrefix.java             |  20 --
 .../collector/TimelineCollector.java            |  11 +-
 .../storage/FileSystemTimelineWriterImpl.java   |  15 +-
 .../timelineservice/storage/TimelineWriter.java |  28 +--
 .../collector/TestTimelineCollector.java        |  12 +-
 .../TestFileSystemTimelineWriterImpl.java       |   8 +-
 14 files changed, 465 insertions(+), 217 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/9f654053/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase-tests/src/test/java/org/apache/hadoop/yarn/server/timelineservice/reader/TestTimelineReaderWebServicesHBaseStorage.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase-tests/src/test/java/org/apache/hadoop/yarn/server/timelineservice/reader/TestTimelineReaderWebServicesHBaseStorage.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase-tests/src/test/java/org/apache/hadoop/yarn/server/timelineservice/reader/TestTimelineReaderWebServicesHBaseStorage.java
index 302f8e0..b589206 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase-tests/src/test/java/org/apache/hadoop/yarn/server/timelineservice/reader/TestTimelineReaderWebServicesHBaseStorage.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase-tests/src/test/java/org/apache/hadoop/yarn/server/timelineservice/reader/TestTimelineReaderWebServicesHBaseStorage.java
@@ -36,6 +36,7 @@ import java.util.Set;
 import javax.ws.rs.core.MediaType;
 
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.yarn.api.records.ApplicationId;
 import org.apache.hadoop.yarn.api.records.timelineservice.ApplicationEntity;
 import org.apache.hadoop.yarn.api.records.timelineservice.FlowActivityEntity;
@@ -47,6 +48,7 @@ import org.apache.hadoop.yarn.api.records.timelineservice.TimelineEvent;
 import org.apache.hadoop.yarn.api.records.timelineservice.TimelineMetric;
 import org.apache.hadoop.yarn.api.records.timelineservice.TimelineMetric.Type;
 import org.apache.hadoop.yarn.server.metrics.ApplicationMetricsConstants;
+import org.apache.hadoop.yarn.server.timelineservice.collector.TimelineCollectorContext;
 import org.apache.hadoop.yarn.server.timelineservice.storage.HBaseTimelineWriterImpl;
 import org.apache.hadoop.yarn.server.timelineservice.storage.common.HBaseTimelineStorageUtils;
 import org.apache.hadoop.yarn.server.utils.BuilderUtils;
@@ -334,16 +336,21 @@ public class TestTimelineReaderWebServicesHBaseStorage
 
     HBaseTimelineWriterImpl hbi = null;
     Configuration c1 = getHBaseTestingUtility().getConfiguration();
+    UserGroupInformation remoteUser =
+        UserGroupInformation.createRemoteUser(user);
     try {
       hbi = new HBaseTimelineWriterImpl();
       hbi.init(c1);
-      hbi.write(cluster, user, flow, flowVersion, runid, entity.getId(), te);
-      hbi.write(cluster, user, flow, flowVersion, runid, entity1.getId(), te1);
-      hbi.write(cluster, user, flow, flowVersion, runid1, entity4.getId(), te4);
-      hbi.write(cluster, user, flow2,
-          flowVersion2, runid2, entity3.getId(), te3);
-      hbi.write(cluster, user, flow, flowVersion, runid,
-          "application_1111111111_1111", userEntities);
+      hbi.write(new TimelineCollectorContext(cluster, user, flow, flowVersion,
+          runid, entity.getId()), te, remoteUser);
+      hbi.write(new TimelineCollectorContext(cluster, user, flow, flowVersion,
+          runid, entity1.getId()), te1, remoteUser);
+      hbi.write(new TimelineCollectorContext(cluster, user, flow, flowVersion,
+          runid1, entity4.getId()), te4, remoteUser);
+      hbi.write(new TimelineCollectorContext(cluster, user, flow2, flowVersion2,
+          runid2, entity3.getId()), te3, remoteUser);
+      hbi.write(new TimelineCollectorContext(cluster, user, flow, flowVersion,
+          runid, "application_1111111111_1111"), userEntities, remoteUser);
       writeApplicationEntities(hbi, ts);
       hbi.flush();
     } finally {
@@ -375,8 +382,9 @@ public class TestTimelineReaderWebServicesHBaseStorage
 
         appEntity.addEvent(finished);
         te.addEntity(appEntity);
-        hbi.write("cluster1", "user1", "flow1", "CF7022C10F1354", i,
-            appEntity.getId(), te);
+        hbi.write(new TimelineCollectorContext("cluster1", "user1", "flow1",
+            "CF7022C10F1354", i, appEntity.getId()), te,
+            UserGroupInformation.createRemoteUser("user1"));
       }
     }
   }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/9f654053/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase-tests/src/test/java/org/apache/hadoop/yarn/server/timelineservice/storage/DataGeneratorForTest.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase-tests/src/test/java/org/apache/hadoop/yarn/server/timelineservice/storage/DataGeneratorForTest.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase-tests/src/test/java/org/apache/hadoop/yarn/server/timelineservice/storage/DataGeneratorForTest.java
index 926d8bb..cf6a854 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase-tests/src/test/java/org/apache/hadoop/yarn/server/timelineservice/storage/DataGeneratorForTest.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase-tests/src/test/java/org/apache/hadoop/yarn/server/timelineservice/storage/DataGeneratorForTest.java
@@ -26,6 +26,7 @@ import java.util.Set;
 
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.HBaseTestingUtility;
+import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.yarn.api.records.timelineservice.TimelineEntities;
 import org.apache.hadoop.yarn.api.records.timelineservice.TimelineEntity;
 import org.apache.hadoop.yarn.api.records.timelineservice.TimelineEntityType;
@@ -34,6 +35,7 @@ import org.apache.hadoop.yarn.api.records.timelineservice.TimelineMetric;
 import org.apache.hadoop.yarn.api.records.timelineservice.TimelineMetric.Type;
 import org.apache.hadoop.yarn.conf.YarnConfiguration;
 import org.apache.hadoop.yarn.server.metrics.ApplicationMetricsConstants;
+import org.apache.hadoop.yarn.server.timelineservice.collector.TimelineCollectorContext;
 
 /**
  * Utility class that creates the schema and generates test data.
@@ -155,17 +157,20 @@ public final class DataGeneratorForTest {
       hbi = new HBaseTimelineWriterImpl();
       hbi.init(util.getConfiguration());
       hbi.start();
-      String cluster = "cluster1";
-      String user = "user1";
-      String flow = "some_flow_name";
-      String flowVersion = "AB7822C10F1111";
-      long runid = 1002345678919L;
-      String appName = "application_1111111111_2222";
-      hbi.write(cluster, user, flow, flowVersion, runid, appName, te);
-      appName = "application_1111111111_3333";
-      hbi.write(cluster, user, flow, flowVersion, runid, appName, te1);
-      appName = "application_1111111111_4444";
-      hbi.write(cluster, user, flow, flowVersion, runid, appName, te2);
+      UserGroupInformation remoteUser =
+          UserGroupInformation.createRemoteUser("user1");
+      hbi.write(
+          new TimelineCollectorContext("cluster1", "user1", "some_flow_name",
+              "AB7822C10F1111", 1002345678919L, "application_1111111111_2222"),
+          te, remoteUser);
+      hbi.write(
+          new TimelineCollectorContext("cluster1", "user1", "some_flow_name",
+              "AB7822C10F1111", 1002345678919L, "application_1111111111_3333"),
+          te1, remoteUser);
+      hbi.write(
+          new TimelineCollectorContext("cluster1", "user1", "some_flow_name",
+              "AB7822C10F1111", 1002345678919L, "application_1111111111_4444"),
+          te2, remoteUser);
       hbi.stop();
     } finally {
       if (hbi != null) {
@@ -433,15 +438,19 @@ public final class DataGeneratorForTest {
       hbi = new HBaseTimelineWriterImpl();
       hbi.init(util.getConfiguration());
       hbi.start();
-      String cluster = "cluster1";
-      String user = "user1";
-      String flow = "some_flow_name";
-      String flowVersion = "AB7822C10F1111";
-      long runid = 1002345678919L;
-      hbi.write(cluster, user, flow, flowVersion, runid, appName1, te);
-      hbi.write(cluster, user, flow, flowVersion, runid, appName2, te);
-      hbi.write(cluster, user, flow, flowVersion, runid, appName1, appTe1);
-      hbi.write(cluster, user, flow, flowVersion, runid, appName2, appTe2);
+
+      UserGroupInformation user =
+          UserGroupInformation.createRemoteUser("user1");
+      TimelineCollectorContext context =
+          new TimelineCollectorContext("cluster1", "user1", "some_flow_name",
+              "AB7822C10F1111", 1002345678919L, appName1);
+      hbi.write(context, te, user);
+      hbi.write(context, appTe1, user);
+
+      context = new TimelineCollectorContext("cluster1", "user1",
+          "some_flow_name", "AB7822C10F1111", 1002345678919L, appName2);
+      hbi.write(context, te, user);
+      hbi.write(context, appTe2, user);
       hbi.stop();
     } finally {
       if (hbi != null) {
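
The generator repeats the same cluster/user/flow coordinates for every write.
A small convenience wrapper could cut that noise; writeApp below is not part
of the patch, just a hypothetical sketch built on the same
HBaseTimelineWriterImpl API:

    // Hypothetical helper (not in the patch): pins the shared coordinates
    // and varies only the application id and payload.
    private static void writeApp(HBaseTimelineWriterImpl hbi, String appId,
        TimelineEntities payload, UserGroupInformation ugi)
        throws IOException {
      hbi.write(new TimelineCollectorContext("cluster1", "user1",
          "some_flow_name", "AB7822C10F1111", 1002345678919L, appId),
          payload, ugi);
    }

Callers would then collapse to
writeApp(hbi, "application_1111111111_2222", te, remoteUser) and so on.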

http://git-wip-us.apache.org/repos/asf/hadoop/blob/9f654053/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase-tests/src/test/java/org/apache/hadoop/yarn/server/timelineservice/storage/TestHBaseTimelineStorageApps.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase-tests/src/test/java/org/apache/hadoop/yarn/server/timelineservice/storage/TestHBaseTimelineStorageApps.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase-tests/src/test/java/org/apache/hadoop/yarn/server/timelineservice/storage/TestHBaseTimelineStorageApps.java
index b227185..111008a 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase-tests/src/test/java/org/apache/hadoop/yarn/server/timelineservice/storage/TestHBaseTimelineStorageApps.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase-tests/src/test/java/org/apache/hadoop/yarn/server/timelineservice/storage/TestHBaseTimelineStorageApps.java
@@ -41,6 +41,7 @@ import org.apache.hadoop.hbase.client.Result;
 import org.apache.hadoop.hbase.client.ResultScanner;
 import org.apache.hadoop.hbase.client.Scan;
 import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.yarn.api.records.ApplicationId;
 import org.apache.hadoop.yarn.api.records.timelineservice.ApplicationEntity;
 import org.apache.hadoop.yarn.api.records.timelineservice.TimelineEntities;
@@ -51,6 +52,7 @@ import org.apache.hadoop.yarn.api.records.timelineservice.TimelineMetric;
 import org.apache.hadoop.yarn.api.records.timelineservice.TimelineMetricOperation;
 import org.apache.hadoop.yarn.api.records.timelineservice.TimelineMetric.Type;
 import org.apache.hadoop.yarn.server.metrics.ApplicationMetricsConstants;
+import org.apache.hadoop.yarn.server.timelineservice.collector.TimelineCollectorContext;
 import org.apache.hadoop.yarn.server.timelineservice.reader.TimelineDataToRetrieve;
 import org.apache.hadoop.yarn.server.timelineservice.reader.TimelineEntityFilters;
 import org.apache.hadoop.yarn.server.timelineservice.reader.TimelineReaderContext;
@@ -162,7 +164,8 @@ public class TestHBaseTimelineStorageApps {
       String flow = null;
       String flowVersion = "AB7822C10F1111";
       long runid = 1002345678919L;
-      hbi.write(cluster, user, flow, flowVersion, runid, appId, te);
+      hbi.write(new TimelineCollectorContext(cluster, user, flow, flowVersion,
+          runid, appId), te, UserGroupInformation.createRemoteUser(user));
       hbi.stop();
 
       // retrieve the row
@@ -280,7 +283,8 @@ public class TestHBaseTimelineStorageApps {
       String flow = "s!ome_f\tlow  _n am!e";
       String flowVersion = "AB7822C10F1111";
       long runid = 1002345678919L;
-      hbi.write(cluster, user, flow, flowVersion, runid, appId, te);
+      hbi.write(new TimelineCollectorContext(cluster, user, flow, flowVersion,
+          runid, appId), te, UserGroupInformation.createRemoteUser(user));
 
       // Write entity again, this time without created time.
       entity = new ApplicationEntity();
@@ -292,7 +296,8 @@ public class TestHBaseTimelineStorageApps {
       entity.addInfo(infoMap1);
       te = new TimelineEntities();
       te.addEntity(entity);
-      hbi.write(cluster, user, flow, flowVersion, runid, appId, te);
+      hbi.write(new TimelineCollectorContext(cluster, user, flow, flowVersion,
+          runid, appId), te, UserGroupInformation.createRemoteUser(user));
       hbi.stop();
 
       infoMap.putAll(infoMap1);
@@ -514,7 +519,9 @@ public class TestHBaseTimelineStorageApps {
       String flowVersion = "1111F01C2287BA";
       long runid = 1009876543218L;
       String appName = "application_123465899910_1001";
-      hbi.write(cluster, user, flow, flowVersion, runid, appName, entities);
+      hbi.write(new TimelineCollectorContext(cluster, user, flow, flowVersion,
+          runid, appName), entities,
+          UserGroupInformation.createRemoteUser(user));
       hbi.stop();
 
       // retrieve the row
@@ -629,14 +636,17 @@ public class TestHBaseTimelineStorageApps {
       hbi.init(c1);
       hbi.start();
       // Writing application entity.
+      TimelineCollectorContext context = new TimelineCollectorContext("c1",
+          "u1", "f1", "v1", 1002345678919L, appId);
+      UserGroupInformation user = UserGroupInformation.createRemoteUser("u1");
       try {
-        hbi.write("c1", "u1", "f1", "v1", 1002345678919L, appId, teApp);
+        hbi.write(context, teApp, user);
         Assert.fail("Expected an exception as metric values are non integral");
       } catch (IOException e) {}
 
       // Writing generic entity.
       try {
-        hbi.write("c1", "u1", "f1", "v1", 1002345678919L, appId, teEntity);
+        hbi.write(context, teEntity, user);
         Assert.fail("Expected an exception as metric values are non integral");
       } catch (IOException e) {}
       hbi.stop();

http://git-wip-us.apache.org/repos/asf/hadoop/blob/9f654053/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase-tests/src/test/java/org/apache/hadoop/yarn/server/timelineservice/storage/TestHBaseTimelineStorageEntities.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase-tests/src/test/java/org/apache/hadoop/yarn/server/timelineservice/storage/TestHBaseTimelineStorageEntities.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase-tests/src/test/java/org/apache/hadoop/yarn/server/timelineservice/storage/TestHBaseTimelineStorageEntities.java
index 3756091..5e08999 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase-tests/src/test/java/org/apache/hadoop/yarn/server/timelineservice/storage/TestHBaseTimelineStorageEntities.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase-tests/src/test/java/org/apache/hadoop/yarn/server/timelineservice/storage/TestHBaseTimelineStorageEntities.java
@@ -40,6 +40,7 @@ import org.apache.hadoop.hbase.client.ConnectionFactory;
 import org.apache.hadoop.hbase.client.Result;
 import org.apache.hadoop.hbase.client.ResultScanner;
 import org.apache.hadoop.hbase.client.Scan;
+import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.yarn.api.records.ApplicationId;
 import org.apache.hadoop.yarn.api.records.timelineservice.ApplicationEntity;
 import org.apache.hadoop.yarn.api.records.timelineservice.TimelineEntities;
@@ -48,6 +49,7 @@ import org.apache.hadoop.yarn.api.records.timelineservice.TimelineEvent;
 import org.apache.hadoop.yarn.api.records.timelineservice.TimelineMetric;
 import org.apache.hadoop.yarn.api.records.timelineservice.TimelineMetric.Type;
 import org.apache.hadoop.yarn.server.metrics.ApplicationMetricsConstants;
+import org.apache.hadoop.yarn.server.timelineservice.collector.TimelineCollectorContext;
 import org.apache.hadoop.yarn.server.timelineservice.reader.TimelineDataToRetrieve;
 import org.apache.hadoop.yarn.server.timelineservice.reader.TimelineEntityFilters;
 import org.apache.hadoop.yarn.server.timelineservice.reader.TimelineReaderContext;
@@ -72,6 +74,11 @@ import org.apache.hadoop.yarn.server.timelineservice.storage.entity.EntityColumn
 import org.apache.hadoop.yarn.server.timelineservice.storage.entity.EntityRowKey;
 import org.apache.hadoop.yarn.server.timelineservice.storage.entity.EntityRowKeyPrefix;
 import org.apache.hadoop.yarn.server.timelineservice.storage.entity.EntityTable;
+import org.apache.hadoop.yarn.server.timelineservice.storage.subapplication.SubApplicationColumn;
+import org.apache.hadoop.yarn.server.timelineservice.storage.subapplication.SubApplicationColumnPrefix;
+import org.apache.hadoop.yarn.server.timelineservice.storage.subapplication.SubApplicationRowKey;
+import org.apache.hadoop.yarn.server.timelineservice.storage.subapplication.SubApplicationRowKeyPrefix;
+import org.apache.hadoop.yarn.server.timelineservice.storage.subapplication.SubApplicationTable;
 import org.junit.After;
 import org.junit.AfterClass;
 import org.junit.Assert;
@@ -197,13 +204,16 @@ public class TestHBaseTimelineStorageEntities {
       hbi.start();
       String cluster = "cluster_test_write_entity";
       String user = "user1";
+      String subAppUser = "subAppUser1";
       String flow = "some_flow_name";
       String flowVersion = "AB7822C10F1111";
       long runid = 1002345678919L;
       String appName = HBaseTimelineStorageUtils.convertApplicationIdToString(
           ApplicationId.newInstance(System.currentTimeMillis() + 9000000L, 1)
       );
-      hbi.write(cluster, user, flow, flowVersion, runid, appName, te);
+      hbi.write(new TimelineCollectorContext(cluster, user, flow, flowVersion,
+          runid, appName), te,
+          UserGroupInformation.createRemoteUser(subAppUser));
       hbi.stop();
 
       // scan the table and see that entity exists
@@ -354,6 +364,11 @@ public class TestHBaseTimelineStorageEntities {
         assertEquals(metricValues.get(ts - 20000),
             metric.getValues().get(ts - 20000));
       }
+
+      // verify for sub application table entities.
+      verifySubApplicationTableEntities(cluster, user, flow, flowVersion, runid,
+          appName, subAppUser, c1, entity, id, type, infoMap, isRelatedTo,
+          relatesTo, conf, metricValues, metrics, cTime, m1);
     } finally {
       if (hbi != null) {
         hbi.stop();
@@ -362,6 +377,98 @@ public class TestHBaseTimelineStorageEntities {
     }
   }
 
+  private void verifySubApplicationTableEntities(String cluster, String user,
+      String flow, String flowVersion, Long runid, String appName,
+      String subAppUser, Configuration c1, TimelineEntity entity, String id,
+      String type, Map<String, Object> infoMap,
+      Map<String, Set<String>> isRelatedTo, Map<String, Set<String>> relatesTo,
+      Map<String, String> conf, Map<Long, Number> metricValues,
+      Set<TimelineMetric> metrics, Long cTime, TimelineMetric m1)
+      throws IOException {
+    Scan s = new Scan();
+    // read from SubApplicationTable
+    byte[] startRow = new SubApplicationRowKeyPrefix(cluster, subAppUser, null,
+        null, null, null).getRowKeyPrefix();
+    s.setStartRow(startRow);
+    s.setMaxVersions(Integer.MAX_VALUE);
+    Connection conn = ConnectionFactory.createConnection(c1);
+    ResultScanner scanner =
+        new SubApplicationTable().getResultScanner(c1, conn, s);
+
+    int rowCount = 0;
+    int colCount = 0;
+    KeyConverter<String> stringKeyConverter = new StringKeyConverter();
+    for (Result result : scanner) {
+      if (result != null && !result.isEmpty()) {
+        rowCount++;
+        colCount += result.size();
+        byte[] row1 = result.getRow();
+        assertTrue(verifyRowKeyForSubApplication(row1, subAppUser, cluster,
+            user, entity));
+
+        // check info column family
+        String id1 = SubApplicationColumn.ID.readResult(result).toString();
+        assertEquals(id, id1);
+
+        String type1 = SubApplicationColumn.TYPE.readResult(result).toString();
+        assertEquals(type, type1);
+
+        Long cTime1 =
+            (Long) SubApplicationColumn.CREATED_TIME.readResult(result);
+        assertEquals(cTime1, cTime);
+
+        Map<String, Object> infoColumns = SubApplicationColumnPrefix.INFO
+            .readResults(result, new StringKeyConverter());
+        assertEquals(infoMap, infoColumns);
+
+        // Remember isRelatedTo is of type Map<String, Set<String>>
+        for (Map.Entry<String, Set<String>> isRelatedToEntry : isRelatedTo
+            .entrySet()) {
+          Object isRelatedToValue = SubApplicationColumnPrefix.IS_RELATED_TO
+              .readResult(result, isRelatedToEntry.getKey());
+          String compoundValue = isRelatedToValue.toString();
+          // id7?id9?id6
+          Set<String> isRelatedToValues =
+              new HashSet<String>(Separator.VALUES.splitEncoded(compoundValue));
+          assertEquals(isRelatedTo.get(isRelatedToEntry.getKey()).size(),
+              isRelatedToValues.size());
+          for (String v : isRelatedToEntry.getValue()) {
+            assertTrue(isRelatedToValues.contains(v));
+          }
+        }
+
+        // RelatesTo
+        for (Map.Entry<String, Set<String>> relatesToEntry : relatesTo
+            .entrySet()) {
+          String compoundValue = SubApplicationColumnPrefix.RELATES_TO
+              .readResult(result, relatesToEntry.getKey()).toString();
+          // id3?id4?id5
+          Set<String> relatesToValues =
+              new HashSet<String>(Separator.VALUES.splitEncoded(compoundValue));
+          assertEquals(relatesTo.get(relatesToEntry.getKey()).size(),
+              relatesToValues.size());
+          for (String v : relatesToEntry.getValue()) {
+            assertTrue(relatesToValues.contains(v));
+          }
+        }
+
+        // Configuration
+        Map<String, Object> configColumns = SubApplicationColumnPrefix.CONFIG
+            .readResults(result, stringKeyConverter);
+        assertEquals(conf, configColumns);
+
+        NavigableMap<String, NavigableMap<Long, Number>> metricsResult =
+            SubApplicationColumnPrefix.METRIC.readResultsWithTimestamps(result,
+                stringKeyConverter);
+
+        NavigableMap<Long, Number> metricMap = metricsResult.get(m1.getId());
+        matchMetrics(metricValues, metricMap);
+      }
+    }
+    assertEquals(1, rowCount);
+    assertEquals(16, colCount);
+  }
+
   private boolean isRowKeyCorrect(byte[] rowKey, String cluster, String user,
       String flow, Long runid, String appName, TimelineEntity te) {
 
@@ -409,7 +516,9 @@ public class TestHBaseTimelineStorageEntities {
       byte[] startRow =
           new EntityRowKeyPrefix(cluster, user, flow, runid, appName)
               .getRowKeyPrefix();
-      hbi.write(cluster, user, flow, flowVersion, runid, appName, entities);
+      hbi.write(new TimelineCollectorContext(cluster, user, flow, flowVersion,
+          runid, appName), entities,
+          UserGroupInformation.createRemoteUser(user));
       hbi.stop();
       // scan the table and see that entity exists
       Scan s = new Scan();
@@ -514,7 +623,9 @@ public class TestHBaseTimelineStorageEntities {
       String flowVersion = "1111F01C2287BA";
       long runid = 1009876543218L;
       String appName = "application_123465899910_2001";
-      hbi.write(cluster, user, flow, flowVersion, runid, appName, entities);
+      hbi.write(new TimelineCollectorContext(cluster, user, flow, flowVersion,
+          runid, appName), entities,
+          UserGroupInformation.createRemoteUser(user));
       hbi.stop();
 
       // read the timeline entity using the reader this time
@@ -1762,4 +1873,15 @@ public class TestHBaseTimelineStorageEntities {
   public static void tearDownAfterClass() throws Exception {
     util.shutdownMiniCluster();
   }
+
+  private boolean verifyRowKeyForSubApplication(byte[] rowKey, String suAppUser,
+      String cluster, String user, TimelineEntity te) {
+    SubApplicationRowKey key = SubApplicationRowKey.parseRowKey(rowKey);
+    assertEquals(suAppUser, key.getSubAppUserId());
+    assertEquals(cluster, key.getClusterId());
+    assertEquals(te.getType(), key.getEntityType());
+    assertEquals(te.getId(), key.getEntityId());
+    assertEquals(user, key.getUserId());
+    return true;
+  }
 }
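
A note on the compound values asserted above: the relation columns store a
whole set in a single cell, joined and split with Separator.VALUES (hence the
"id3?id4?id5" comments). A small standalone illustration of that round trip,
with illustrative member values:

    // The writer joins the set into one separator-encoded cell value...
    String compound = Separator.VALUES.joinEncoded(
        new HashSet<String>(Arrays.asList("id3", "id4", "id5")));
    // ...and readers split it back into the original members.
    Set<String> members =
        new HashSet<String>(Separator.VALUES.splitEncoded(compound));
    assertTrue(members.contains("id4"));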

http://git-wip-us.apache.org/repos/asf/hadoop/blob/9f654053/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase-tests/src/test/java/org/apache/hadoop/yarn/server/timelineservice/storage/flow/TestHBaseStorageFlowActivity.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase-tests/src/test/java/org/apache/hadoop/yarn/server/timelineservice/storage/flow/TestHBaseStorageFlowActivity.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase-tests/src/test/java/org/apache/hadoop/yarn/server/timelineservice/storage/flow/TestHBaseStorageFlowActivity.java
index 0923105..4bf221e 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase-tests/src/test/java/org/apache/hadoop/yarn/server/timelineservice/storage/flow/TestHBaseStorageFlowActivity.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase-tests/src/test/java/org/apache/hadoop/yarn/server/timelineservice/storage/flow/TestHBaseStorageFlowActivity.java
@@ -38,12 +38,14 @@ import org.apache.hadoop.hbase.client.ResultScanner;
 import org.apache.hadoop.hbase.client.Scan;
 import org.apache.hadoop.hbase.client.Table;
 import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.yarn.api.records.timelineservice.FlowActivityEntity;
 import org.apache.hadoop.yarn.api.records.timelineservice.FlowRunEntity;
 import org.apache.hadoop.yarn.api.records.timelineservice.TimelineEntities;
 import org.apache.hadoop.yarn.api.records.timelineservice.TimelineEntity;
 import org.apache.hadoop.yarn.api.records.timelineservice.TimelineEntityType;
 import org.apache.hadoop.yarn.server.timeline.GenericObjectMapper;
+import org.apache.hadoop.yarn.server.timelineservice.collector.TimelineCollectorContext;
 import org.apache.hadoop.yarn.server.timelineservice.reader.TimelineDataToRetrieve;
 import org.apache.hadoop.yarn.server.timelineservice.reader.TimelineEntityFilters;
 import org.apache.hadoop.yarn.server.timelineservice.reader.TimelineReaderContext;
@@ -117,13 +119,18 @@ public class TestHBaseStorageFlowActivity {
     try {
       hbi = new HBaseTimelineWriterImpl();
       hbi.init(c1);
-      hbi.write(cluster, user, flow, flowVersion, runid, appName, te);
+
+      UserGroupInformation remoteUser =
+          UserGroupInformation.createRemoteUser(user);
+      hbi.write(new TimelineCollectorContext(cluster, user, flow, flowVersion,
+          runid, appName), te, remoteUser);
 
       // write another entity with the right min start time
       te = new TimelineEntities();
       te.addEntity(entityMinStartTime);
       appName = "application_100000000000_3333";
-      hbi.write(cluster, user, flow, flowVersion, runid, appName, te);
+      hbi.write(new TimelineCollectorContext(cluster, user, flow, flowVersion,
+          runid, appName), te, remoteUser);
 
       // write another entity for max end time
       TimelineEntity entityMaxEndTime = TestFlowDataGenerator
@@ -131,7 +138,8 @@ public class TestHBaseStorageFlowActivity {
       te = new TimelineEntities();
       te.addEntity(entityMaxEndTime);
       appName = "application_100000000000_4444";
-      hbi.write(cluster, user, flow, flowVersion, runid, appName, te);
+      hbi.write(new TimelineCollectorContext(cluster, user, flow, flowVersion,
+          runid, appName), te, remoteUser);
 
       // write another entity with greater start time
       TimelineEntity entityGreaterStartTime = TestFlowDataGenerator
@@ -139,7 +147,8 @@ public class TestHBaseStorageFlowActivity {
       te = new TimelineEntities();
       te.addEntity(entityGreaterStartTime);
       appName = "application_1000000000000000_2222";
-      hbi.write(cluster, user, flow, flowVersion, runid, appName, te);
+      hbi.write(new TimelineCollectorContext(cluster, user, flow, flowVersion,
+          runid, appName), te, remoteUser);
 
       // flush everything to hbase
       hbi.flush();
@@ -227,7 +236,8 @@ public class TestHBaseStorageFlowActivity {
       hbi = new HBaseTimelineWriterImpl();
       hbi.init(c1);
       String appName = "application_1111999999_1234";
-      hbi.write(cluster, user, flow, flowVersion, runid, appName, te);
+      hbi.write(new TimelineCollectorContext(cluster, user, flow, flowVersion,
+          runid, appName), te, UserGroupInformation.createRemoteUser(user));
       hbi.flush();
     } finally {
       if (hbi != null) {
@@ -340,20 +350,27 @@ public class TestHBaseStorageFlowActivity {
     try {
       hbi = new HBaseTimelineWriterImpl();
       hbi.init(c1);
+
+      UserGroupInformation remoteUser =
+          UserGroupInformation.createRemoteUser(user);
+
       String appName = "application_11888888888_1111";
-      hbi.write(cluster, user, flow, flowVersion1, runid1, appName, te);
+      hbi.write(new TimelineCollectorContext(cluster, user, flow, flowVersion1,
+          runid1, appName), te, remoteUser);
 
       // write an application to this flow but with a different runid/version
       te = new TimelineEntities();
       te.addEntity(entityApp1);
       appName = "application_11888888888_2222";
-      hbi.write(cluster, user, flow, flowVersion2, runid2, appName, te);
+      hbi.write(new TimelineCollectorContext(cluster, user, flow, flowVersion2,
+          runid2, appName), te, remoteUser);
 
       // write an application to this flow but with a different runid/version
       te = new TimelineEntities();
       te.addEntity(entityApp1);
       appName = "application_11888888888_3333";
-      hbi.write(cluster, user, flow, flowVersion3, runid3, appName, te);
+      hbi.write(new TimelineCollectorContext(cluster, user, flow, flowVersion3,
+          runid3, appName), te, remoteUser);
 
       hbi.flush();
     } finally {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/9f654053/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase-tests/src/test/java/org/apache/hadoop/yarn/server/timelineservice/storage/flow/TestHBaseStorageFlowRun.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase-tests/src/test/java/org/apache/hadoop/yarn/server/timelineservice/storage/flow/TestHBaseStorageFlowRun.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase-tests/src/test/java/org/apache/hadoop/yarn/server/timelineservice/storage/flow/TestHBaseStorageFlowRun.java
index acfdc4d..1ad02e1 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase-tests/src/test/java/org/apache/hadoop/yarn/server/timelineservice/storage/flow/TestHBaseStorageFlowRun.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase-tests/src/test/java/org/apache/hadoop/yarn/server/timelineservice/storage/flow/TestHBaseStorageFlowRun.java
@@ -43,11 +43,13 @@ import org.apache.hadoop.hbase.client.Table;
 import org.apache.hadoop.hbase.regionserver.HRegionServer;
 import org.apache.hadoop.hbase.regionserver.Region;
 import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.yarn.api.records.timelineservice.FlowRunEntity;
 import org.apache.hadoop.yarn.api.records.timelineservice.TimelineEntities;
 import org.apache.hadoop.yarn.api.records.timelineservice.TimelineEntity;
 import org.apache.hadoop.yarn.api.records.timelineservice.TimelineEntityType;
 import org.apache.hadoop.yarn.api.records.timelineservice.TimelineMetric;
+import org.apache.hadoop.yarn.server.timelineservice.collector.TimelineCollectorContext;
 import org.apache.hadoop.yarn.server.timelineservice.reader.TimelineDataToRetrieve;
 import org.apache.hadoop.yarn.server.timelineservice.reader.TimelineEntityFilters;
 import org.apache.hadoop.yarn.server.timelineservice.reader.TimelineReaderContext;
@@ -181,13 +183,18 @@ public class TestHBaseStorageFlowRun {
     try {
       hbi = new HBaseTimelineWriterImpl();
       hbi.init(c1);
-      hbi.write(cluster, user, flow, flowVersion, runid, appName, te);
+
+      UserGroupInformation remoteUser =
+          UserGroupInformation.createRemoteUser(user);
+      hbi.write(new TimelineCollectorContext(cluster, user, flow, flowVersion,
+          runid, appName), te, remoteUser);
 
       // write another entity with the right min start time
       te = new TimelineEntities();
       te.addEntity(entityMinStartTime);
       appName = "application_100000000000_3333";
-      hbi.write(cluster, user, flow, flowVersion, runid, appName, te);
+      hbi.write(new TimelineCollectorContext(cluster, user, flow, flowVersion,
+          runid, appName), te, remoteUser);
 
       // write another entity for max end time
       TimelineEntity entityMaxEndTime = TestFlowDataGenerator
@@ -195,7 +202,8 @@ public class TestHBaseStorageFlowRun {
       te = new TimelineEntities();
       te.addEntity(entityMaxEndTime);
       appName = "application_100000000000_4444";
-      hbi.write(cluster, user, flow, flowVersion, runid, appName, te);
+      hbi.write(new TimelineCollectorContext(cluster, user, flow, flowVersion,
+          runid, appName), te, remoteUser);
 
       // write another entity with greater start time
       TimelineEntity entityGreaterStartTime = TestFlowDataGenerator
@@ -203,7 +211,8 @@ public class TestHBaseStorageFlowRun {
       te = new TimelineEntities();
       te.addEntity(entityGreaterStartTime);
       appName = "application_1000000000000000_2222";
-      hbi.write(cluster, user, flow, flowVersion, runid, appName, te);
+      hbi.write(new TimelineCollectorContext(cluster, user, flow, flowVersion,
+          runid, appName), te, remoteUser);
 
       // flush everything to hbase
       hbi.flush();
@@ -287,15 +296,19 @@ public class TestHBaseStorageFlowRun {
     try {
       hbi = new HBaseTimelineWriterImpl();
       hbi.init(c1);
+      UserGroupInformation remoteUser =
+          UserGroupInformation.createRemoteUser(user);
       String appName = "application_11111111111111_1111";
-      hbi.write(cluster, user, flow, flowVersion, runid, appName, te);
+      hbi.write(new TimelineCollectorContext(cluster, user, flow, flowVersion,
+          runid, appName), te, remoteUser);
       // write another application with same metric to this flow
       te = new TimelineEntities();
       TimelineEntity entityApp2 = TestFlowDataGenerator
           .getEntityMetricsApp2(System.currentTimeMillis());
       te.addEntity(entityApp2);
       appName = "application_11111111111111_2222";
-      hbi.write(cluster, user, flow, flowVersion, runid, appName, te);
+      hbi.write(new TimelineCollectorContext(cluster, user, flow, flowVersion,
+          runid, appName), te, remoteUser);
       hbi.flush();
     } finally {
       if (hbi != null) {
@@ -556,15 +569,22 @@ public class TestHBaseStorageFlowRun {
     try {
       hbi = new HBaseTimelineWriterImpl();
       hbi.init(c1);
+      UserGroupInformation remoteUser =
+          UserGroupInformation.createRemoteUser(user);
+
       String appName = "application_11111111111111_1111";
-      hbi.write(cluster, user, flow, flowVersion, 1002345678919L, appName, te);
+      hbi.write(new TimelineCollectorContext(cluster, user, flow, flowVersion,
+          1002345678919L, appName), te,
+          remoteUser);
       // write another application with same metric to this flow
       te = new TimelineEntities();
       TimelineEntity entityApp2 = TestFlowDataGenerator
           .getEntityMetricsApp2(System.currentTimeMillis());
       te.addEntity(entityApp2);
       appName = "application_11111111111111_2222";
-      hbi.write(cluster, user, flow, flowVersion, 1002345678918L, appName, te);
+      hbi.write(new TimelineCollectorContext(cluster, user, flow, flowVersion,
+          1002345678918L, appName), te,
+          remoteUser);
       hbi.flush();
     } finally {
       if (hbi != null) {
@@ -643,15 +663,20 @@ public class TestHBaseStorageFlowRun {
     try {
       hbi = new HBaseTimelineWriterImpl();
       hbi.init(c1);
+      UserGroupInformation remoteUser =
+          UserGroupInformation.createRemoteUser(user);
+
       String appName = "application_11111111111111_1111";
-      hbi.write(cluster, user, flow, flowVersion, runid, appName, te);
+      hbi.write(new TimelineCollectorContext(cluster, user, flow, flowVersion,
+          runid, appName), te, remoteUser);
       // write another application with same metric to this flow
       te = new TimelineEntities();
       TimelineEntity entityApp2 = TestFlowDataGenerator
           .getEntityMetricsApp2(System.currentTimeMillis());
       te.addEntity(entityApp2);
       appName = "application_11111111111111_2222";
-      hbi.write(cluster, user, flow, flowVersion, runid, appName, te);
+      hbi.write(new TimelineCollectorContext(cluster, user, flow, flowVersion,
+          runid, appName), te, remoteUser);
       hbi.flush();
     } finally {
       if (hbi != null) {
@@ -737,6 +762,8 @@ public class TestHBaseStorageFlowRun {
     try {
       hbi = new HBaseTimelineWriterImpl();
       hbi.init(c1);
+      UserGroupInformation remoteUser =
+          UserGroupInformation.createRemoteUser(user);
 
       for (int i = start; i < count; i++) {
         String appName = "application_1060350000000_" + appIdSuffix;
@@ -746,7 +773,8 @@ public class TestHBaseStorageFlowRun {
         te1.addEntity(entityApp1);
         entityApp2 = TestFlowDataGenerator.getMaxFlushEntity(insertTs);
         te1.addEntity(entityApp2);
-        hbi.write(cluster, user, flow, flowVersion, runid, appName, te1);
+        hbi.write(new TimelineCollectorContext(cluster, user, flow, flowVersion,
+            runid, appName), te1, remoteUser);
         Thread.sleep(1);
 
         appName = "application_1001199480000_7" + appIdSuffix;
@@ -758,7 +786,9 @@ public class TestHBaseStorageFlowRun {
         entityApp2 = TestFlowDataGenerator.getMaxFlushEntity(insertTs);
         te1.addEntity(entityApp2);
 
-        hbi.write(cluster, user, flow, flowVersion, runid, appName, te1);
+        hbi.write(new TimelineCollectorContext(cluster, user, flow, flowVersion,
+            runid, appName), te1,
+            remoteUser);
         if (i % 1000 == 0) {
           hbi.flush();
           checkMinMaxFlush(c1, minTS, startTs, count, cluster, user, flow,
@@ -826,16 +856,23 @@ public class TestHBaseStorageFlowRun {
     try {
       hbi = new HBaseTimelineWriterImpl();
       hbi.init(c1);
-      hbi.write(cluster, user, flow, "CF7022C10F1354", 1002345678919L,
-          "application_11111111111111_1111", te);
+      UserGroupInformation remoteUser =
+          UserGroupInformation.createRemoteUser(user);
+
+      hbi.write(
+          new TimelineCollectorContext(cluster, user, flow, "CF7022C10F1354",
+              1002345678919L, "application_11111111111111_1111"),
+          te, remoteUser);
       // write another application with same metric to this flow
       te = new TimelineEntities();
       TimelineEntity entityApp2 = TestFlowDataGenerator.getEntityMetricsApp2(
           System.currentTimeMillis());
       entityApp2.setCreatedTime(1425016502000L);
       te.addEntity(entityApp2);
-      hbi.write(cluster, user, flow, "CF7022C10F1354", 1002345678918L,
-          "application_11111111111111_2222", te);
+      hbi.write(
+          new TimelineCollectorContext(cluster, user, flow, "CF7022C10F1354",
+              1002345678918L, "application_11111111111111_2222"),
+          te, remoteUser);
       hbi.flush();
     } finally {
       if (hbi != null) {
@@ -911,15 +948,22 @@ public class TestHBaseStorageFlowRun {
     try {
       hbi = new HBaseTimelineWriterImpl();
       hbi.init(c1);
-      hbi.write(cluster, user, flow, "CF7022C10F1354", 1002345678919L,
-          "application_11111111111111_1111", te);
+      UserGroupInformation remoteUser =
+          UserGroupInformation.createRemoteUser(user);
+
+      hbi.write(
+          new TimelineCollectorContext(cluster, user, flow, "CF7022C10F1354",
+              1002345678919L, "application_11111111111111_1111"),
+          te, remoteUser);
       // write another application with same metric to this flow
       te = new TimelineEntities();
       TimelineEntity entityApp2 = TestFlowDataGenerator.getEntityMetricsApp2(
           System.currentTimeMillis());
       te.addEntity(entityApp2);
-      hbi.write(cluster, user, flow, "CF7022C10F1354", 1002345678918L,
-          "application_11111111111111_2222", te);
+      hbi.write(
+          new TimelineCollectorContext(cluster, user, flow, "CF7022C10F1354",
+              1002345678918L, "application_11111111111111_2222"),
+          te, remoteUser);
       hbi.flush();
     } finally {
       if (hbi != null) {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/9f654053/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase-tests/src/test/java/org/apache/hadoop/yarn/server/timelineservice/storage/flow/TestHBaseStorageFlowRunCompaction.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase-tests/src/test/java/org/apache/hadoop/yarn/server/timelineservice/storage/flow/TestHBaseStorageFlowRunCompaction.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase-tests/src/test/java/org/apache/hadoop/yarn/server/timelineservice/storage/flow/TestHBaseStorageFlowRunCompaction.java
index fa9d029..0ef8260 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase-tests/src/test/java/org/apache/hadoop/yarn/server/timelineservice/storage/flow/TestHBaseStorageFlowRunCompaction.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase-tests/src/test/java/org/apache/hadoop/yarn/server/timelineservice/storage/flow/TestHBaseStorageFlowRunCompaction.java
@@ -48,8 +48,10 @@ import org.apache.hadoop.hbase.regionserver.Region;
 import org.apache.hadoop.hbase.regionserver.HRegionServer;
 import org.apache.hadoop.hbase.regionserver.compactions.CompactionRequest;
 import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.yarn.api.records.timelineservice.TimelineEntities;
 import org.apache.hadoop.yarn.api.records.timelineservice.TimelineEntity;
+import org.apache.hadoop.yarn.server.timelineservice.collector.TimelineCollectorContext;
 import org.apache.hadoop.yarn.server.timelineservice.storage.DataGeneratorForTest;
 import org.apache.hadoop.yarn.server.timelineservice.storage.HBaseTimelineWriterImpl;
 import org.apache.hadoop.yarn.server.timelineservice.storage.common.BaseTable;
@@ -280,9 +282,12 @@ public class TestHBaseStorageFlowRunCompaction {
     Configuration c1 = util.getConfiguration();
     TimelineEntities te1 = null;
     TimelineEntity entityApp1 = null;
+    UserGroupInformation remoteUser =
+        UserGroupInformation.createRemoteUser(user);
     try {
       hbi = new HBaseTimelineWriterImpl();
       hbi.init(c1);
+
       // now insert count * ( 100 + 100) metrics
       // each call to getEntityMetricsApp1 brings back 100 values
       // of metric1 and 100 of metric2
@@ -292,14 +297,16 @@ public class TestHBaseStorageFlowRunCompaction {
         te1 = new TimelineEntities();
         entityApp1 = TestFlowDataGenerator.getEntityMetricsApp1(insertTs, c1);
         te1.addEntity(entityApp1);
-        hbi.write(cluster, user, flow, flowVersion, runid, appName, te1);
+        hbi.write(new TimelineCollectorContext(cluster, user, flow, flowVersion,
+            runid, appName), te1, remoteUser);
 
         appName = "application_2048000000000_7" + appIdSuffix;
         insertTs++;
         te1 = new TimelineEntities();
         entityApp1 = TestFlowDataGenerator.getEntityMetricsApp2(insertTs);
         te1.addEntity(entityApp1);
-        hbi.write(cluster, user, flow, flowVersion, runid, appName, te1);
+        hbi.write(new TimelineCollectorContext(cluster, user, flow, flowVersion,
+            runid, appName), te1, remoteUser);
       }
     } finally {
       String appName = "application_10240000000000_" + appIdSuffix;
@@ -308,7 +315,8 @@ public class TestHBaseStorageFlowRunCompaction {
           insertTs + 1, c1);
       te1.addEntity(entityApp1);
       if (hbi != null) {
-        hbi.write(cluster, user, flow, flowVersion, runid, appName, te1);
+        hbi.write(new TimelineCollectorContext(cluster, user, flow, flowVersion,
+            runid, appName), te1, remoteUser);
         hbi.flush();
         hbi.close();
       }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/9f654053/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/HBaseTimelineWriterImpl.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/HBaseTimelineWriterImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/HBaseTimelineWriterImpl.java
index df33024..9e9134c 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/HBaseTimelineWriterImpl.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/HBaseTimelineWriterImpl.java
@@ -26,6 +26,7 @@ import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.client.Connection;
 import org.apache.hadoop.hbase.client.ConnectionFactory;
+import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.service.AbstractService;
 import  org.apache.hadoop.yarn.api.records.timelineservice.ApplicationEntity;
 import org.apache.hadoop.yarn.api.records.timelineservice.TimelineEntities;
@@ -34,6 +35,7 @@ import org.apache.hadoop.yarn.api.records.timelineservice.TimelineEvent;
 import org.apache.hadoop.yarn.api.records.timelineservice.TimelineMetric;
 import org.apache.hadoop.yarn.api.records.timelineservice.TimelineWriteResponse;
 import org.apache.hadoop.yarn.server.metrics.ApplicationMetricsConstants;
+import org.apache.hadoop.yarn.server.timelineservice.collector.TimelineCollectorContext;
 import org.apache.hadoop.yarn.server.timelineservice.storage.application.ApplicationColumn;
 import org.apache.hadoop.yarn.server.timelineservice.storage.application.ApplicationColumnPrefix;
 import org.apache.hadoop.yarn.server.timelineservice.storage.application.ApplicationRowKey;
@@ -63,6 +65,10 @@ import org.apache.hadoop.yarn.server.timelineservice.storage.flow.FlowRunColumn;
 import org.apache.hadoop.yarn.server.timelineservice.storage.flow.FlowRunColumnPrefix;
 import org.apache.hadoop.yarn.server.timelineservice.storage.flow.FlowRunRowKey;
 import org.apache.hadoop.yarn.server.timelineservice.storage.flow.FlowRunTable;
+import org.apache.hadoop.yarn.server.timelineservice.storage.subapplication.SubApplicationColumn;
+import org.apache.hadoop.yarn.server.timelineservice.storage.subapplication.SubApplicationColumnPrefix;
+import org.apache.hadoop.yarn.server.timelineservice.storage.subapplication.SubApplicationRowKey;
+import org.apache.hadoop.yarn.server.timelineservice.storage.subapplication.SubApplicationTable;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
@@ -85,6 +91,7 @@ public class HBaseTimelineWriterImpl extends AbstractService implements
   private TypedBufferedMutator<ApplicationTable> applicationTable;
   private TypedBufferedMutator<FlowActivityTable> flowActivityTable;
   private TypedBufferedMutator<FlowRunTable> flowRunTable;
+  private TypedBufferedMutator<SubApplicationTable> subApplicationTable;
 
   /**
    * Used to convert string key components to and from storage format.
@@ -97,6 +104,10 @@ public class HBaseTimelineWriterImpl extends AbstractService implements
    */
   private final KeyConverter<Long> longKeyConverter = new LongKeyConverter();
 
+  private enum Tables {
+    APPLICATION_TABLE, ENTITY_TABLE, SUBAPPLICATION_TABLE
+  };
+
   public HBaseTimelineWriterImpl() {
     super(HBaseTimelineWriterImpl.class.getName());
   }
@@ -116,17 +127,28 @@ public class HBaseTimelineWriterImpl extends AbstractService implements
     flowRunTable = new FlowRunTable().getTableMutator(hbaseConf, conn);
     flowActivityTable =
         new FlowActivityTable().getTableMutator(hbaseConf, conn);
+    subApplicationTable =
+        new SubApplicationTable().getTableMutator(hbaseConf, conn);
   }
 
   /**
    * Stores all the information in TimelineEntities to the timeline store.
    */
   @Override
-  public TimelineWriteResponse write(String clusterId, String userId,
-      String flowName, String flowVersion, long flowRunId, String appId,
-      TimelineEntities data) throws IOException {
+  public TimelineWriteResponse write(TimelineCollectorContext context,
+      TimelineEntities data, UserGroupInformation callerUgi)
+      throws IOException {
 
     TimelineWriteResponse putStatus = new TimelineWriteResponse();
+
+    String clusterId = context.getClusterId();
+    String userId = context.getUserId();
+    String flowName = context.getFlowName();
+    String flowVersion = context.getFlowVersion();
+    long flowRunId = context.getFlowRunId();
+    String appId = context.getAppId();
+    String subApplicationUser = callerUgi.getShortUserName();
+
     // defensive coding to avoid NPE during row key construction
     if ((flowName == null) || (appId == null) || (clusterId == null)
         || (userId == null)) {
@@ -152,18 +174,22 @@ public class HBaseTimelineWriterImpl extends AbstractService implements
             new ApplicationRowKey(clusterId, userId, flowName, flowRunId,
                 appId);
         rowKey = applicationRowKey.getRowKey();
+        store(rowKey, te, flowVersion, Tables.APPLICATION_TABLE);
       } else {
         EntityRowKey entityRowKey =
             new EntityRowKey(clusterId, userId, flowName, flowRunId, appId,
                 te.getType(), te.getIdPrefix(), te.getId());
         rowKey = entityRowKey.getRowKey();
+        store(rowKey, te, flowVersion, Tables.ENTITY_TABLE);
       }
 
-      storeInfo(rowKey, te, flowVersion, isApplication);
-      storeEvents(rowKey, te.getEvents(), isApplication);
-      storeConfig(rowKey, te.getConfigs(), isApplication);
-      storeMetrics(rowKey, te.getMetrics(), isApplication);
-      storeRelations(rowKey, te, isApplication);
+      if (!isApplication && !userId.equals(subApplicationUser)) {
+        SubApplicationRowKey subApplicationRowKey =
+            new SubApplicationRowKey(subApplicationUser, clusterId,
+                te.getType(), te.getIdPrefix(), te.getId(), userId);
+        rowKey = subApplicationRowKey.getRowKey();
+        store(rowKey, te, flowVersion, Tables.SUBAPPLICATION_TABLE);
+      }
 
       if (isApplication) {
         TimelineEvent event =
@@ -304,72 +330,108 @@ public class HBaseTimelineWriterImpl extends AbstractService implements
     }
   }
 
-  private void storeRelations(byte[] rowKey, TimelineEntity te,
-      boolean isApplication) throws IOException {
-    if (isApplication) {
-      storeRelations(rowKey, te.getIsRelatedToEntities(),
-          ApplicationColumnPrefix.IS_RELATED_TO, applicationTable);
-      storeRelations(rowKey, te.getRelatesToEntities(),
-          ApplicationColumnPrefix.RELATES_TO, applicationTable);
-    } else {
-      storeRelations(rowKey, te.getIsRelatedToEntities(),
-          EntityColumnPrefix.IS_RELATED_TO, entityTable);
-      storeRelations(rowKey, te.getRelatesToEntities(),
-          EntityColumnPrefix.RELATES_TO, entityTable);
-    }
-  }
-
   /**
    * Stores the Relations from the {@linkplain TimelineEntity} object.
    */
   private <T> void storeRelations(byte[] rowKey,
-      Map<String, Set<String>> connectedEntities,
-      ColumnPrefix<T> columnPrefix, TypedBufferedMutator<T> table)
-          throws IOException {
-    for (Map.Entry<String, Set<String>> connectedEntity : connectedEntities
-        .entrySet()) {
-      // id3?id4?id5
-      String compoundValue =
-          Separator.VALUES.joinEncoded(connectedEntity.getValue());
-      columnPrefix.store(rowKey, table,
-          stringKeyConverter.encode(connectedEntity.getKey()), null,
-          compoundValue);
+      Map<String, Set<String>> connectedEntities, ColumnPrefix<T> columnPrefix,
+      TypedBufferedMutator<T> table) throws IOException {
+    if (connectedEntities != null) {
+      for (Map.Entry<String, Set<String>> connectedEntity : connectedEntities
+          .entrySet()) {
+        // id3?id4?id5
+        String compoundValue =
+            Separator.VALUES.joinEncoded(connectedEntity.getValue());
+        columnPrefix.store(rowKey, table,
+            stringKeyConverter.encode(connectedEntity.getKey()), null,
+            compoundValue);
+      }
     }
   }
 
   /**
    * Stores information from the {@linkplain TimelineEntity} object.
    */
-  private void storeInfo(byte[] rowKey, TimelineEntity te, String flowVersion,
-      boolean isApplication) throws IOException {
-
-    if (isApplication) {
+  private void store(byte[] rowKey, TimelineEntity te,
+      String flowVersion,
+      Tables table) throws IOException {
+    switch (table) {
+    case APPLICATION_TABLE:
       ApplicationColumn.ID.store(rowKey, applicationTable, null, te.getId());
       ApplicationColumn.CREATED_TIME.store(rowKey, applicationTable, null,
           te.getCreatedTime());
       ApplicationColumn.FLOW_VERSION.store(rowKey, applicationTable, null,
           flowVersion);
-      Map<String, Object> info = te.getInfo();
-      if (info != null) {
-        for (Map.Entry<String, Object> entry : info.entrySet()) {
-          ApplicationColumnPrefix.INFO.store(rowKey, applicationTable,
-              stringKeyConverter.encode(entry.getKey()), null,
-              entry.getValue());
-        }
-      }
-    } else {
+      storeInfo(rowKey, te.getInfo(), flowVersion, ApplicationColumnPrefix.INFO,
+          applicationTable);
+      storeMetrics(rowKey, te.getMetrics(), ApplicationColumnPrefix.METRIC,
+          applicationTable);
+      storeEvents(rowKey, te.getEvents(), ApplicationColumnPrefix.EVENT,
+          applicationTable);
+      storeConfig(rowKey, te.getConfigs(), ApplicationColumnPrefix.CONFIG,
+          applicationTable);
+      storeRelations(rowKey, te.getIsRelatedToEntities(),
+          ApplicationColumnPrefix.IS_RELATED_TO, applicationTable);
+      storeRelations(rowKey, te.getRelatesToEntities(),
+          ApplicationColumnPrefix.RELATES_TO, applicationTable);
+      break;
+    case ENTITY_TABLE:
       EntityColumn.ID.store(rowKey, entityTable, null, te.getId());
       EntityColumn.TYPE.store(rowKey, entityTable, null, te.getType());
       EntityColumn.CREATED_TIME.store(rowKey, entityTable, null,
           te.getCreatedTime());
       EntityColumn.FLOW_VERSION.store(rowKey, entityTable, null, flowVersion);
-      Map<String, Object> info = te.getInfo();
-      if (info != null) {
-        for (Map.Entry<String, Object> entry : info.entrySet()) {
-          EntityColumnPrefix.INFO.store(rowKey, entityTable,
-              stringKeyConverter.encode(entry.getKey()), null,
-              entry.getValue());
-        }
+      storeInfo(rowKey, te.getInfo(), flowVersion, EntityColumnPrefix.INFO,
+          entityTable);
+      storeMetrics(rowKey, te.getMetrics(), EntityColumnPrefix.METRIC,
+          entityTable);
+      storeEvents(rowKey, te.getEvents(), EntityColumnPrefix.EVENT,
+          entityTable);
+      storeConfig(rowKey, te.getConfigs(), EntityColumnPrefix.CONFIG,
+          entityTable);
+      storeRelations(rowKey, te.getIsRelatedToEntities(),
+          EntityColumnPrefix.IS_RELATED_TO, entityTable);
+      storeRelations(rowKey, te.getRelatesToEntities(),
+          EntityColumnPrefix.RELATES_TO, entityTable);
+      break;
+    case SUBAPPLICATION_TABLE:
+      SubApplicationColumn.ID.store(rowKey, subApplicationTable, null,
+          te.getId());
+      SubApplicationColumn.TYPE.store(rowKey, subApplicationTable, null,
+          te.getType());
+      SubApplicationColumn.CREATED_TIME.store(rowKey, subApplicationTable, null,
+          te.getCreatedTime());
+      SubApplicationColumn.FLOW_VERSION.store(rowKey, subApplicationTable, null,
+          flowVersion);
+      storeInfo(rowKey, te.getInfo(), flowVersion,
+          SubApplicationColumnPrefix.INFO, subApplicationTable);
+      storeMetrics(rowKey, te.getMetrics(), SubApplicationColumnPrefix.METRIC,
+          subApplicationTable);
+      storeEvents(rowKey, te.getEvents(), SubApplicationColumnPrefix.EVENT,
+          subApplicationTable);
+      storeConfig(rowKey, te.getConfigs(), SubApplicationColumnPrefix.CONFIG,
+          subApplicationTable);
+      storeRelations(rowKey, te.getIsRelatedToEntities(),
+          SubApplicationColumnPrefix.IS_RELATED_TO, subApplicationTable);
+      storeRelations(rowKey, te.getRelatesToEntities(),
+          SubApplicationColumnPrefix.RELATES_TO, subApplicationTable);
+      break;
+    default:
+      LOG.info("Invalid table name provided.");
+      break;
+    }
+  }
+
+  /**
+   * stores the info entries from {@linkplain TimelineEntity}.
+   */
+  private <T> void storeInfo(byte[] rowKey, Map<String, Object> info,
+      String flowVersion, ColumnPrefix<T> columnPrefix,
+      TypedBufferedMutator<T> table) throws IOException {
+    if (info != null) {
+      for (Map.Entry<String, Object> entry : info.entrySet()) {
+        columnPrefix.store(rowKey, table,
+            stringKeyConverter.encode(entry.getKey()), null, entry.getValue());
       }
     }
   }
@@ -377,19 +439,13 @@ public class HBaseTimelineWriterImpl extends AbstractService implements
   /**
    * stores the config information from {@linkplain TimelineEntity}.
    */
-  private void storeConfig(byte[] rowKey, Map<String, String> config,
-      boolean isApplication) throws IOException {
-    if (config == null) {
-      return;
-    }
-    for (Map.Entry<String, String> entry : config.entrySet()) {
-      byte[] configKey = stringKeyConverter.encode(entry.getKey());
-      if (isApplication) {
-        ApplicationColumnPrefix.CONFIG.store(rowKey, applicationTable,
-            configKey, null, entry.getValue());
-      } else {
-        EntityColumnPrefix.CONFIG.store(rowKey, entityTable, configKey,
-            null, entry.getValue());
+  private <T> void storeConfig(byte[] rowKey, Map<String, String> config,
+      ColumnPrefix<T> columnPrefix, TypedBufferedMutator<T> table)
+      throws IOException {
+    if (config != null) {
+      for (Map.Entry<String, String> entry : config.entrySet()) {
+        byte[] configKey = stringKeyConverter.encode(entry.getKey());
+        columnPrefix.store(rowKey, table, configKey, null, entry.getValue());
       }
     }
   }
@@ -398,8 +454,9 @@ public class HBaseTimelineWriterImpl extends AbstractService implements
    * stores the {@linkplain TimelineMetric} information from the
    * {@linkplain TimelineEntity} object.
    */
-  private void storeMetrics(byte[] rowKey, Set<TimelineMetric> metrics,
-      boolean isApplication) throws IOException {
+  private <T> void storeMetrics(byte[] rowKey, Set<TimelineMetric> metrics,
+      ColumnPrefix<T> columnPrefix, TypedBufferedMutator<T> table)
+      throws IOException {
     if (metrics != null) {
       for (TimelineMetric metric : metrics) {
         byte[] metricColumnQualifier =
@@ -407,13 +464,8 @@ public class HBaseTimelineWriterImpl extends AbstractService implements
         Map<Long, Number> timeseries = metric.getValues();
         for (Map.Entry<Long, Number> timeseriesEntry : timeseries.entrySet()) {
           Long timestamp = timeseriesEntry.getKey();
-          if (isApplication) {
-            ApplicationColumnPrefix.METRIC.store(rowKey, applicationTable,
-                metricColumnQualifier, timestamp, timeseriesEntry.getValue());
-          } else {
-            EntityColumnPrefix.METRIC.store(rowKey, entityTable,
-                metricColumnQualifier, timestamp, timeseriesEntry.getValue());
-          }
+          columnPrefix.store(rowKey, table, metricColumnQualifier, timestamp,
+              timeseriesEntry.getValue());
         }
       }
     }
@@ -422,8 +474,9 @@ public class HBaseTimelineWriterImpl extends AbstractService implements
   /**
    * Stores the events from the {@linkplain TimelineEvent} object.
    */
-  private void storeEvents(byte[] rowKey, Set<TimelineEvent> events,
-      boolean isApplication) throws IOException {
+  private <T> void storeEvents(byte[] rowKey, Set<TimelineEvent> events,
+      ColumnPrefix<T> columnPrefix, TypedBufferedMutator<T> table)
+      throws IOException {
     if (events != null) {
       for (TimelineEvent event : events) {
         if (event != null) {
@@ -441,26 +494,16 @@ public class HBaseTimelineWriterImpl extends AbstractService implements
               byte[] columnQualifierBytes =
                   new EventColumnName(eventId, eventTimestamp, null)
                       .getColumnQualifier();
-              if (isApplication) {
-                ApplicationColumnPrefix.EVENT.store(rowKey, applicationTable,
-                    columnQualifierBytes, null, Separator.EMPTY_BYTES);
-              } else {
-                EntityColumnPrefix.EVENT.store(rowKey, entityTable,
-                    columnQualifierBytes, null, Separator.EMPTY_BYTES);
-              }
+              columnPrefix.store(rowKey, table, columnQualifierBytes, null,
+                  Separator.EMPTY_BYTES);
             } else {
               for (Map.Entry<String, Object> info : eventInfo.entrySet()) {
                 // eventId=infoKey
                 byte[] columnQualifierBytes =
                     new EventColumnName(eventId, eventTimestamp, info.getKey())
                         .getColumnQualifier();
-                if (isApplication) {
-                  ApplicationColumnPrefix.EVENT.store(rowKey, applicationTable,
-                      columnQualifierBytes, null, info.getValue());
-                } else {
-                  EntityColumnPrefix.EVENT.store(rowKey, entityTable,
-                      columnQualifierBytes, null, info.getValue());
-                }
+                columnPrefix.store(rowKey, table, columnQualifierBytes, null,
+                    info.getValue());
               } // for info: eventInfo
             }
           }
@@ -500,6 +543,7 @@ public class HBaseTimelineWriterImpl extends AbstractService implements
     applicationTable.flush();
     flowRunTable.flush();
     flowActivityTable.flush();
+    subApplicationTable.flush();
   }
 
   /**
@@ -532,11 +576,13 @@ public class HBaseTimelineWriterImpl extends AbstractService implements
       // The close API performs flushing and releases any resources held
       flowActivityTable.close();
     }
+    if (subApplicationTable != null) {
+      subApplicationTable.close();
+    }
     if (conn != null) {
       LOG.info("closing the hbase Connection");
       conn.close();
     }
     super.serviceStop();
   }
-
 }
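
Taken together, the hunks above replace every per-table if (isApplication) branch with one generic helper driven by a ColumnPrefix/table pair. A minimal, self-contained Java sketch of that pattern follows; the two interfaces are simplified stand-ins for the real timeline-service HBase types, and the byte encoding is a placeholder for the StringKeyConverter used in the actual code.

import java.io.IOException;
import java.nio.charset.StandardCharsets;
import java.util.Map;

// Simplified stand-ins for the timeline-service HBase storage types.
interface TypedBufferedMutator<T> { }

interface ColumnPrefix<T> {
  // Writes one cell under this prefix into the given table.
  void store(byte[] rowKey, TypedBufferedMutator<T> table,
      byte[] qualifier, Long timestamp, Object value) throws IOException;
}

final class GenericStoreSketch {
  // One generic helper replaces the old isApplication branches; callers pass
  // the matching prefix/table pair, e.g. (ApplicationColumnPrefix.CONFIG,
  // applicationTable) or (EntityColumnPrefix.CONFIG, entityTable).
  static <T> void storeConfig(byte[] rowKey, Map<String, String> config,
      ColumnPrefix<T> columnPrefix, TypedBufferedMutator<T> table)
      throws IOException {
    if (config != null) {
      for (Map.Entry<String, String> entry : config.entrySet()) {
        // Placeholder encoding; the real code uses stringKeyConverter.encode().
        byte[] configKey = entry.getKey().getBytes(StandardCharsets.UTF_8);
        columnPrefix.store(rowKey, table, configKey, null, entry.getValue());
      }
    }
  }
}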

http://git-wip-us.apache.org/repos/asf/hadoop/blob/9f654053/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/subapplication/SubApplicationRowKeyPrefix.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/subapplication/SubApplicationRowKeyPrefix.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/subapplication/SubApplicationRowKeyPrefix.java
index e42c6cd..0c04959 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/subapplication/SubApplicationRowKeyPrefix.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/subapplication/SubApplicationRowKeyPrefix.java
@@ -56,26 +56,6 @@ public class SubApplicationRowKeyPrefix extends SubApplicationRowKey
         userId);
   }
 
-  /**
-   * Creates a prefix which generates the following rowKeyPrefixes for the sub
-   * application table:
-   * {@code subAppUserId!clusterId!entityType!entityPrefix!entityId!userId}.
-   *
-   * subAppUserId is usually the doAsUser.
-   * userId is the yarn user that the AM runs as.
-   *
-   * @param clusterId
-   *          identifying the cluster
-   * @param subAppUserId
-   *          identifying the sub app user
-   * @param userId
-   *          identifying the user who runs the AM
-   */
-  public SubApplicationRowKeyPrefix(String clusterId, String subAppUserId,
-      String userId) {
-    this(subAppUserId, clusterId, null, null, null, userId);
-  }
-
   /*
    * (non-Javadoc)
    *

http://git-wip-us.apache.org/repos/asf/hadoop/blob/9f654053/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/collector/TimelineCollector.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/collector/TimelineCollector.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/collector/TimelineCollector.java
index 37387f1..306806f 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/collector/TimelineCollector.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/collector/TimelineCollector.java
@@ -139,7 +139,7 @@ public abstract class TimelineCollector extends CompositeService {
     // flush the writer buffer concurrently and swallow any exception
      // caused by the timeline entities that are being put here.
     synchronized (writer) {
-      response = writeTimelineEntities(entities);
+      response = writeTimelineEntities(entities, callerUgi);
       flushBufferedTimelineEntities();
     }
 
@@ -147,15 +147,14 @@ public abstract class TimelineCollector extends CompositeService {
   }
 
   private TimelineWriteResponse writeTimelineEntities(
-      TimelineEntities entities) throws IOException {
+      TimelineEntities entities, UserGroupInformation callerUgi)
+      throws IOException {
     // Update application metrics for aggregation
     updateAggregateStatus(entities, aggregationGroups,
         getEntityTypesSkipAggregation());
 
     final TimelineCollectorContext context = getTimelineEntityContext();
-    return writer.write(context.getClusterId(), context.getUserId(),
-        context.getFlowName(), context.getFlowVersion(),
-        context.getFlowRunId(), context.getAppId(), entities);
+    return writer.write(context, entities, callerUgi);
   }
 
   /**
@@ -187,7 +186,7 @@ public abstract class TimelineCollector extends CompositeService {
           callerUgi + ")");
     }
 
-    writeTimelineEntities(entities);
+    writeTimelineEntities(entities, callerUgi);
   }
 
   /**

http://git-wip-us.apache.org/repos/asf/hadoop/blob/9f654053/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/FileSystemTimelineWriterImpl.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/FileSystemTimelineWriterImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/FileSystemTimelineWriterImpl.java
index 1f527f2..ee41970 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/FileSystemTimelineWriterImpl.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/FileSystemTimelineWriterImpl.java
@@ -28,12 +28,14 @@ import java.io.PrintWriter;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.service.AbstractService;
 import org.apache.hadoop.yarn.api.records.timelineservice.TimelineEntities;
 import org.apache.hadoop.yarn.api.records.timelineservice.TimelineEntity;
 import org.apache.hadoop.yarn.api.records.timelineservice.TimelineWriteResponse;
 import org.apache.hadoop.yarn.api.records.timelineservice.TimelineWriteResponse.TimelineWriteError;
 import org.apache.hadoop.yarn.conf.YarnConfiguration;
+import org.apache.hadoop.yarn.server.timelineservice.collector.TimelineCollectorContext;
 import org.apache.hadoop.yarn.util.timeline.TimelineUtils;
 
 import com.google.common.annotations.VisibleForTesting;
@@ -68,10 +70,17 @@ public class FileSystemTimelineWriterImpl extends AbstractService
   }
 
   @Override
-  public TimelineWriteResponse write(String clusterId, String userId,
-      String flowName, String flowVersion, long flowRunId, String appId,
-      TimelineEntities entities) throws IOException {
+  public TimelineWriteResponse write(TimelineCollectorContext context,
+      TimelineEntities entities, UserGroupInformation callerUgi)
+      throws IOException {
     TimelineWriteResponse response = new TimelineWriteResponse();
+    String clusterId = context.getClusterId();
+    String userId = context.getUserId();
+    String flowName = context.getFlowName();
+    String flowVersion = context.getFlowVersion();
+    long flowRunId = context.getFlowRunId();
+    String appId = context.getAppId();
+
     for (TimelineEntity entity : entities.getEntities()) {
       write(clusterId, userId, flowName, flowVersion, flowRunId, appId, entity,
           response);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/9f654053/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/TimelineWriter.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/TimelineWriter.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/TimelineWriter.java
index 663a18a..12bc1cb 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/TimelineWriter.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/TimelineWriter.java
@@ -21,10 +21,12 @@ import java.io.IOException;
 
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
+import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.service.Service;
 import org.apache.hadoop.yarn.api.records.timelineservice.TimelineEntities;
 import org.apache.hadoop.yarn.api.records.timelineservice.TimelineEntity;
 import org.apache.hadoop.yarn.api.records.timelineservice.TimelineWriteResponse;
+import org.apache.hadoop.yarn.server.timelineservice.collector.TimelineCollectorContext;
 
 /**
  * This interface is for storing application timeline information.
@@ -34,25 +36,19 @@ import org.apache.hadoop.yarn.api.records.timelineservice.TimelineWriteResponse;
 public interface TimelineWriter extends Service {
 
   /**
-   * Stores the entire information in {@link TimelineEntities} to the
-   * timeline store. Any errors occurring for individual write request objects
-   * will be reported in the response.
+   * Stores the entire information in {@link TimelineEntities} to the timeline
+   * store. Any errors occurring for individual write request objects will be
+   * reported in the response.
    *
-   * @param clusterId context cluster ID
-   * @param userId context user ID
-   * @param flowName context flow name
-   * @param flowVersion context flow version
-   * @param flowRunId run id for the flow.
-   * @param appId context app ID.
-   * @param data
-   *          a {@link TimelineEntities} object.
+   * @param context a {@link TimelineCollectorContext}
+   * @param data a {@link TimelineEntities} object.
+   * @param callerUgi {@link UserGroupInformation}.
    * @return a {@link TimelineWriteResponse} object.
-   * @throws IOException if there is any exception encountered while storing
-   *     or writing entities to the backend storage.
+   * @throws IOException if there is any exception encountered while storing or
+   *           writing entities to the back end storage.
    */
-  TimelineWriteResponse write(String clusterId, String userId,
-      String flowName, String flowVersion, long flowRunId, String appId,
-      TimelineEntities data) throws IOException;
+  TimelineWriteResponse write(TimelineCollectorContext context,
+      TimelineEntities data, UserGroupInformation callerUgi) throws IOException;
 
   /**
    * Aggregates the entity information to the timeline store based on which
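
A caller-side view of the new contract: the six positional parameters collapse into one TimelineCollectorContext, and the caller's UGI rides along so secure writers can act on the caller's identity. The sketch below assumes the timeline-service jars on the classpath; the wrapper class and method names are illustrative, while the context constructor arguments mirror the test change further down.

import java.io.IOException;

import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.yarn.api.records.timelineservice.TimelineEntities;
import org.apache.hadoop.yarn.api.records.timelineservice.TimelineWriteResponse;
import org.apache.hadoop.yarn.server.timelineservice.collector.TimelineCollectorContext;
import org.apache.hadoop.yarn.server.timelineservice.storage.TimelineWriter;

// Illustrative wrapper; only the write(...) call reflects the real API.
public final class TimelineWriterCallSketch {
  static TimelineWriteResponse putEntities(TimelineWriter writer,
      TimelineEntities te) throws IOException {
    TimelineCollectorContext context = new TimelineCollectorContext(
        "cluster_id", "user_id", "flow_name", "flow_version", 12345678L,
        "app_id");
    return writer.write(context, te,
        UserGroupInformation.createRemoteUser("user_id"));
  }
}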

http://git-wip-us.apache.org/repos/asf/hadoop/blob/9f654053/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/test/java/org/apache/hadoop/yarn/server/timelineservice/collector/TestTimelineCollector.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/test/java/org/apache/hadoop/yarn/server/timelineservice/collector/TestTimelineCollector.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/test/java/org/apache/hadoop/yarn/server/timelineservice/collector/TestTimelineCollector.java
index 0f17553..ec45428 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/test/java/org/apache/hadoop/yarn/server/timelineservice/collector/TestTimelineCollector.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/test/java/org/apache/hadoop/yarn/server/timelineservice/collector/TestTimelineCollector.java
@@ -41,8 +41,6 @@ import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertTrue;
 import static org.junit.Assert.fail;
 import static org.mockito.Matchers.any;
-import static org.mockito.Matchers.anyLong;
-import static org.mockito.Matchers.anyString;
 import static org.mockito.Mockito.mock;
 import static org.mockito.Mockito.never;
 import static org.mockito.Mockito.times;
@@ -156,9 +154,8 @@ public class TestTimelineCollector {
     collector.putEntities(
         entities, UserGroupInformation.createRemoteUser("test-user"));
 
-    verify(writer, times(1)).write(
-        anyString(), anyString(), anyString(), anyString(), anyLong(),
-        anyString(), any(TimelineEntities.class));
+    verify(writer, times(1)).write(any(TimelineCollectorContext.class),
+        any(TimelineEntities.class), any(UserGroupInformation.class));
     verify(writer, times(1)).flush();
   }
 
@@ -175,9 +172,8 @@ public class TestTimelineCollector {
     collector.putEntitiesAsync(
         entities, UserGroupInformation.createRemoteUser("test-user"));
 
-    verify(writer, times(1)).write(
-        anyString(), anyString(), anyString(), anyString(), anyLong(),
-        anyString(), any(TimelineEntities.class));
+    verify(writer, times(1)).write(any(TimelineCollectorContext.class),
+        any(TimelineEntities.class), any(UserGroupInformation.class));
     verify(writer, never()).flush();
   }
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/9f654053/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/test/java/org/apache/hadoop/yarn/server/timelineservice/storage/TestFileSystemTimelineWriterImpl.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/test/java/org/apache/hadoop/yarn/server/timelineservice/storage/TestFileSystemTimelineWriterImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/test/java/org/apache/hadoop/yarn/server/timelineservice/storage/TestFileSystemTimelineWriterImpl.java
index 4f12c57..bb9f54f 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/test/java/org/apache/hadoop/yarn/server/timelineservice/storage/TestFileSystemTimelineWriterImpl.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/test/java/org/apache/hadoop/yarn/server/timelineservice/storage/TestFileSystemTimelineWriterImpl.java
@@ -30,11 +30,13 @@ import java.util.List;
 import java.util.Map;
 
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.yarn.api.records.timelineservice.TimelineEntities;
 import org.apache.hadoop.yarn.api.records.timelineservice.TimelineEntity;
 import org.apache.hadoop.yarn.api.records.timelineservice.TimelineMetric;
 import org.apache.hadoop.yarn.api.records.timelineservice.TimelineMetricOperation;
 import org.apache.hadoop.yarn.conf.YarnConfiguration;
+import org.apache.hadoop.yarn.server.timelineservice.collector.TimelineCollectorContext;
 import org.apache.hadoop.yarn.util.timeline.TimelineUtils;
 import org.junit.Rule;
 import org.junit.Test;
@@ -89,8 +91,10 @@ public class TestFileSystemTimelineWriterImpl {
           outputRoot);
       fsi.init(conf);
       fsi.start();
-      fsi.write("cluster_id", "user_id", "flow_name", "flow_version", 12345678L,
-          "app_id", te);
+      fsi.write(
+          new TimelineCollectorContext("cluster_id", "user_id", "flow_name",
+              "flow_version", 12345678L, "app_id"),
+          te, UserGroupInformation.createRemoteUser("user_id"));
 
       String fileName = fsi.getOutputRoot() + File.separator + "entities" +
           File.separator + "cluster_id" + File.separator + "user_id" +




[49/50] [abbrv] hadoop git commit: YARN-7116. CapacityScheduler Web UI: Queue's AM usage is always shown as per-user's AM usage. Contributed by Wangda Tan.

Posted by st...@apache.org.
YARN-7116. CapacityScheduler Web UI: Queue's AM usage is always shown as per-user's AM usage. Contributed by Wangda Tan.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/f9e0cc8c
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/f9e0cc8c
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/f9e0cc8c

Branch: refs/heads/HADOOP-13345
Commit: f9e0cc8cdca387bf2811c07beccae4af07362952
Parents: ac12e15
Author: Sunil G <su...@apache.org>
Authored: Thu Aug 31 18:42:01 2017 +0530
Committer: Sunil G <su...@apache.org>
Committed: Thu Aug 31 18:42:01 2017 +0530

----------------------------------------------------------------------
 .../server/resourcemanager/webapp/CapacitySchedulerPage.java  | 7 ++++---
 1 file changed, 4 insertions(+), 3 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/f9e0cc8c/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/CapacitySchedulerPage.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/CapacitySchedulerPage.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/CapacitySchedulerPage.java
index f3ab5b0..93e1c97 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/CapacitySchedulerPage.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/CapacitySchedulerPage.java
@@ -228,9 +228,10 @@ class CapacitySchedulerPage extends RmView {
           resourcesUsed = userInfo.getResourceUsageInfo()
               .getPartitionResourceUsageInfo(nodeLabel).getUsed();
         }
-        ResourceInfo amUsed = (resourceUsages.getAmUsed() == null)
-            ? new ResourceInfo(Resources.none())
-            : resourceUsages.getAmUsed();
+        ResourceInfo amUsed = userInfo.getAMResourcesUsed();
+        if (amUsed == null) {
+          amUsed = new ResourceInfo(Resources.none());
+        }
         String highlightIfAsking =
             userInfo.getIsActive() ? ACTIVE_USER : null;
         tbody.tr().$style(highlightIfAsking).td(userInfo.getUsername())




[10/50] [abbrv] hadoop git commit: YARN-6133. [ATSv2 Security] Renew delegation token for app automatically if an app collector is active. Contributed by Varun Saxena.

Posted by st...@apache.org.
YARN-6133. [ATSv2 Security] Renew delegation token for app automatically if an app collector is active. Contributed by Varun Saxena.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/354be99d
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/354be99d
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/354be99d

Branch: refs/heads/HADOOP-13345
Commit: 354be99dbf3b6effb45032b574210fd7161d83d4
Parents: 7594d1d
Author: Rohith Sharma K S <ro...@apache.org>
Authored: Thu Aug 10 11:12:57 2017 +0530
Committer: Varun Saxena <va...@apache.org>
Committed: Wed Aug 30 11:29:53 2017 +0530

----------------------------------------------------------------------
 .../security/TestTimelineAuthFilterForV2.java   | 27 +++++-
 .../collector/AppLevelTimelineCollector.java    | 17 +++-
 .../collector/NodeTimelineCollectorManager.java | 88 +++++++++++++++++++-
 .../collector/TimelineCollector.java            |  7 ++
 .../collector/TimelineCollectorManager.java     |  8 +-
 ...neV2DelegationTokenSecretManagerService.java |  6 ++
 6 files changed, 139 insertions(+), 14 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/354be99d/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-tests/src/test/java/org/apache/hadoop/yarn/server/timelineservice/security/TestTimelineAuthFilterForV2.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-tests/src/test/java/org/apache/hadoop/yarn/server/timelineservice/security/TestTimelineAuthFilterForV2.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-tests/src/test/java/org/apache/hadoop/yarn/server/timelineservice/security/TestTimelineAuthFilterForV2.java
index 0ddf287..f1d5185 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-tests/src/test/java/org/apache/hadoop/yarn/server/timelineservice/security/TestTimelineAuthFilterForV2.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-tests/src/test/java/org/apache/hadoop/yarn/server/timelineservice/security/TestTimelineAuthFilterForV2.java
@@ -24,6 +24,7 @@ import static org.junit.Assert.assertTrue;
 import static org.junit.Assert.fail;
 import static org.mockito.Matchers.any;
 import static org.mockito.Matchers.eq;
+import static org.mockito.Mockito.atLeastOnce;
 import static org.mockito.Mockito.mock;
 import static org.mockito.Mockito.spy;
 import static org.mockito.Mockito.verify;
@@ -183,6 +184,13 @@ public class TestTimelineAuthFilterForV2 {
       conf.set(YarnConfiguration.YARN_HTTP_POLICY_KEY,
           HttpConfig.Policy.HTTP_ONLY.name());
     }
+    if (!withKerberosLogin) {
+      // For timeline delegation token based access, set delegation token renew
+      // interval to 100 ms. to test if timeline delegation token for the app is
+      // renewed automatically if app is still alive.
+      conf.setLong(
+          YarnConfiguration.TIMELINE_DELEGATION_TOKEN_RENEW_INTERVAL, 100);
+    }
     UserGroupInformation.setConfiguration(conf);
     collectorManager = new DummyNodeTimelineCollectorManager();
     auxService = PerNodeTimelineCollectorsAuxService.launchServer(
@@ -282,12 +290,12 @@ public class TestTimelineAuthFilterForV2 {
   }
 
   private void publishAndVerifyEntity(ApplicationId appId, File entityTypeDir,
-      String entityType) throws Exception {
+      String entityType, int numEntities) throws Exception {
     TimelineV2Client client = createTimelineClientForUGI(appId);
     try {
     // Sync call. Results available immediately.
       client.putEntities(createEntity("entity1", entityType));
-      assertEquals(1, entityTypeDir.listFiles().length);
+      assertEquals(numEntities, entityTypeDir.listFiles().length);
       verifyEntity(entityTypeDir, "entity1", entityType);
       // Async call.
       client.putEntitiesAsync(createEntity("entity2", entityType));
@@ -312,12 +320,22 @@ public class TestTimelineAuthFilterForV2 {
         KerberosTestUtils.doAs(HTTP_USER + "/localhost", new Callable<Void>() {
           @Override
           public Void call() throws Exception {
-            publishAndVerifyEntity(appId, entityTypeDir, entityType);
+            publishAndVerifyEntity(appId, entityTypeDir, entityType, 1);
             return null;
           }
         });
       } else {
-        publishAndVerifyEntity(appId, entityTypeDir, entityType);
+        publishAndVerifyEntity(appId, entityTypeDir, entityType, 1);
+        // Verify if token is renewed automatically and entities can still be
+        // published.
+        Thread.sleep(1000);
+        publishAndVerifyEntity(appId, entityTypeDir, entityType, 2);
+        AppLevelTimelineCollector collector =
+            (AppLevelTimelineCollector) collectorManager.get(appId);
+        assertNotNull(collector);
+        verify(collectorManager.getTokenManagerService(), atLeastOnce()).
+            renewToken(eq(collector.getDelegationTokenForApp()),
+                any(String.class));
       }
       // Wait for async entity to be published.
       for (int i = 0; i < 50; i++) {
@@ -330,6 +348,7 @@ public class TestTimelineAuthFilterForV2 {
       verifyEntity(entityTypeDir, "entity2", entityType);
       AppLevelTimelineCollector collector =
           (AppLevelTimelineCollector)collectorManager.get(appId);
+      assertNotNull(collector);
       auxService.removeApplication(appId);
       verify(collectorManager.getTokenManagerService()).cancelToken(
           eq(collector.getDelegationTokenForApp()), any(String.class));
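
Note the switch to atLeastOnce() above: with the renew interval dropped to 100 ms, the exact number of automatic renewals during the test is timing-dependent, so the assertion only requires that at least one happened. A tiny standalone Mockito sketch of that verification style; TokenManager and its method are hypothetical.

import static org.mockito.Matchers.any;
import static org.mockito.Matchers.eq;
import static org.mockito.Mockito.atLeastOnce;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.verify;

public class AtLeastOnceSketch {
  // Hypothetical interface, standing in for the token manager service.
  interface TokenManager {
    long renewToken(String token, String renewer);
  }

  public static void main(String[] args) {
    TokenManager mgr = mock(TokenManager.class);
    mgr.renewToken("tok", "renewer"); // first automatic renewal
    mgr.renewToken("tok", "renewer"); // possibly more, timing-dependent
    // Passes for one or more invocations, unlike times(1).
    verify(mgr, atLeastOnce()).renewToken(eq("tok"), any(String.class));
  }
}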

http://git-wip-us.apache.org/repos/asf/hadoop/blob/354be99d/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/collector/AppLevelTimelineCollector.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/collector/AppLevelTimelineCollector.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/collector/AppLevelTimelineCollector.java
index 08ac894..8b8534e 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/collector/AppLevelTimelineCollector.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/collector/AppLevelTimelineCollector.java
@@ -18,6 +18,8 @@
 
 package org.apache.hadoop.yarn.server.timelineservice.collector;
 
+import java.util.concurrent.Future;
+
 import org.apache.hadoop.classification.InterfaceAudience.Private;
 import org.apache.hadoop.classification.InterfaceStability.Unstable;
 import org.apache.hadoop.conf.Configuration;
@@ -49,6 +51,7 @@ public class AppLevelTimelineCollector extends TimelineCollector {
   private final TimelineCollectorContext context;
   private UserGroupInformation currentUser;
   private Token<TimelineDelegationTokenIdentifier> delegationTokenForApp;
+  private Future<?> renewalFuture;
 
   public AppLevelTimelineCollector(ApplicationId appId) {
     this(appId, null);
@@ -70,9 +73,15 @@ public class AppLevelTimelineCollector extends TimelineCollector {
     return appUser;
   }
 
-  void setDelegationTokenForApp(
-      Token<TimelineDelegationTokenIdentifier> token) {
+  void setDelegationTokenAndFutureForApp(
+      Token<TimelineDelegationTokenIdentifier> token,
+      Future<?> appRenewalFuture) {
     this.delegationTokenForApp = token;
+    this.renewalFuture = appRenewalFuture;
+  }
+
+  void setRenewalFutureForApp(Future<?> appRenewalFuture) {
+    this.renewalFuture = appRenewalFuture;
   }
 
   @VisibleForTesting
@@ -100,6 +109,9 @@ public class AppLevelTimelineCollector extends TimelineCollector {
 
   @Override
   protected void serviceStop() throws Exception {
+    if (renewalFuture != null && !renewalFuture.isDone()) {
+      renewalFuture.cancel(true);
+    }
     super.serviceStop();
   }
 
@@ -107,5 +119,4 @@ public class AppLevelTimelineCollector extends TimelineCollector {
   public TimelineCollectorContext getTimelineEntityContext() {
     return context;
   }
-
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/354be99d/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/collector/NodeTimelineCollectorManager.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/collector/NodeTimelineCollectorManager.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/collector/NodeTimelineCollectorManager.java
index 02362b2..1b1b373 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/collector/NodeTimelineCollectorManager.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/collector/NodeTimelineCollectorManager.java
@@ -23,6 +23,9 @@ import java.net.InetSocketAddress;
 import java.net.URI;
 import java.util.LinkedHashSet;
 import java.util.Set;
+import java.util.concurrent.Future;
+import java.util.concurrent.ScheduledThreadPoolExecutor;
+import java.util.concurrent.TimeUnit;
 
 import org.apache.hadoop.classification.InterfaceAudience.Private;
 import org.apache.hadoop.classification.InterfaceStability.Unstable;
@@ -32,6 +35,7 @@ import org.apache.hadoop.io.Text;
 import org.apache.hadoop.net.NetUtils;
 import org.apache.hadoop.security.SecurityUtil;
 import org.apache.hadoop.security.token.Token;
+import org.apache.hadoop.util.Time;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.yarn.api.records.ApplicationId;
 import org.apache.hadoop.yarn.conf.YarnConfiguration;
@@ -50,6 +54,7 @@ import org.apache.hadoop.yarn.webapp.YarnJacksonJaxbJsonProvider;
 import org.apache.hadoop.yarn.webapp.util.WebAppUtils;
 
 import com.google.common.annotations.VisibleForTesting;
+import com.google.common.util.concurrent.ThreadFactoryBuilder;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
@@ -76,6 +81,12 @@ public class NodeTimelineCollectorManager extends TimelineCollectorManager {
 
   private UserGroupInformation loginUGI;
 
+  private ScheduledThreadPoolExecutor tokenRenewalExecutor;
+
+  private long tokenRenewInterval;
+
+  private static final long TIME_BEFORE_RENEW_DATE = 10 * 1000; // 10 seconds.
+
   static final String COLLECTOR_MANAGER_ATTR_KEY = "collector.manager";
 
   @VisibleForTesting
@@ -93,6 +104,9 @@ public class NodeTimelineCollectorManager extends TimelineCollectorManager {
     tokenMgrService = createTokenManagerService();
     addService(tokenMgrService);
     this.loginUGI = UserGroupInformation.getCurrentUser();
+    tokenRenewInterval = conf.getLong(
+        YarnConfiguration.TIMELINE_DELEGATION_TOKEN_RENEW_INTERVAL,
+        YarnConfiguration.DEFAULT_TIMELINE_DELEGATION_TOKEN_RENEW_INTERVAL);
     super.serviceInit(conf);
   }
 
@@ -109,6 +123,9 @@ public class NodeTimelineCollectorManager extends TimelineCollectorManager {
       }
       this.loginUGI = UserGroupInformation.getLoginUser();
     }
+    tokenRenewalExecutor = new ScheduledThreadPoolExecutor(
+        1, new ThreadFactoryBuilder().setNameFormat(
+            "App Collector Token Renewal thread").build());
     super.serviceStart();
     startWebApp();
   }
@@ -139,6 +156,9 @@ public class NodeTimelineCollectorManager extends TimelineCollectorManager {
     if (timelineRestServer != null) {
       timelineRestServer.stop();
     }
+    if (tokenRenewalExecutor != null) {
+      tokenRenewalExecutor.shutdownNow();
+    }
     super.serviceStop();
   }
 
@@ -153,6 +173,21 @@ public class NodeTimelineCollectorManager extends TimelineCollectorManager {
   }
 
   @VisibleForTesting
+  public long renewTokenForAppCollector(
+      AppLevelTimelineCollector appCollector) throws IOException {
+    if (appCollector.getDelegationTokenForApp() != null) {
+      TimelineDelegationTokenIdentifier identifier =
+          appCollector.getDelegationTokenForApp().decodeIdentifier();
+      return tokenMgrService.renewToken(appCollector.getDelegationTokenForApp(),
+          identifier.getRenewer().toString());
+    } else {
+      LOG.info("Delegation token not available for renewal for app " +
+          appCollector.getTimelineEntityContext().getAppId());
+      return -1;
+    }
+  }
+
+  @VisibleForTesting
   public void cancelTokenForAppCollector(
       AppLevelTimelineCollector appCollector) throws IOException {
     if (appCollector.getDelegationTokenForApp() != null) {
@@ -174,13 +209,19 @@ public class NodeTimelineCollectorManager extends TimelineCollectorManager {
             (AppLevelTimelineCollector)collector;
         Token<TimelineDelegationTokenIdentifier> timelineToken =
             generateTokenForAppCollector(appCollector.getAppUser());
-        appCollector.setDelegationTokenForApp(timelineToken);
+        long renewalDelay = (tokenRenewInterval > TIME_BEFORE_RENEW_DATE) ?
+            tokenRenewInterval - TIME_BEFORE_RENEW_DATE : tokenRenewInterval;
+        Future<?> renewalFuture =
+            tokenRenewalExecutor.schedule(new CollectorTokenRenewer(appId),
+                renewalDelay, TimeUnit.MILLISECONDS);
+        appCollector.setDelegationTokenAndFutureForApp(timelineToken,
+            renewalFuture);
         token = org.apache.hadoop.yarn.api.records.Token.newInstance(
             timelineToken.getIdentifier(), timelineToken.getKind().toString(),
             timelineToken.getPassword(), timelineToken.getService().toString());
       }
       // Report to NM if a new collector is added.
-      reportNewCollectorToNM(appId, token);
+      reportNewCollectorInfoToNM(appId, token);
     } catch (YarnException | IOException e) {
       // throw exception here as it cannot be used if failed communicate with NM
       LOG.error("Failed to communicate with NM Collector Service for " + appId);
@@ -192,7 +233,7 @@ public class NodeTimelineCollectorManager extends TimelineCollectorManager {
   protected void postRemove(ApplicationId appId, TimelineCollector collector) {
     if (collector instanceof AppLevelTimelineCollector) {
       try {
-        cancelTokenForAppCollector((AppLevelTimelineCollector)collector);
+        cancelTokenForAppCollector((AppLevelTimelineCollector) collector);
       } catch (IOException e) {
         LOG.warn("Failed to cancel token for app collector with appId " +
             appId, e);
@@ -244,7 +285,7 @@ public class NodeTimelineCollectorManager extends TimelineCollectorManager {
         timelineRestServerBindAddress);
   }
 
-  private void reportNewCollectorToNM(ApplicationId appId,
+  private void reportNewCollectorInfoToNM(ApplicationId appId,
       org.apache.hadoop.yarn.api.records.Token token)
       throws YarnException, IOException {
     ReportNewCollectorInfoRequest request =
@@ -321,4 +362,43 @@ public class NodeTimelineCollectorManager extends TimelineCollectorManager {
   public String getRestServerBindAddress() {
     return timelineRestServerBindAddress;
   }
+
+  private final class CollectorTokenRenewer implements Runnable {
+    private ApplicationId appId;
+    private CollectorTokenRenewer(ApplicationId applicationId) {
+      appId = applicationId;
+    }
+
+    @Override
+    public void run() {
+      TimelineCollector collector = get(appId);
+      if (collector == null) {
+        LOG.info("Cannot find active collector while renewing token for " +
+            appId);
+        return;
+      }
+      AppLevelTimelineCollector appCollector =
+          (AppLevelTimelineCollector) collector;
+
+      synchronized (collector) {
+        if (!collector.isStopped()) {
+          try {
+            long newExpirationTime = renewTokenForAppCollector(appCollector);
+            if (newExpirationTime > 0) {
+              long renewInterval = newExpirationTime - Time.now();
+              long renewalDelay = (renewInterval > TIME_BEFORE_RENEW_DATE) ?
+                  renewInterval - TIME_BEFORE_RENEW_DATE : renewInterval;
+              LOG.info("Renewed token for " + appId + " with new expiration " +
+                  "timestamp = " + newExpirationTime);
+              Future<?> renewalFuture = tokenRenewalExecutor.schedule(
+                  this, renewalDelay, TimeUnit.MILLISECONDS);
+              appCollector.setRenewalFutureForApp(renewalFuture);
+            }
+          } catch (Exception e) {
+            LOG.warn("Unable to renew token for " + appId);
+          }
+        }
+      }
+    }
+  }
 }
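
The renewal machinery above boils down to a self-rescheduling task: each run renews the token, then schedules itself again a safety margin before the new expiration, and the pending Future is kept so the collector can cancel it on stop. A compact standalone sketch of that pattern; renewOnce() is a hypothetical stand-in for renewTokenForAppCollector() and simply fakes a new expiration time.

import java.util.concurrent.Future;
import java.util.concurrent.ScheduledThreadPoolExecutor;
import java.util.concurrent.TimeUnit;

public final class TokenRenewalSketch {
  private static final long TIME_BEFORE_RENEW = 10_000L; // renew 10s early
  private final ScheduledThreadPoolExecutor executor =
      new ScheduledThreadPoolExecutor(1);
  private volatile Future<?> renewalFuture;

  // Hypothetical renewal call; pretends the token now expires in one minute.
  long renewOnce() {
    return System.currentTimeMillis() + 60_000L;
  }

  void schedule(long delayMs) {
    renewalFuture = executor.schedule(() -> {
      long newExpiration = renewOnce();
      if (newExpiration > 0) {
        long interval = newExpiration - System.currentTimeMillis();
        long delay = (interval > TIME_BEFORE_RENEW)
            ? interval - TIME_BEFORE_RENEW : interval;
        schedule(delay); // chain the next renewal
      }
    }, delayMs, TimeUnit.MILLISECONDS);
  }

  void stop() {
    // Mirrors AppLevelTimelineCollector#serviceStop: cancel pending renewal.
    if (renewalFuture != null && !renewalFuture.isDone()) {
      renewalFuture.cancel(true);
    }
    executor.shutdownNow();
  }
}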

http://git-wip-us.apache.org/repos/asf/hadoop/blob/354be99d/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/collector/TimelineCollector.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/collector/TimelineCollector.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/collector/TimelineCollector.java
index 306806f..8202431 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/collector/TimelineCollector.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/collector/TimelineCollector.java
@@ -63,6 +63,8 @@ public abstract class TimelineCollector extends CompositeService {
 
   private volatile boolean readyToAggregate = false;
 
+  private volatile boolean isStopped = false;
+
   public TimelineCollector(String name) {
     super(name);
   }
@@ -79,9 +81,14 @@ public abstract class TimelineCollector extends CompositeService {
 
   @Override
   protected void serviceStop() throws Exception {
+    isStopped = true;
     super.serviceStop();
   }
 
+  boolean isStopped() {
+    return isStopped;
+  }
+
   protected void setWriter(TimelineWriter w) {
     this.writer = w;
   }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/354be99d/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/collector/TimelineCollectorManager.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/collector/TimelineCollectorManager.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/collector/TimelineCollectorManager.java
index 972bc01..7909a2e 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/collector/TimelineCollectorManager.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/collector/TimelineCollectorManager.java
@@ -184,9 +184,11 @@ public class TimelineCollectorManager extends CompositeService {
     if (collector == null) {
       LOG.error("the collector for " + appId + " does not exist!");
     } else {
-      postRemove(appId, collector);
-      // stop the service to do clean up
-      collector.stop();
+      synchronized (collector) {
+        postRemove(appId, collector);
+        // stop the service to do clean up
+        collector.stop();
+      }
       LOG.info("The collector service for " + appId + " was removed");
     }
     return collector != null;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/354be99d/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/security/TimelineV2DelegationTokenSecretManagerService.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/security/TimelineV2DelegationTokenSecretManagerService.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/security/TimelineV2DelegationTokenSecretManagerService.java
index de7db58..39f0fc6 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/security/TimelineV2DelegationTokenSecretManagerService.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/security/TimelineV2DelegationTokenSecretManagerService.java
@@ -34,6 +34,7 @@ import org.apache.hadoop.yarn.server.timeline.security.TimelineDelgationTokenSec
  */
 public class TimelineV2DelegationTokenSecretManagerService extends
     TimelineDelgationTokenSecretManagerService {
+
   public TimelineV2DelegationTokenSecretManagerService() {
     super(TimelineV2DelegationTokenSecretManagerService.class.getName());
   }
@@ -54,6 +55,11 @@ public class TimelineV2DelegationTokenSecretManagerService extends
         getTimelineDelegationTokenSecretManager()).generateToken(ugi, renewer);
   }
 
+  public long renewToken(Token<TimelineDelegationTokenIdentifier> token,
+      String renewer) throws IOException {
+    return getTimelineDelegationTokenSecretManager().renewToken(token, renewer);
+  }
+
   public void cancelToken(Token<TimelineDelegationTokenIdentifier> token,
       String canceller) throws IOException {
     getTimelineDelegationTokenSecretManager().cancelToken(token, canceller);




[31/50] [abbrv] hadoop git commit: YARN-6905. Addendum to fix TestTimelineReaderWebServicesHBaseStorage due to missing FastNumberFormat

Posted by st...@apache.org.
YARN-6905. Addendum to fix TestTimelineReaderWebServicesHBaseStorage due to missing FastNumberFormat


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/512068a0
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/512068a0
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/512068a0

Branch: refs/heads/HADOOP-13345
Commit: 512068a01f8a74843d2e17c0d1c7c71cf3224a93
Parents: 16ba4f5
Author: Varun Saxena <va...@apache.org>
Authored: Tue Aug 22 18:48:18 2017 +0530
Committer: Varun Saxena <va...@apache.org>
Committed: Wed Aug 30 11:29:54 2017 +0530

----------------------------------------------------------------------
 .../reader/TestTimelineReaderWebServicesHBaseStorage.java         | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/512068a0/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase-tests/src/test/java/org/apache/hadoop/yarn/server/timelineservice/reader/TestTimelineReaderWebServicesHBaseStorage.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase-tests/src/test/java/org/apache/hadoop/yarn/server/timelineservice/reader/TestTimelineReaderWebServicesHBaseStorage.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase-tests/src/test/java/org/apache/hadoop/yarn/server/timelineservice/reader/TestTimelineReaderWebServicesHBaseStorage.java
index 5acf1f4..b2029ca 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase-tests/src/test/java/org/apache/hadoop/yarn/server/timelineservice/reader/TestTimelineReaderWebServicesHBaseStorage.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase-tests/src/test/java/org/apache/hadoop/yarn/server/timelineservice/reader/TestTimelineReaderWebServicesHBaseStorage.java
@@ -370,7 +370,8 @@ public class TestTimelineReaderWebServicesHBaseStorage
         ApplicationId appId =
             BuilderUtils.newApplicationId(timestamp, count++);
         ApplicationEntity appEntity = new ApplicationEntity();
-        appEntity.setId(appId.toString());
+        appEntity.setId(
+            HBaseTimelineStorageUtils.convertApplicationIdToString(appId));
         appEntity.setCreatedTime(timestamp);
 
         TimelineEvent created = new TimelineEvent();
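
Context for the addendum: the entity ID written by the test must match the string form the HBase storage layer produces, which zero-pads the application sequence number (newer branches get this from FastNumberFormat inside ApplicationId.toString()). A hedged sketch of that conversion, assuming the standard application_<clusterTimestamp>_<zero-padded id> layout:

public final class AppIdFormatSketch {
  // Assumption: mirrors HBaseTimelineStorageUtils.convertApplicationIdToString,
  // padding the sequence number to at least four digits.
  static String convertApplicationIdToString(long clusterTimestamp, int id) {
    return String.format("application_%d_%04d", clusterTimestamp, id);
  }

  public static void main(String[] args) {
    // Prints: application_1503820320000_0001
    System.out.println(convertApplicationIdToString(1503820320000L, 1));
  }
}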




[29/50] [abbrv] hadoop git commit: Made fixes for whitespace errors and checkstyle warnings before merge.

Posted by st...@apache.org.
Made fixes for whitespace errors and checkstyle warnings before merge.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/3d00c8f3
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/3d00c8f3
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/3d00c8f3

Branch: refs/heads/HADOOP-13345
Commit: 3d00c8f3942da931150de79f42cd4913bf751123
Parents: 512068a
Author: Varun Saxena <va...@apache.org>
Authored: Wed Aug 30 01:17:40 2017 +0530
Committer: Varun Saxena <va...@apache.org>
Committed: Wed Aug 30 11:29:54 2017 +0530

----------------------------------------------------------------------
 .../v2/app/rm/TestRMContainerAllocator.java     | 13 ++++++------
 .../TaskAttemptUnsuccessfulCompletionEvent.java |  6 +++---
 .../mapred/TestMRTimelineEventHandling.java     |  2 +-
 .../hadoop/yarn/conf/YarnConfiguration.java     |  2 +-
 .../distributedshell/TestDistributedShell.java  |  2 +-
 .../yarn/client/api/TimelineV2Client.java       |  1 -
 .../src/main/resources/yarn-default.xml         |  2 +-
 ...TimelineAuthenticationFilterInitializer.java | 11 +++++-----
 ...TimelineAuthenticationFilterInitializer.java |  4 +++-
 .../security/authorize/NMPolicyProvider.java    | 22 ++++++++++----------
 .../apptoflow/AppToFlowColumnPrefix.java        |  2 +-
 .../storage/reader/EntityTypeReader.java        |  5 ++---
 .../timelineservice/storage/TimelineReader.java |  4 ++--
 13 files changed, 38 insertions(+), 38 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/3d00c8f3/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/rm/TestRMContainerAllocator.java
----------------------------------------------------------------------
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/rm/TestRMContainerAllocator.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/rm/TestRMContainerAllocator.java
index 6c74a7a..e4a8a1a 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/rm/TestRMContainerAllocator.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/rm/TestRMContainerAllocator.java
@@ -142,7 +142,6 @@ import org.apache.hadoop.yarn.server.resourcemanager.scheduler.common.fica.FiCaS
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.event.SchedulerEvent;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.fifo.FifoScheduler;
 import org.apache.hadoop.yarn.server.resourcemanager.security.AMRMTokenSecretManager;
-import org.apache.hadoop.yarn.server.timelineservice.security.TimelineV2DelegationTokenSecretManagerService.TimelineV2DelegationTokenSecretManager;
 import org.apache.hadoop.yarn.server.utils.BuilderUtils;
 import org.apache.hadoop.yarn.util.Clock;
 import org.apache.hadoop.yarn.util.ControlledClock;
@@ -777,7 +776,7 @@ public class TestRMContainerAllocator {
         new Text("renewer"), null);
     ident.setSequenceNumber(1);
     Token<TimelineDelegationTokenIdentifier> collectorToken =
-        new Token<TimelineDelegationTokenIdentifier> (ident.getBytes(),
+        new Token<TimelineDelegationTokenIdentifier>(ident.getBytes(),
         new byte[0], TimelineDelegationTokenIdentifier.KIND_NAME,
         new Text(localAddr));
     org.apache.hadoop.yarn.api.records.Token token =
@@ -825,7 +824,7 @@ public class TestRMContainerAllocator {
     // new token.
     ident.setSequenceNumber(100);
     Token<TimelineDelegationTokenIdentifier> collectorToken1 =
-        new Token<TimelineDelegationTokenIdentifier> (ident.getBytes(),
+        new Token<TimelineDelegationTokenIdentifier>(ident.getBytes(),
         new byte[0], TimelineDelegationTokenIdentifier.KIND_NAME,
         new Text(localAddr));
     token = org.apache.hadoop.yarn.api.records.Token.newInstance(
@@ -3585,15 +3584,15 @@ public class TestRMContainerAllocator {
     }
   }
 
-  private static class MockSchedulerForTimelineCollector
+  private final static class MockSchedulerForTimelineCollector
       implements ApplicationMasterProtocol {
-    CollectorInfo collectorInfo;
+    private CollectorInfo collectorInfo;
 
-    public MockSchedulerForTimelineCollector(CollectorInfo info) {
+    private MockSchedulerForTimelineCollector(CollectorInfo info) {
       this.collectorInfo = info;
     }
 
-    void updateCollectorInfo(CollectorInfo info) {
+    private void updateCollectorInfo(CollectorInfo info) {
       collectorInfo = info;
     }
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/3d00c8f3/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/jobhistory/TaskAttemptUnsuccessfulCompletionEvent.java
----------------------------------------------------------------------
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/jobhistory/TaskAttemptUnsuccessfulCompletionEvent.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/jobhistory/TaskAttemptUnsuccessfulCompletionEvent.java
index 1529125..9afa093 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/jobhistory/TaskAttemptUnsuccessfulCompletionEvent.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/jobhistory/TaskAttemptUnsuccessfulCompletionEvent.java
@@ -62,7 +62,7 @@ public class TaskAttemptUnsuccessfulCompletionEvent implements HistoryEvent {
   private long startTime;
   private static final Counters EMPTY_COUNTERS = new Counters();
 
-  /** 
+  /**
    * Create an event to record the unsuccessful completion of attempts.
    * @param id Attempt ID
    * @param taskType Type of the task
@@ -227,12 +227,12 @@ public class TaskAttemptUnsuccessfulCompletionEvent implements HistoryEvent {
   public String getHostname() { return hostname; }
   /** Gets the rpc port for the host where the attempt executed. */
   public int getPort() { return port; }
-  
+
   /** Gets the rack name of the node where the attempt ran. */
   public String getRackName() {
     return rackName == null ? null : rackName.toString();
   }
-  
+
   /** Gets the error string. */
   public String getError() { return error.toString(); }
   /**

http://git-wip-us.apache.org/repos/asf/hadoop/blob/3d00c8f3/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/TestMRTimelineEventHandling.java
----------------------------------------------------------------------
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/TestMRTimelineEventHandling.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/TestMRTimelineEventHandling.java
index 9434d46..19313d3 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/TestMRTimelineEventHandling.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/TestMRTimelineEventHandling.java
@@ -398,7 +398,7 @@ public class TestMRTimelineEventHandling {
 
           LOG.info("strLine.trim()= " + strLine.trim());
           if (checkIdPrefix) {
-            Assert.assertTrue("Entity ID prefix expected to be > 0" ,
+            Assert.assertTrue("Entity ID prefix expected to be > 0",
                 entity.getIdPrefix() > 0);
             if (idPrefix == -1) {
               idPrefix = entity.getIdPrefix();

http://git-wip-us.apache.org/repos/asf/hadoop/blob/3d00c8f3/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
index be45ddf..4944821 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
@@ -2116,7 +2116,7 @@ public class YarnConfiguration extends Configuration {
       TIMELINE_SERVICE_PREFIX + "reader.class";
 
   public static final String DEFAULT_TIMELINE_SERVICE_READER_CLASS =
-      "org.apache.hadoop.yarn.server.timelineservice.storage" + 
+      "org.apache.hadoop.yarn.server.timelineservice.storage" +
           ".HBaseTimelineReaderImpl";
 
   /**

http://git-wip-us.apache.org/repos/asf/hadoop/blob/3d00c8f3/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-distributedshell/src/test/java/org/apache/hadoop/yarn/applications/distributedshell/TestDistributedShell.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-distributedshell/src/test/java/org/apache/hadoop/yarn/applications/distributedshell/TestDistributedShell.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-distributedshell/src/test/java/org/apache/hadoop/yarn/applications/distributedshell/TestDistributedShell.java
index 47485ae..fc270cb 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-distributedshell/src/test/java/org/apache/hadoop/yarn/applications/distributedshell/TestDistributedShell.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-distributedshell/src/test/java/org/apache/hadoop/yarn/applications/distributedshell/TestDistributedShell.java
@@ -652,7 +652,7 @@ public class TestDistributedShell {
           if (checkIdPrefix) {
             TimelineEntity entity = FileSystemTimelineReaderImpl.
                 getTimelineRecordFromJSON(entityLine, TimelineEntity.class);
-            Assert.assertTrue("Entity ID prefix expected to be > 0" ,
+            Assert.assertTrue("Entity ID prefix expected to be > 0",
                 entity.getIdPrefix() > 0);
             if (idPrefix == -1) {
               idPrefix = entity.getIdPrefix();

http://git-wip-us.apache.org/repos/asf/hadoop/blob/3d00c8f3/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/client/api/TimelineV2Client.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/client/api/TimelineV2Client.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/client/api/TimelineV2Client.java
index da81a91..423c059 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/client/api/TimelineV2Client.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/client/api/TimelineV2Client.java
@@ -24,7 +24,6 @@ import org.apache.hadoop.classification.InterfaceAudience.Public;
 import org.apache.hadoop.service.CompositeService;
 import org.apache.hadoop.yarn.api.records.ApplicationId;
 import org.apache.hadoop.yarn.api.records.CollectorInfo;
-import org.apache.hadoop.yarn.api.records.Token;
 import org.apache.hadoop.yarn.api.records.timelineservice.TimelineEntity;
 import org.apache.hadoop.yarn.client.api.impl.TimelineV2ClientImpl;
 import org.apache.hadoop.yarn.exceptions.YarnException;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/3d00c8f3/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
index ae022e4..17d0496 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
@@ -3188,7 +3188,7 @@
     <name>yarn.timeline-service.http-cross-origin.enabled</name>
     <value>false</value>
   </property>
-  
+ 
   <property>
     <description>
       Flag to enable cross-origin (CORS) support for timeline service v1.x or

http://git-wip-us.apache.org/repos/asf/hadoop/blob/3d00c8f3/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/timeline/security/TimelineAuthenticationFilterInitializer.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/timeline/security/TimelineAuthenticationFilterInitializer.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/timeline/security/TimelineAuthenticationFilterInitializer.java
index 06f9868e..3d8ce05 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/timeline/security/TimelineAuthenticationFilterInitializer.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/timeline/security/TimelineAuthenticationFilterInitializer.java
@@ -53,7 +53,8 @@ public class TimelineAuthenticationFilterInitializer extends FilterInitializer {
   /**
    * The configuration prefix of timeline HTTP authentication.
    */
-  public static final String PREFIX = "yarn.timeline-service.http-authentication.";
+  public static final String PREFIX =
+      "yarn.timeline-service.http-authentication.";
 
   @VisibleForTesting
   Map<String, String> filterConfig;
@@ -104,15 +105,15 @@ public class TimelineAuthenticationFilterInitializer extends FilterInitializer {
   }
 
   /**
-   * Initializes {@link TimelineAuthenticationFilter}
+   * Initializes {@link TimelineAuthenticationFilter}.
    * <p>
    * Propagates to {@link TimelineAuthenticationFilter} configuration all YARN
-   * configuration properties prefixed with {@value #PREFIX}
+   * configuration properties prefixed with {@value #PREFIX}.
    *
    * @param container
-   *          The filter container
+   *          The filter container.
    * @param conf
-   *          Configuration for run-time parameters
+   *          Configuration for run-time parameters.
    */
   @Override
   public void initFilter(FilterContainer container, Configuration conf) {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/3d00c8f3/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/test/java/org/apache/hadoop/yarn/server/timeline/security/TestTimelineAuthenticationFilterInitializer.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/test/java/org/apache/hadoop/yarn/server/timeline/security/TestTimelineAuthenticationFilterInitializer.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/test/java/org/apache/hadoop/yarn/server/timeline/security/TestTimelineAuthenticationFilterInitializer.java
index 430911e..44f63ea 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/test/java/org/apache/hadoop/yarn/server/timeline/security/TestTimelineAuthenticationFilterInitializer.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/test/java/org/apache/hadoop/yarn/server/timeline/security/TestTimelineAuthenticationFilterInitializer.java
@@ -27,7 +27,9 @@ import static org.apache.hadoop.yarn.server.timeline.security.TimelineAuthentica
 import org.junit.Test;
 import org.mockito.Mockito;
 
-
+/**
+ * Tests {@link TimelineAuthenticationFilterInitializer}.
+ */
 public class TestTimelineAuthenticationFilterInitializer {
 
   @Test

http://git-wip-us.apache.org/repos/asf/hadoop/blob/3d00c8f3/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/security/authorize/NMPolicyProvider.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/security/authorize/NMPolicyProvider.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/security/authorize/NMPolicyProvider.java
index cc668f7..7b28659 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/security/authorize/NMPolicyProvider.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/security/authorize/NMPolicyProvider.java
@@ -33,21 +33,21 @@ import org.apache.hadoop.yarn.server.nodemanager.api.LocalizationProtocolPB;
 @InterfaceStability.Unstable
 public class NMPolicyProvider extends PolicyProvider {
   
-  private static final Service[] nodeManagerServices = 
+  private static final Service[] NODE_MANAGER_SERVICES =
       new Service[] {
-    new Service(
-        YarnConfiguration.YARN_SECURITY_SERVICE_AUTHORIZATION_CONTAINER_MANAGEMENT_PROTOCOL, 
-        ContainerManagementProtocolPB.class),
-    new Service(YarnConfiguration.YARN_SECURITY_SERVICE_AUTHORIZATION_RESOURCE_LOCALIZER, 
-        LocalizationProtocolPB.class),
-    new Service(YarnConfiguration.
-        YARN_SECURITY_SERVICE_AUTHORIZATION_COLLECTOR_NODEMANAGER_PROTOCOL,
+          new Service(YarnConfiguration.
+            YARN_SECURITY_SERVICE_AUTHORIZATION_CONTAINER_MANAGEMENT_PROTOCOL,
+            ContainerManagementProtocolPB.class),
+          new Service(YarnConfiguration.
+            YARN_SECURITY_SERVICE_AUTHORIZATION_RESOURCE_LOCALIZER,
+            LocalizationProtocolPB.class),
+          new Service(YarnConfiguration.
+            YARN_SECURITY_SERVICE_AUTHORIZATION_COLLECTOR_NODEMANAGER_PROTOCOL,
             CollectorNodemanagerProtocolPB.class)
-  };
+      };
 
   @Override
   public Service[] getServices() {
-    return nodeManagerServices;
+    return NODE_MANAGER_SERVICES;
   }
-
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/3d00c8f3/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/apptoflow/AppToFlowColumnPrefix.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/apptoflow/AppToFlowColumnPrefix.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/apptoflow/AppToFlowColumnPrefix.java
index f1e4495..752a380 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/apptoflow/AppToFlowColumnPrefix.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/apptoflow/AppToFlowColumnPrefix.java
@@ -58,7 +58,7 @@ public enum AppToFlowColumnPrefix implements ColumnPrefix<AppToFlowTable> {
   private final String columnPrefix;
   private final byte[] columnPrefixBytes;
 
-  private AppToFlowColumnPrefix(ColumnFamily<AppToFlowTable> columnFamily,
+  AppToFlowColumnPrefix(ColumnFamily<AppToFlowTable> columnFamily,
       String columnPrefix) {
     this.columnFamily = columnFamily;
     this.columnPrefix = columnPrefix;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/3d00c8f3/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/reader/EntityTypeReader.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/reader/EntityTypeReader.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/reader/EntityTypeReader.java
index fd85878..05570f1 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/reader/EntityTypeReader.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/reader/EntityTypeReader.java
@@ -91,9 +91,8 @@ public final class EntityTypeReader extends AbstractTimelineStorageReader {
 
     int counter = 0;
     while (true) {
-      try (ResultScanner results
-          = getResult(hbaseConf, conn, typeFilterList, currRowKey, nextRowKey))
-      {
+      try (ResultScanner results =
+          getResult(hbaseConf, conn, typeFilterList, currRowKey, nextRowKey)) {
         TimelineEntity entity = parseEntityForType(results.next());
         if (entity == null) {
           break;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/3d00c8f3/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/TimelineReader.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/TimelineReader.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/TimelineReader.java
index 1e77155..16d623a 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/TimelineReader.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/TimelineReader.java
@@ -125,7 +125,7 @@ public interface TimelineReader extends Service {
    *    <li><b>flowRunId</b> - Context flow run id.</li>
    *    <li><b>appId</b> - Context app id.</li>
    *    </ul>
-   *    Although entityIdPrefix and entityId are also part of context, 
+   *    Although entityIdPrefix and entityId are also part of context,
    *    it has no meaning for getEntities.<br>
    *    Fields in context which are mandatory depends on entity type. Entity
    *    type is always mandatory. In addition to entity type, below is the list
@@ -161,7 +161,7 @@ public interface TimelineReader extends Service {
    *    {@link TimelineDataToRetrieve} for details.
    * @return A set of <cite>TimelineEntity</cite> instances of the given entity
    *    type in the given context scope which matches the given predicates
-   *    ordered by enitityIdPrefix(for generic entities only). 
+   *    ordered by enitityIdPrefix(for generic entities only).
    *    Each entity will only contain
    *    the metadata(id, type , idPrefix and created time) plus the given
    *    fields to retrieve.




[42/50] [abbrv] hadoop git commit: HDFS-12356. Unit test for JournalNode sync during Rolling Upgrade. Contributed by Hanisha Koneru.

Posted by st...@apache.org.
HDFS-12356. Unit test for JournalNode sync during Rolling Upgrade. Contributed by Hanisha Koneru.
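
The test drives the standard rolling-upgrade admin flow on the active NameNode while JournalNode sync runs in the background. A minimal sketch of that flow, assuming a started MiniDFSCluster (as built by MiniQJMHACluster in the test) with two HA NameNodes and NameNode 0 active; class and method names here are illustrative, not part of the patch:

import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.protocol.HdfsConstants;
import org.apache.hadoop.hdfs.protocol.RollingUpgradeInfo;

public class RollingUpgradeFlowSketch {
  // Assumes dfsCluster is already started with NN 0 active (sketch assumption).
  static void runFlow(MiniDFSCluster dfsCluster) throws Exception {
    DistributedFileSystem dfs = dfsCluster.getFileSystem(0);

    // PREPARE creates the rollback fsimage and opens the upgrade window.
    RollingUpgradeInfo info =
        dfs.rollingUpgrade(HdfsConstants.RollingUpgradeAction.PREPARE);

    // QUERY reports the same in-progress upgrade info.
    RollingUpgradeInfo queried =
        dfs.rollingUpgrade(HdfsConstants.RollingUpgradeAction.QUERY);
    assert info.equals(queried);

    // Restart the standby NameNode with the rolling-upgrade startup option.
    dfsCluster.restartNameNode(1, true, "-rollingUpgrade", "started");

    // FINALIZE closes the window; edit logs rolled meanwhile remain syncable.
    RollingUpgradeInfo finalized =
        dfs.rollingUpgrade(HdfsConstants.RollingUpgradeAction.FINALIZE);
    assert finalized.isFinalized();
  }
}

The test below interleaves this flow with edit-log deletions on random JournalNodes and failovers between the two NameNodes, then waits for the sync process to restore the missing segments.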


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/fd66a243
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/fd66a243
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/fd66a243

Branch: refs/heads/HADOOP-13345
Commit: fd66a243bfffc8260bfd69058625d4d9509cafe6
Parents: d04f85f
Author: Arpit Agarwal <ar...@apache.org>
Authored: Wed Aug 30 10:29:42 2017 -0700
Committer: Arpit Agarwal <ar...@apache.org>
Committed: Wed Aug 30 10:29:42 2017 -0700

----------------------------------------------------------------------
 .../qjournal/server/TestJournalNodeSync.java    | 176 +++++++++++++++----
 1 file changed, 137 insertions(+), 39 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/fd66a243/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/server/TestJournalNodeSync.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/server/TestJournalNodeSync.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/server/TestJournalNodeSync.java
index 2964f05..09ef3a5 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/server/TestJournalNodeSync.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/server/TestJournalNodeSync.java
@@ -20,11 +20,13 @@ package org.apache.hadoop.hdfs.qjournal.server;
 import com.google.common.base.Supplier;
 import com.google.common.collect.Lists;
 import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
+import org.apache.hadoop.hdfs.DistributedFileSystem;
 import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
+import org.apache.hadoop.hdfs.protocol.HdfsConstants;
+import org.apache.hadoop.hdfs.protocol.RollingUpgradeInfo;
 import org.apache.hadoop.hdfs.qjournal.MiniJournalCluster;
 import org.apache.hadoop.hdfs.qjournal.MiniQJMHACluster;
 import org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory;
@@ -54,10 +56,11 @@ public class TestJournalNodeSync {
   private MiniQJMHACluster qjmhaCluster;
   private MiniDFSCluster dfsCluster;
   private MiniJournalCluster jCluster;
-  private FileSystem fs;
   private FSNamesystem namesystem;
   private int editsPerformed = 0;
   private final String jid = "ns1";
+  private int activeNNindex=0;
+  private static final int DFS_HA_TAILEDITS_PERIOD_SECONDS=1;
 
   @Rule
   public TestName testName = new TestName();
@@ -71,13 +74,16 @@ public class TestJournalNodeSync {
         "testSyncAfterJNdowntimeWithoutQJournalQueue")) {
       conf.setInt(DFSConfigKeys.DFS_QJOURNAL_QUEUE_SIZE_LIMIT_KEY, 0);
     }
+    if (testName.getMethodName().equals("testSyncDuringRollingUpgrade")) {
+      conf.setInt(DFSConfigKeys.DFS_HA_TAILEDITS_PERIOD_KEY,
+          DFS_HA_TAILEDITS_PERIOD_SECONDS);
+    }
     qjmhaCluster = new MiniQJMHACluster.Builder(conf).setNumNameNodes(2)
       .build();
     dfsCluster = qjmhaCluster.getDfsCluster();
     jCluster = qjmhaCluster.getJournalCluster();
 
     dfsCluster.transitionToActive(0);
-    fs = dfsCluster.getFileSystem(0);
     namesystem = dfsCluster.getNamesystem(0);
   }
 
@@ -192,36 +198,7 @@ public class TestJournalNodeSync {
   // the journals.
   @Test(timeout=60000)
   public void testRandomJournalMissingLogs() throws Exception {
-    Random randomJournal = new Random();
-
-    List<File> journalCurrentDirs = Lists.newArrayList();
-
-    for (int i = 0; i < 3; i++) {
-      journalCurrentDirs.add(new StorageDirectory(jCluster.getJournalDir(i,
-          jid)).getCurrentDir());
-    }
-
-    int count = 0;
-    long lastStartTxId;
-    int journalIndex;
-    List<File> missingLogs = Lists.newArrayList();
-    while (count < 5) {
-      lastStartTxId = generateEditLog();
-
-      // Delete the last edit log segment from randomly selected journal node
-      journalIndex = randomJournal.nextInt(3);
-      missingLogs.add(deleteEditLog(journalCurrentDirs.get(journalIndex),
-          lastStartTxId));
-
-      // Delete the last edit log segment from two journals for some logs
-      if (count % 2 == 0) {
-        journalIndex = (journalIndex + 1) % 3;
-        missingLogs.add(deleteEditLog(journalCurrentDirs.get(journalIndex),
-            lastStartTxId));
-      }
-
-      count++;
-    }
+    List<File> missingLogs = deleteEditLogsFromRandomJN();
 
     GenericTestUtils.waitFor(editLogExists(missingLogs), 500, 30000);
   }
@@ -277,7 +254,8 @@ public class TestJournalNodeSync {
    */
   @Test (timeout=300_000)
   public void testSyncAfterJNdowntimeWithoutQJournalQueue() throws Exception{
-    // Queuing is disabled during the cluster setup {@link #setUpMiniCluster()}
+    // QJournal Queuing is disabled during the cluster setup
+    // {@link #setUpMiniCluster()}
     File firstJournalDir = jCluster.getJournalDir(0, jid);
     File firstJournalCurrentDir = new StorageDirectory(firstJournalDir)
         .getCurrentDir();
@@ -376,11 +354,88 @@ public class TestJournalNodeSync {
     GenericTestUtils.waitFor(editLogExists(missingLogs), 500, 30000);
   }
 
+  // Test JournalNode Sync during a Rolling Upgrade of NN.
+  @Test (timeout=300_000)
+  public void testSyncDuringRollingUpgrade() throws Exception {
+
+    DistributedFileSystem dfsActive;
+    int standbyNNindex;
+
+    if (dfsCluster.getNameNode(0).isActiveState()) {
+      activeNNindex = 0;
+      standbyNNindex = 1;
+    } else {
+      activeNNindex = 1;
+      standbyNNindex = 0;
+    }
+    dfsActive = dfsCluster.getFileSystem(activeNNindex);
+
+    // Prepare for rolling upgrade
+    final RollingUpgradeInfo info = dfsActive.rollingUpgrade(
+          HdfsConstants.RollingUpgradeAction.PREPARE);
+
+    //query rolling upgrade
+    Assert.assertEquals(info, dfsActive.rollingUpgrade(
+        HdfsConstants.RollingUpgradeAction.QUERY));
+
+    // Restart the Standby NN with rollingUpgrade option
+    dfsCluster.restartNameNode(standbyNNindex, true,
+        "-rollingUpgrade", "started");
+    Assert.assertEquals(info, dfsActive.rollingUpgrade(
+        HdfsConstants.RollingUpgradeAction.QUERY));
+
+    // Do some edits and delete some edit logs
+    List<File> missingLogs = deleteEditLogsFromRandomJN();
+
+    GenericTestUtils.waitFor(editLogExists(missingLogs), 500, 30000);
+
+    // Transition the active NN to standby and standby to active
+    dfsCluster.transitionToStandby(activeNNindex);
+
+    // Let Standby NN catch up tailing edit logs before transitioning it to
+    // active
+    Thread.sleep(30*DFS_HA_TAILEDITS_PERIOD_SECONDS*1000);
+
+    dfsCluster.transitionToActive(standbyNNindex);
+    dfsCluster.waitActive();
+
+    activeNNindex=standbyNNindex;
+    standbyNNindex=((activeNNindex+1)%2);
+    dfsActive = dfsCluster.getFileSystem(activeNNindex);
+
+    Assert.assertTrue(dfsCluster.getNameNode(activeNNindex).isActiveState());
+    Assert.assertFalse(dfsCluster.getNameNode(standbyNNindex).isActiveState());
+
+    // Restart the current standby NN (previously active)
+    dfsCluster.restartNameNode(standbyNNindex, true,
+        "-rollingUpgrade", "started");
+    Assert.assertEquals(info, dfsActive.rollingUpgrade(
+        HdfsConstants.RollingUpgradeAction.QUERY));
+    dfsCluster.waitActive();
+
+    // Do some edits and delete some edit logs
+    missingLogs.addAll(deleteEditLogsFromRandomJN());
+
+    // Check that JNSync downloaded the edit logs rolled during rolling upgrade.
+    GenericTestUtils.waitFor(editLogExists(missingLogs), 500, 30000);
+
+    //finalize rolling upgrade
+    final RollingUpgradeInfo finalize = dfsActive.rollingUpgrade(
+        HdfsConstants.RollingUpgradeAction.FINALIZE);
+    Assert.assertTrue(finalize.isFinalized());
+
+    // Check the missing edit logs exist after finalizing rolling upgrade
+    for (File editLog : missingLogs) {
+      Assert.assertTrue("Edit log missing after finalizing rolling upgrade",
+          editLog.exists());
+    }
+  }
+
   private File deleteEditLog(File currentDir, long startTxId)
       throws IOException {
     EditLogFile logFile = getLogFile(currentDir, startTxId);
     while (logFile.isInProgress()) {
-      dfsCluster.getNameNode(0).getRpcServer().rollEditLog();
+      dfsCluster.getNameNode(activeNNindex).getRpcServer().rollEditLog();
       logFile = getLogFile(currentDir, startTxId);
     }
     File deleteFile = logFile.getFile();
@@ -389,13 +444,55 @@ public class TestJournalNodeSync {
     return deleteFile;
   }
 
+  private List<File> deleteEditLogsFromRandomJN() throws IOException {
+    Random random = new Random();
+
+    List<File> journalCurrentDirs = Lists.newArrayList();
+
+    for (int i = 0; i < 3; i++) {
+      journalCurrentDirs.add(new StorageDirectory(jCluster.getJournalDir(i,
+          jid)).getCurrentDir());
+    }
+
+    long[] startTxIds = new long[20];
+    for (int i = 0; i < 20; i++) {
+      startTxIds[i] = generateEditLog();
+    }
+
+    int count = 0, startTxIdIndex;
+    long startTxId;
+    int journalIndex;
+    List<File> missingLogs = Lists.newArrayList();
+    List<Integer> deletedStartTxIds = Lists.newArrayList();
+    while (count < 5) {
+      // Select a random edit log to delete
+      startTxIdIndex = random.nextInt(20);
+      while (deletedStartTxIds.contains(startTxIdIndex)) {
+        startTxIdIndex = random.nextInt(20);
+      }
+      startTxId = startTxIds[startTxIdIndex];
+      deletedStartTxIds.add(startTxIdIndex);
+
+      // Delete the randomly selected edit log segment from randomly selected
+      // journal node
+      journalIndex = random.nextInt(3);
+      missingLogs.add(deleteEditLog(journalCurrentDirs.get(journalIndex),
+          startTxId));
+
+      count++;
+    }
+
+    return missingLogs;
+  }
+
   /**
    * Do a mutative metadata operation on the file system.
    *
    * @return true if the operation was successful, false otherwise.
    */
   private boolean doAnEdit() throws IOException {
-    return fs.mkdirs(new Path("/tmp", Integer.toString(editsPerformed++)));
+    return dfsCluster.getFileSystem(activeNNindex).mkdirs(
+        new Path("/tmp", Integer.toString(editsPerformed++)));
   }
 
   /**
@@ -414,12 +511,13 @@ public class TestJournalNodeSync {
    * @return the startTxId of next segment after rolling edits.
    */
   private long generateEditLog(int numEdits) throws IOException {
-    long startTxId = namesystem.getFSImage().getEditLog().getLastWrittenTxId();
+    long lastWrittenTxId = dfsCluster.getNameNode(activeNNindex).getFSImage()
+        .getEditLog().getLastWrittenTxId();
     for (int i = 1; i <= numEdits; i++) {
       Assert.assertTrue("Failed to do an edit", doAnEdit());
     }
-    dfsCluster.getNameNode(0).getRpcServer().rollEditLog();
-    return startTxId;
+    dfsCluster.getNameNode(activeNNindex).getRpcServer().rollEditLog();
+    return lastWrittenTxId;
   }
 
   private Supplier<Boolean> editLogExists(List<File> editLogs) {




[25/50] [abbrv] hadoop git commit: YARN-4455. Support fetching metrics by time range. Contributed by Varun Saxena.

Posted by st...@apache.org.
YARN-4455. Support fetching metrics by time range. Contributed by Varun Saxena.
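
The patch adds two optional query parameters, metricstimestart and metricstimeend, that bound the timestamps of the metric values returned by the v2 timeline reader REST API (a start greater than the end is rejected with 400 Bad Request, as the tests below verify). A minimal sketch of such a query over plain HttpURLConnection; the host, port, and application/entity IDs are illustrative, taken from the test URIs:

import java.io.BufferedReader;
import java.io.InputStreamReader;
import java.net.HttpURLConnection;
import java.net.URL;

public class MetricsTimeRangeQuerySketch {
  public static void main(String[] args) throws Exception {
    long now = System.currentTimeMillis();
    // Restrict returned metric values to timestamps in (now-90s, now-80s).
    URL url = new URL("http://localhost:8188/ws/v2/timeline/clusters/cluster1"
        + "/apps/application_1111111111_1111/entities/type1"
        + "?fields=ALL&metricslimit=100"
        + "&metricstimestart=" + (now - 90000)
        + "&metricstimeend=" + (now - 80000));
    HttpURLConnection conn = (HttpURLConnection) url.openConnection();
    try (BufferedReader in = new BufferedReader(
        new InputStreamReader(conn.getInputStream()))) {
      String line;
      while ((line = in.readLine()) != null) {
        System.out.println(line);  // JSON entities with in-range metric values
      }
    }
  }
}

Either parameter may be supplied on its own, in which case only that side of the range is bounded, as the testGetEntitiesMetricsTimeRange cases below exercise.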


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/70078e91
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/70078e91
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/70078e91

Branch: refs/heads/HADOOP-13345
Commit: 70078e91e3287aad51f6ddf6acd9ed75e7c6760d
Parents: 6604131
Author: Rohith Sharma K S <ro...@apache.org>
Authored: Thu Jul 20 12:16:06 2017 +0530
Committer: Varun Saxena <va...@apache.org>
Committed: Wed Aug 30 11:29:53 2017 +0530

----------------------------------------------------------------------
 ...stTimelineReaderWebServicesHBaseStorage.java | 207 ++++++++++++++++-
 .../storage/DataGeneratorForTest.java           |  27 +--
 .../storage/TestHBaseTimelineStorageApps.java   | 209 ++++++++++++-----
 .../TestHBaseTimelineStorageEntities.java       | 166 ++++++++++----
 .../storage/flow/TestHBaseStorageFlowRun.java   |  18 +-
 .../storage/common/ColumnHelper.java            |   1 -
 .../common/HBaseTimelineStorageUtils.java       |  17 ++
 .../storage/reader/ApplicationEntityReader.java |  14 ++
 .../storage/reader/GenericEntityReader.java     |  12 +-
 .../reader/TimelineDataToRetrieve.java          |  35 ++-
 .../reader/TimelineReaderWebServices.java       | 226 ++++++++++++++++---
 .../reader/TimelineReaderWebServicesUtils.java  |   6 +-
 .../TestFileSystemTimelineReaderImpl.java       |  15 +-
 13 files changed, 777 insertions(+), 176 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/70078e91/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase-tests/src/test/java/org/apache/hadoop/yarn/server/timelineservice/reader/TestTimelineReaderWebServicesHBaseStorage.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase-tests/src/test/java/org/apache/hadoop/yarn/server/timelineservice/reader/TestTimelineReaderWebServicesHBaseStorage.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase-tests/src/test/java/org/apache/hadoop/yarn/server/timelineservice/reader/TestTimelineReaderWebServicesHBaseStorage.java
index 6b0f95e..302f8e0 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase-tests/src/test/java/org/apache/hadoop/yarn/server/timelineservice/reader/TestTimelineReaderWebServicesHBaseStorage.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase-tests/src/test/java/org/apache/hadoop/yarn/server/timelineservice/reader/TestTimelineReaderWebServicesHBaseStorage.java
@@ -328,7 +328,7 @@ public class TestTimelineReaderWebServicesHBaseStorage
       userEntity.setType("entitytype");
       userEntity.setId("entityid-" + i);
       userEntity.setIdPrefix(11 - i);
-      userEntity.setCreatedTime(System.currentTimeMillis());
+      userEntity.setCreatedTime(ts);
       userEntities.addEntity(userEntity);
     }
 
@@ -344,7 +344,7 @@ public class TestTimelineReaderWebServicesHBaseStorage
           flowVersion2, runid2, entity3.getId(), te3);
       hbi.write(cluster, user, flow, flowVersion, runid,
           "application_1111111111_1111", userEntities);
-      writeApplicationEntities(hbi);
+      writeApplicationEntities(hbi, ts);
       hbi.flush();
     } finally {
       if (hbi != null) {
@@ -353,26 +353,25 @@ public class TestTimelineReaderWebServicesHBaseStorage
     }
   }
 
-  static void writeApplicationEntities(HBaseTimelineWriterImpl hbi)
-      throws IOException {
-    long currentTimeMillis = System.currentTimeMillis();
+  static void writeApplicationEntities(HBaseTimelineWriterImpl hbi,
+      long timestamp) throws IOException {
     int count = 1;
     for (long i = 1; i <= 3; i++) {
       for (int j = 1; j <= 5; j++) {
         TimelineEntities te = new TimelineEntities();
         ApplicationId appId =
-            BuilderUtils.newApplicationId(currentTimeMillis, count++);
+            BuilderUtils.newApplicationId(timestamp, count++);
         ApplicationEntity appEntity = new ApplicationEntity();
         appEntity.setId(appId.toString());
-        appEntity.setCreatedTime(currentTimeMillis);
+        appEntity.setCreatedTime(timestamp);
 
         TimelineEvent created = new TimelineEvent();
         created.setId(ApplicationMetricsConstants.CREATED_EVENT_TYPE);
-        created.setTimestamp(currentTimeMillis);
+        created.setTimestamp(timestamp);
         appEntity.addEvent(created);
         TimelineEvent finished = new TimelineEvent();
         finished.setId(ApplicationMetricsConstants.FINISHED_EVENT_TYPE);
-        finished.setTimestamp(currentTimeMillis + i * j);
+        finished.setTimestamp(timestamp + i * j);
 
         appEntity.addEvent(finished);
         te.addEntity(appEntity);
@@ -1775,6 +1774,113 @@ public class TestTimelineReaderWebServicesHBaseStorage
     }
   }
 
+  private static void verifyMetricCount(TimelineEntity entity,
+      int expectedMetricsCnt, int expectedMeticsValCnt) {
+    int metricsValCnt = 0;
+    for (TimelineMetric m : entity.getMetrics()) {
+      metricsValCnt += m.getValues().size();
+    }
+    assertEquals(expectedMetricsCnt, entity.getMetrics().size());
+    assertEquals(expectedMeticsValCnt, metricsValCnt);
+  }
+
+  private static void verifyMetricsCount(Set<TimelineEntity> entities,
+      int expectedMetricsCnt, int expectedMeticsValCnt) {
+    int metricsCnt = 0;
+    int metricsValCnt = 0;
+    for (TimelineEntity entity : entities) {
+      metricsCnt += entity.getMetrics().size();
+      for (TimelineMetric m : entity.getMetrics()) {
+        metricsValCnt += m.getValues().size();
+      }
+    }
+    assertEquals(expectedMetricsCnt, metricsCnt);
+    assertEquals(expectedMeticsValCnt, metricsValCnt);
+  }
+
+  @Test
+  public void testGetEntitiesMetricsTimeRange() throws Exception {
+    Client client = createClient();
+    try {
+      URI uri = URI.create("http://localhost:" + getServerPort() + "/ws/v2/" +
+          "timeline/clusters/cluster1/apps/application_1111111111_1111/" +
+          "entities/type1?fields=ALL&metricslimit=100&metricstimestart=" +
+          (ts - 90000) + "&metricstimeend=" + (ts - 80000));
+      ClientResponse resp = getResponse(client, uri);
+      Set<TimelineEntity> entities =
+          resp.getEntity(new GenericType<Set<TimelineEntity>>(){});
+      assertNotNull(entities);
+      assertEquals(2, entities.size());
+      verifyMetricsCount(entities, 4, 4);
+
+      uri = URI.create("http://localhost:" + getServerPort() + "/ws/v2/" +
+          "timeline/clusters/cluster1/apps/application_1111111111_1111/" +
+          "entities/type1?fields=ALL&metricslimit=100&metricstimestart=" +
+          (ts - 100000) + "&metricstimeend=" + (ts - 80000));
+      resp = getResponse(client, uri);
+      entities = resp.getEntity(new GenericType<Set<TimelineEntity>>(){});
+      assertNotNull(entities);
+      assertEquals(2, entities.size());
+      verifyMetricsCount(entities, 5, 9);
+
+      uri = URI.create("http://localhost:" + getServerPort() + "/ws/v2/" +
+          "timeline/clusters/cluster1/apps/application_1111111111_1111/" +
+          "entities/type1?fields=ALL&metricslimit=100&metricstimestart=" +
+          (ts - 100000));
+      resp = getResponse(client, uri);
+      entities = resp.getEntity(new GenericType<Set<TimelineEntity>>(){});
+      assertNotNull(entities);
+      assertEquals(2, entities.size());
+      verifyMetricsCount(entities, 5, 9);
+
+      uri = URI.create("http://localhost:" + getServerPort() + "/ws/v2/" +
+          "timeline/clusters/cluster1/apps/application_1111111111_1111/" +
+          "entities/type1?fields=ALL&metricslimit=100&metricstimeend=" +
+          (ts - 90000));
+      resp = getResponse(client, uri);
+      entities = resp.getEntity(new GenericType<Set<TimelineEntity>>(){});
+      assertNotNull(entities);
+      assertEquals(2, entities.size());
+      verifyMetricsCount(entities, 5, 5);
+
+      uri = URI.create("http://localhost:" + getServerPort() + "/ws/v2/" +
+          "timeline/clusters/cluster1/apps/application_1111111111_1111/" +
+          "entities/type1?fields=ALL&metricstimestart=" +
+          (ts - 100000));
+      resp = getResponse(client, uri);
+      entities = resp.getEntity(new GenericType<Set<TimelineEntity>>(){});
+      assertNotNull(entities);
+      assertEquals(2, entities.size());
+      verifyMetricsCount(entities, 5, 5);
+
+      uri = URI.create("http://localhost:" + getServerPort() + "/ws/v2/" +
+          "timeline/clusters/cluster1/apps/application_1111111111_1111/" +
+          "entities/type1/entity2?fields=ALL&metricstimestart=" +
+          (ts - 100000) + "&metricstimeend=" + (ts - 80000));
+      resp = getResponse(client, uri);
+      TimelineEntity entity = resp.getEntity(TimelineEntity.class);
+      assertNotNull(entity);
+      verifyMetricCount(entity, 3, 3);
+
+      uri = URI.create("http://localhost:" + getServerPort() + "/ws/v2/" +
+          "timeline/clusters/cluster1/apps/application_1111111111_1111/" +
+          "entities/type1/entity2?fields=ALL&metricslimit=5&metricstimestart=" +
+          (ts - 100000) + "&metricstimeend=" + (ts - 80000));
+      resp = getResponse(client, uri);
+      entity = resp.getEntity(TimelineEntity.class);
+      assertNotNull(entity);
+      verifyMetricCount(entity, 3, 5);
+
+      uri = URI.create("http://localhost:" + getServerPort() + "/ws/v2/" +
+          "timeline/clusters/cluster1/apps/application_1111111111_1111/" +
+          "entities/type1?fields=ALL&metricslimit=100&metricstimestart=" +
+          (ts - 80000) + "&metricstimeend=" + (ts - 90000));
+      verifyHttpResponse(client, uri, Status.BAD_REQUEST);
+    } finally {
+      client.destroy();
+    }
+  }
+
   /**
    * Tests if specific configs and metrics are retrieve for getEntity call.
    */
@@ -2378,4 +2484,87 @@ public class TestTimelineReaderWebServicesHBaseStorage
       client.destroy();
     }
   }
+
+  @Test
+  public void testGetAppsMetricsRange() throws Exception {
+    Client client = createClient();
+    try {
+      URI uri = URI.create("http://localhost:" + getServerPort() + "/ws/v2/" +
+          "timeline/clusters/cluster1/users/user1/flows/flow_name/runs/" +
+          "1002345678919/apps?fields=ALL&metricslimit=100&metricstimestart=" +
+          (ts - 200000) + "&metricstimeend=" + (ts - 100000));
+      ClientResponse resp = getResponse(client, uri);
+      Set<TimelineEntity> entities =
+          resp.getEntity(new GenericType<Set<TimelineEntity>>(){});
+      assertNotNull(entities);
+      assertEquals(2, entities.size());
+      verifyMetricsCount(entities, 4, 4);
+
+      uri = URI.create("http://localhost:" + getServerPort() + "/ws/v2/" +
+          "timeline/clusters/cluster1/users/user1/flows/flow_name/runs/" +
+          "1002345678919/apps?fields=ALL&metricslimit=100");
+      resp = getResponse(client, uri);
+      entities = resp.getEntity(new GenericType<Set<TimelineEntity>>(){});
+      assertNotNull(entities);
+      assertEquals(2, entities.size());
+      verifyMetricsCount(entities, 4, 10);
+
+      uri = URI.create("http://localhost:" + getServerPort() + "/ws/v2/" +
+          "timeline/clusters/cluster1/users/user1/flows/flow_name/" +
+          "apps?fields=ALL&metricslimit=100&metricstimestart=" +
+          (ts - 200000) + "&metricstimeend=" + (ts - 100000));
+      resp = getResponse(client, uri);
+      entities = resp.getEntity(new GenericType<Set<TimelineEntity>>(){});
+      assertNotNull(entities);
+      assertEquals(3, entities.size());
+      verifyMetricsCount(entities, 5, 5);
+
+      uri = URI.create("http://localhost:" + getServerPort() + "/ws/v2/" +
+          "timeline/clusters/cluster1/users/user1/flows/flow_name/" +
+          "apps?fields=ALL&metricslimit=100");
+      resp = getResponse(client, uri);
+      entities = resp.getEntity(new GenericType<Set<TimelineEntity>>(){});
+      assertNotNull(entities);
+      assertEquals(3, entities.size());
+      verifyMetricsCount(entities, 5, 12);
+
+      uri = URI.create("http://localhost:" + getServerPort() + "/ws/v2/" +
+          "timeline/clusters/cluster1/users/user1/flows/flow_name/runs/" +
+          "1002345678919/apps?fields=ALL&metricslimit=100&metricstimestart=" +
+          (ts - 200000));
+      resp = getResponse(client, uri);
+      entities = resp.getEntity(new GenericType<Set<TimelineEntity>>(){});
+      assertNotNull(entities);
+      assertEquals(2, entities.size());
+      verifyMetricsCount(entities, 4, 10);
+
+      uri = URI.create("http://localhost:" + getServerPort() + "/ws/v2/" +
+          "timeline/clusters/cluster1/users/user1/flows/flow_name/runs/" +
+          "1002345678919/apps?fields=ALL&metricslimit=100&metricstimeend=" +
+          (ts - 100000));
+      resp = getResponse(client, uri);
+      entities = resp.getEntity(new GenericType<Set<TimelineEntity>>(){});
+      assertNotNull(entities);
+      assertEquals(2, entities.size());
+      verifyMetricsCount(entities, 4, 4);
+
+      uri = URI.create("http://localhost:" + getServerPort() + "/ws/v2/" +
+          "timeline/apps/application_1111111111_1111?userid=user1&fields=ALL" +
+          "&flowname=flow_name&flowrunid=1002345678919&metricslimit=100" +
+          "&metricstimestart=" +(ts - 200000) + "&metricstimeend=" +
+          (ts - 100000));
+      resp = getResponse(client, uri);
+      TimelineEntity entity = resp.getEntity(TimelineEntity.class);
+      assertNotNull(entity);
+      verifyMetricCount(entity, 3, 3);
+
+      uri = URI.create("http://localhost:" + getServerPort() + "/ws/v2/" +
+          "timeline/clusters/cluster1/users/user1/flows/flow_name/" +
+          "apps?fields=ALL&metricslimit=100&metricstimestart=" +
+          (ts - 100000) + "&metricstimeend=" + (ts - 200000));
+      verifyHttpResponse(client, uri, Status.BAD_REQUEST);
+    } finally {
+      client.destroy();
+    }
+  }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/70078e91/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase-tests/src/test/java/org/apache/hadoop/yarn/server/timelineservice/storage/DataGeneratorForTest.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase-tests/src/test/java/org/apache/hadoop/yarn/server/timelineservice/storage/DataGeneratorForTest.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase-tests/src/test/java/org/apache/hadoop/yarn/server/timelineservice/storage/DataGeneratorForTest.java
index 1a522fa..926d8bb 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase-tests/src/test/java/org/apache/hadoop/yarn/server/timelineservice/storage/DataGeneratorForTest.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase-tests/src/test/java/org/apache/hadoop/yarn/server/timelineservice/storage/DataGeneratorForTest.java
@@ -58,7 +58,8 @@ public final class DataGeneratorForTest {
     TimelineSchemaCreator.createAllTables(conf, false);
   }
 
-  public static void loadApps(HBaseTestingUtility util) throws IOException {
+  public static void loadApps(HBaseTestingUtility util, long ts)
+      throws IOException {
     TimelineEntities te = new TimelineEntities();
     TimelineEntity entity = new TimelineEntity();
     String id = "application_1111111111_2222";
@@ -92,7 +93,6 @@ public final class DataGeneratorForTest {
     entity.addConfigs(conf);
     // add metrics
     Set<TimelineMetric> metrics = new HashSet<>();
-    long ts = System.currentTimeMillis();
     metrics.add(getMetric4(ts));
 
     TimelineMetric m12 = new TimelineMetric();
@@ -137,7 +137,7 @@ public final class DataGeneratorForTest {
     entity1.addConfigs(conf1);
 
     // add metrics
-    entity1.addMetrics(getMetrics4());
+    entity1.addMetrics(getMetrics4(ts));
     TimelineEvent event11 = new TimelineEvent();
     event11.setId("end_event");
     event11.setTimestamp(ts);
@@ -175,18 +175,17 @@ public final class DataGeneratorForTest {
     }
   }
 
-  private static Set<TimelineMetric> getMetrics4() {
+  private static Set<TimelineMetric> getMetrics4(long ts) {
     Set<TimelineMetric> metrics1 = new HashSet<>();
     TimelineMetric m2 = new TimelineMetric();
     m2.setId("MAP1_SLOT_MILLIS");
-    long ts1 = System.currentTimeMillis();
     Map<Long, Number> metricValues1 = new HashMap<>();
-    metricValues1.put(ts1 - 120000, 100000000);
-    metricValues1.put(ts1 - 100000, 200000000);
-    metricValues1.put(ts1 - 80000, 300000000);
-    metricValues1.put(ts1 - 60000, 400000000);
-    metricValues1.put(ts1 - 40000, 50000000000L);
-    metricValues1.put(ts1 - 20000, 60000000000L);
+    metricValues1.put(ts - 120000, 100000000);
+    metricValues1.put(ts - 100000, 200000000);
+    metricValues1.put(ts - 80000, 300000000);
+    metricValues1.put(ts - 60000, 400000000);
+    metricValues1.put(ts - 40000, 50000000000L);
+    metricValues1.put(ts - 20000, 60000000000L);
     m2.setType(Type.TIME_SERIES);
     m2.setValues(metricValues1);
     metrics1.add(m2);
@@ -307,7 +306,7 @@ public final class DataGeneratorForTest {
     return metricValues;
   }
 
-  public static void loadEntities(HBaseTestingUtility util)
+  public static void loadEntities(HBaseTestingUtility util, long ts)
       throws IOException {
     TimelineEntities te = new TimelineEntities();
     TimelineEntity entity = new TimelineEntity();
@@ -332,7 +331,6 @@ public final class DataGeneratorForTest {
     Set<TimelineMetric> metrics = new HashSet<>();
     TimelineMetric m1 = new TimelineMetric();
     m1.setId("MAP_SLOT_MILLIS");
-    long ts = System.currentTimeMillis();
     m1.setType(Type.TIME_SERIES);
     m1.setValues(getMetricValues1(ts));
     metrics.add(m1);
@@ -383,9 +381,8 @@ public final class DataGeneratorForTest {
     Set<TimelineMetric> metrics1 = new HashSet<>();
     TimelineMetric m2 = new TimelineMetric();
     m2.setId("MAP1_SLOT_MILLIS");
-    long ts1 = System.currentTimeMillis();
     m2.setType(Type.TIME_SERIES);
-    m2.setValues(getMetricValues2(ts1));
+    m2.setValues(getMetricValues2(ts));
     metrics1.add(m2);
     entity1.addMetrics(metrics1);
     te.addEntity(entity1);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/70078e91/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase-tests/src/test/java/org/apache/hadoop/yarn/server/timelineservice/storage/TestHBaseTimelineStorageApps.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase-tests/src/test/java/org/apache/hadoop/yarn/server/timelineservice/storage/TestHBaseTimelineStorageApps.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase-tests/src/test/java/org/apache/hadoop/yarn/server/timelineservice/storage/TestHBaseTimelineStorageApps.java
index 7eb9ad1..b227185 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase-tests/src/test/java/org/apache/hadoop/yarn/server/timelineservice/storage/TestHBaseTimelineStorageApps.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase-tests/src/test/java/org/apache/hadoop/yarn/server/timelineservice/storage/TestHBaseTimelineStorageApps.java
@@ -87,13 +87,14 @@ public class TestHBaseTimelineStorageApps {
 
   private static HBaseTestingUtility util;
   private HBaseTimelineReaderImpl reader;
+  private static final long CURRENT_TIME = System.currentTimeMillis();
 
   @BeforeClass
   public static void setupBeforeClass() throws Exception {
     util = new HBaseTestingUtility();
     util.startMiniCluster();
     DataGeneratorForTest.createSchema(util.getConfiguration());
-    DataGeneratorForTest.loadApps(util);
+    DataGeneratorForTest.loadApps(util, CURRENT_TIME);
   }
 
   @Before
@@ -236,13 +237,12 @@ public class TestHBaseTimelineStorageApps {
     TimelineMetric m1 = new TimelineMetric();
     m1.setId("MAP_SLOT_MILLIS");
     Map<Long, Number> metricValues = new HashMap<Long, Number>();
-    long ts = System.currentTimeMillis();
-    metricValues.put(ts - 120000, 100000000);
-    metricValues.put(ts - 100000, 200000000);
-    metricValues.put(ts - 80000, 300000000);
-    metricValues.put(ts - 60000, 400000000);
-    metricValues.put(ts - 40000, 50000000000L);
-    metricValues.put(ts - 20000, 60000000000L);
+    metricValues.put(CURRENT_TIME - 120000, 100000000);
+    metricValues.put(CURRENT_TIME - 100000, 200000000);
+    metricValues.put(CURRENT_TIME - 80000, 300000000);
+    metricValues.put(CURRENT_TIME - 60000, 400000000);
+    metricValues.put(CURRENT_TIME - 40000, 50000000000L);
+    metricValues.put(CURRENT_TIME - 20000, 60000000000L);
     m1.setType(Type.TIME_SERIES);
     m1.setValues(metricValues);
     metrics.add(m1);
@@ -259,7 +259,7 @@ public class TestHBaseTimelineStorageApps {
     TimelineMetric aggMetric = new TimelineMetric();
     aggMetric.setId("MEM_USAGE");
     Map<Long, Number> aggMetricValues = new HashMap<Long, Number>();
-    long aggTs = ts;
+    long aggTs = CURRENT_TIME;
     aggMetricValues.put(aggTs - 120000, 102400000L);
     aggMetric.setType(Type.SINGLE_VALUE);
     aggMetric.setRealtimeAggregationOp(TimelineMetricOperation.SUM);
@@ -380,7 +380,7 @@ public class TestHBaseTimelineStorageApps {
           new TimelineReaderContext(cluster, user, flow, runid, appId,
           entity.getType(), entity.getId()),
           new TimelineDataToRetrieve(null, null,
-          EnumSet.of(TimelineReader.Field.ALL), Integer.MAX_VALUE));
+          EnumSet.of(TimelineReader.Field.ALL), Integer.MAX_VALUE, null, null));
       assertNotNull(e1);
 
       // verify attributes
@@ -423,7 +423,7 @@ public class TestHBaseTimelineStorageApps {
       e1 = reader.getEntity(new TimelineReaderContext(cluster, user, flow,
           runid, appId, entity.getType(), entity.getId()),
           new TimelineDataToRetrieve(null, null,
-          EnumSet.of(TimelineReader.Field.ALL), 3));
+          EnumSet.of(TimelineReader.Field.ALL), 3, null, null));
       assertNotNull(e1);
       assertEquals(appId, e1.getId());
       assertEquals(TimelineEntityType.YARN_APPLICATION.toString(),
@@ -444,7 +444,7 @@ public class TestHBaseTimelineStorageApps {
       e1 = reader.getEntity(
           new TimelineReaderContext(cluster, user, flow, runid, appId,
          entity.getType(), entity.getId()), new TimelineDataToRetrieve(
-         null, null, EnumSet.of(TimelineReader.Field.ALL), null));
+         null, null, EnumSet.of(TimelineReader.Field.ALL), null, null, null));
       assertNotNull(e1);
       assertEquals(appId, e1.getId());
       assertEquals(TimelineEntityType.YARN_APPLICATION.toString(),
@@ -465,9 +465,9 @@ public class TestHBaseTimelineStorageApps {
             metric.getId().equals("MEM_USAGE"));
         assertEquals(1, metric.getValues().size());
         if (metric.getId().equals("MAP_SLOT_MILLIS")) {
-          assertTrue(metric.getValues().containsKey(ts - 20000));
-          assertEquals(metricValues.get(ts - 20000),
-              metric.getValues().get(ts - 20000));
+          assertTrue(metric.getValues().containsKey(CURRENT_TIME - 20000));
+          assertEquals(metricValues.get(CURRENT_TIME - 20000),
+              metric.getValues().get(CURRENT_TIME - 20000));
         }
         if (metric.getId().equals("MEM_USAGE")) {
           assertTrue(metric.getValues().containsKey(aggTs - 120000));
@@ -554,11 +554,13 @@ public class TestHBaseTimelineStorageApps {
       TimelineEntity e1 = reader.getEntity(
           new TimelineReaderContext(cluster, user, flow, runid, appName,
           entity.getType(), entity.getId()),
-          new TimelineDataToRetrieve(null, null, EnumSet.of(Field.ALL), null));
+          new TimelineDataToRetrieve(null, null, EnumSet.of(Field.ALL), null,
+          null, null));
       TimelineEntity e2 = reader.getEntity(
           new TimelineReaderContext(cluster, user, null, null, appName,
           entity.getType(), entity.getId()),
-          new TimelineDataToRetrieve(null, null, EnumSet.of(Field.ALL), null));
+          new TimelineDataToRetrieve(null, null, EnumSet.of(Field.ALL), null,
+          null, null));
       assertNotNull(e1);
       assertNotNull(e2);
       assertEquals(e1, e2);
@@ -652,7 +654,8 @@ public class TestHBaseTimelineStorageApps {
         new TimelineReaderContext("cluster1", "user1", "some_flow_name",
         1002345678919L, "application_1111111111_2222",
         TimelineEntityType.YARN_APPLICATION.toString(), null),
-        new TimelineDataToRetrieve(null, null, EnumSet.of(Field.ALL), null));
+        new TimelineDataToRetrieve(null, null, EnumSet.of(Field.ALL), null,
+        null, null));
     assertNotNull(entity);
     assertEquals(3, entity.getConfigs().size());
     assertEquals(1, entity.getIsRelatedToEntities().size());
@@ -661,7 +664,8 @@ public class TestHBaseTimelineStorageApps {
         1002345678919L, null, TimelineEntityType.YARN_APPLICATION.toString(),
         null),
         new TimelineEntityFilters.Builder().build(),
-        new TimelineDataToRetrieve(null, null, EnumSet.of(Field.ALL), null));
+        new TimelineDataToRetrieve(null, null, EnumSet.of(Field.ALL), null,
+        null, null));
     assertEquals(3, entities.size());
     int cfgCnt = 0;
     int metricCnt = 0;
@@ -775,17 +779,17 @@ public class TestHBaseTimelineStorageApps {
         1002345678919L, "application_1111111111_2222",
         TimelineEntityType.YARN_APPLICATION.toString(), null),
         new TimelineDataToRetrieve(
-        null, null, EnumSet.of(Field.INFO, Field.CONFIGS), null));
+        null, null, EnumSet.of(Field.INFO, Field.CONFIGS), null, null, null));
     assertNotNull(e1);
     assertEquals(3, e1.getConfigs().size());
     assertEquals(0, e1.getIsRelatedToEntities().size());
     Set<TimelineEntity> es1 = reader.getEntities(
         new TimelineReaderContext("cluster1", "user1", "some_flow_name",
         1002345678919L, null, TimelineEntityType.YARN_APPLICATION.toString(),
-        null),
-        new TimelineEntityFilters.Builder().build(),
+        null), new TimelineEntityFilters.Builder().build(),
         new TimelineDataToRetrieve(
-        null, null, EnumSet.of(Field.IS_RELATED_TO, Field.METRICS), null));
+        null, null, EnumSet.of(Field.IS_RELATED_TO, Field.METRICS), null,
+        null, null));
     assertEquals(3, es1.size());
     int metricsCnt = 0;
     int isRelatedToCnt = 0;
@@ -814,7 +818,8 @@ public class TestHBaseTimelineStorageApps {
         1002345678919L, null, TimelineEntityType.YARN_APPLICATION.toString(),
         null),
         new TimelineEntityFilters.Builder().isRelatedTo(irt).build(),
-        new TimelineDataToRetrieve(null, null, EnumSet.of(Field.ALL), null));
+        new TimelineDataToRetrieve(null, null, EnumSet.of(Field.ALL), null,
+        null, null));
     assertEquals(2, entities.size());
     int isRelatedToCnt = 0;
     for (TimelineEntity timelineEntity : entities) {
@@ -966,7 +971,8 @@ public class TestHBaseTimelineStorageApps {
         1002345678919L, null, TimelineEntityType.YARN_APPLICATION.toString(),
         null),
         new TimelineEntityFilters.Builder().relatesTo(rt).build(),
-        new TimelineDataToRetrieve(null, null, EnumSet.of(Field.ALL), null));
+        new TimelineDataToRetrieve(null, null, EnumSet.of(Field.ALL), null,
+        null, null));
     assertEquals(2, entities.size());
     int relatesToCnt = 0;
     for (TimelineEntity timelineEntity : entities) {
@@ -1204,7 +1210,7 @@ public class TestHBaseTimelineStorageApps {
         new TimelineEntityFilters.Builder().configFilters(confFilterList)
             .build(),
         new TimelineDataToRetrieve(null, null, EnumSet.of(Field.CONFIGS),
-        null));
+        null, null, null));
     assertEquals(2, entities.size());
     int cfgCnt = 0;
     for (TimelineEntity entity : entities) {
@@ -1218,7 +1224,8 @@ public class TestHBaseTimelineStorageApps {
         null),
         new TimelineEntityFilters.Builder().configFilters(confFilterList)
             .build(),
-        new TimelineDataToRetrieve(null, null, EnumSet.of(Field.ALL), null));
+        new TimelineDataToRetrieve(null, null, EnumSet.of(Field.ALL), null,
+        null, null));
     assertEquals(2, entities.size());
     cfgCnt = 0;
     for (TimelineEntity entity : entities) {
@@ -1236,7 +1243,7 @@ public class TestHBaseTimelineStorageApps {
         new TimelineEntityFilters.Builder().configFilters(confFilterList1)
             .build(),
         new TimelineDataToRetrieve(null, null, EnumSet.of(Field.CONFIGS),
-        null));
+        null, null, null));
     assertEquals(1, entities.size());
     cfgCnt = 0;
     for (TimelineEntity entity : entities) {
@@ -1256,7 +1263,7 @@ public class TestHBaseTimelineStorageApps {
         new TimelineEntityFilters.Builder().configFilters(confFilterList2)
             .build(),
         new TimelineDataToRetrieve(null, null, EnumSet.of(Field.CONFIGS),
-        null));
+        null, null, null));
     assertEquals(0, entities.size());
 
     TimelineFilterList confFilterList3 = new TimelineFilterList(
@@ -1269,7 +1276,7 @@ public class TestHBaseTimelineStorageApps {
         new TimelineEntityFilters.Builder().configFilters(confFilterList3)
             .build(),
         new TimelineDataToRetrieve(null, null, EnumSet.of(Field.CONFIGS),
-        null));
+        null, null, null));
     assertEquals(0, entities.size());
 
     TimelineFilterList confFilterList4 = new TimelineFilterList(
@@ -1282,7 +1289,7 @@ public class TestHBaseTimelineStorageApps {
         new TimelineEntityFilters.Builder().configFilters(confFilterList4)
             .build(),
         new TimelineDataToRetrieve(null, null, EnumSet.of(Field.CONFIGS),
-        null));
+        null, null, null));
     assertEquals(0, entities.size());
 
     TimelineFilterList confFilterList5 = new TimelineFilterList(
@@ -1295,7 +1302,7 @@ public class TestHBaseTimelineStorageApps {
         new TimelineEntityFilters.Builder().configFilters(confFilterList5)
             .build(),
         new TimelineDataToRetrieve(null, null, EnumSet.of(Field.CONFIGS),
-        null));
+        null, null, null));
     assertEquals(3, entities.size());
   }
 
@@ -1311,7 +1318,8 @@ public class TestHBaseTimelineStorageApps {
         1002345678919L, null, TimelineEntityType.YARN_APPLICATION.toString(),
         null),
         new TimelineEntityFilters.Builder().eventFilters(ef).build(),
-        new TimelineDataToRetrieve(null, null, EnumSet.of(Field.ALL), null));
+        new TimelineDataToRetrieve(null, null, EnumSet.of(Field.ALL), null,
+        null, null));
     assertEquals(1, entities.size());
     int eventCnt = 0;
     for (TimelineEntity timelineEntity : entities) {
@@ -1433,7 +1441,7 @@ public class TestHBaseTimelineStorageApps {
         new TimelineReaderContext("cluster1", "user1", "some_flow_name",
         1002345678919L, "application_1111111111_2222",
         TimelineEntityType.YARN_APPLICATION.toString(), null),
-        new TimelineDataToRetrieve(list, null, null, null));
+        new TimelineDataToRetrieve(list, null, null, null, null, null));
     assertNotNull(e1);
     assertEquals(1, e1.getConfigs().size());
     Set<TimelineEntity> es1 = reader.getEntities(
@@ -1441,7 +1449,7 @@ public class TestHBaseTimelineStorageApps {
         1002345678919L, null, TimelineEntityType.YARN_APPLICATION.toString(),
         null) ,
         new TimelineEntityFilters.Builder().build(),
-        new TimelineDataToRetrieve(list, null, null, null));
+        new TimelineDataToRetrieve(list, null, null, null, null, null));
     int cfgCnt = 0;
     for (TimelineEntity entity : es1) {
       cfgCnt += entity.getConfigs().size();
@@ -1467,7 +1475,7 @@ public class TestHBaseTimelineStorageApps {
         null),
         new TimelineEntityFilters.Builder().configFilters(confFilterList)
             .build(),
-        new TimelineDataToRetrieve(list, null, null, null));
+        new TimelineDataToRetrieve(list, null, null, null, null, null));
     assertEquals(1, entities.size());
     int cfgCnt = 0;
     for (TimelineEntity entity : entities) {
@@ -1500,7 +1508,8 @@ public class TestHBaseTimelineStorageApps {
         null),
         new TimelineEntityFilters.Builder().configFilters(confFilterList1)
             .build(),
-        new TimelineDataToRetrieve(confsToRetrieve, null, null, null));
+        new TimelineDataToRetrieve(confsToRetrieve, null, null, null, null,
+        null));
     assertEquals(2, entities.size());
     cfgCnt = 0;
     for (TimelineEntity entity : entities) {
@@ -1532,7 +1541,7 @@ public class TestHBaseTimelineStorageApps {
         new TimelineEntityFilters.Builder().metricFilters(metricFilterList)
             .build(),
         new TimelineDataToRetrieve(null, null, EnumSet.of(Field.METRICS),
-        null));
+        null, null, null));
     assertEquals(2, entities.size());
     int metricCnt = 0;
     for (TimelineEntity entity : entities) {
@@ -1546,7 +1555,8 @@ public class TestHBaseTimelineStorageApps {
         null),
         new TimelineEntityFilters.Builder().metricFilters(metricFilterList)
             .build(),
-        new TimelineDataToRetrieve(null, null, EnumSet.of(Field.ALL), null));
+        new TimelineDataToRetrieve(null, null, EnumSet.of(Field.ALL), null,
+        null, null));
     assertEquals(2, entities.size());
     metricCnt = 0;
     for (TimelineEntity entity : entities) {
@@ -1566,7 +1576,7 @@ public class TestHBaseTimelineStorageApps {
         new TimelineEntityFilters.Builder().metricFilters(metricFilterList1)
             .build(),
         new TimelineDataToRetrieve(null, null, EnumSet.of(Field.METRICS),
-        null));
+        null, null, null));
     assertEquals(1, entities.size());
     metricCnt = 0;
     for (TimelineEntity entity : entities) {
@@ -1586,7 +1596,7 @@ public class TestHBaseTimelineStorageApps {
         new TimelineEntityFilters.Builder().metricFilters(metricFilterList2)
             .build(),
         new TimelineDataToRetrieve(null, null, EnumSet.of(Field.METRICS),
-        null));
+        null, null, null));
     assertEquals(0, entities.size());
 
     TimelineFilterList metricFilterList3 = new TimelineFilterList(
@@ -1599,7 +1609,7 @@ public class TestHBaseTimelineStorageApps {
         new TimelineEntityFilters.Builder().metricFilters(metricFilterList3)
             .build(),
         new TimelineDataToRetrieve(null, null, EnumSet.of(Field.METRICS),
-        null));
+        null, null, null));
     assertEquals(0, entities.size());
 
     TimelineFilterList metricFilterList4 = new TimelineFilterList(
@@ -1612,7 +1622,7 @@ public class TestHBaseTimelineStorageApps {
         new TimelineEntityFilters.Builder().metricFilters(metricFilterList4)
             .build(),
         new TimelineDataToRetrieve(null, null, EnumSet.of(Field.METRICS),
-        null));
+        null, null, null));
     assertEquals(0, entities.size());
 
     TimelineFilterList metricFilterList5 = new TimelineFilterList(
@@ -1625,7 +1635,7 @@ public class TestHBaseTimelineStorageApps {
         new TimelineEntityFilters.Builder().metricFilters(metricFilterList5)
             .build(),
         new TimelineDataToRetrieve(null, null, EnumSet.of(Field.METRICS),
-        null));
+        null, null, null));
     assertEquals(3, entities.size());
   }
 
@@ -1638,7 +1648,7 @@ public class TestHBaseTimelineStorageApps {
         new TimelineReaderContext("cluster1", "user1", "some_flow_name",
         1002345678919L, "application_1111111111_2222",
         TimelineEntityType.YARN_APPLICATION.toString(), null),
-        new TimelineDataToRetrieve(null, list, null, null));
+        new TimelineDataToRetrieve(null, list, null, null, null, null));
     assertNotNull(e1);
     assertEquals(1, e1.getMetrics().size());
     Set<TimelineEntity> es1 = reader.getEntities(
@@ -1646,7 +1656,7 @@ public class TestHBaseTimelineStorageApps {
         1002345678919L, null, TimelineEntityType.YARN_APPLICATION.toString(),
         null),
         new TimelineEntityFilters.Builder().build(),
-        new TimelineDataToRetrieve(null, list, null, null));
+        new TimelineDataToRetrieve(null, list, null, null, null, null));
     int metricCnt = 0;
     for (TimelineEntity entity : es1) {
       metricCnt += entity.getMetrics().size();
@@ -1672,7 +1682,7 @@ public class TestHBaseTimelineStorageApps {
         null),
         new TimelineEntityFilters.Builder().metricFilters(metricFilterList)
             .build(),
-        new TimelineDataToRetrieve(null, list, null, null));
+        new TimelineDataToRetrieve(null, list, null, null, null, null));
     int metricCnt = 0;
     assertEquals(1, entities.size());
     for (TimelineEntity entity : entities) {
@@ -1698,7 +1708,8 @@ public class TestHBaseTimelineStorageApps {
         null),
         new TimelineEntityFilters.Builder().metricFilters(metricFilterList1)
             .build(),
-        new TimelineDataToRetrieve(null, metricsToRetrieve, null, null));
+        new TimelineDataToRetrieve(null, metricsToRetrieve, null, null, null,
+        null));
     metricCnt = 0;
     assertEquals(2, entities.size());
     for (TimelineEntity entity : entities) {
@@ -1715,8 +1726,8 @@ public class TestHBaseTimelineStorageApps {
         TimelineEntityType.YARN_APPLICATION.toString(), null),
         new TimelineEntityFilters.Builder().metricFilters(metricFilterList1)
             .build(),
-        new TimelineDataToRetrieve(null,
-        metricsToRetrieve, EnumSet.of(Field.METRICS), Integer.MAX_VALUE));
+        new TimelineDataToRetrieve(null, metricsToRetrieve,
+        EnumSet.of(Field.METRICS), Integer.MAX_VALUE, null, null));
     metricCnt = 0;
     int metricValCnt = 0;
     assertEquals(2, entities.size());
@@ -1733,6 +1744,86 @@ public class TestHBaseTimelineStorageApps {
   }
 
   @Test
+  public void testReadAppsMetricTimeRange() throws Exception {
+    Set<TimelineEntity> entities = reader.getEntities(
+        new TimelineReaderContext("cluster1", "user1", "some_flow_name",
+        1002345678919L, null, TimelineEntityType.YARN_APPLICATION.toString(),
+        null), new TimelineEntityFilters.Builder().build(),
+        new TimelineDataToRetrieve(null, null, EnumSet.of(Field.METRICS),
+        100, null, null));
+    assertEquals(3, entities.size());
+    int metricTimeSeriesCnt = 0;
+    int metricCnt = 0;
+    for (TimelineEntity entity : entities) {
+      metricCnt += entity.getMetrics().size();
+      for (TimelineMetric m : entity.getMetrics()) {
+        metricTimeSeriesCnt += m.getValues().size();
+      }
+    }
+    assertEquals(3, metricCnt);
+    assertEquals(13, metricTimeSeriesCnt);
+
+    entities = reader.getEntities(
+        new TimelineReaderContext("cluster1", "user1", "some_flow_name",
+        1002345678919L, null, TimelineEntityType.YARN_APPLICATION.toString(),
+        null), new TimelineEntityFilters.Builder().build(),
+        new TimelineDataToRetrieve(null, null, EnumSet.of(Field.METRICS),
+        100, CURRENT_TIME - 40000, CURRENT_TIME));
+    assertEquals(3, entities.size());
+    metricCnt = 0;
+    metricTimeSeriesCnt = 0;
+    for (TimelineEntity entity : entities) {
+      metricCnt += entity.getMetrics().size();
+      for (TimelineMetric m : entity.getMetrics()) {
+        for (Long ts : m.getValues().keySet()) {
+          assertTrue(ts >= CURRENT_TIME - 40000 && ts <= CURRENT_TIME);
+        }
+        metricTimeSeriesCnt += m.getValues().size();
+      }
+    }
+    assertEquals(3, metricCnt);
+    assertEquals(5, metricTimeSeriesCnt);
+
+    entities = reader.getEntities(
+        new TimelineReaderContext("cluster1", "user1", "some_flow_name",
+        1002345678919L, null, TimelineEntityType.YARN_APPLICATION.toString(),
+        null), new TimelineEntityFilters.Builder().build(),
+        new TimelineDataToRetrieve(null, null, EnumSet.of(Field.METRICS),
+        null, CURRENT_TIME - 40000, CURRENT_TIME));
+    assertEquals(3, entities.size());
+    metricCnt = 0;
+    metricTimeSeriesCnt = 0;
+    for (TimelineEntity entity : entities) {
+      metricCnt += entity.getMetrics().size();
+      for (TimelineMetric m : entity.getMetrics()) {
+        for (Long ts : m.getValues().keySet()) {
+          assertTrue(ts >= CURRENT_TIME - 40000 && ts <= CURRENT_TIME);
+        }
+        metricTimeSeriesCnt += m.getValues().size();
+      }
+    }
+    assertEquals(3, metricCnt);
+    assertEquals(3, metricTimeSeriesCnt);
+
+    TimelineEntity entity = reader.getEntity(new TimelineReaderContext(
+        "cluster1", "user1", "some_flow_name", 1002345678919L,
+        "application_1111111111_2222",
+        TimelineEntityType.YARN_APPLICATION.toString(), null),
+        new TimelineDataToRetrieve(null, null, EnumSet.of(Field.METRICS), 100,
+        CURRENT_TIME - 40000, CURRENT_TIME));
+    assertNotNull(entity);
+    assertEquals(2, entity.getMetrics().size());
+    metricTimeSeriesCnt = 0;
+    for (TimelineMetric m : entity.getMetrics()) {
+      for (Long ts : m.getValues().keySet()) {
+        assertTrue(ts >= CURRENT_TIME - 40000 && ts <= CURRENT_TIME);
+      }
+      metricTimeSeriesCnt += m.getValues().size();
+    }
+    assertEquals(3, metricTimeSeriesCnt);
+  }
+
+  @Test
   public void testReadAppsInfoFilters() throws Exception {
     TimelineFilterList list1 = new TimelineFilterList();
     list1.addFilter(new TimelineKeyValueFilter(
@@ -1751,7 +1842,8 @@ public class TestHBaseTimelineStorageApps {
         1002345678919L, null, TimelineEntityType.YARN_APPLICATION.toString(),
         null),
         new TimelineEntityFilters.Builder().infoFilters(infoFilterList).build(),
-        new TimelineDataToRetrieve(null, null, EnumSet.of(Field.INFO), null));
+        new TimelineDataToRetrieve(null, null, EnumSet.of(Field.INFO), null,
+        null, null));
     assertEquals(2, entities.size());
     int infoCnt = 0;
     for (TimelineEntity entity : entities) {
@@ -1768,7 +1860,8 @@ public class TestHBaseTimelineStorageApps {
         null),
         new TimelineEntityFilters.Builder().infoFilters(infoFilterList1)
             .build(),
-        new TimelineDataToRetrieve(null, null, EnumSet.of(Field.INFO), null));
+        new TimelineDataToRetrieve(null, null, EnumSet.of(Field.INFO), null,
+        null, null));
     assertEquals(1, entities.size());
     infoCnt = 0;
     for (TimelineEntity entity : entities) {
@@ -1787,7 +1880,8 @@ public class TestHBaseTimelineStorageApps {
         null),
         new TimelineEntityFilters.Builder().infoFilters(infoFilterList2)
             .build(),
-        new TimelineDataToRetrieve(null, null, EnumSet.of(Field.INFO), null));
+        new TimelineDataToRetrieve(null, null, EnumSet.of(Field.INFO), null,
+        null, null));
     assertEquals(0, entities.size());
 
     TimelineFilterList infoFilterList3 = new TimelineFilterList(
@@ -1799,7 +1893,8 @@ public class TestHBaseTimelineStorageApps {
         null),
         new TimelineEntityFilters.Builder().infoFilters(infoFilterList3)
             .build(),
-        new TimelineDataToRetrieve(null, null, EnumSet.of(Field.INFO), null));
+        new TimelineDataToRetrieve(null, null, EnumSet.of(Field.INFO), null,
+        null, null));
     assertEquals(0, entities.size());
 
     TimelineFilterList infoFilterList4 = new TimelineFilterList(
@@ -1811,7 +1906,8 @@ public class TestHBaseTimelineStorageApps {
         null),
         new TimelineEntityFilters.Builder().infoFilters(infoFilterList4)
             .build(),
-        new TimelineDataToRetrieve(null, null, EnumSet.of(Field.INFO), null));
+        new TimelineDataToRetrieve(null, null, EnumSet.of(Field.INFO), null,
+        null, null));
     assertEquals(0, entities.size());
 
     TimelineFilterList infoFilterList5 = new TimelineFilterList(
@@ -1823,7 +1919,8 @@ public class TestHBaseTimelineStorageApps {
         null),
         new TimelineEntityFilters.Builder().infoFilters(infoFilterList5)
             .build(),
-        new TimelineDataToRetrieve(null, null, EnumSet.of(Field.INFO), null));
+        new TimelineDataToRetrieve(null, null, EnumSet.of(Field.INFO), null,
+        null, null));
     assertEquals(3, entities.size());
   }
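
The recurring two extra trailing arguments in these updated calls are the new metrics time window: metric values with timestamps outside [metricsTimeBegin, metricsTimeEnd] are dropped from each returned time series. A minimal sketch, assuming a TimelineReader set up as in these tests, of a query that keeps only the last 40 seconds of metric values:

    long now = System.currentTimeMillis();
    Set<TimelineEntity> apps = reader.getEntities(
        new TimelineReaderContext("cluster1", "user1", "some_flow_name",
            1002345678919L, null,
            TimelineEntityType.YARN_APPLICATION.toString(), null),
        new TimelineEntityFilters.Builder().build(),
        new TimelineDataToRetrieve(
            null,                       // confsToRetrieve: no config filter
            null,                       // metricsToRetrieve: all metric ids
            EnumSet.of(Field.METRICS),  // fields to fetch
            100,                        // metricsLimit: values per metric
            now - 40000L,               // metricsTimeBegin (new argument)
            now));                      // metricsTimeEnd (new argument)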
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/70078e91/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase-tests/src/test/java/org/apache/hadoop/yarn/server/timelineservice/storage/TestHBaseTimelineStorageEntities.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase-tests/src/test/java/org/apache/hadoop/yarn/server/timelineservice/storage/TestHBaseTimelineStorageEntities.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase-tests/src/test/java/org/apache/hadoop/yarn/server/timelineservice/storage/TestHBaseTimelineStorageEntities.java
index 380d352..3756091 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase-tests/src/test/java/org/apache/hadoop/yarn/server/timelineservice/storage/TestHBaseTimelineStorageEntities.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase-tests/src/test/java/org/apache/hadoop/yarn/server/timelineservice/storage/TestHBaseTimelineStorageEntities.java
@@ -94,13 +94,14 @@ public class TestHBaseTimelineStorageEntities {
 
   private static HBaseTestingUtility util;
   private HBaseTimelineReaderImpl reader;
+  private static final long CURRENT_TIME = System.currentTimeMillis();
 
   @BeforeClass
   public static void setupBeforeClass() throws Exception {
     util = new HBaseTestingUtility();
     util.startMiniCluster();
     DataGeneratorForTest.createSchema(util.getConfiguration());
-    DataGeneratorForTest.loadEntities(util);
+    DataGeneratorForTest.loadEntities(util, CURRENT_TIME);
   }
 
   @Before
@@ -296,13 +297,13 @@ public class TestHBaseTimelineStorageEntities {
           new TimelineReaderContext(cluster, user, flow, runid, appName,
           entity.getType(), entity.getId()),
           new TimelineDataToRetrieve(null, null, EnumSet.of(Field.ALL),
-          Integer.MAX_VALUE));
+          Integer.MAX_VALUE, null, null));
       Set<TimelineEntity> es1 = reader.getEntities(
           new TimelineReaderContext(cluster, user, flow, runid, appName,
           entity.getType(), null),
           new TimelineEntityFilters.Builder().build(),
           new TimelineDataToRetrieve(null, null, EnumSet.of(Field.ALL),
-          Integer.MAX_VALUE));
+          Integer.MAX_VALUE, null, null));
       assertNotNull(e1);
       assertEquals(1, es1.size());
 
@@ -333,7 +334,8 @@ public class TestHBaseTimelineStorageEntities {
 
       e1 = reader.getEntity(new TimelineReaderContext(cluster, user, flow,
           runid, appName, entity.getType(), entity.getId()),
-          new TimelineDataToRetrieve(null, null, EnumSet.of(Field.ALL), null));
+          new TimelineDataToRetrieve(null, null, EnumSet.of(Field.ALL), null,
+          null, null));
       assertNotNull(e1);
       assertEquals(id, e1.getId());
       assertEquals(type, e1.getType());
@@ -451,12 +453,14 @@ public class TestHBaseTimelineStorageEntities {
       TimelineEntity e1 = reader.getEntity(
           new TimelineReaderContext(cluster, user, flow, runid, appName,
           entity.getType(), entity.getId()),
-          new TimelineDataToRetrieve(null, null, EnumSet.of(Field.ALL), null));
+          new TimelineDataToRetrieve(null, null, EnumSet.of(Field.ALL), null,
+          null, null));
       Set<TimelineEntity> es1 = reader.getEntities(
           new TimelineReaderContext(cluster, user, flow, runid, appName,
           entity.getType(), null),
           new TimelineEntityFilters.Builder().build(),
-          new TimelineDataToRetrieve(null, null, EnumSet.of(Field.ALL), null));
+          new TimelineDataToRetrieve(null, null, EnumSet.of(Field.ALL), null,
+          null, null));
       assertNotNull(e1);
       assertEquals(1, es1.size());
 
@@ -517,7 +521,8 @@ public class TestHBaseTimelineStorageEntities {
       TimelineEntity e1 = reader.getEntity(
           new TimelineReaderContext(cluster, user, flow, runid, appName,
           entity.getType(), entity.getId()),
-          new TimelineDataToRetrieve(null, null, EnumSet.of(Field.ALL), null));
+          new TimelineDataToRetrieve(null, null, EnumSet.of(Field.ALL), null,
+          null, null));
       assertNotNull(e1);
       // check the events
       NavigableSet<TimelineEvent> events = e1.getEvents();
@@ -546,7 +551,8 @@ public class TestHBaseTimelineStorageEntities {
     TimelineEntity entity = reader.getEntity(
         new TimelineReaderContext("cluster1", "user1", "some_flow_name",
         1002345678919L, "application_1231111111_1111", "world", "hello"),
-        new TimelineDataToRetrieve(null, null, EnumSet.of(Field.ALL), null));
+        new TimelineDataToRetrieve(null, null, EnumSet.of(Field.ALL), null,
+        null, null));
     assertNotNull(entity);
     assertEquals(3, entity.getConfigs().size());
     assertEquals(1, entity.getIsRelatedToEntities().size());
@@ -554,7 +560,8 @@ public class TestHBaseTimelineStorageEntities {
         new TimelineReaderContext("cluster1", "user1", "some_flow_name",
         1002345678919L, "application_1231111111_1111", "world",
         null), new TimelineEntityFilters.Builder().build(),
-        new TimelineDataToRetrieve(null, null, EnumSet.of(Field.ALL), null));
+        new TimelineDataToRetrieve(null, null, EnumSet.of(Field.ALL), null,
+        null, null));
     assertEquals(3, entities.size());
     int cfgCnt = 0;
     int metricCnt = 0;
@@ -681,7 +688,8 @@ public class TestHBaseTimelineStorageEntities {
         new TimelineReaderContext("cluster1", "user1", "some_flow_name",
         1002345678919L, "application_1231111111_1111", "world", null),
         new TimelineEntityFilters.Builder().eventFilters(ef).build(),
-        new TimelineDataToRetrieve(null, null, EnumSet.of(Field.ALL), null));
+        new TimelineDataToRetrieve(null, null, EnumSet.of(Field.ALL), null,
+        null, null));
     assertEquals(1, entities.size());
     int eventCnt = 0;
     for (TimelineEntity timelineEntity : entities) {
@@ -801,7 +809,8 @@ public class TestHBaseTimelineStorageEntities {
         new TimelineReaderContext("cluster1", "user1", "some_flow_name",
         1002345678919L, "application_1231111111_1111", "world", null),
         new TimelineEntityFilters.Builder().isRelatedTo(irt).build(),
-        new TimelineDataToRetrieve(null, null, EnumSet.of(Field.ALL), null));
+        new TimelineDataToRetrieve(null, null, EnumSet.of(Field.ALL), null,
+        null, null));
     assertEquals(2, entities.size());
     int isRelatedToCnt = 0;
     for (TimelineEntity timelineEntity : entities) {
@@ -943,7 +952,8 @@ public class TestHBaseTimelineStorageEntities {
         new TimelineReaderContext("cluster1", "user1", "some_flow_name",
         1002345678919L, "application_1231111111_1111", "world", null),
         new TimelineEntityFilters.Builder().relatesTo(rt).build(),
-        new TimelineDataToRetrieve(null, null, EnumSet.of(Field.ALL), null));
+        new TimelineDataToRetrieve(null, null, EnumSet.of(Field.ALL), null,
+        null, null));
     assertEquals(2, entities.size());
     int relatesToCnt = 0;
     for (TimelineEntity timelineEntity : entities) {
@@ -1138,7 +1148,7 @@ public class TestHBaseTimelineStorageEntities {
         new TimelineReaderContext("cluster1", "user1", "some_flow_name",
         1002345678919L, "application_1231111111_1111", "world", "hello"),
         new TimelineDataToRetrieve(
-        null, null, EnumSet.of(Field.INFO, Field.CONFIGS), null));
+        null, null, EnumSet.of(Field.INFO, Field.CONFIGS), null, null, null));
     assertNotNull(e1);
     assertEquals(3, e1.getConfigs().size());
     assertEquals(0, e1.getIsRelatedToEntities().size());
@@ -1146,8 +1156,8 @@ public class TestHBaseTimelineStorageEntities {
         new TimelineReaderContext("cluster1", "user1", "some_flow_name",
         1002345678919L, "application_1231111111_1111", "world", null),
         new TimelineEntityFilters.Builder().build(),
-        new TimelineDataToRetrieve(
-        null, null, EnumSet.of(Field.IS_RELATED_TO, Field.METRICS), null));
+        new TimelineDataToRetrieve(null, null, EnumSet.of(Field.IS_RELATED_TO,
+        Field.METRICS), null, null, null));
     assertEquals(3, es1.size());
     int metricsCnt = 0;
     int isRelatedToCnt = 0;
@@ -1170,14 +1180,14 @@ public class TestHBaseTimelineStorageEntities {
     TimelineEntity e1 = reader.getEntity(
         new TimelineReaderContext("cluster1", "user1", "some_flow_name",
         1002345678919L, "application_1231111111_1111", "world", "hello"),
-        new TimelineDataToRetrieve(list, null, null, null));
+        new TimelineDataToRetrieve(list, null, null, null, null, null));
     assertNotNull(e1);
     assertEquals(1, e1.getConfigs().size());
     Set<TimelineEntity> es1 = reader.getEntities(
         new TimelineReaderContext("cluster1", "user1", "some_flow_name",
         1002345678919L, "application_1231111111_1111", "world", null),
         new TimelineEntityFilters.Builder().build(),
-        new TimelineDataToRetrieve(list, null, null, null));
+        new TimelineDataToRetrieve(list, null, null, null, null, null));
     int cfgCnt = 0;
     for (TimelineEntity entity : es1) {
       cfgCnt += entity.getConfigs().size();
@@ -1209,7 +1219,7 @@ public class TestHBaseTimelineStorageEntities {
         new TimelineEntityFilters.Builder().configFilters(confFilterList)
             .build(),
         new TimelineDataToRetrieve(null, null, EnumSet.of(Field.CONFIGS),
-        null));
+        null, null, null));
     assertEquals(2, entities.size());
     int cfgCnt = 0;
     for (TimelineEntity entity : entities) {
@@ -1222,7 +1232,8 @@ public class TestHBaseTimelineStorageEntities {
         1002345678919L, "application_1231111111_1111", "world", null),
         new TimelineEntityFilters.Builder().configFilters(confFilterList)
             .build(),
-        new TimelineDataToRetrieve(null, null, EnumSet.of(Field.ALL), null));
+        new TimelineDataToRetrieve(null, null, EnumSet.of(Field.ALL), null,
+        null, null));
     assertEquals(2, entities.size());
     cfgCnt = 0;
     for (TimelineEntity entity : entities) {
@@ -1239,7 +1250,7 @@ public class TestHBaseTimelineStorageEntities {
         new TimelineEntityFilters.Builder().configFilters(confFilterList1)
             .build(),
         new TimelineDataToRetrieve(null, null, EnumSet.of(Field.CONFIGS),
-        null));
+        null, null, null));
     assertEquals(1, entities.size());
     cfgCnt = 0;
     for (TimelineEntity entity : entities) {
@@ -1258,7 +1269,7 @@ public class TestHBaseTimelineStorageEntities {
         new TimelineEntityFilters.Builder().configFilters(confFilterList2)
             .build(),
         new TimelineDataToRetrieve(null, null, EnumSet.of(Field.CONFIGS),
-        null));
+        null, null, null));
     assertEquals(0, entities.size());
 
     TimelineFilterList confFilterList3 = new TimelineFilterList(
@@ -1270,7 +1281,7 @@ public class TestHBaseTimelineStorageEntities {
         new TimelineEntityFilters.Builder().configFilters(confFilterList3)
             .build(),
         new TimelineDataToRetrieve(null, null, EnumSet.of(Field.CONFIGS),
-        null));
+        null, null, null));
     assertEquals(0, entities.size());
 
     TimelineFilterList confFilterList4 = new TimelineFilterList(
@@ -1282,7 +1293,7 @@ public class TestHBaseTimelineStorageEntities {
         new TimelineEntityFilters.Builder().configFilters(confFilterList4)
             .build(),
         new TimelineDataToRetrieve(null, null, EnumSet.of(Field.CONFIGS),
-            null));
+            null, null, null));
     assertEquals(0, entities.size());
 
     TimelineFilterList confFilterList5 = new TimelineFilterList(
@@ -1294,7 +1305,7 @@ public class TestHBaseTimelineStorageEntities {
         new TimelineEntityFilters.Builder().configFilters(confFilterList5)
             .build(),
         new TimelineDataToRetrieve(null, null, EnumSet.of(Field.CONFIGS),
-        null));
+        null, null, null));
     assertEquals(3, entities.size());
   }
 
@@ -1311,7 +1322,7 @@ public class TestHBaseTimelineStorageEntities {
         1002345678919L, "application_1231111111_1111", "world", null),
         new TimelineEntityFilters.Builder().configFilters(confFilterList)
             .build(),
-        new TimelineDataToRetrieve(list, null, null, null));
+        new TimelineDataToRetrieve(list, null, null, null, null, null));
     assertEquals(1, entities.size());
     int cfgCnt = 0;
     for (TimelineEntity entity : entities) {
@@ -1342,7 +1353,8 @@ public class TestHBaseTimelineStorageEntities {
         1002345678919L, "application_1231111111_1111", "world", null),
         new TimelineEntityFilters.Builder().configFilters(confFilterList1)
             .build(),
-        new TimelineDataToRetrieve(confsToRetrieve, null, null, null));
+        new TimelineDataToRetrieve(confsToRetrieve, null, null, null, null,
+        null));
     assertEquals(2, entities.size());
     cfgCnt = 0;
     for (TimelineEntity entity : entities) {
@@ -1363,14 +1375,14 @@ public class TestHBaseTimelineStorageEntities {
     TimelineEntity e1 = reader.getEntity(
         new TimelineReaderContext("cluster1", "user1", "some_flow_name",
         1002345678919L, "application_1231111111_1111", "world", "hello"),
-        new TimelineDataToRetrieve(null, list, null, null));
+        new TimelineDataToRetrieve(null, list, null, null, null, null));
     assertNotNull(e1);
     assertEquals(1, e1.getMetrics().size());
     Set<TimelineEntity> es1 = reader.getEntities(
         new TimelineReaderContext("cluster1", "user1", "some_flow_name",
         1002345678919L, "application_1231111111_1111", "world", null),
         new TimelineEntityFilters.Builder().build(),
-        new TimelineDataToRetrieve(null, list, null, null));
+        new TimelineDataToRetrieve(null, list, null, null, null, null));
     int metricCnt = 0;
     for (TimelineEntity entity : es1) {
       metricCnt += entity.getMetrics().size();
@@ -1383,6 +1395,63 @@ public class TestHBaseTimelineStorageEntities {
   }
 
   @Test
+  public void testReadEntitiesMetricTimeRange() throws Exception {
+    Set<TimelineEntity> entities = reader.getEntities(
+        new TimelineReaderContext("cluster1", "user1", "some_flow_name",
+        1002345678919L, "application_1231111111_1111", "world", null),
+        new TimelineEntityFilters.Builder().build(),
+        new TimelineDataToRetrieve(null, null, EnumSet.of(Field.METRICS),
+        100, null, null));
+    assertEquals(3, entities.size());
+    int metricTimeSeriesCnt = 0;
+    int metricCnt = 0;
+    for (TimelineEntity entity : entities) {
+      metricCnt += entity.getMetrics().size();
+      for (TimelineMetric m : entity.getMetrics()) {
+        metricTimeSeriesCnt += m.getValues().size();
+      }
+    }
+    assertEquals(3, metricCnt);
+    assertEquals(13, metricTimeSeriesCnt);
+
+    entities = reader.getEntities(new TimelineReaderContext("cluster1", "user1",
+        "some_flow_name", 1002345678919L, "application_1231111111_1111",
+        "world", null), new TimelineEntityFilters.Builder().build(),
+        new TimelineDataToRetrieve(null, null, EnumSet.of(Field.METRICS),
+        100, CURRENT_TIME - 40000, CURRENT_TIME));
+    assertEquals(3, entities.size());
+    metricCnt = 0;
+    metricTimeSeriesCnt = 0;
+    for (TimelineEntity entity : entities) {
+      metricCnt += entity.getMetrics().size();
+      for (TimelineMetric m : entity.getMetrics()) {
+        for (Long ts : m.getValues().keySet()) {
+          assertTrue(ts >= CURRENT_TIME - 40000 && ts <= CURRENT_TIME);
+        }
+        metricTimeSeriesCnt += m.getValues().size();
+      }
+    }
+    assertEquals(3, metricCnt);
+    assertEquals(5, metricTimeSeriesCnt);
+
+    TimelineEntity entity = reader.getEntity(new TimelineReaderContext(
+        "cluster1", "user1", "some_flow_name", 1002345678919L,
+        "application_1231111111_1111", "world", "hello"),
+        new TimelineDataToRetrieve(null, null, EnumSet.of(Field.METRICS), 100,
+        CURRENT_TIME - 40000, CURRENT_TIME));
+    assertNotNull(entity);
+    assertEquals(2, entity.getMetrics().size());
+    metricTimeSeriesCnt = 0;
+    for (TimelineMetric m : entity.getMetrics()) {
+      for (Long ts : m.getValues().keySet()) {
+        assertTrue(ts >= CURRENT_TIME - 40000 && ts <= CURRENT_TIME);
+      }
+      metricTimeSeriesCnt += m.getValues().size();
+    }
+    assertEquals(3, metricTimeSeriesCnt);
+  }
+
+  @Test
   public void testReadEntitiesMetricFilters() throws Exception {
     TimelineFilterList list1 = new TimelineFilterList();
     list1.addFilter(new TimelineCompareFilter(
@@ -1400,7 +1469,7 @@ public class TestHBaseTimelineStorageEntities {
         new TimelineEntityFilters.Builder().metricFilters(metricFilterList)
             .build(),
         new TimelineDataToRetrieve(null, null, EnumSet.of(Field.METRICS),
-        null));
+        null, null, null));
     assertEquals(2, entities.size());
     int metricCnt = 0;
     for (TimelineEntity entity : entities) {
@@ -1413,7 +1482,8 @@ public class TestHBaseTimelineStorageEntities {
         1002345678919L, "application_1231111111_1111", "world", null),
         new TimelineEntityFilters.Builder().metricFilters(metricFilterList)
             .build(),
-        new TimelineDataToRetrieve(null, null, EnumSet.of(Field.ALL), null));
+        new TimelineDataToRetrieve(null, null, EnumSet.of(Field.ALL), null,
+        null, null));
     assertEquals(2, entities.size());
     metricCnt = 0;
     for (TimelineEntity entity : entities) {
@@ -1432,7 +1502,7 @@ public class TestHBaseTimelineStorageEntities {
         new TimelineEntityFilters.Builder().metricFilters(metricFilterList1)
             .build(),
         new TimelineDataToRetrieve(null, null, EnumSet.of(Field.METRICS),
-        null));
+        null, null, null));
     assertEquals(1, entities.size());
     metricCnt = 0;
     for (TimelineEntity entity : entities) {
@@ -1451,7 +1521,7 @@ public class TestHBaseTimelineStorageEntities {
         new TimelineEntityFilters.Builder().metricFilters(metricFilterList2)
             .build(),
         new TimelineDataToRetrieve(null, null, EnumSet.of(Field.METRICS),
-        null));
+        null, null, null));
     assertEquals(0, entities.size());
 
     TimelineFilterList metricFilterList3 = new TimelineFilterList(
@@ -1463,7 +1533,7 @@ public class TestHBaseTimelineStorageEntities {
         new TimelineEntityFilters.Builder().metricFilters(metricFilterList3)
             .build(),
         new TimelineDataToRetrieve(null, null, EnumSet.of(Field.METRICS),
-        null));
+        null, null, null));
     assertEquals(0, entities.size());
 
     TimelineFilterList metricFilterList4 = new TimelineFilterList(
@@ -1475,7 +1545,7 @@ public class TestHBaseTimelineStorageEntities {
         new TimelineEntityFilters.Builder().metricFilters(metricFilterList4)
             .build(),
         new TimelineDataToRetrieve(null, null, EnumSet.of(Field.METRICS),
-        null));
+        null, null, null));
     assertEquals(0, entities.size());
 
     TimelineFilterList metricFilterList5 = new TimelineFilterList(
@@ -1487,7 +1557,7 @@ public class TestHBaseTimelineStorageEntities {
         new TimelineEntityFilters.Builder().metricFilters(metricFilterList5)
             .build(),
         new TimelineDataToRetrieve(null, null, EnumSet.of(Field.METRICS),
-        null));
+        null, null, null));
     assertEquals(3, entities.size());
   }
 
@@ -1504,7 +1574,7 @@ public class TestHBaseTimelineStorageEntities {
         1002345678919L, "application_1231111111_1111", "world", null),
         new TimelineEntityFilters.Builder().metricFilters(metricFilterList)
             .build(),
-        new TimelineDataToRetrieve(null, list, null, null));
+        new TimelineDataToRetrieve(null, list, null, null, null, null));
     assertEquals(1, entities.size());
     int metricCnt = 0;
     for (TimelineEntity entity : entities) {
@@ -1534,7 +1604,7 @@ public class TestHBaseTimelineStorageEntities {
         new TimelineEntityFilters.Builder().metricFilters(metricFilterList1)
             .build(),
         new TimelineDataToRetrieve(
-        null, metricsToRetrieve, EnumSet.of(Field.METRICS), null));
+        null, metricsToRetrieve, EnumSet.of(Field.METRICS), null, null, null));
     assertEquals(2, entities.size());
     metricCnt = 0;
     for (TimelineEntity entity : entities) {
@@ -1553,8 +1623,8 @@ public class TestHBaseTimelineStorageEntities {
         "world", null),
         new TimelineEntityFilters.Builder().metricFilters(metricFilterList1)
             .build(),
-        new TimelineDataToRetrieve(null,
-        metricsToRetrieve, EnumSet.of(Field.METRICS), Integer.MAX_VALUE));
+        new TimelineDataToRetrieve(null, metricsToRetrieve,
+        EnumSet.of(Field.METRICS), Integer.MAX_VALUE, null, null));
     assertEquals(2, entities.size());
     metricCnt = 0;
     int metricValCnt = 0;
@@ -1588,7 +1658,8 @@ public class TestHBaseTimelineStorageEntities {
         new TimelineReaderContext("cluster1", "user1", "some_flow_name",
         1002345678919L, "application_1231111111_1111", "world", null),
         new TimelineEntityFilters.Builder().infoFilters(infoFilterList).build(),
-        new TimelineDataToRetrieve(null, null, EnumSet.of(Field.INFO), null));
+        new TimelineDataToRetrieve(null, null, EnumSet.of(Field.INFO), null,
+        null, null));
     assertEquals(2, entities.size());
     int infoCnt = 0;
     for (TimelineEntity entity : entities) {
@@ -1604,7 +1675,8 @@ public class TestHBaseTimelineStorageEntities {
         1002345678919L, "application_1231111111_1111", "world", null),
         new TimelineEntityFilters.Builder().infoFilters(infoFilterList1)
             .build(),
-        new TimelineDataToRetrieve(null, null, EnumSet.of(Field.INFO), null));
+        new TimelineDataToRetrieve(null, null, EnumSet.of(Field.INFO), null,
+        null, null));
     assertEquals(1, entities.size());
     infoCnt = 0;
     for (TimelineEntity entity : entities) {
@@ -1622,7 +1694,8 @@ public class TestHBaseTimelineStorageEntities {
         1002345678919L, "application_1231111111_1111", "world", null),
         new TimelineEntityFilters.Builder().infoFilters(infoFilterList2)
             .build(),
-        new TimelineDataToRetrieve(null, null, EnumSet.of(Field.INFO), null));
+        new TimelineDataToRetrieve(null, null, EnumSet.of(Field.INFO), null,
+        null, null));
     assertEquals(0, entities.size());
 
     TimelineFilterList infoFilterList3 = new TimelineFilterList(
@@ -1633,7 +1706,8 @@ public class TestHBaseTimelineStorageEntities {
         1002345678919L, "application_1231111111_1111", "world", null),
         new TimelineEntityFilters.Builder().infoFilters(infoFilterList3)
             .build(),
-        new TimelineDataToRetrieve(null, null, EnumSet.of(Field.INFO), null));
+        new TimelineDataToRetrieve(null, null, EnumSet.of(Field.INFO), null,
+        null, null));
     assertEquals(0, entities.size());
 
     TimelineFilterList infoFilterList4 = new TimelineFilterList(
@@ -1644,7 +1718,8 @@ public class TestHBaseTimelineStorageEntities {
         1002345678919L, "application_1231111111_1111", "world", null),
         new TimelineEntityFilters.Builder().infoFilters(infoFilterList4)
             .build(),
-        new TimelineDataToRetrieve(null, null, EnumSet.of(Field.INFO), null));
+        new TimelineDataToRetrieve(null, null, EnumSet.of(Field.INFO), null,
+        null, null));
     assertEquals(0, entities.size());
 
     TimelineFilterList infoFilterList5 = new TimelineFilterList(
@@ -1655,7 +1730,8 @@ public class TestHBaseTimelineStorageEntities {
         1002345678919L, "application_1231111111_1111", "world", null),
         new TimelineEntityFilters.Builder().infoFilters(infoFilterList5)
             .build(),
-        new TimelineDataToRetrieve(null, null, EnumSet.of(Field.INFO), null));
+        new TimelineDataToRetrieve(null, null, EnumSet.of(Field.INFO), null,
+        null, null));
     assertEquals(3, entities.size());
   }
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/70078e91/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase-tests/src/test/java/org/apache/hadoop/yarn/server/timelineservice/storage/flow/TestHBaseStorageFlowRun.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase-tests/src/test/java/org/apache/hadoop/yarn/server/timelineservice/storage/flow/TestHBaseStorageFlowRun.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase-tests/src/test/java/org/apache/hadoop/yarn/server/timelineservice/storage/flow/TestHBaseStorageFlowRun.java
index e1309e7..acfdc4d 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase-tests/src/test/java/org/apache/hadoop/yarn/server/timelineservice/storage/flow/TestHBaseStorageFlowRun.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase-tests/src/test/java/org/apache/hadoop/yarn/server/timelineservice/storage/flow/TestHBaseStorageFlowRun.java
@@ -584,7 +584,8 @@ public class TestHBaseStorageFlowRun {
       TimelineEntity entity = hbr.getEntity(
           new TimelineReaderContext(cluster, user, flow, 1002345678919L, null,
           TimelineEntityType.YARN_FLOW_RUN.toString(), null),
-          new TimelineDataToRetrieve(null, metricsToRetrieve, null, null));
+          new TimelineDataToRetrieve(null, metricsToRetrieve, null, null, null,
+          null));
       assertTrue(TimelineEntityType.YARN_FLOW_RUN.matches(entity.getType()));
       Set<TimelineMetric> metrics = entity.getMetrics();
       assertEquals(1, metrics.size());
@@ -609,7 +610,8 @@ public class TestHBaseStorageFlowRun {
           new TimelineReaderContext(cluster, user, flow, null, null,
           TimelineEntityType.YARN_FLOW_RUN.toString(), null),
           new TimelineEntityFilters.Builder().build(),
-          new TimelineDataToRetrieve(null, metricsToRetrieve, null, null));
+          new TimelineDataToRetrieve(null, metricsToRetrieve, null, null, null,
+          null));
       assertEquals(2, entities.size());
       int metricCnt = 0;
       for (TimelineEntity timelineEntity : entities) {
@@ -681,7 +683,7 @@ public class TestHBaseStorageFlowRun {
           TimelineEntityType.YARN_FLOW_RUN.toString(), null),
           new TimelineEntityFilters.Builder().build(),
           new TimelineDataToRetrieve(null, null,
-              EnumSet.of(Field.METRICS), null));
+              EnumSet.of(Field.METRICS), null, null, null));
       assertEquals(1, entities.size());
       for (TimelineEntity timelineEntity : entities) {
         Set<TimelineMetric> timelineMetrics = timelineEntity.getMetrics();
@@ -948,7 +950,7 @@ public class TestHBaseStorageFlowRun {
           new TimelineEntityFilters.Builder().metricFilters(metricFilterList)
               .build(),
           new TimelineDataToRetrieve(null, null,
-          EnumSet.of(Field.METRICS), null));
+          EnumSet.of(Field.METRICS), null, null, null));
       assertEquals(2, entities.size());
       int metricCnt = 0;
       for (TimelineEntity entity : entities) {
@@ -966,7 +968,7 @@ public class TestHBaseStorageFlowRun {
           new TimelineEntityFilters.Builder().metricFilters(metricFilterList1)
               .build(),
           new TimelineDataToRetrieve(null, null,
-          EnumSet.of(Field.METRICS), null));
+          EnumSet.of(Field.METRICS), null, null, null));
       assertEquals(1, entities.size());
       metricCnt = 0;
       for (TimelineEntity entity : entities) {
@@ -983,7 +985,7 @@ public class TestHBaseStorageFlowRun {
           new TimelineEntityFilters.Builder().metricFilters(metricFilterList2)
               .build(),
           new TimelineDataToRetrieve(null, null,
-          EnumSet.of(Field.METRICS), null));
+          EnumSet.of(Field.METRICS), null, null, null));
       assertEquals(0, entities.size());
 
       TimelineFilterList metricFilterList3 = new TimelineFilterList(
@@ -994,7 +996,7 @@ public class TestHBaseStorageFlowRun {
           new TimelineEntityFilters.Builder().metricFilters(metricFilterList3)
               .build(),
           new TimelineDataToRetrieve(null, null,
-          EnumSet.of(Field.METRICS), null));
+          EnumSet.of(Field.METRICS), null, null, null));
       assertEquals(0, entities.size());
 
       TimelineFilterList list3 = new TimelineFilterList();
@@ -1016,7 +1018,7 @@ public class TestHBaseStorageFlowRun {
           new TimelineEntityFilters.Builder().metricFilters(metricFilterList4)
               .build(),
           new TimelineDataToRetrieve(null, metricsToRetrieve,
-          EnumSet.of(Field.ALL), null));
+          EnumSet.of(Field.ALL), null, null, null));
       assertEquals(2, entities.size());
       metricCnt = 0;
       for (TimelineEntity entity : entities) {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/70078e91/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/common/ColumnHelper.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/common/ColumnHelper.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/common/ColumnHelper.java
index 162f973..46e427e 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/common/ColumnHelper.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/common/ColumnHelper.java
@@ -192,7 +192,6 @@ public class ColumnHelper<T> {
 
       NavigableMap<byte[], NavigableMap<Long, byte[]>> columnCellMap =
           resultMap.get(columnFamilyBytes);
-
       // could be that there is no such column family.
       if (columnCellMap != null) {
         for (Entry<byte[], NavigableMap<Long, byte[]>> entry : columnCellMap

http://git-wip-us.apache.org/repos/asf/hadoop/blob/70078e91/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/common/HBaseTimelineStorageUtils.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/common/HBaseTimelineStorageUtils.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/common/HBaseTimelineStorageUtils.java
index feef6af..97e70b8 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/common/HBaseTimelineStorageUtils.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/common/HBaseTimelineStorageUtils.java
@@ -31,6 +31,7 @@ import org.apache.hadoop.hbase.HBaseConfiguration;
 import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.KeyValue;
 import org.apache.hadoop.hbase.Tag;
+import org.apache.hadoop.hbase.client.Query;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.yarn.api.records.ApplicationId;
 import org.apache.hadoop.yarn.conf.YarnConfiguration;
@@ -342,4 +343,20 @@ public final class HBaseTimelineStorageUtils {
     return (obj instanceof Short) || (obj instanceof Integer) ||
         (obj instanceof Long);
   }
+
+  public static void setMetricsTimeRange(Query query, byte[] metricsCf,
+      long tsBegin, long tsEnd) {
+    if (tsBegin != 0 || tsEnd != Long.MAX_VALUE) {
+      long supplementedTsBegin = tsBegin == 0 ? 0 :
+          TimestampGenerator.getSupplementedTimestamp(tsBegin, null);
+      long supplementedTsEnd =
+          (tsEnd == Long.MAX_VALUE) ? Long.MAX_VALUE :
+          TimestampGenerator.getSupplementedTimestamp(tsEnd + 1, null);
+      // Handle overflow by resetting time begin to 0 and time end to
+      // Long#MAX_VALUE, if required.
+      query.setColumnFamilyTimeRange(metricsCf,
+          ((supplementedTsBegin < 0) ? 0 : supplementedTsBegin),
+          ((supplementedTsEnd < 0) ? Long.MAX_VALUE : supplementedTsEnd));
+    }
+  }
 }
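
setMetricsTimeRange translates the caller's wall-clock bounds into cell-timestamp bounds before handing them to HBase. Because getSupplementedTimestamp scales the input, a large tsEnd can overflow long and come out negative; the guards above then fall back to the widest possible range instead of producing an inverted one. A standalone sketch of just that arithmetic (TS_MULTIPLIER is an assumed stand-in for TimestampGenerator's real factor):

    final class MetricsTimeRangeSketch {
      // Assumed stand-in for TimestampGenerator's internal scaling factor.
      private static final long TS_MULTIPLIER = 1_000_000L;

      private static long supplement(long ts) {
        return ts * TS_MULTIPLIER;  // may overflow to a negative long
      }

      static long[] clampedRange(long tsBegin, long tsEnd) {
        long begin = (tsBegin == 0) ? 0 : supplement(tsBegin);
        long end = (tsEnd == Long.MAX_VALUE)
            ? Long.MAX_VALUE : supplement(tsEnd + 1);
        // On overflow, fall back to the widest range rather than an
        // inverted or negative one.
        return new long[] {
            begin < 0 ? 0 : begin,
            end < 0 ? Long.MAX_VALUE : end};
      }
    }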

http://git-wip-us.apache.org/repos/asf/hadoop/blob/70078e91/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/reader/ApplicationEntityReader.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/reader/ApplicationEntityReader.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/reader/ApplicationEntityReader.java
index b4bb005..cda4510 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/reader/ApplicationEntityReader.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/reader/ApplicationEntityReader.java
@@ -24,6 +24,7 @@ import java.util.Set;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.client.Connection;
 import org.apache.hadoop.hbase.client.Get;
+import org.apache.hadoop.hbase.client.Query;
 import org.apache.hadoop.hbase.client.Result;
 import org.apache.hadoop.hbase.client.ResultScanner;
 import org.apache.hadoop.hbase.client.Scan;
@@ -315,6 +316,8 @@ class ApplicationEntityReader extends GenericEntityReader {
             context.getFlowName(), context.getFlowRunId(), context.getAppId());
     byte[] rowKey = applicationRowKey.getRowKey();
     Get get = new Get(rowKey);
+    // Set time range for metric values.
+    setMetricsTimeRange(get);
     get.setMaxVersions(getDataToRetrieve().getMetricsLimit());
     if (filterList != null && !filterList.getFilters().isEmpty()) {
       get.setFilter(filterList);
@@ -357,6 +360,14 @@ class ApplicationEntityReader extends GenericEntityReader {
     }
   }
 
+  private void setMetricsTimeRange(Query query) {
+    // Set time range for metric values.
+    HBaseTimelineStorageUtils.
+        setMetricsTimeRange(query, ApplicationColumnFamily.METRICS.getBytes(),
+            getDataToRetrieve().getMetricsTimeBegin(),
+            getDataToRetrieve().getMetricsTimeEnd());
+  }
+
   @Override
   protected ResultScanner getResults(Configuration hbaseConf,
       Connection conn, FilterList filterList) throws IOException {
@@ -405,6 +416,9 @@ class ApplicationEntityReader extends GenericEntityReader {
       newList.addFilter(filterList);
     }
     scan.setFilter(newList);
+
+    // Set time range for metric values.
+    setMetricsTimeRange(scan);
     scan.setMaxVersions(getDataToRetrieve().getMetricsLimit());
     return getTable().getResultScanner(hbaseConf, conn, scan);
   }
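
The reader applies the window on both of its paths: the Get built for a single application and the Scan built for a listing each receive a column-family time range on the metrics family before the max-versions cap, so metricsLimit counts cell versions inside the window. Stripped of the surrounding plumbing, the HBase calls amount to roughly this (metricsCf and the supplemented bounds are placeholders, computed as in HBaseTimelineStorageUtils above):

    Scan scan = new Scan();
    // Only cells in the metrics family whose HBase timestamps fall inside
    // the supplemented range are considered...
    scan.setColumnFamilyTimeRange(metricsCf, supplementedBegin, supplementedEnd);
    // ...and at most this many versions of each surviving cell are returned.
    scan.setMaxVersions(metricsLimit);

GenericEntityReader below applies the identical pattern against EntityColumnFamily.METRICS.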

http://git-wip-us.apache.org/repos/asf/hadoop/blob/70078e91/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/reader/GenericEntityReader.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/reader/GenericEntityReader.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/reader/GenericEntityReader.java
index 39013d9..6b740e2 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/reader/GenericEntityReader.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/reader/GenericEntityReader.java
@@ -26,6 +26,7 @@ import java.util.Set;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.client.Connection;
 import org.apache.hadoop.hbase.client.Get;
+import org.apache.hadoop.hbase.client.Query;
 import org.apache.hadoop.hbase.client.Result;
 import org.apache.hadoop.hbase.client.ResultScanner;
 import org.apache.hadoop.hbase.client.Scan;
@@ -434,8 +435,8 @@ class GenericEntityReader extends TimelineEntityReader {
           context.getUserId(), context.getFlowName(), context.getFlowRunId(),
           context.getAppId(), context.getEntityType(),
           context.getEntityIdPrefix(), context.getEntityId()).getRowKey();
-
       Get get = new Get(rowKey);
+      setMetricsTimeRange(get);
       get.setMaxVersions(getDataToRetrieve().getMetricsLimit());
       if (filterList != null && !filterList.getFilters().isEmpty()) {
         get.setFilter(filterList);
@@ -468,6 +469,14 @@ class GenericEntityReader extends TimelineEntityReader {
     return result;
   }
 
+  private void setMetricsTimeRange(Query query) {
+    // Set time range for metric values.
+    HBaseTimelineStorageUtils.
+        setMetricsTimeRange(query, EntityColumnFamily.METRICS.getBytes(),
+            getDataToRetrieve().getMetricsTimeBegin(),
+            getDataToRetrieve().getMetricsTimeEnd());
+  }
+
   @Override
   protected ResultScanner getResults(Configuration hbaseConf, Connection conn,
       FilterList filterList) throws IOException {
@@ -513,6 +522,7 @@ class GenericEntityReader extends TimelineEntityReader {
       // mode.
       filterList.addFilter(new PageFilter(getFilters().getLimit()));
     }
+    setMetricsTimeRange(scan);
     scan.setMaxVersions(getDataToRetrieve().getMetricsLimit());
     if (filterList != null && !filterList.getFilters().isEmpty()) {
       scan.setFilter(filterList);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/70078e91/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/reader/TimelineDataToRetrieve.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/reader/TimelineDataToRetrieve.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/reader/TimelineDataToRetrieve.java
index 325050a..8d09c00 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/reader/TimelineDataToRetrieve.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/reader/TimelineDataToRetrieve.java
@@ -57,6 +57,11 @@ import org.apache.hadoop.yarn.server.timelineservice.storage.TimelineReader.Fiel
  * metricsToRetrieve is specified, this limit defines an upper limit to the
  * number of metrics to return. This parameter is ignored if METRICS are not to
  * be fetched.</li>
+ * <li><b>metricsTimeBegin</b> - Metric values before this timestamp will not
+ * be retrieved. If null or {@literal <0}, defaults to 0.</li>
+ * <li><b>metricsTimeEnd</b> - Metric values after this timestamp will not
+ * be retrieved. If null or {@literal <0}, defaults to {@link Long#MAX_VALUE}.
+ * </li>
  * </ul>
  */
 @Private
@@ -66,6 +71,10 @@ public class TimelineDataToRetrieve {
   private TimelineFilterList metricsToRetrieve;
   private EnumSet<Field> fieldsToRetrieve;
   private Integer metricsLimit;
+  private Long metricsTimeBegin;
+  private Long metricsTimeEnd;
+  private static final long DEFAULT_METRICS_BEGIN_TIME = 0L;
+  private static final long DEFAULT_METRICS_END_TIME = Long.MAX_VALUE;
 
   /**
    * Default limit of number of metrics to return.
@@ -73,12 +82,12 @@ public class TimelineDataToRetrieve {
   public static final Integer DEFAULT_METRICS_LIMIT = 1;
 
   public TimelineDataToRetrieve() {
-    this(null, null, null, null);
+    this(null, null, null, null, null, null);
   }
 
   public TimelineDataToRetrieve(TimelineFilterList confs,
       TimelineFilterList metrics, EnumSet<Field> fields,
-      Integer limitForMetrics) {
+      Integer limitForMetrics, Long metricTimeBegin, Long metricTimeEnd) {
     this.confsToRetrieve = confs;
     this.metricsToRetrieve = metrics;
     this.fieldsToRetrieve = fields;
@@ -91,6 +100,20 @@ public class TimelineDataToRetrieve {
     if (this.fieldsToRetrieve == null) {
       this.fieldsToRetrieve = EnumSet.noneOf(Field.class);
     }
+    if (metricTimeBegin == null || metricTimeBegin < 0) {
+      this.metricsTimeBegin = DEFAULT_METRICS_BEGIN_TIME;
+    } else {
+      this.metricsTimeBegin = metricTimeBegin;
+    }
+    if (metricTimeEnd == null || metricTimeEnd < 0) {
+      this.metricsTimeEnd = DEFAULT_METRICS_END_TIME;
+    } else {
+      this.metricsTimeEnd = metricTimeEnd;
+    }
+    if (this.metricsTimeBegin > this.metricsTimeEnd) {
+      throw new IllegalArgumentException("metricstimebegin should not be " +
+          "greater than metricstimeend");
+    }
   }
 
   public TimelineFilterList getConfsToRetrieve() {
@@ -137,6 +160,14 @@ public class TimelineDataToRetrieve {
     return metricsLimit;
   }
 
+  public Long getMetricsTimeBegin() {
+    return this.metricsTimeBegin;
+  }
+
+  public Long getMetricsTimeEnd() {
+    return metricsTimeEnd;
+  }
+
   public void setMetricsLimit(Integer limit) {
     if (limit == null || limit < 1) {
       this.metricsLimit = DEFAULT_METRICS_LIMIT;


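To make the new time-window knobs concrete, here is a hedged example of constructing the retrieval spec; the constructor signature comes from the diff above, while the literal values are purely illustrative:

import java.util.EnumSet;
import org.apache.hadoop.yarn.server.timelineservice.storage.TimelineReader.Field;

// Fetch up to 100 cell versions per metric, but only the values whose
// timestamps fall inside the requested window.
TimelineDataToRetrieve dataToRetrieve = new TimelineDataToRetrieve(
    null,                        // confsToRetrieve
    null,                        // metricsToRetrieve
    EnumSet.of(Field.METRICS),   // fieldsToRetrieve
    100,                         // metricsLimit
    1425016501000L,              // metricsTimeBegin
    1425016502000L);             // metricsTimeEnd
// Null or negative bounds fall back to [0, Long.MAX_VALUE]; an inverted
// window (begin > end) throws IllegalArgumentException, per the
// constructor check above.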


[04/50] [abbrv] hadoop git commit: YARN-6253. FlowActivityColumnPrefix.store(byte[] rowKey, ...) drops timestamp. Contributed by Haibo Chen.

Posted by st...@apache.org.
YARN-6253. FlowActivityColumnPrefix.store(byte[] rowKey, ...) drops timestamp. Contributed by Haibo Chen.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/cf30b3b9
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/cf30b3b9
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/cf30b3b9

Branch: refs/heads/HADOOP-13345
Commit: cf30b3b9144b559a5f6fdfcf98bf0a15ebd17474
Parents: ff43b8d
Author: Sangjin Lee <sj...@apache.org>
Authored: Tue Feb 28 16:10:25 2017 -0800
Committer: Varun Saxena <va...@apache.org>
Committed: Wed Aug 30 11:29:52 2017 +0530

----------------------------------------------------------------------
 .../timelineservice/storage/flow/FlowActivityColumnPrefix.java     | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/cf30b3b9/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/flow/FlowActivityColumnPrefix.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/flow/FlowActivityColumnPrefix.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/flow/FlowActivityColumnPrefix.java
index 439e0c8..5e7a5d6 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/flow/FlowActivityColumnPrefix.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/flow/FlowActivityColumnPrefix.java
@@ -271,7 +271,7 @@ public enum FlowActivityColumnPrefix
     byte[] columnQualifier = getColumnPrefixBytes(qualifier);
     Attribute[] combinedAttributes =
         HBaseTimelineStorageUtils.combineAttributes(attributes, this.aggOp);
-    column.store(rowKey, tableMutator, columnQualifier, null, inputValue,
+    column.store(rowKey, tableMutator, columnQualifier, timestamp, inputValue,
         combinedAttributes);
   }
 }
\ No newline at end of file


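The one-argument fix above matters because of HBase cell-versioning semantics. A standalone illustration with a hypothetical row, family, and qualifier (standard HBase client API):

import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.util.Bytes;

byte[] row = Bytes.toBytes("flowActivityRow");
byte[] cf = Bytes.toBytes("i");
byte[] qualifier = Bytes.toBytes("r!run1");

// With explicit, distinct timestamps, both cell versions are retained
// (up to the column family's maxVersions setting).
Put first = new Put(row);
first.addColumn(cf, qualifier, 1488268225000L, Bytes.toBytes(40L));
Put second = new Put(row);
second.addColumn(cf, qualifier, 1488268226000L, Bytes.toBytes(50L));

// When the caller's timestamp is dropped and null flows through instead,
// HBase stamps each write with the region server clock, so two writes
// landing in the same millisecond collapse into a single cell and the
// earlier value is silently lost.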


[28/50] [abbrv] hadoop git commit: MAPREDUCE-6838. [ATSv2 Security] Add timeline delegation token received in allocate response to UGI. Contributed by Varun Saxena

Posted by st...@apache.org.
MAPREDUCE-6838. [ATSv2 Security] Add timeline delegation token received in allocate response to UGI. Contributed by Varun Saxena


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/08f40bcc
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/08f40bcc
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/08f40bcc

Branch: refs/heads/HADOOP-13345
Commit: 08f40bcc7f4174857bb1fc7c8eb1108d5caaafb3
Parents: bea3e4d
Author: Jian He <ji...@apache.org>
Authored: Mon Aug 21 22:08:07 2017 -0700
Committer: Varun Saxena <va...@apache.org>
Committed: Wed Aug 30 11:29:54 2017 +0530

----------------------------------------------------------------------
 .../v2/app/rm/RMContainerAllocator.java         |  17 +--
 .../v2/app/rm/TestRMContainerAllocator.java     | 137 +++++++++++++++++++
 .../hadoop/yarn/api/records/CollectorInfo.java  |   4 +
 .../api/async/impl/AMRMClientAsyncImpl.java     |  13 +-
 .../yarn/client/api/TimelineV2Client.java       |  11 +-
 .../client/api/impl/TimelineV2ClientImpl.java   |  80 ++++++++++-
 .../api/impl/TestTimelineClientV2Impl.java      |  56 +++++++-
 .../timelineservice/NMTimelinePublisher.java    |   3 +-
 .../TestTimelineServiceClientIntegration.java   |  13 +-
 .../security/TestTimelineAuthFilterForV2.java   |   3 +-
 10 files changed, 301 insertions(+), 36 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/08f40bcc/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/rm/RMContainerAllocator.java
----------------------------------------------------------------------
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/rm/RMContainerAllocator.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/rm/RMContainerAllocator.java
index 969ec4c..0dc7642 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/rm/RMContainerAllocator.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/rm/RMContainerAllocator.java
@@ -848,7 +848,8 @@ public class RMContainerAllocator extends RMContainerRequestor
       updateAMRMToken(response.getAMRMToken());
     }
 
-    List<ContainerStatus> finishedContainers = response.getCompletedContainersStatuses();
+    List<ContainerStatus> finishedContainers =
+        response.getCompletedContainersStatuses();
 
     // propagate preemption requests
     final PreemptionMessage preemptReq = response.getPreemptionMessage();
@@ -877,19 +878,13 @@ public class RMContainerAllocator extends RMContainerRequestor
 
     handleUpdatedNodes(response);
     handleJobPriorityChange(response);
-    // handle receiving the timeline collector address for this app
-    String collectorAddr = null;
-    if (response.getCollectorInfo() != null) {
-      collectorAddr = response.getCollectorInfo().getCollectorAddr();
-    }
-
+    // Handle receiving the timeline collector address and token for this app.
     MRAppMaster.RunningAppContext appContext =
         (MRAppMaster.RunningAppContext)this.getContext();
-    if (collectorAddr != null && !collectorAddr.isEmpty()
-        && appContext.getTimelineV2Client() != null) {
-      appContext.getTimelineV2Client().setTimelineServiceAddress(collectorAddr);
+    if (appContext.getTimelineV2Client() != null) {
+      appContext.getTimelineV2Client().
+          setTimelineCollectorInfo(response.getCollectorInfo());
     }
-
     for (ContainerStatus cont : finishedContainers) {
       processFinishedContainer(cont);
     }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/08f40bcc/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/rm/TestRMContainerAllocator.java
----------------------------------------------------------------------
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/rm/TestRMContainerAllocator.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/rm/TestRMContainerAllocator.java
index 6c51626..6c74a7a 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/rm/TestRMContainerAllocator.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/rm/TestRMContainerAllocator.java
@@ -18,6 +18,7 @@
 
 package org.apache.hadoop.mapreduce.v2.app.rm;
 
+import static org.junit.Assert.assertEquals;
 import static org.mockito.Matchers.any;
 import static org.mockito.Matchers.anyFloat;
 import static org.mockito.Matchers.anyInt;
@@ -27,6 +28,7 @@ import static org.mockito.Mockito.doReturn;
 import static org.mockito.Mockito.inOrder;
 import static org.mockito.Mockito.mock;
 import static org.mockito.Mockito.never;
+import static org.mockito.Mockito.spy;
 import static org.mockito.Mockito.times;
 import static org.mockito.Mockito.verify;
 import static org.mockito.Mockito.when;
@@ -99,6 +101,7 @@ import org.apache.hadoop.yarn.api.protocolrecords.RegisterApplicationMasterRespo
 import org.apache.hadoop.yarn.api.records.ApplicationAccessType;
 import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
 import org.apache.hadoop.yarn.api.records.ApplicationId;
+import org.apache.hadoop.yarn.api.records.CollectorInfo;
 import org.apache.hadoop.yarn.api.records.Container;
 import org.apache.hadoop.yarn.api.records.ContainerExitStatus;
 import org.apache.hadoop.yarn.api.records.ContainerId;
@@ -110,6 +113,7 @@ import org.apache.hadoop.yarn.api.records.NodeReport;
 import org.apache.hadoop.yarn.api.records.Priority;
 import org.apache.hadoop.yarn.api.records.Resource;
 import org.apache.hadoop.yarn.api.records.ResourceRequest;
+import org.apache.hadoop.yarn.client.api.TimelineV2Client;
 import org.apache.hadoop.yarn.conf.YarnConfiguration;
 import org.apache.hadoop.yarn.event.Dispatcher;
 import org.apache.hadoop.yarn.event.DrainDispatcher;
@@ -121,6 +125,7 @@ import org.apache.hadoop.yarn.factories.RecordFactory;
 import org.apache.hadoop.yarn.factory.providers.RecordFactoryProvider;
 import org.apache.hadoop.yarn.proto.YarnServiceProtos.SchedulerResourceTypes;
 import org.apache.hadoop.yarn.security.AMRMTokenIdentifier;
+import org.apache.hadoop.yarn.security.client.TimelineDelegationTokenIdentifier;
 import org.apache.hadoop.yarn.server.api.protocolrecords.NodeHeartbeatResponse;
 import org.apache.hadoop.yarn.server.api.records.NodeAction;
 import org.apache.hadoop.yarn.server.resourcemanager.MockNM;
@@ -137,9 +142,11 @@ import org.apache.hadoop.yarn.server.resourcemanager.scheduler.common.fica.FiCaS
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.event.SchedulerEvent;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.fifo.FifoScheduler;
 import org.apache.hadoop.yarn.server.resourcemanager.security.AMRMTokenSecretManager;
+import org.apache.hadoop.yarn.server.timelineservice.security.TimelineV2DelegationTokenSecretManagerService.TimelineV2DelegationTokenSecretManager;
 import org.apache.hadoop.yarn.server.utils.BuilderUtils;
 import org.apache.hadoop.yarn.util.Clock;
 import org.apache.hadoop.yarn.util.ControlledClock;
+import org.apache.hadoop.yarn.util.Records;
 import org.apache.hadoop.yarn.util.SystemClock;
 import org.junit.After;
 import org.junit.Assert;
@@ -749,6 +756,96 @@ public class TestRMContainerAllocator {
   }
 
   @Test
+  public void testUpdateCollectorInfo() throws Exception {
+    LOG.info("Running testUpdateCollectorInfo");
+    Configuration conf = new Configuration();
+    conf.setBoolean(YarnConfiguration.TIMELINE_SERVICE_ENABLED, true);
+    conf.setFloat(YarnConfiguration.TIMELINE_SERVICE_VERSION, 2.0f);
+    ApplicationId appId = ApplicationId.newInstance(1, 1);
+    ApplicationAttemptId attemptId = ApplicationAttemptId.newInstance(appId, 1);
+    JobId jobId = MRBuilderUtils.newJobId(appId, 0);
+    Job mockJob = mock(Job.class);
+    when(mockJob.getReport()).thenReturn(
+        MRBuilderUtils.newJobReport(jobId, "job", "user", JobState.RUNNING, 0,
+            0, 0, 0, 0, 0, 0, "jobfile", null, false, ""));
+    String localAddr = "localhost:1234";
+    UserGroupInformation ugi = UserGroupInformation.getCurrentUser();
+
+    // Generate a timeline delegation token.
+    TimelineDelegationTokenIdentifier ident =
+        new TimelineDelegationTokenIdentifier(new Text(ugi.getUserName()),
+        new Text("renewer"), null);
+    ident.setSequenceNumber(1);
+    Token<TimelineDelegationTokenIdentifier> collectorToken =
+        new Token<TimelineDelegationTokenIdentifier> (ident.getBytes(),
+        new byte[0], TimelineDelegationTokenIdentifier.KIND_NAME,
+        new Text(localAddr));
+    org.apache.hadoop.yarn.api.records.Token token =
+        org.apache.hadoop.yarn.api.records.Token.newInstance(
+            collectorToken.getIdentifier(), collectorToken.getKind().toString(),
+            collectorToken.getPassword(),
+            collectorToken.getService().toString());
+    CollectorInfo collectorInfo = CollectorInfo.newInstance(localAddr, token);
+    // Mock scheduler to serve Allocate requests.
+    final MockSchedulerForTimelineCollector mockScheduler =
+        new MockSchedulerForTimelineCollector(collectorInfo);
+    MyContainerAllocator allocator =
+        new MyContainerAllocator(null, conf, attemptId, mockJob,
+            SystemClock.getInstance()) {
+          @Override
+          protected void register() {
+          }
+
+          @Override
+          protected ApplicationMasterProtocol createSchedulerProxy() {
+            return mockScheduler;
+          }
+        };
+    // Initially UGI should have no tokens.
+    ArrayList<Token<? extends TokenIdentifier>> tokens =
+        new ArrayList<>(ugi.getTokens());
+    assertEquals(0, tokens.size());
+    TimelineV2Client client = spy(TimelineV2Client.createTimelineClient(appId));
+    client.init(conf);
+    when(((RunningAppContext)allocator.getContext()).getTimelineV2Client()).
+        thenReturn(client);
+
+    // Send allocate request to RM and fetch collector address and token.
+    allocator.schedule();
+    verify(client).setTimelineCollectorInfo(collectorInfo);
+    // Verify if token has been updated in UGI.
+    tokens = new ArrayList<>(ugi.getTokens());
+    assertEquals(1, tokens.size());
+    assertEquals(TimelineDelegationTokenIdentifier.KIND_NAME,
+        tokens.get(0).getKind());
+    assertEquals(collectorToken.decodeIdentifier(),
+        tokens.get(0).decodeIdentifier());
+
+    // Generate new collector token, send allocate request to RM and fetch the
+    // new token.
+    ident.setSequenceNumber(100);
+    Token<TimelineDelegationTokenIdentifier> collectorToken1 =
+        new Token<TimelineDelegationTokenIdentifier> (ident.getBytes(),
+        new byte[0], TimelineDelegationTokenIdentifier.KIND_NAME,
+        new Text(localAddr));
+    token = org.apache.hadoop.yarn.api.records.Token.newInstance(
+        collectorToken1.getIdentifier(), collectorToken1.getKind().toString(),
+        collectorToken1.getPassword(), collectorToken1.getService().toString());
+    collectorInfo = CollectorInfo.newInstance(localAddr, token);
+    mockScheduler.updateCollectorInfo(collectorInfo);
+    allocator.schedule();
+    verify(client).setTimelineCollectorInfo(collectorInfo);
+    // Verify if new token has been updated in UGI.
+    tokens = new ArrayList<>(ugi.getTokens());
+    assertEquals(1, tokens.size());
+    assertEquals(TimelineDelegationTokenIdentifier.KIND_NAME,
+        tokens.get(0).getKind());
+    assertEquals(collectorToken1.decodeIdentifier(),
+        tokens.get(0).decodeIdentifier());
+    allocator.close();
+  }
+
+  @Test
   public void testMapReduceScheduling() throws Exception {
 
     LOG.info("Running testMapReduceScheduling");
@@ -3488,6 +3585,46 @@ public class TestRMContainerAllocator {
     }
   }
 
+  private static class MockSchedulerForTimelineCollector
+      implements ApplicationMasterProtocol {
+    CollectorInfo collectorInfo;
+
+    public MockSchedulerForTimelineCollector(CollectorInfo info) {
+      this.collectorInfo = info;
+    }
+
+    void updateCollectorInfo(CollectorInfo info) {
+      collectorInfo = info;
+    }
+
+    @Override
+    public RegisterApplicationMasterResponse registerApplicationMaster(
+        RegisterApplicationMasterRequest request) throws YarnException,
+        IOException {
+      return Records.newRecord(RegisterApplicationMasterResponse.class);
+    }
+
+    @Override
+    public FinishApplicationMasterResponse finishApplicationMaster(
+        FinishApplicationMasterRequest request) throws YarnException,
+        IOException {
+      return FinishApplicationMasterResponse.newInstance(false);
+    }
+
+    @Override
+    public AllocateResponse allocate(AllocateRequest request)
+        throws YarnException, IOException {
+      AllocateResponse response =  AllocateResponse.newInstance(
+          request.getResponseId(), Collections.<ContainerStatus>emptyList(),
+          Collections.<Container>emptyList(),
+          Collections.<NodeReport>emptyList(),
+          Resource.newInstance(512000, 1024), null, 10, null,
+          Collections.<NMToken>emptyList());
+      response.setCollectorInfo(collectorInfo);
+      return response;
+    }
+  }
+
   public static void main(String[] args) throws Exception {
     TestRMContainerAllocator t = new TestRMContainerAllocator();
     t.testSimple();

http://git-wip-us.apache.org/repos/asf/hadoop/blob/08f40bcc/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/CollectorInfo.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/CollectorInfo.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/CollectorInfo.java
index 960c992..d22b9fb 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/CollectorInfo.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/CollectorInfo.java
@@ -32,6 +32,10 @@ public abstract class CollectorInfo {
 
   protected static final long DEFAULT_TIMESTAMP_VALUE = -1;
 
+  public static CollectorInfo newInstance(String collectorAddr) {
+    return newInstance(collectorAddr, null);
+  }
+
   public static CollectorInfo newInstance(String collectorAddr, Token token) {
     CollectorInfo amCollectorInfo =
         Records.newRecord(CollectorInfo.class);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/08f40bcc/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/api/async/impl/AMRMClientAsyncImpl.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/api/async/impl/AMRMClientAsyncImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/api/async/impl/AMRMClientAsyncImpl.java
index 265badb..d12b108 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/api/async/impl/AMRMClientAsyncImpl.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/api/async/impl/AMRMClientAsyncImpl.java
@@ -68,8 +68,6 @@ extends AMRMClientAsync<T> {
   
   private volatile boolean keepRunning;
   private volatile float progress;
-  
-  private volatile String collectorAddr;
 
   /**
    *
@@ -332,14 +330,9 @@ extends AMRMClientAsync<T> {
 
           TimelineV2Client timelineClient =
               client.getRegisteredTimelineV2Client();
-          if (timelineClient != null && collectorAddress != null
-              && !collectorAddress.isEmpty()) {
-            if (collectorAddr == null
-                || !collectorAddr.equals(collectorAddress)) {
-              collectorAddr = collectorAddress;
-              timelineClient.setTimelineServiceAddress(collectorAddress);
-              LOG.info("collectorAddress " + collectorAddress);
-            }
+          if (timelineClient != null && response.getCollectorInfo() != null) {
+            timelineClient.
+                setTimelineCollectorInfo(response.getCollectorInfo());
           }
 
           List<NodeReport> updatedNodes = response.getUpdatedNodes();

http://git-wip-us.apache.org/repos/asf/hadoop/blob/08f40bcc/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/client/api/TimelineV2Client.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/client/api/TimelineV2Client.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/client/api/TimelineV2Client.java
index 32cf1e9..da81a91 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/client/api/TimelineV2Client.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/client/api/TimelineV2Client.java
@@ -23,6 +23,8 @@ import java.io.IOException;
 import org.apache.hadoop.classification.InterfaceAudience.Public;
 import org.apache.hadoop.service.CompositeService;
 import org.apache.hadoop.yarn.api.records.ApplicationId;
+import org.apache.hadoop.yarn.api.records.CollectorInfo;
+import org.apache.hadoop.yarn.api.records.Token;
 import org.apache.hadoop.yarn.api.records.timelineservice.TimelineEntity;
 import org.apache.hadoop.yarn.client.api.impl.TimelineV2ClientImpl;
 import org.apache.hadoop.yarn.exceptions.YarnException;
@@ -83,10 +85,13 @@ public abstract class TimelineV2Client extends CompositeService {
 
   /**
    * <p>
-   * Update the timeline service address where the request will be sent to.
+   * Update collector info received in AllocateResponse, which contains the
+   * timeline service address where requests will be sent and the timeline
+   * delegation token that will be used to send them.
    * </p>
    *
-   * @param address the timeline service address
+   * @param collectorInfo Collector info which contains the timeline service
+   * address and timeline delegation token.
    */
-  public abstract void setTimelineServiceAddress(String address);
+  public abstract void setTimelineCollectorInfo(CollectorInfo collectorInfo);
 }
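
A hedged sketch of the intended AM-side call pattern, using only APIs visible in this patch; appId, conf, and the allocate plumbing are assumed to exist in the caller:

TimelineV2Client timelineClient =
    TimelineV2Client.createTimelineClient(appId);
timelineClient.init(conf);
timelineClient.start();

AllocateResponse response = applicationMasterProtocol.allocate(request);
if (response.getCollectorInfo() != null) {
  // A single call now updates both the collector address and, in secure
  // mode, the timeline delegation token held in the caller's UGI.
  timelineClient.setTimelineCollectorInfo(response.getCollectorInfo());
}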

http://git-wip-us.apache.org/repos/asf/hadoop/blob/08f40bcc/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/client/api/impl/TimelineV2ClientImpl.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/client/api/impl/TimelineV2ClientImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/client/api/impl/TimelineV2ClientImpl.java
index 128ae7a..97d1364 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/client/api/impl/TimelineV2ClientImpl.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/client/api/impl/TimelineV2ClientImpl.java
@@ -21,6 +21,7 @@ package org.apache.hadoop.yarn.client.api.impl;
 import java.io.IOException;
 import java.io.InterruptedIOException;
 import java.lang.reflect.UndeclaredThrowableException;
+import java.net.InetSocketAddress;
 import java.net.URI;
 import java.security.PrivilegedExceptionAction;
 import java.util.concurrent.BlockingQueue;
@@ -39,15 +40,22 @@ import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.classification.InterfaceAudience.Private;
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.io.Text;
+import org.apache.hadoop.net.NetUtils;
+import org.apache.hadoop.security.SecurityUtil;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.security.token.delegation.web.DelegationTokenAuthenticatedURL;
 import org.apache.hadoop.yarn.api.records.ApplicationId;
+import org.apache.hadoop.yarn.api.records.CollectorInfo;
+import org.apache.hadoop.yarn.api.records.Token;
 import org.apache.hadoop.yarn.api.records.timelineservice.TimelineEntities;
 import org.apache.hadoop.yarn.api.records.timelineservice.TimelineEntity;
 import org.apache.hadoop.yarn.client.api.TimelineV2Client;
 import org.apache.hadoop.yarn.conf.YarnConfiguration;
 import org.apache.hadoop.yarn.exceptions.YarnException;
+import org.apache.hadoop.yarn.security.client.TimelineDelegationTokenIdentifier;
 
+import com.google.common.annotations.VisibleForTesting;
 import com.sun.jersey.api.client.ClientResponse;
 import com.sun.jersey.core.util.MultivaluedMapImpl;
 
@@ -62,6 +70,8 @@ public class TimelineV2ClientImpl extends TimelineV2Client {
 
   private TimelineEntityDispatcher entityDispatcher;
   private volatile String timelineServiceAddress;
+  @VisibleForTesting
+  volatile Token currentTimelineToken = null;
 
   // Retry parameters for identifying new timeline service
   // TODO consider to merge with connection retry
@@ -100,7 +110,6 @@ public class TimelineV2ClientImpl extends TimelineV2Client {
       authUgi = ugi;
       doAsUser = null;
     }
-
     // TODO need to add/cleanup filter retry later for ATSV2. similar to V1
     DelegationTokenAuthenticatedURL.Token token =
         new DelegationTokenAuthenticatedURL.Token();
@@ -144,8 +153,73 @@ public class TimelineV2ClientImpl extends TimelineV2Client {
   }
 
   @Override
-  public void setTimelineServiceAddress(String address) {
-    this.timelineServiceAddress = address;
+  public void setTimelineCollectorInfo(CollectorInfo collectorInfo) {
+    if (collectorInfo == null) {
+      LOG.warn("Not setting collector info as it is null.");
+      return;
+    }
+    // First update the token so that it is available when collector address is
+    // used.
+    if (collectorInfo.getCollectorToken() != null) {
+      // Use collector address to update the token service if it is not set.
+      setTimelineDelegationToken(
+          collectorInfo.getCollectorToken(), collectorInfo.getCollectorAddr());
+    }
+    // Update timeline service address.
+    if (collectorInfo.getCollectorAddr() != null &&
+        !collectorInfo.getCollectorAddr().isEmpty() &&
+        !collectorInfo.getCollectorAddr().equals(timelineServiceAddress)) {
+      this.timelineServiceAddress = collectorInfo.getCollectorAddr();
+      LOG.info("Updated timeline service address to " + timelineServiceAddress);
+    }
+  }
+
+  private void setTimelineDelegationToken(Token delegationToken,
+      String collectorAddr) {
+    // Checks below are to ensure that an invalid token is not updated in UGI.
+    // This is required because timeline token is set via a public API.
+    if (!delegationToken.getKind().equals(
+        TimelineDelegationTokenIdentifier.KIND_NAME.toString())) {
+      LOG.warn("Timeline token to be updated should be of kind " +
+          TimelineDelegationTokenIdentifier.KIND_NAME);
+      return;
+    }
+    if (collectorAddr == null || collectorAddr.isEmpty()) {
+      collectorAddr = timelineServiceAddress;
+    }
+    // The token cannot be updated if both the token service and the
+    // collector address are missing.
+    String service = delegationToken.getService();
+    if ((service == null || service.isEmpty()) &&
+        (collectorAddr == null || collectorAddr.isEmpty())) {
+      LOG.warn("Timeline token does not have service and timeline service " +
+          "address is not yet set. Not updating the token");
+      return;
+    }
+    // No need to update a duplicate token.
+    if (currentTimelineToken != null &&
+        currentTimelineToken.equals(delegationToken)) {
+      return;
+    }
+    currentTimelineToken = delegationToken;
+    // Convert the token, sanitize the token service and add it to UGI.
+    org.apache.hadoop.security.token.
+        Token<TimelineDelegationTokenIdentifier> timelineToken =
+            new org.apache.hadoop.security.token.
+            Token<TimelineDelegationTokenIdentifier>(
+                delegationToken.getIdentifier().array(),
+                delegationToken.getPassword().array(),
+                new Text(delegationToken.getKind()),
+                service == null ? new Text() : new Text(service));
+    // Prefer timeline service address over service coming in the token for
+    // updating the token service.
+    InetSocketAddress serviceAddr =
+        (collectorAddr != null && !collectorAddr.isEmpty()) ?
+        NetUtils.createSocketAddr(collectorAddr) :
+        SecurityUtil.getTokenServiceAddr(timelineToken);
+    SecurityUtil.setTokenService(timelineToken, serviceAddr);
+    authUgi.addToken(timelineToken);
+    LOG.info("Updated timeline delegation token " + timelineToken);
   }
 
   @Private

http://git-wip-us.apache.org/repos/asf/hadoop/blob/08f40bcc/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/client/api/impl/TestTimelineClientV2Impl.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/client/api/impl/TestTimelineClientV2Impl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/client/api/impl/TestTimelineClientV2Impl.java
index c5b02fd..95595a9 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/client/api/impl/TestTimelineClientV2Impl.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/client/api/impl/TestTimelineClientV2Impl.java
@@ -18,6 +18,11 @@
 
 package org.apache.hadoop.yarn.client.api.impl;
 
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertNotEquals;
+import static org.junit.Assert.assertNotNull;
+import static org.junit.Assert.assertNull;
+
 import java.io.IOException;
 import java.net.URI;
 import java.util.ArrayList;
@@ -27,11 +32,16 @@ import javax.ws.rs.core.MultivaluedMap;
 
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.io.Text;
+import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.yarn.api.records.ApplicationId;
+import org.apache.hadoop.yarn.api.records.CollectorInfo;
+import org.apache.hadoop.yarn.api.records.Token;
 import org.apache.hadoop.yarn.api.records.timelineservice.TimelineEntities;
 import org.apache.hadoop.yarn.api.records.timelineservice.TimelineEntity;
 import org.apache.hadoop.yarn.conf.YarnConfiguration;
 import org.apache.hadoop.yarn.exceptions.YarnException;
+import org.apache.hadoop.yarn.security.client.TimelineDelegationTokenIdentifier;
 import org.junit.After;
 import org.junit.Assert;
 import org.junit.Before;
@@ -151,7 +161,7 @@ public class TestTimelineClientV2Impl {
         maxRetries);
     c.init(conf);
     c.start();
-    c.setTimelineServiceAddress("localhost:12345");
+    c.setTimelineCollectorInfo(CollectorInfo.newInstance("localhost:12345"));
     try {
       c.putEntities(new TimelineEntity());
     } catch (IOException e) {
@@ -311,6 +321,50 @@ public class TestTimelineClientV2Impl {
   }
 
   @Test
+  public void testSetTimelineToken() throws Exception {
+    UserGroupInformation ugi = UserGroupInformation.getCurrentUser();
+    assertEquals(0, ugi.getTokens().size());
+    assertNull("Timeline token in v2 client should not be set",
+        client.currentTimelineToken);
+
+    Token token = Token.newInstance(
+        new byte[0], "kind", new byte[0], "service");
+    client.setTimelineCollectorInfo(CollectorInfo.newInstance(null, token));
+    assertNull("Timeline token in v2 client should not be set as token kind " +
+        "is unexepcted.", client.currentTimelineToken);
+    assertEquals(0, ugi.getTokens().size());
+
+    token = Token.newInstance(new byte[0], TimelineDelegationTokenIdentifier.
+        KIND_NAME.toString(), new byte[0], null);
+    client.setTimelineCollectorInfo(CollectorInfo.newInstance(null, token));
+    assertNull("Timeline token in v2 client should not be set as serice is " +
+        "not set.", client.currentTimelineToken);
+    assertEquals(0, ugi.getTokens().size());
+
+    TimelineDelegationTokenIdentifier ident =
+        new TimelineDelegationTokenIdentifier(new Text(ugi.getUserName()),
+        new Text("renewer"), null);
+    ident.setSequenceNumber(1);
+    token = Token.newInstance(ident.getBytes(),
+        TimelineDelegationTokenIdentifier.KIND_NAME.toString(), new byte[0],
+        "localhost:1234");
+    client.setTimelineCollectorInfo(CollectorInfo.newInstance(null, token));
+    assertEquals(1, ugi.getTokens().size());
+    assertNotNull("Timeline token should be set in v2 client.",
+        client.currentTimelineToken);
+    assertEquals(token, client.currentTimelineToken);
+
+    ident.setSequenceNumber(20);
+    Token newToken = Token.newInstance(ident.getBytes(),
+        TimelineDelegationTokenIdentifier.KIND_NAME.toString(), new byte[0],
+        "localhost:1234");
+    client.setTimelineCollectorInfo(CollectorInfo.newInstance(null, newToken));
+    assertEquals(1, ugi.getTokens().size());
+    assertNotEquals(token, client.currentTimelineToken);
+    assertEquals(newToken, client.currentTimelineToken);
+  }
+
+  @Test
   public void testAfterStop() throws Exception {
     client.setSleepBeforeReturn(true);
     try {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/08f40bcc/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/timelineservice/NMTimelinePublisher.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/timelineservice/NMTimelinePublisher.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/timelineservice/NMTimelinePublisher.java
index 34eddf7..b8192ca 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/timelineservice/NMTimelinePublisher.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/timelineservice/NMTimelinePublisher.java
@@ -30,6 +30,7 @@ import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.service.CompositeService;
 import org.apache.hadoop.yarn.api.records.ApplicationId;
+import org.apache.hadoop.yarn.api.records.CollectorInfo;
 import org.apache.hadoop.yarn.api.records.ContainerId;
 import org.apache.hadoop.yarn.api.records.ContainerState;
 import org.apache.hadoop.yarn.api.records.ContainerStatus;
@@ -437,7 +438,7 @@ public class NMTimelinePublisher extends CompositeService {
       String collectorAddr) {
     TimelineV2Client client = appToClientMap.get(appId);
     if (client != null) {
-      client.setTimelineServiceAddress(collectorAddr);
+      client.setTimelineCollectorInfo(CollectorInfo.newInstance(collectorAddr));
     }
   }
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/08f40bcc/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-tests/src/test/java/org/apache/hadoop/yarn/server/timelineservice/TestTimelineServiceClientIntegration.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-tests/src/test/java/org/apache/hadoop/yarn/server/timelineservice/TestTimelineServiceClientIntegration.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-tests/src/test/java/org/apache/hadoop/yarn/server/timelineservice/TestTimelineServiceClientIntegration.java
index eb4381d..6a5ef55 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-tests/src/test/java/org/apache/hadoop/yarn/server/timelineservice/TestTimelineServiceClientIntegration.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-tests/src/test/java/org/apache/hadoop/yarn/server/timelineservice/TestTimelineServiceClientIntegration.java
@@ -33,6 +33,7 @@ import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.util.ExitUtil;
 import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
 import org.apache.hadoop.yarn.api.records.ApplicationId;
+import org.apache.hadoop.yarn.api.records.CollectorInfo;
 import org.apache.hadoop.yarn.api.records.ContainerId;
 import org.apache.hadoop.yarn.api.records.timelineservice.ApplicationAttemptEntity;
 import org.apache.hadoop.yarn.api.records.timelineservice.ApplicationEntity;
@@ -99,9 +100,9 @@ public class TestTimelineServiceClientIntegration {
     TimelineV2Client client =
         TimelineV2Client.createTimelineClient(ApplicationId.newInstance(0, 1));
     try {
-      // set the timeline service address manually
-      client.setTimelineServiceAddress(
-          collectorManager.getRestServerBindAddress());
+      // Set the timeline service address manually.
+      client.setTimelineCollectorInfo(CollectorInfo.newInstance(
+          collectorManager.getRestServerBindAddress()));
       client.init(conf);
       client.start();
       TimelineEntity entity = new TimelineEntity();
@@ -126,9 +127,9 @@ public class TestTimelineServiceClientIntegration {
     TimelineV2Client client =
         TimelineV2Client.createTimelineClient(appId);
     try {
-      // set the timeline service address manually
-      client.setTimelineServiceAddress(
-          collectorManager.getRestServerBindAddress());
+      // Set the timeline service address manually.
+      client.setTimelineCollectorInfo(CollectorInfo.newInstance(
+          collectorManager.getRestServerBindAddress()));
       client.init(conf);
       client.start();
       ClusterEntity cluster = new ClusterEntity();

http://git-wip-us.apache.org/repos/asf/hadoop/blob/08f40bcc/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-tests/src/test/java/org/apache/hadoop/yarn/server/timelineservice/security/TestTimelineAuthFilterForV2.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-tests/src/test/java/org/apache/hadoop/yarn/server/timelineservice/security/TestTimelineAuthFilterForV2.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-tests/src/test/java/org/apache/hadoop/yarn/server/timelineservice/security/TestTimelineAuthFilterForV2.java
index bc1594c..75f17fb 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-tests/src/test/java/org/apache/hadoop/yarn/server/timelineservice/security/TestTimelineAuthFilterForV2.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-tests/src/test/java/org/apache/hadoop/yarn/server/timelineservice/security/TestTimelineAuthFilterForV2.java
@@ -56,6 +56,7 @@ import org.apache.hadoop.security.ssl.KeyStoreTestUtil;
 import org.apache.hadoop.security.token.Token;
 import org.apache.hadoop.security.token.delegation.AbstractDelegationTokenSecretManager;
 import org.apache.hadoop.yarn.api.records.ApplicationId;
+import org.apache.hadoop.yarn.api.records.CollectorInfo;
 import org.apache.hadoop.yarn.api.records.timelineservice.TimelineEntity;
 import org.apache.hadoop.yarn.client.api.TimelineV2Client;
 import org.apache.hadoop.yarn.conf.YarnConfiguration;
@@ -228,7 +229,7 @@ public class TestTimelineAuthFilterForV2 {
     String restBindAddr = collectorManager.getRestServerBindAddress();
     String addr =
         "localhost" + restBindAddr.substring(restBindAddr.indexOf(":"));
-    client.setTimelineServiceAddress(addr);
+    client.setTimelineCollectorInfo(CollectorInfo.newInstance(addr));
     client.init(conf);
     client.start();
     return client;




[18/50] [abbrv] hadoop git commit: YARN-6874. Supplement timestamp for min start/max end time columns in flow run table to avoid overwrite (Vrushali C via Varun Saxena)

Posted by st...@apache.org.
YARN-6874. Supplement timestamp for min start/max end time columns in flow run table to avoid overwrite (Vrushali C via Varun Saxena)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/60765aff
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/60765aff
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/60765aff

Branch: refs/heads/HADOOP-13345
Commit: 60765aff9b57270566b6ec5b5b71433261a168e3
Parents: 354be99
Author: Varun Saxena <va...@apache.org>
Authored: Thu Aug 10 11:01:19 2017 +0530
Committer: Varun Saxena <va...@apache.org>
Committed: Wed Aug 30 11:29:53 2017 +0530

----------------------------------------------------------------------
 .../yarn/server/timelineservice/storage/flow/FlowRunColumn.java    | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/60765aff/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/flow/FlowRunColumn.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/flow/FlowRunColumn.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/flow/FlowRunColumn.java
index 7a39120..3797faf 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/flow/FlowRunColumn.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/flow/FlowRunColumn.java
@@ -76,7 +76,7 @@ public enum FlowRunColumn implements Column<FlowRunTable> {
     // Future-proof by ensuring the right column prefix hygiene.
     this.columnQualifierBytes = Bytes.toBytes(Separator.SPACE
         .encode(columnQualifier));
-    this.column = new ColumnHelper<FlowRunTable>(columnFamily, converter);
+    this.column = new ColumnHelper<FlowRunTable>(columnFamily, converter, true);
   }
 
   /**


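For context, the third ColumnHelper argument switches on timestamp supplementation. Roughly the idea, as an illustrative sketch rather than the actual TimestampGenerator code:

// Pack an app-specific suffix into the low digits of the millisecond
// timestamp, so two apps updating the min-start/max-end cells in the
// same millisecond get distinct HBase cell timestamps instead of
// overwriting each other.
static long supplementTimestamp(long millis, String appId) {
  final long MULTIPLIER = 1_000_000L;  // reserve six digits for the suffix
  long suffix = (appId.hashCode() & 0x7fffffff) % MULTIPLIER;
  return millis * MULTIPLIER + suffix;
}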


[14/50] [abbrv] hadoop git commit: YARN-5647. [ATSv2 Security] Collector side changes for loading auth filters and principals. Contributed by Varun Saxena

Posted by st...@apache.org.
YARN-5647. [ATSv2 Security] Collector side changes for loading auth filters and principals. Contributed by Varun Saxena


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/879de512
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/879de512
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/879de512

Branch: refs/heads/HADOOP-13345
Commit: 879de51206ddef132c092ee21e8b6c6e5976a56e
Parents: 24447b3
Author: Jian He <ji...@apache.org>
Authored: Wed Jun 7 13:45:34 2017 -0700
Committer: Varun Saxena <va...@apache.org>
Committed: Wed Aug 30 11:29:53 2017 +0530

----------------------------------------------------------------------
 .../ApplicationHistoryServer.java               |  79 ++---
 .../security/TimelineAuthenticationFilter.java  |  49 ---
 ...TimelineAuthenticationFilterInitializer.java | 129 -------
 ...lineDelegationTokenSecretManagerService.java | 240 --------------
 ...neV1DelegationTokenSecretManagerService.java | 225 +++++++++++++
 .../TestTimelineAuthenticationFilter.java       | 323 ------------------
 .../TestTimelineAuthenticationFilterForV1.java  | 332 +++++++++++++++++++
 ...TimelineAuthenticationFilterInitializer.java |  76 -----
 .../security/TimelineAuthenticationFilter.java  |  55 +++
 ...TimelineAuthenticationFilterInitializer.java | 129 +++++++
 ...elineDelgationTokenSecretManagerService.java |  83 +++++
 .../server/timeline/security/package-info.java  |  26 ++
 .../util/timeline/TimelineServerUtils.java      |  92 +++++
 .../yarn/server/util/timeline/package-info.java |  25 ++
 ...TimelineAuthenticationFilterInitializer.java |  76 +++++
 .../collector/NodeTimelineCollectorManager.java |  66 +++-
 .../PerNodeTimelineCollectorsAuxService.java    |   5 +-
 .../collector/TimelineCollectorManager.java     |   6 +-
 ...neV2DelegationTokenSecretManagerService.java |  78 +++++
 .../timelineservice/security/package-info.java  |  25 ++
 20 files changed, 1227 insertions(+), 892 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/879de512/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/ApplicationHistoryServer.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/ApplicationHistoryServer.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/ApplicationHistoryServer.java
index 85e5f2d..4e3a1e6 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/ApplicationHistoryServer.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/ApplicationHistoryServer.java
@@ -20,14 +20,14 @@ package org.apache.hadoop.yarn.server.applicationhistoryservice;
 
 import java.io.IOException;
 import java.net.InetSocketAddress;
-import java.util.ArrayList;
+import java.util.LinkedHashSet;
+import java.util.Set;
 
 import org.apache.hadoop.classification.InterfaceAudience.Private;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.http.HttpServer2;
 import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem;
 import org.apache.hadoop.metrics2.source.JvmMetrics;
-import org.apache.hadoop.security.AuthenticationFilterInitializer;
 import org.apache.hadoop.security.HttpCrossOriginFilterInitializer;
 import org.apache.hadoop.security.SecurityUtil;
 import org.apache.hadoop.service.CompositeService;
@@ -47,10 +47,9 @@ import org.apache.hadoop.yarn.server.timeline.LeveldbTimelineStore;
 import org.apache.hadoop.yarn.server.timeline.TimelineDataManager;
 import org.apache.hadoop.yarn.server.timeline.TimelineStore;
 import org.apache.hadoop.yarn.server.timeline.security.TimelineACLsManager;
-import org.apache.hadoop.yarn.server.timeline.security.TimelineAuthenticationFilter;
-import org.apache.hadoop.yarn.server.timeline.security.TimelineAuthenticationFilterInitializer;
-import org.apache.hadoop.yarn.server.timeline.security.TimelineDelegationTokenSecretManagerService;
+import org.apache.hadoop.yarn.server.timeline.security.TimelineV1DelegationTokenSecretManagerService;
 import org.apache.hadoop.yarn.server.timeline.webapp.CrossOriginFilterInitializer;
+import org.apache.hadoop.yarn.server.util.timeline.TimelineServerUtils;
 import org.apache.hadoop.yarn.webapp.WebApp;
 import org.apache.hadoop.yarn.webapp.WebApps;
 import org.apache.hadoop.yarn.webapp.util.WebAppUtils;
@@ -75,7 +74,7 @@ public class ApplicationHistoryServer extends CompositeService {
   private ApplicationACLsManager aclsManager;
   private ApplicationHistoryManager historyManager;
   private TimelineStore timelineStore;
-  private TimelineDelegationTokenSecretManagerService secretManagerService;
+  private TimelineV1DelegationTokenSecretManagerService secretManagerService;
   private TimelineDataManager timelineDataManager;
   private WebApp webApp;
   private JvmPauseMonitor pauseMonitor;
@@ -223,9 +222,9 @@ public class ApplicationHistoryServer extends CompositeService {
         TimelineStore.class), conf);
   }
 
-  private TimelineDelegationTokenSecretManagerService
+  private TimelineV1DelegationTokenSecretManagerService
       createTimelineDelegationTokenSecretManagerService(Configuration conf) {
-    return new TimelineDelegationTokenSecretManagerService();
+    return new TimelineV1DelegationTokenSecretManagerService();
   }
 
   private TimelineDataManager createTimelineDataManager(Configuration conf) {
@@ -237,63 +236,33 @@ public class ApplicationHistoryServer extends CompositeService {
   @SuppressWarnings("unchecked")
   private void startWebApp() {
     Configuration conf = getConfig();
-    TimelineAuthenticationFilter.setTimelineDelegationTokenSecretManager(
-        secretManagerService.getTimelineDelegationTokenSecretManager());
     // Always load pseudo authentication filter to parse "user.name" in an URL
     // to identify a HTTP request's user in insecure mode.
     // When Kerberos authentication type is set (i.e., secure mode is turned on),
     // the customized filter will be loaded by the timeline server to do Kerberos
     // + DT authentication.
-    String initializers = conf.get("hadoop.http.filter.initializers");
-    boolean modifiedInitializers = false;
-
-    initializers =
-        initializers == null || initializers.length() == 0 ? "" : initializers;
-
+    String initializers = conf.get("hadoop.http.filter.initializers", "");
+    Set<String> defaultInitializers = new LinkedHashSet<String>();
+    // Add CORS filter
     if (!initializers.contains(CrossOriginFilterInitializer.class.getName())) {
-      if(conf.getBoolean(YarnConfiguration
-          .TIMELINE_SERVICE_HTTP_CROSS_ORIGIN_ENABLED, YarnConfiguration
-              .TIMELINE_SERVICE_HTTP_CROSS_ORIGIN_ENABLED_DEFAULT)) {
-        if (initializers.contains(HttpCrossOriginFilterInitializer.class.getName())) {
-          initializers =
-            initializers.replaceAll(HttpCrossOriginFilterInitializer.class.getName(),
+      if(conf.getBoolean(YarnConfiguration.
+          TIMELINE_SERVICE_HTTP_CROSS_ORIGIN_ENABLED,
+          YarnConfiguration.
+          TIMELINE_SERVICE_HTTP_CROSS_ORIGIN_ENABLED_DEFAULT)) {
+        if (initializers.contains(
+            HttpCrossOriginFilterInitializer.class.getName())) {
+          initializers = initializers.replaceAll(
+              HttpCrossOriginFilterInitializer.class.getName(),
               CrossOriginFilterInitializer.class.getName());
+        } else {
+          defaultInitializers.add(CrossOriginFilterInitializer.class.getName());
         }
-        else {
-          if (initializers.length() != 0) {
-            initializers += ",";
-          }
-          initializers += CrossOriginFilterInitializer.class.getName();
-        }
-        modifiedInitializers = true;
-      }
-    }
-
-    if (!initializers.contains(TimelineAuthenticationFilterInitializer.class
-      .getName())) {
-      if (initializers.length() != 0) {
-        initializers += ",";
-      }
-      initializers += TimelineAuthenticationFilterInitializer.class.getName();
-      modifiedInitializers = true;
-    }
-
-    String[] parts = initializers.split(",");
-    ArrayList<String> target = new ArrayList<String>();
-    for (String filterInitializer : parts) {
-      filterInitializer = filterInitializer.trim();
-      if (filterInitializer.equals(AuthenticationFilterInitializer.class
-        .getName())) {
-        modifiedInitializers = true;
-        continue;
       }
-      target.add(filterInitializer);
-    }
-    String actualInitializers =
-        org.apache.commons.lang.StringUtils.join(target, ",");
-    if (modifiedInitializers) {
-      conf.set("hadoop.http.filter.initializers", actualInitializers);
     }
+    TimelineServerUtils.addTimelineAuthFilter(
+        initializers, defaultInitializers, secretManagerService);
+    TimelineServerUtils.setTimelineFilters(
+        conf, initializers, defaultInitializers);
     String bindAddress = WebAppUtils.getWebAppBindURL(conf,
                           YarnConfiguration.TIMELINE_SERVICE_BIND_HOST,
                           WebAppUtils.getAHSWebAppURLWithoutScheme(conf));
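
The inline filter-string surgery that used to live here moves into the new shared TimelineServerUtils helpers. A sketch of the expected behavior of setTimelineFilters (assumed; the actual 92-line helper body appears elsewhere in this patch):

import java.util.LinkedHashSet;
import java.util.Set;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.security.AuthenticationFilterInitializer;

public static void setTimelineFilters(Configuration conf,
    String configuredInitializers, Set<String> defaultInitializers) {
  Set<String> target = new LinkedHashSet<String>();
  for (String filter : configuredInitializers.split(",")) {
    filter = filter.trim();
    // Drop the generic AuthenticationFilterInitializer; the timeline auth
    // filter registered via addTimelineAuthFilter() supersedes it.
    if (!filter.isEmpty() && !filter.equals(
        AuthenticationFilterInitializer.class.getName())) {
      target.add(filter);
    }
  }
  target.addAll(defaultInitializers);
  conf.set("hadoop.http.filter.initializers",
      org.apache.commons.lang.StringUtils.join(target, ","));
}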

http://git-wip-us.apache.org/repos/asf/hadoop/blob/879de512/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/timeline/security/TimelineAuthenticationFilter.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/timeline/security/TimelineAuthenticationFilter.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/timeline/security/TimelineAuthenticationFilter.java
deleted file mode 100644
index ad8dc2c..0000000
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/timeline/security/TimelineAuthenticationFilter.java
+++ /dev/null
@@ -1,49 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.yarn.server.timeline.security;
-
-import javax.servlet.FilterConfig;
-import javax.servlet.ServletException;
-
-import org.apache.hadoop.classification.InterfaceAudience.Private;
-import org.apache.hadoop.classification.InterfaceStability.Unstable;
-import org.apache.hadoop.security.token.delegation.web.DelegationTokenAuthenticationFilter;
-import org.apache.hadoop.yarn.server.timeline.security.TimelineDelegationTokenSecretManagerService.TimelineDelegationTokenSecretManager;
-
-@Private
-@Unstable
-public class TimelineAuthenticationFilter
-    extends DelegationTokenAuthenticationFilter {
-
-  private static TimelineDelegationTokenSecretManager secretManager;
-
-  @Override
-  public void init(FilterConfig filterConfig) throws ServletException {
-    filterConfig.getServletContext().setAttribute(
-        DelegationTokenAuthenticationFilter.DELEGATION_TOKEN_SECRET_MANAGER_ATTR,
-        secretManager);
-    super.init(filterConfig);
-  }
-
-  public static void setTimelineDelegationTokenSecretManager(
-      TimelineDelegationTokenSecretManager secretManager) {
-    TimelineAuthenticationFilter.secretManager = secretManager;
-  }
-
-}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/879de512/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/timeline/security/TimelineAuthenticationFilterInitializer.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/timeline/security/TimelineAuthenticationFilterInitializer.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/timeline/security/TimelineAuthenticationFilterInitializer.java
deleted file mode 100644
index 4e7c29a..0000000
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/timeline/security/TimelineAuthenticationFilterInitializer.java
+++ /dev/null
@@ -1,129 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.yarn.server.timeline.security;
-
-import com.google.common.annotations.VisibleForTesting;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.http.FilterContainer;
-import org.apache.hadoop.http.FilterInitializer;
-import org.apache.hadoop.http.HttpServer2;
-import org.apache.hadoop.security.SecurityUtil;
-import org.apache.hadoop.security.authentication.server.AuthenticationFilter;
-import org.apache.hadoop.security.authentication.server.KerberosAuthenticationHandler;
-import org.apache.hadoop.security.authentication.server.PseudoAuthenticationHandler;
-import org.apache.hadoop.security.authorize.ProxyUsers;
-import org.apache.hadoop.security.token.delegation.web.DelegationTokenAuthenticationHandler;
-import org.apache.hadoop.security.token.delegation.web.KerberosDelegationTokenAuthenticationHandler;
-import org.apache.hadoop.security.token.delegation.web.PseudoDelegationTokenAuthenticationHandler;
-import org.apache.hadoop.yarn.security.client.TimelineDelegationTokenIdentifier;
-
-import java.io.IOException;
-import java.util.HashMap;
-import java.util.Map;
-
-/**
- * Initializes {@link TimelineAuthenticationFilter} which provides support for
- * Kerberos HTTP SPNEGO authentication.
- * <p>
- * It enables Kerberos HTTP SPNEGO plus delegation token authentication for the
- * timeline server.
- * <p>
- * Refer to the {@code core-default.xml} file, after the comment 'HTTP
- * Authentication' for details on the configuration options. All related
- * configuration properties have {@code hadoop.http.authentication.} as prefix.
- */
-public class TimelineAuthenticationFilterInitializer extends FilterInitializer {
-
-  /**
-   * The configuration prefix of timeline HTTP authentication
-   */
-  public static final String PREFIX = "yarn.timeline-service.http-authentication.";
-
-  @VisibleForTesting
-  Map<String, String> filterConfig;
-
-  /**
-   * Initializes {@link TimelineAuthenticationFilter}
-   * <p>
-   * Propagates to {@link TimelineAuthenticationFilter} configuration all YARN
-   * configuration properties prefixed with {@value #PREFIX}
-   *
-   * @param container
-   *          The filter container
-   * @param conf
-   *          Configuration for run-time parameters
-   */
-  @Override
-  public void initFilter(FilterContainer container, Configuration conf) {
-    filterConfig = new HashMap<String, String>();
-
-    // setting the cookie path to root '/' so it is used for all resources.
-    filterConfig.put(TimelineAuthenticationFilter.COOKIE_PATH, "/");
-
-    for (Map.Entry<String, String> entry : conf) {
-      String name = entry.getKey();
-      if (name.startsWith(ProxyUsers.CONF_HADOOP_PROXYUSER)) {
-        String value = conf.get(name);
-        name = name.substring("hadoop.".length());
-        filterConfig.put(name, value);
-      }
-    }
-    for (Map.Entry<String, String> entry : conf) {
-      String name = entry.getKey();
-      if (name.startsWith(PREFIX)) {
-        // yarn.timeline-service.http-authentication.proxyuser will override
-        // hadoop.proxyuser
-        String value = conf.get(name);
-        name = name.substring(PREFIX.length());
-        filterConfig.put(name, value);
-      }
-    }
-
-    String authType = filterConfig.get(AuthenticationFilter.AUTH_TYPE);
-    if (authType.equals(PseudoAuthenticationHandler.TYPE)) {
-      filterConfig.put(AuthenticationFilter.AUTH_TYPE,
-          PseudoDelegationTokenAuthenticationHandler.class.getName());
-    } else if (authType.equals(KerberosAuthenticationHandler.TYPE)) {
-      filterConfig.put(AuthenticationFilter.AUTH_TYPE,
-          KerberosDelegationTokenAuthenticationHandler.class.getName());
-
-      // Resolve _HOST into bind address
-      String bindAddress = conf.get(HttpServer2.BIND_ADDRESS);
-      String principal =
-          filterConfig.get(KerberosAuthenticationHandler.PRINCIPAL);
-      if (principal != null) {
-        try {
-          principal = SecurityUtil.getServerPrincipal(principal, bindAddress);
-        } catch (IOException ex) {
-          throw new RuntimeException(
-              "Could not resolve Kerberos principal name: " + ex.toString(), ex);
-        }
-        filterConfig.put(KerberosAuthenticationHandler.PRINCIPAL,
-            principal);
-      }
-    }
-
-    filterConfig.put(DelegationTokenAuthenticationHandler.TOKEN_KIND,
-        TimelineDelegationTokenIdentifier.KIND_NAME.toString());
-
-    container.addGlobalFilter("Timeline Authentication Filter",
-        TimelineAuthenticationFilter.class.getName(),
-        filterConfig);
-  }
-}
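
Context for the file deleted above: the initializer is driven entirely by
yarn.timeline-service.http-authentication.* properties, and the filter it
installs reappears under hadoop-yarn-server-common later in this patch. Below
is a minimal Kerberos configuration that would exercise the code path shown;
the property names come from the deleted code (PREFIX plus the
KerberosAuthenticationHandler constants), while the principal realm and the
keytab path are illustrative placeholders, not values from this patch.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.yarn.conf.YarnConfiguration;

// Illustrative configuration for the Kerberos branch of initFilter() above.
final class TimelineKerberosConfSketch {
  static Configuration kerberosTimelineConf() {
    Configuration conf = new YarnConfiguration();
    String prefix = "yarn.timeline-service.http-authentication.";
    // "kerberos" makes initFilter() swap in the Kerberos
    // delegation-token authentication handler.
    conf.set(prefix + "type", "kerberos");
    // _HOST is resolved against the HTTP bind address at init time.
    conf.set(prefix + "kerberos.principal", "HTTP/_HOST@EXAMPLE.COM");
    // Placeholder path; point at the real SPNEGO keytab in practice.
    conf.set(prefix + "kerberos.keytab",
        "/etc/security/keytabs/spnego.service.keytab");
    return conf;
  }
}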

http://git-wip-us.apache.org/repos/asf/hadoop/blob/879de512/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/timeline/security/TimelineDelegationTokenSecretManagerService.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/timeline/security/TimelineDelegationTokenSecretManagerService.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/timeline/security/TimelineDelegationTokenSecretManagerService.java
deleted file mode 100644
index 0c6892a..0000000
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/timeline/security/TimelineDelegationTokenSecretManagerService.java
+++ /dev/null
@@ -1,240 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.yarn.server.timeline.security;
-
-import java.io.IOException;
-import java.util.Map.Entry;
-
-import org.apache.hadoop.classification.InterfaceAudience.Private;
-import org.apache.hadoop.classification.InterfaceStability.Unstable;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.security.token.delegation.AbstractDelegationTokenSecretManager;
-import org.apache.hadoop.security.token.delegation.DelegationKey;
-import org.apache.hadoop.service.AbstractService;
-import org.apache.hadoop.util.ReflectionUtils;
-import org.apache.hadoop.yarn.conf.YarnConfiguration;
-import org.apache.hadoop.yarn.security.client.TimelineDelegationTokenIdentifier;
-import org.apache.hadoop.yarn.server.timeline.recovery.LeveldbTimelineStateStore;
-import org.apache.hadoop.yarn.server.timeline.recovery.TimelineStateStore;
-import org.apache.hadoop.yarn.server.timeline.recovery.TimelineStateStore.TimelineServiceState;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-/**
- * The service wrapper of {@link TimelineDelegationTokenSecretManager}
- */
-@Private
-@Unstable
-public class TimelineDelegationTokenSecretManagerService extends
-    AbstractService {
-
-  private TimelineDelegationTokenSecretManager secretManager = null;
-  private TimelineStateStore stateStore = null;
-
-  public TimelineDelegationTokenSecretManagerService() {
-    super(TimelineDelegationTokenSecretManagerService.class.getName());
-  }
-
-  @Override
-  protected void serviceInit(Configuration conf) throws Exception {
-    if (conf.getBoolean(YarnConfiguration.TIMELINE_SERVICE_RECOVERY_ENABLED,
-        YarnConfiguration.DEFAULT_TIMELINE_SERVICE_RECOVERY_ENABLED)) {
-      stateStore = createStateStore(conf);
-      stateStore.init(conf);
-    }
-
-    long secretKeyInterval =
-        conf.getLong(YarnConfiguration.TIMELINE_DELEGATION_KEY_UPDATE_INTERVAL,
-            YarnConfiguration.DEFAULT_TIMELINE_DELEGATION_KEY_UPDATE_INTERVAL);
-    long tokenMaxLifetime =
-        conf.getLong(YarnConfiguration.TIMELINE_DELEGATION_TOKEN_MAX_LIFETIME,
-            YarnConfiguration.DEFAULT_TIMELINE_DELEGATION_TOKEN_MAX_LIFETIME);
-    long tokenRenewInterval =
-        conf.getLong(YarnConfiguration.TIMELINE_DELEGATION_TOKEN_RENEW_INTERVAL,
-            YarnConfiguration.DEFAULT_TIMELINE_DELEGATION_TOKEN_RENEW_INTERVAL);
-    secretManager = new TimelineDelegationTokenSecretManager(secretKeyInterval,
-        tokenMaxLifetime, tokenRenewInterval, 3600000, stateStore);
-    super.init(conf);
-  }
-
-  @Override
-  protected void serviceStart() throws Exception {
-    if (stateStore != null) {
-      stateStore.start();
-      TimelineServiceState state = stateStore.loadState();
-      secretManager.recover(state);
-    }
-
-    secretManager.startThreads();
-    super.serviceStart();
-  }
-
-  @Override
-  protected void serviceStop() throws Exception {
-    if (stateStore != null) {
-      stateStore.stop();
-    }
-
-    secretManager.stopThreads();
-    super.stop();
-  }
-
-  protected TimelineStateStore createStateStore(
-      Configuration conf) {
-    return ReflectionUtils.newInstance(
-        conf.getClass(YarnConfiguration.TIMELINE_SERVICE_STATE_STORE_CLASS,
-            LeveldbTimelineStateStore.class,
-            TimelineStateStore.class), conf);
-  }
-
-  /**
-   * Get the instance of {@link TimelineDelegationTokenSecretManager}.
-   *
-   * @return the instance of {@link TimelineDelegationTokenSecretManager}
-   */
-  public TimelineDelegationTokenSecretManager
-  getTimelineDelegationTokenSecretManager() {
-    return secretManager;
-  }
-
-  @Private
-  @Unstable
-  public static class TimelineDelegationTokenSecretManager extends
-      AbstractDelegationTokenSecretManager<TimelineDelegationTokenIdentifier> {
-
-    public static final Logger LOG =
-        LoggerFactory.getLogger(TimelineDelegationTokenSecretManager.class);
-
-    private TimelineStateStore stateStore;
-
-    /**
-     * Create a timeline secret manager
-     * @param delegationKeyUpdateInterval the number of milliseconds for rolling
-     *        new secret keys.
-     * @param delegationTokenMaxLifetime the maximum lifetime of the delegation
-     *        tokens in milliseconds
-     * @param delegationTokenRenewInterval how often the tokens must be renewed
-     *        in milliseconds
-     * @param delegationTokenRemoverScanInterval how often the tokens are
-     *        scanned for expired tokens in milliseconds
-     * @param stateStore timeline service state store
-     */
-    public TimelineDelegationTokenSecretManager(
-        long delegationKeyUpdateInterval,
-        long delegationTokenMaxLifetime,
-        long delegationTokenRenewInterval,
-        long delegationTokenRemoverScanInterval,
-        TimelineStateStore stateStore) {
-      super(delegationKeyUpdateInterval, delegationTokenMaxLifetime,
-          delegationTokenRenewInterval, delegationTokenRemoverScanInterval);
-      this.stateStore = stateStore;
-    }
-
-    @Override
-    public TimelineDelegationTokenIdentifier createIdentifier() {
-      return new TimelineDelegationTokenIdentifier();
-    }
-
-    @Override
-    protected void storeNewMasterKey(DelegationKey key) throws IOException {
-      if (LOG.isDebugEnabled()) {
-        LOG.debug("Storing master key " + key.getKeyId());
-      }
-      try {
-        if (stateStore != null) {
-          stateStore.storeTokenMasterKey(key);
-        }
-      } catch (IOException e) {
-        LOG.error("Unable to store master key " + key.getKeyId(), e);
-      }
-    }
-
-    @Override
-    protected void removeStoredMasterKey(DelegationKey key) {
-      if (LOG.isDebugEnabled()) {
-        LOG.debug("Removing master key " + key.getKeyId());
-      }
-      try {
-        if (stateStore != null) {
-          stateStore.removeTokenMasterKey(key);
-        }
-      } catch (IOException e) {
-        LOG.error("Unable to remove master key " + key.getKeyId(), e);
-      }
-    }
-
-    @Override
-    protected void storeNewToken(TimelineDelegationTokenIdentifier tokenId,
-        long renewDate) {
-      if (LOG.isDebugEnabled()) {
-        LOG.debug("Storing token " + tokenId.getSequenceNumber());
-      }
-      try {
-        if (stateStore != null) {
-          stateStore.storeToken(tokenId, renewDate);
-        }
-      } catch (IOException e) {
-        LOG.error("Unable to store token " + tokenId.getSequenceNumber(), e);
-      }
-    }
-
-    @Override
-    protected void removeStoredToken(TimelineDelegationTokenIdentifier tokenId)
-        throws IOException {
-      if (LOG.isDebugEnabled()) {
-        LOG.debug("Removing token " + tokenId.getSequenceNumber());
-      }
-      try {
-        if (stateStore != null) {
-          stateStore.removeToken(tokenId);
-        }
-      } catch (IOException e) {
-        LOG.error("Unable to remove token " + tokenId.getSequenceNumber(), e);
-      }
-    }
-
-    @Override
-    protected void updateStoredToken(TimelineDelegationTokenIdentifier tokenId,
-        long renewDate) {
-      if (LOG.isDebugEnabled()) {
-        LOG.debug("Updating token " + tokenId.getSequenceNumber());
-      }
-      try {
-        if (stateStore != null) {
-          stateStore.updateToken(tokenId, renewDate);
-        }
-      } catch (IOException e) {
-        LOG.error("Unable to update token " + tokenId.getSequenceNumber(), e);
-      }
-    }
-
-    public void recover(TimelineServiceState state) throws IOException {
-      LOG.info("Recovering " + getClass().getSimpleName());
-      for (DelegationKey key : state.getTokenMasterKeyState()) {
-        addKey(key);
-      }
-      this.delegationTokenSequenceNumber = state.getLatestSequenceNumber();
-      for (Entry<TimelineDelegationTokenIdentifier, Long> entry :
-          state.getTokenState().entrySet()) {
-        addPersistedDelegationToken(entry.getKey(), entry.getValue());
-      }
-    }
-  }
-
-}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/879de512/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/timeline/security/TimelineV1DelegationTokenSecretManagerService.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/timeline/security/TimelineV1DelegationTokenSecretManagerService.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/timeline/security/TimelineV1DelegationTokenSecretManagerService.java
new file mode 100644
index 0000000..85d8cca
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/timeline/security/TimelineV1DelegationTokenSecretManagerService.java
@@ -0,0 +1,225 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.server.timeline.security;
+
+import java.io.IOException;
+import java.util.Map.Entry;
+
+import org.apache.hadoop.classification.InterfaceAudience.Private;
+import org.apache.hadoop.classification.InterfaceStability.Unstable;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.security.token.delegation.AbstractDelegationTokenSecretManager;
+import org.apache.hadoop.security.token.delegation.DelegationKey;
+import org.apache.hadoop.util.ReflectionUtils;
+import org.apache.hadoop.yarn.conf.YarnConfiguration;
+import org.apache.hadoop.yarn.security.client.TimelineDelegationTokenIdentifier;
+import org.apache.hadoop.yarn.server.timeline.recovery.LeveldbTimelineStateStore;
+import org.apache.hadoop.yarn.server.timeline.recovery.TimelineStateStore;
+import org.apache.hadoop.yarn.server.timeline.recovery.TimelineStateStore.TimelineServiceState;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+/**
+ * The service wrapper of {@link TimelineV1DelegationTokenSecretManager}.
+ */
+@Private
+@Unstable
+public class TimelineV1DelegationTokenSecretManagerService extends
+    TimelineDelgationTokenSecretManagerService {
+  private TimelineStateStore stateStore = null;
+
+  public TimelineV1DelegationTokenSecretManagerService() {
+    super(TimelineV1DelegationTokenSecretManagerService.class.getName());
+  }
+
+  @Override
+  protected void serviceInit(Configuration conf) throws Exception {
+    if (conf.getBoolean(YarnConfiguration.TIMELINE_SERVICE_RECOVERY_ENABLED,
+        YarnConfiguration.DEFAULT_TIMELINE_SERVICE_RECOVERY_ENABLED)) {
+      stateStore = createStateStore(conf);
+      stateStore.init(conf);
+    }
+    super.serviceInit(conf);
+  }
+
+  @Override
+  protected void serviceStart() throws Exception {
+    if (stateStore != null) {
+      stateStore.start();
+      TimelineServiceState state = stateStore.loadState();
+      ((TimelineV1DelegationTokenSecretManager)
+          getTimelineDelegationTokenSecretManager()).recover(state);
+    }
+    super.serviceStart();
+  }
+
+  @Override
+  protected void serviceStop() throws Exception {
+    if (stateStore != null) {
+      stateStore.stop();
+    }
+    super.serviceStop();
+  }
+
+  @Override
+  protected AbstractDelegationTokenSecretManager
+      <TimelineDelegationTokenIdentifier>
+      createTimelineDelegationTokenSecretManager(long secretKeyInterval,
+          long tokenMaxLifetime, long tokenRenewInterval,
+          long tokenRemovalScanInterval) {
+    return new TimelineV1DelegationTokenSecretManager(secretKeyInterval,
+        tokenMaxLifetime, tokenRenewInterval, tokenRemovalScanInterval,
+        stateStore);
+  }
+
+  protected TimelineStateStore createStateStore(
+      Configuration conf) {
+    return ReflectionUtils.newInstance(
+        conf.getClass(YarnConfiguration.TIMELINE_SERVICE_STATE_STORE_CLASS,
+            LeveldbTimelineStateStore.class,
+            TimelineStateStore.class), conf);
+  }
+
+  /**
+   * Delegation token secret manager for ATSv1 and ATSv1.5.
+   */
+  @Private
+  @Unstable
+  public static class TimelineV1DelegationTokenSecretManager extends
+      AbstractDelegationTokenSecretManager<TimelineDelegationTokenIdentifier> {
+
+    public static final Logger LOG =
+        LoggerFactory.getLogger(TimelineV1DelegationTokenSecretManager.class);
+
+    private TimelineStateStore stateStore;
+
+    /**
+     * Create a timeline v1 secret manager.
+     * @param delegationKeyUpdateInterval the number of milliseconds for rolling
+     *        new secret keys.
+     * @param delegationTokenMaxLifetime the maximum lifetime of the delegation
+     *        tokens in milliseconds
+     * @param delegationTokenRenewInterval how often the tokens must be renewed
+     *        in milliseconds
+     * @param delegationTokenRemoverScanInterval how often the tokens are
+     *        scanned for expired tokens in milliseconds
+     * @param stateStore timeline service state store
+     */
+    public TimelineV1DelegationTokenSecretManager(
+        long delegationKeyUpdateInterval,
+        long delegationTokenMaxLifetime,
+        long delegationTokenRenewInterval,
+        long delegationTokenRemoverScanInterval,
+        TimelineStateStore stateStore) {
+      super(delegationKeyUpdateInterval, delegationTokenMaxLifetime,
+          delegationTokenRenewInterval, delegationTokenRemoverScanInterval);
+      this.stateStore = stateStore;
+    }
+
+    @Override
+    public TimelineDelegationTokenIdentifier createIdentifier() {
+      return new TimelineDelegationTokenIdentifier();
+    }
+
+    @Override
+    protected void storeNewMasterKey(DelegationKey key) throws IOException {
+      if (LOG.isDebugEnabled()) {
+        LOG.debug("Storing master key " + key.getKeyId());
+      }
+      try {
+        if (stateStore != null) {
+          stateStore.storeTokenMasterKey(key);
+        }
+      } catch (IOException e) {
+        LOG.error("Unable to store master key " + key.getKeyId(), e);
+      }
+    }
+
+    @Override
+    protected void removeStoredMasterKey(DelegationKey key) {
+      if (LOG.isDebugEnabled()) {
+        LOG.debug("Removing master key " + key.getKeyId());
+      }
+      try {
+        if (stateStore != null) {
+          stateStore.removeTokenMasterKey(key);
+        }
+      } catch (IOException e) {
+        LOG.error("Unable to remove master key " + key.getKeyId(), e);
+      }
+    }
+
+    @Override
+    protected void storeNewToken(TimelineDelegationTokenIdentifier tokenId,
+        long renewDate) {
+      if (LOG.isDebugEnabled()) {
+        LOG.debug("Storing token " + tokenId.getSequenceNumber());
+      }
+      try {
+        if (stateStore != null) {
+          stateStore.storeToken(tokenId, renewDate);
+        }
+      } catch (IOException e) {
+        LOG.error("Unable to store token " + tokenId.getSequenceNumber(), e);
+      }
+    }
+
+    @Override
+    protected void removeStoredToken(TimelineDelegationTokenIdentifier tokenId)
+        throws IOException {
+      if (LOG.isDebugEnabled()) {
+        LOG.debug("Removing token " + tokenId.getSequenceNumber());
+      }
+      try {
+        if (stateStore != null) {
+          stateStore.removeToken(tokenId);
+        }
+      } catch (IOException e) {
+        LOG.error("Unable to remove token " + tokenId.getSequenceNumber(), e);
+      }
+    }
+
+    @Override
+    protected void updateStoredToken(TimelineDelegationTokenIdentifier tokenId,
+        long renewDate) {
+      if (LOG.isDebugEnabled()) {
+        LOG.debug("Updating token " + tokenId.getSequenceNumber());
+      }
+      try {
+        if (stateStore != null) {
+          stateStore.updateToken(tokenId, renewDate);
+        }
+      } catch (IOException e) {
+        LOG.error("Unable to update token " + tokenId.getSequenceNumber(), e);
+      }
+    }
+
+    public void recover(TimelineServiceState state) throws IOException {
+      LOG.info("Recovering " + getClass().getSimpleName());
+      for (DelegationKey key : state.getTokenMasterKeyState()) {
+        addKey(key);
+      }
+      this.delegationTokenSequenceNumber = state.getLatestSequenceNumber();
+      for (Entry<TimelineDelegationTokenIdentifier, Long> entry :
+          state.getTokenState().entrySet()) {
+        addPersistedDelegationToken(entry.getKey(), entry.getValue());
+      }
+    }
+  }
+}
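
Context for the new file above: TimelineV1DelegationTokenSecretManagerService
extends TimelineDelgationTokenSecretManagerService, which is not included in
this message. From the override it implements, the parent evidently follows a
template-method pattern; the sketch below shows the contract it appears to
expose. The field, the fixed interval values, and the lifecycle wiring are
assumptions, not code from this patch.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.security.token.delegation.AbstractDelegationTokenSecretManager;
import org.apache.hadoop.service.AbstractService;
import org.apache.hadoop.yarn.security.client.TimelineDelegationTokenIdentifier;

// Hypothetical sketch of the parent service: only the abstract factory
// method is implied by the patch above; everything else is assumed.
abstract class DelegationSecretManagerServiceSketch extends AbstractService {

  private AbstractDelegationTokenSecretManager
      <TimelineDelegationTokenIdentifier> secretManager;

  protected DelegationSecretManagerServiceSketch(String name) {
    super(name);
  }

  @Override
  protected void serviceInit(Configuration conf) throws Exception {
    // The real service would read the three intervals from
    // YarnConfiguration, as the deleted v1 service above did; fixed
    // placeholder values keep this sketch short.
    secretManager = createTimelineDelegationTokenSecretManager(
        86400000L, 604800000L, 86400000L, 3600000L);
    super.serviceInit(conf);
  }

  @Override
  protected void serviceStart() throws Exception {
    // Subclasses recover persisted state before delegating here, as the
    // v1 service above does.
    secretManager.startThreads();
    super.serviceStart();
  }

  @Override
  protected void serviceStop() throws Exception {
    secretManager.stopThreads();
    super.serviceStop();
  }

  // Subclasses supply the concrete secret manager (the v1 variant above;
  // presumably a v2 sibling elsewhere in this patch series).
  protected abstract AbstractDelegationTokenSecretManager
      <TimelineDelegationTokenIdentifier>
      createTimelineDelegationTokenSecretManager(long secretKeyInterval,
          long tokenMaxLifetime, long tokenRenewInterval,
          long tokenRemovalScanInterval);

  public AbstractDelegationTokenSecretManager
      <TimelineDelegationTokenIdentifier>
      getTimelineDelegationTokenSecretManager() {
    return secretManager;
  }
}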

http://git-wip-us.apache.org/repos/asf/hadoop/blob/879de512/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/test/java/org/apache/hadoop/yarn/server/timeline/security/TestTimelineAuthenticationFilter.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/test/java/org/apache/hadoop/yarn/server/timeline/security/TestTimelineAuthenticationFilter.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/test/java/org/apache/hadoop/yarn/server/timeline/security/TestTimelineAuthenticationFilter.java
deleted file mode 100644
index 063f512..0000000
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/test/java/org/apache/hadoop/yarn/server/timeline/security/TestTimelineAuthenticationFilter.java
+++ /dev/null
@@ -1,323 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.yarn.server.timeline.security;
-
-import static org.junit.Assert.assertTrue;
-
-import java.io.File;
-import java.security.PrivilegedExceptionAction;
-import java.util.Arrays;
-import java.util.Collection;
-import java.util.concurrent.Callable;
-
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
-import org.apache.hadoop.fs.FileUtil;
-import org.apache.hadoop.http.HttpConfig;
-import org.apache.hadoop.io.Text;
-import org.apache.hadoop.minikdc.MiniKdc;
-import org.apache.hadoop.security.UserGroupInformation;
-import org.apache.hadoop.security.authentication.KerberosTestUtils;
-import org.apache.hadoop.security.authentication.client.AuthenticationException;
-import org.apache.hadoop.security.authentication.server.KerberosAuthenticationHandler;
-import org.apache.hadoop.security.authorize.AuthorizationException;
-import org.apache.hadoop.security.ssl.KeyStoreTestUtil;
-import org.apache.hadoop.security.token.Token;
-import org.apache.hadoop.yarn.api.records.timeline.TimelineDomain;
-import org.apache.hadoop.yarn.api.records.timeline.TimelineEntity;
-import org.apache.hadoop.yarn.api.records.timeline.TimelinePutResponse;
-import org.apache.hadoop.yarn.client.api.TimelineClient;
-import org.apache.hadoop.yarn.conf.YarnConfiguration;
-import org.apache.hadoop.yarn.security.client.TimelineDelegationTokenIdentifier;
-import org.apache.hadoop.yarn.server.applicationhistoryservice.ApplicationHistoryServer;
-import org.apache.hadoop.yarn.server.timeline.MemoryTimelineStore;
-import org.apache.hadoop.yarn.server.timeline.TimelineStore;
-import org.junit.AfterClass;
-import org.junit.Assert;
-import org.junit.BeforeClass;
-import org.junit.Test;
-import org.junit.runner.RunWith;
-import org.junit.runners.Parameterized;
-
-@RunWith(Parameterized.class)
-public class TestTimelineAuthenticationFilter {
-
-  private static final String FOO_USER = "foo";
-  private static final String BAR_USER = "bar";
-  private static final String HTTP_USER = "HTTP";
-
-  private static final File testRootDir = new File(
-      System.getProperty("test.build.dir", "target/test-dir"),
-      TestTimelineAuthenticationFilter.class.getName() + "-root");
-  private static File httpSpnegoKeytabFile = new File(
-      KerberosTestUtils.getKeytabFile());
-  private static String httpSpnegoPrincipal =
-      KerberosTestUtils.getServerPrincipal();
-  private static final String BASEDIR =
-      System.getProperty("test.build.dir", "target/test-dir") + "/"
-          + TestTimelineAuthenticationFilter.class.getSimpleName();
-
-  @Parameterized.Parameters
-  public static Collection<Object[]> withSsl() {
-    return Arrays.asList(new Object[][] { { false }, { true } });
-  }
-
-  private static MiniKdc testMiniKDC;
-  private static String keystoresDir;
-  private static String sslConfDir;
-  private static ApplicationHistoryServer testTimelineServer;
-  private static Configuration conf;
-  private static boolean withSsl;
-
-  public TestTimelineAuthenticationFilter(boolean withSsl) {
-    TestTimelineAuthenticationFilter.withSsl = withSsl;
-  }
-
-  @BeforeClass
-  public static void setup() {
-    try {
-      testMiniKDC = new MiniKdc(MiniKdc.createConf(), testRootDir);
-      testMiniKDC.start();
-      testMiniKDC.createPrincipal(
-          httpSpnegoKeytabFile, HTTP_USER + "/localhost");
-    } catch (Exception e) {
-      assertTrue("Couldn't setup MiniKDC", false);
-    }
-
-    try {
-      testTimelineServer = new ApplicationHistoryServer();
-      conf = new Configuration(false);
-      conf.setStrings(TimelineAuthenticationFilterInitializer.PREFIX + "type",
-          "kerberos");
-      conf.set(TimelineAuthenticationFilterInitializer.PREFIX +
-          KerberosAuthenticationHandler.PRINCIPAL, httpSpnegoPrincipal);
-      conf.set(TimelineAuthenticationFilterInitializer.PREFIX +
-          KerberosAuthenticationHandler.KEYTAB,
-          httpSpnegoKeytabFile.getAbsolutePath());
-      conf.set(CommonConfigurationKeysPublic.HADOOP_SECURITY_AUTHENTICATION,
-        "kerberos");
-      conf.set(YarnConfiguration.TIMELINE_SERVICE_PRINCIPAL,
-        httpSpnegoPrincipal);
-      conf.set(YarnConfiguration.TIMELINE_SERVICE_KEYTAB,
-        httpSpnegoKeytabFile.getAbsolutePath());
-      conf.setBoolean(YarnConfiguration.TIMELINE_SERVICE_ENABLED, true);
-      conf.setClass(YarnConfiguration.TIMELINE_SERVICE_STORE,
-          MemoryTimelineStore.class, TimelineStore.class);
-      conf.set(YarnConfiguration.TIMELINE_SERVICE_ADDRESS,
-          "localhost:10200");
-      conf.set(YarnConfiguration.TIMELINE_SERVICE_WEBAPP_ADDRESS,
-          "localhost:8188");
-      conf.set(YarnConfiguration.TIMELINE_SERVICE_WEBAPP_HTTPS_ADDRESS,
-          "localhost:8190");
-      conf.set("hadoop.proxyuser.HTTP.hosts", "*");
-      conf.set("hadoop.proxyuser.HTTP.users", FOO_USER);
-      conf.setInt(YarnConfiguration.TIMELINE_SERVICE_CLIENT_MAX_RETRIES, 1);
-
-      if (withSsl) {
-        conf.set(YarnConfiguration.YARN_HTTP_POLICY_KEY,
-            HttpConfig.Policy.HTTPS_ONLY.name());
-        File base = new File(BASEDIR);
-        FileUtil.fullyDelete(base);
-        base.mkdirs();
-        keystoresDir = new File(BASEDIR).getAbsolutePath();
-        sslConfDir =
-            KeyStoreTestUtil.getClasspathDir(TestTimelineAuthenticationFilter.class);
-        KeyStoreTestUtil.setupSSLConfig(keystoresDir, sslConfDir, conf, false);
-      }
-
-      UserGroupInformation.setConfiguration(conf);
-      testTimelineServer.init(conf);
-      testTimelineServer.start();
-    } catch (Exception e) {
-      assertTrue("Couldn't setup TimelineServer", false);
-    }
-  }
-
-  private TimelineClient createTimelineClientForUGI() {
-    TimelineClient client = TimelineClient.createTimelineClient();
-    client.init(conf);
-    client.start();
-    return client;
-  }
-
-  @AfterClass
-  public static void tearDown() throws Exception {
-    if (testMiniKDC != null) {
-      testMiniKDC.stop();
-    }
-
-    if (testTimelineServer != null) {
-      testTimelineServer.stop();
-    }
-
-    if (withSsl) {
-      KeyStoreTestUtil.cleanupSSLConfig(keystoresDir, sslConfDir);
-      File base = new File(BASEDIR);
-      FileUtil.fullyDelete(base);
-    }
-  }
-
-  @Test
-  public void testPutTimelineEntities() throws Exception {
-    KerberosTestUtils.doAs(HTTP_USER + "/localhost", new Callable<Void>() {
-      @Override
-      public Void call() throws Exception {
-        TimelineClient client = createTimelineClientForUGI();
-        TimelineEntity entityToStore = new TimelineEntity();
-        entityToStore.setEntityType(
-            TestTimelineAuthenticationFilter.class.getName());
-        entityToStore.setEntityId("entity1");
-        entityToStore.setStartTime(0L);
-        TimelinePutResponse putResponse = client.putEntities(entityToStore);
-        Assert.assertEquals(0, putResponse.getErrors().size());
-        TimelineEntity entityToRead =
-            testTimelineServer.getTimelineStore().getEntity(
-                "entity1", TestTimelineAuthenticationFilter.class.getName(), null);
-        Assert.assertNotNull(entityToRead);
-        return null;
-      }
-    });
-  }
-
-  @Test
-  public void testPutDomains() throws Exception {
-    KerberosTestUtils.doAs(HTTP_USER + "/localhost", new Callable<Void>() {
-      @Override
-      public Void call() throws Exception {
-        TimelineClient client = createTimelineClientForUGI();
-        TimelineDomain domainToStore = new TimelineDomain();
-        domainToStore.setId(TestTimelineAuthenticationFilter.class.getName());
-        domainToStore.setReaders("*");
-        domainToStore.setWriters("*");
-        client.putDomain(domainToStore);
-        TimelineDomain domainToRead =
-            testTimelineServer.getTimelineStore().getDomain(
-                TestTimelineAuthenticationFilter.class.getName());
-        Assert.assertNotNull(domainToRead);
-        return null;
-      }
-    });
-  }
-
-  @Test
-  public void testDelegationTokenOperations() throws Exception {
-    TimelineClient httpUserClient =
-      KerberosTestUtils.doAs(HTTP_USER + "/localhost", new Callable<TimelineClient>() {
-        @Override
-        public TimelineClient call() throws Exception {
-          return createTimelineClientForUGI();
-        }
-      });
-    UserGroupInformation httpUser =
-      KerberosTestUtils.doAs(HTTP_USER + "/localhost", new Callable<UserGroupInformation>() {
-        @Override
-        public UserGroupInformation call() throws Exception {
-          return UserGroupInformation.getCurrentUser();
-        }
-      });
-    // Let the HTTP user get a delegation token for itself
-    Token<TimelineDelegationTokenIdentifier> token =
-      httpUserClient.getDelegationToken(httpUser.getShortUserName());
-    Assert.assertNotNull(token);
-    TimelineDelegationTokenIdentifier tDT = token.decodeIdentifier();
-    Assert.assertNotNull(tDT);
-    Assert.assertEquals(new Text(HTTP_USER), tDT.getOwner());
-
-    // Renew token
-    Assert.assertFalse(token.getService().toString().isEmpty());
-    // Renew the token from the token service address
-    long renewTime1 = httpUserClient.renewDelegationToken(token);
-    Thread.sleep(100);
-    token.setService(new Text());
-    Assert.assertTrue(token.getService().toString().isEmpty());
-    // If the token service address is not available, it can still be renewed
-    // from the configured address
-    long renewTime2 = httpUserClient.renewDelegationToken(token);
-    Assert.assertTrue(renewTime1 < renewTime2);
-
-    // Cancel token
-    Assert.assertTrue(token.getService().toString().isEmpty());
-    // If the token service address is not available, it can still be canceled
-    // from the configured address
-    httpUserClient.cancelDelegationToken(token);
-    // Renew should not be successful because the token is canceled
-    try {
-      httpUserClient.renewDelegationToken(token);
-      Assert.fail();
-    } catch (Exception e) {
-      Assert.assertTrue(e.getMessage().contains(
-            "Renewal request for unknown token"));
-    }
-
-    // Let the HTTP user get a delegation token for the FOO user
-    UserGroupInformation fooUgi = UserGroupInformation.createProxyUser(
-        FOO_USER, httpUser);
-    TimelineClient fooUserClient = fooUgi.doAs(
-        new PrivilegedExceptionAction<TimelineClient>() {
-          @Override
-          public TimelineClient run() throws Exception {
-            return createTimelineClientForUGI();
-          }
-        });
-    token = fooUserClient.getDelegationToken(httpUser.getShortUserName());
-    Assert.assertNotNull(token);
-    tDT = token.decodeIdentifier();
-    Assert.assertNotNull(tDT);
-    Assert.assertEquals(new Text(FOO_USER), tDT.getOwner());
-    Assert.assertEquals(new Text(HTTP_USER), tDT.getRealUser());
-
-    // Renew token as the renewer
-    final Token<TimelineDelegationTokenIdentifier> tokenToRenew = token;
-    renewTime1 = httpUserClient.renewDelegationToken(tokenToRenew);
-    renewTime2 = httpUserClient.renewDelegationToken(tokenToRenew);
-    Assert.assertTrue(renewTime1 < renewTime2);
-
-    // Cancel token
-    Assert.assertFalse(tokenToRenew.getService().toString().isEmpty());
-    // Cancel the token from the token service address
-    fooUserClient.cancelDelegationToken(tokenToRenew);
-
-    // Renew should not be successful because the token is canceled
-    try {
-      httpUserClient.renewDelegationToken(tokenToRenew);
-      Assert.fail();
-    } catch (Exception e) {
-      Assert.assertTrue(
-          e.getMessage().contains("Renewal request for unknown token"));
-    }
-
-    // Let the HTTP user get a delegation token for the BAR user
-    UserGroupInformation barUgi = UserGroupInformation.createProxyUser(
-        BAR_USER, httpUser);
-    TimelineClient barUserClient = barUgi.doAs(
-        new PrivilegedExceptionAction<TimelineClient>() {
-          @Override
-          public TimelineClient run() {
-            return createTimelineClientForUGI();
-          }
-        });
-
-    try {
-      barUserClient.getDelegationToken(httpUser.getShortUserName());
-      Assert.fail();
-    } catch (Exception e) {
-      Assert.assertTrue(e.getCause() instanceof AuthorizationException || e.getCause() instanceof AuthenticationException);
-    }
-  }
-}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/879de512/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/test/java/org/apache/hadoop/yarn/server/timeline/security/TestTimelineAuthenticationFilterForV1.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/test/java/org/apache/hadoop/yarn/server/timeline/security/TestTimelineAuthenticationFilterForV1.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/test/java/org/apache/hadoop/yarn/server/timeline/security/TestTimelineAuthenticationFilterForV1.java
new file mode 100644
index 0000000..d918e8d
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/test/java/org/apache/hadoop/yarn/server/timeline/security/TestTimelineAuthenticationFilterForV1.java
@@ -0,0 +1,332 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.server.timeline.security;
+
+import static org.junit.Assert.assertTrue;
+
+import java.io.File;
+import java.security.PrivilegedExceptionAction;
+import java.util.Arrays;
+import java.util.Collection;
+import java.util.concurrent.Callable;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
+import org.apache.hadoop.fs.FileUtil;
+import org.apache.hadoop.http.HttpConfig;
+import org.apache.hadoop.io.Text;
+import org.apache.hadoop.minikdc.MiniKdc;
+import org.apache.hadoop.security.UserGroupInformation;
+import org.apache.hadoop.security.authentication.KerberosTestUtils;
+import org.apache.hadoop.security.authentication.client.AuthenticationException;
+import org.apache.hadoop.security.authentication.server.KerberosAuthenticationHandler;
+import org.apache.hadoop.security.authorize.AuthorizationException;
+import org.apache.hadoop.security.ssl.KeyStoreTestUtil;
+import org.apache.hadoop.security.token.Token;
+import org.apache.hadoop.yarn.api.records.timeline.TimelineDomain;
+import org.apache.hadoop.yarn.api.records.timeline.TimelineEntity;
+import org.apache.hadoop.yarn.api.records.timeline.TimelinePutResponse;
+import org.apache.hadoop.yarn.client.api.TimelineClient;
+import org.apache.hadoop.yarn.conf.YarnConfiguration;
+import org.apache.hadoop.yarn.security.client.TimelineDelegationTokenIdentifier;
+import org.apache.hadoop.yarn.server.applicationhistoryservice.ApplicationHistoryServer;
+import org.apache.hadoop.yarn.server.timeline.MemoryTimelineStore;
+import org.apache.hadoop.yarn.server.timeline.TimelineStore;
+import org.junit.AfterClass;
+import org.junit.Assert;
+import org.junit.BeforeClass;
+import org.junit.Test;
+import org.junit.runner.RunWith;
+import org.junit.runners.Parameterized;
+
+/**
+ * Test cases for authentication via TimelineAuthenticationFilter while
+ * publishing entities for ATSv1.
+ */
+@RunWith(Parameterized.class)
+public class TestTimelineAuthenticationFilterForV1 {
+
+  private static final String FOO_USER = "foo";
+  private static final String BAR_USER = "bar";
+  private static final String HTTP_USER = "HTTP";
+
+  private static final File TEST_ROOT_DIR = new File(
+      System.getProperty("test.build.dir", "target/test-dir"),
+          TestTimelineAuthenticationFilterForV1.class.getName() + "-root");
+  private static File httpSpnegoKeytabFile = new File(
+      KerberosTestUtils.getKeytabFile());
+  private static String httpSpnegoPrincipal =
+      KerberosTestUtils.getServerPrincipal();
+  private static final String BASEDIR =
+      System.getProperty("test.build.dir", "target/test-dir") + "/"
+          + TestTimelineAuthenticationFilterForV1.class.getSimpleName();
+
+  @Parameterized.Parameters
+  public static Collection<Object[]> withSsl() {
+    return Arrays.asList(new Object[][] {{false}, {true}});
+  }
+
+  private static MiniKdc testMiniKDC;
+  private static String keystoresDir;
+  private static String sslConfDir;
+  private static ApplicationHistoryServer testTimelineServer;
+  private static Configuration conf;
+  private static boolean withSsl;
+
+  public TestTimelineAuthenticationFilterForV1(boolean withSsl) {
+    TestTimelineAuthenticationFilterForV1.withSsl = withSsl;
+  }
+
+  @BeforeClass
+  public static void setup() {
+    try {
+      testMiniKDC = new MiniKdc(MiniKdc.createConf(), TEST_ROOT_DIR);
+      testMiniKDC.start();
+      testMiniKDC.createPrincipal(
+          httpSpnegoKeytabFile, HTTP_USER + "/localhost");
+    } catch (Exception e) {
+      assertTrue("Couldn't setup MiniKDC", false);
+    }
+
+    try {
+      testTimelineServer = new ApplicationHistoryServer();
+      conf = new Configuration(false);
+      conf.setStrings(TimelineAuthenticationFilterInitializer.PREFIX + "type",
+          "kerberos");
+      conf.set(TimelineAuthenticationFilterInitializer.PREFIX +
+          KerberosAuthenticationHandler.PRINCIPAL, httpSpnegoPrincipal);
+      conf.set(TimelineAuthenticationFilterInitializer.PREFIX +
+          KerberosAuthenticationHandler.KEYTAB,
+          httpSpnegoKeytabFile.getAbsolutePath());
+      conf.set(CommonConfigurationKeysPublic.HADOOP_SECURITY_AUTHENTICATION,
+          "kerberos");
+      conf.set(YarnConfiguration.TIMELINE_SERVICE_PRINCIPAL,
+          httpSpnegoPrincipal);
+      conf.set(YarnConfiguration.TIMELINE_SERVICE_KEYTAB,
+          httpSpnegoKeytabFile.getAbsolutePath());
+      conf.setBoolean(YarnConfiguration.TIMELINE_SERVICE_ENABLED, true);
+      conf.setClass(YarnConfiguration.TIMELINE_SERVICE_STORE,
+          MemoryTimelineStore.class, TimelineStore.class);
+      conf.set(YarnConfiguration.TIMELINE_SERVICE_ADDRESS,
+          "localhost:10200");
+      conf.set(YarnConfiguration.TIMELINE_SERVICE_WEBAPP_ADDRESS,
+          "localhost:8188");
+      conf.set(YarnConfiguration.TIMELINE_SERVICE_WEBAPP_HTTPS_ADDRESS,
+          "localhost:8190");
+      conf.set("hadoop.proxyuser.HTTP.hosts", "*");
+      conf.set("hadoop.proxyuser.HTTP.users", FOO_USER);
+      conf.setInt(YarnConfiguration.TIMELINE_SERVICE_CLIENT_MAX_RETRIES, 1);
+
+      if (withSsl) {
+        conf.set(YarnConfiguration.YARN_HTTP_POLICY_KEY,
+            HttpConfig.Policy.HTTPS_ONLY.name());
+        File base = new File(BASEDIR);
+        FileUtil.fullyDelete(base);
+        base.mkdirs();
+        keystoresDir = new File(BASEDIR).getAbsolutePath();
+        sslConfDir = KeyStoreTestUtil.getClasspathDir(
+            TestTimelineAuthenticationFilterForV1.class);
+        KeyStoreTestUtil.setupSSLConfig(keystoresDir, sslConfDir, conf, false);
+      }
+
+      UserGroupInformation.setConfiguration(conf);
+      testTimelineServer.init(conf);
+      testTimelineServer.start();
+    } catch (Exception e) {
+      e.printStackTrace();
+      assertTrue("Couldn't setup TimelineServer", false);
+    }
+  }
+
+  private TimelineClient createTimelineClientForUGI() {
+    TimelineClient client = TimelineClient.createTimelineClient();
+    client.init(conf);
+    client.start();
+    return client;
+  }
+
+  @AfterClass
+  public static void tearDown() throws Exception {
+    if (testMiniKDC != null) {
+      testMiniKDC.stop();
+    }
+
+    if (testTimelineServer != null) {
+      testTimelineServer.stop();
+    }
+
+    if (withSsl) {
+      KeyStoreTestUtil.cleanupSSLConfig(keystoresDir, sslConfDir);
+      File base = new File(BASEDIR);
+      FileUtil.fullyDelete(base);
+    }
+  }
+
+  @Test
+  public void testPutTimelineEntities() throws Exception {
+    KerberosTestUtils.doAs(HTTP_USER + "/localhost", new Callable<Void>() {
+      @Override
+      public Void call() throws Exception {
+        TimelineClient client = createTimelineClientForUGI();
+        TimelineEntity entityToStore = new TimelineEntity();
+        entityToStore.setEntityType(
+            TestTimelineAuthenticationFilterForV1.class.getName());
+        entityToStore.setEntityId("entity1");
+        entityToStore.setStartTime(0L);
+        TimelinePutResponse putResponse = client.putEntities(entityToStore);
+        Assert.assertEquals(0, putResponse.getErrors().size());
+        TimelineEntity entityToRead =
+            testTimelineServer.getTimelineStore().getEntity("entity1",
+                TestTimelineAuthenticationFilterForV1.class.getName(), null);
+        Assert.assertNotNull(entityToRead);
+        return null;
+      }
+    });
+  }
+
+  @Test
+  public void testPutDomains() throws Exception {
+    KerberosTestUtils.doAs(HTTP_USER + "/localhost", new Callable<Void>() {
+      @Override
+      public Void call() throws Exception {
+        TimelineClient client = createTimelineClientForUGI();
+        TimelineDomain domainToStore = new TimelineDomain();
+        domainToStore.setId(
+            TestTimelineAuthenticationFilterForV1.class.getName());
+        domainToStore.setReaders("*");
+        domainToStore.setWriters("*");
+        client.putDomain(domainToStore);
+        TimelineDomain domainToRead =
+            testTimelineServer.getTimelineStore().getDomain(
+                TestTimelineAuthenticationFilterForV1.class.getName());
+        Assert.assertNotNull(domainToRead);
+        return null;
+      }
+    });
+  }
+
+  @Test
+  public void testDelegationTokenOperations() throws Exception {
+    TimelineClient httpUserClient =
+        KerberosTestUtils.doAs(HTTP_USER + "/localhost",
+            new Callable<TimelineClient>() {
+            @Override
+            public TimelineClient call() throws Exception {
+              return createTimelineClientForUGI();
+            }
+          });
+    UserGroupInformation httpUser =
+        KerberosTestUtils.doAs(HTTP_USER + "/localhost",
+            new Callable<UserGroupInformation>() {
+            @Override
+            public UserGroupInformation call() throws Exception {
+              return UserGroupInformation.getCurrentUser();
+            }
+          });
+    // Let the HTTP user get a delegation token for itself
+    Token<TimelineDelegationTokenIdentifier> token =
+        httpUserClient.getDelegationToken(httpUser.getShortUserName());
+    Assert.assertNotNull(token);
+    TimelineDelegationTokenIdentifier tDT = token.decodeIdentifier();
+    Assert.assertNotNull(tDT);
+    Assert.assertEquals(new Text(HTTP_USER), tDT.getOwner());
+
+    // Renew token
+    Assert.assertFalse(token.getService().toString().isEmpty());
+    // Renew the token from the token service address
+    long renewTime1 = httpUserClient.renewDelegationToken(token);
+    Thread.sleep(100);
+    token.setService(new Text());
+    Assert.assertTrue(token.getService().toString().isEmpty());
+    // If the token service address is not available, it can still be renewed
+    // from the configured address
+    long renewTime2 = httpUserClient.renewDelegationToken(token);
+    Assert.assertTrue(renewTime1 < renewTime2);
+
+    // Cancel token
+    Assert.assertTrue(token.getService().toString().isEmpty());
+    // If the token service address is not available, it can still be canceled
+    // from the configured address
+    httpUserClient.cancelDelegationToken(token);
+    // Renew should not be successful because the token is canceled
+    try {
+      httpUserClient.renewDelegationToken(token);
+      Assert.fail();
+    } catch (Exception e) {
+      Assert.assertTrue(e.getMessage().contains(
+            "Renewal request for unknown token"));
+    }
+
+    // Let the HTTP user get a delegation token for the FOO user
+    UserGroupInformation fooUgi = UserGroupInformation.createProxyUser(
+        FOO_USER, httpUser);
+    TimelineClient fooUserClient = fooUgi.doAs(
+        new PrivilegedExceptionAction<TimelineClient>() {
+          @Override
+          public TimelineClient run() throws Exception {
+            return createTimelineClientForUGI();
+          }
+        });
+    token = fooUserClient.getDelegationToken(httpUser.getShortUserName());
+    Assert.assertNotNull(token);
+    tDT = token.decodeIdentifier();
+    Assert.assertNotNull(tDT);
+    Assert.assertEquals(new Text(FOO_USER), tDT.getOwner());
+    Assert.assertEquals(new Text(HTTP_USER), tDT.getRealUser());
+
+    // Renew token as the renewer
+    final Token<TimelineDelegationTokenIdentifier> tokenToRenew = token;
+    renewTime1 = httpUserClient.renewDelegationToken(tokenToRenew);
+    renewTime2 = httpUserClient.renewDelegationToken(tokenToRenew);
+    Assert.assertTrue(renewTime1 < renewTime2);
+
+    // Cancel token
+    Assert.assertFalse(tokenToRenew.getService().toString().isEmpty());
+    // Cancel the token from the token service address
+    fooUserClient.cancelDelegationToken(tokenToRenew);
+
+    // Renew should not be successful because the token is canceled
+    try {
+      httpUserClient.renewDelegationToken(tokenToRenew);
+      Assert.fail();
+    } catch (Exception e) {
+      Assert.assertTrue(
+          e.getMessage().contains("Renewal request for unknown token"));
+    }
+
+    // Let the HTTP user get a delegation token for the BAR user
+    UserGroupInformation barUgi = UserGroupInformation.createProxyUser(
+        BAR_USER, httpUser);
+    TimelineClient barUserClient = barUgi.doAs(
+        new PrivilegedExceptionAction<TimelineClient>() {
+          @Override
+          public TimelineClient run() {
+            return createTimelineClientForUGI();
+          }
+        });
+
+    try {
+      barUserClient.getDelegationToken(httpUser.getShortUserName());
+      Assert.fail();
+    } catch (Exception e) {
+      Assert.assertTrue(e.getCause() instanceof AuthorizationException ||
+          e.getCause() instanceof AuthenticationException);
+    }
+  }
+}
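
The delegation-token test above is long, but the client-side flow it
exercises reduces to a few TimelineClient calls. Below is a condensed sketch
using only APIs that appear in the test; the conf argument is assumed to be a
Kerberos-enabled configuration like the one built in setup(), and error
handling is elided.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.security.token.Token;
import org.apache.hadoop.yarn.client.api.TimelineClient;
import org.apache.hadoop.yarn.security.client.TimelineDelegationTokenIdentifier;

// Condensed from testDelegationTokenOperations() above: obtain, renew and
// cancel a timeline delegation token as the authenticated caller.
final class TokenFlowSketch {
  static void roundTrip(Configuration conf) throws Exception {
    TimelineClient client = TimelineClient.createTimelineClient();
    client.init(conf);
    client.start();
    try {
      Token<TimelineDelegationTokenIdentifier> token =
          client.getDelegationToken(
              UserGroupInformation.getCurrentUser().getShortUserName());
      long newExpiry = client.renewDelegationToken(token); // extends lifetime
      client.cancelDelegationToken(token); // later renewals are rejected
    } finally {
      client.stop();
    }
  }
}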

http://git-wip-us.apache.org/repos/asf/hadoop/blob/879de512/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/test/java/org/apache/hadoop/yarn/server/timeline/security/TestTimelineAuthenticationFilterInitializer.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/test/java/org/apache/hadoop/yarn/server/timeline/security/TestTimelineAuthenticationFilterInitializer.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/test/java/org/apache/hadoop/yarn/server/timeline/security/TestTimelineAuthenticationFilterInitializer.java
deleted file mode 100644
index 430911e..0000000
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/test/java/org/apache/hadoop/yarn/server/timeline/security/TestTimelineAuthenticationFilterInitializer.java
+++ /dev/null
@@ -1,76 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.yarn.server.timeline.security;
-
-import org.junit.Assert;
-
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.http.FilterContainer;
-import org.apache.hadoop.yarn.conf.YarnConfiguration;
-import static org.apache.hadoop.yarn.server.timeline.security.TimelineAuthenticationFilterInitializer.PREFIX;
-import org.junit.Test;
-import org.mockito.Mockito;
-
-
-public class TestTimelineAuthenticationFilterInitializer {
-
-  @Test
-  public void testProxyUserConfiguration() {
-    FilterContainer container = Mockito.mock(FilterContainer.class);
-    for (int i = 0; i < 3; ++i) {
-      Configuration conf = new YarnConfiguration();
-      switch (i) {
-      case 0:
-        // hadoop.proxyuser prefix
-        conf.set("hadoop.proxyuser.foo.hosts", "*");
-        conf.set("hadoop.proxyuser.foo.users", "*");
-        conf.set("hadoop.proxyuser.foo.groups", "*");
-        break;
-      case 1:
-        // yarn.timeline-service.http-authentication.proxyuser prefix
-        conf.set(PREFIX + "proxyuser.foo.hosts", "*");
-        conf.set(PREFIX + "proxyuser.foo.users", "*");
-        conf.set(PREFIX + "proxyuser.foo.groups", "*");
-        break;
-      case 2:
-        // hadoop.proxyuser prefix has been overwritten by
-        // yarn.timeline-service.http-authentication.proxyuser prefix
-        conf.set("hadoop.proxyuser.foo.hosts", "bar");
-        conf.set("hadoop.proxyuser.foo.users", "bar");
-        conf.set("hadoop.proxyuser.foo.groups", "bar");
-        conf.set(PREFIX + "proxyuser.foo.hosts", "*");
-        conf.set(PREFIX + "proxyuser.foo.users", "*");
-        conf.set(PREFIX + "proxyuser.foo.groups", "*");
-        break;
-      default:
-        break;
-      }
-
-      TimelineAuthenticationFilterInitializer initializer =
-          new TimelineAuthenticationFilterInitializer();
-      initializer.initFilter(container, conf);
-      Assert.assertEquals(
-          "*", initializer.filterConfig.get("proxyuser.foo.hosts"));
-      Assert.assertEquals(
-          "*", initializer.filterConfig.get("proxyuser.foo.users"));
-      Assert.assertEquals(
-          "*", initializer.filterConfig.get("proxyuser.foo.groups"));
-    }
-  }
-}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/879de512/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/timeline/security/TimelineAuthenticationFilter.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/timeline/security/TimelineAuthenticationFilter.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/timeline/security/TimelineAuthenticationFilter.java
new file mode 100644
index 0000000..f6d1863
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/timeline/security/TimelineAuthenticationFilter.java
@@ -0,0 +1,55 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.server.timeline.security;
+
+import javax.servlet.FilterConfig;
+import javax.servlet.ServletException;
+
+import org.apache.hadoop.classification.InterfaceAudience.Private;
+import org.apache.hadoop.classification.InterfaceStability.Unstable;
+import org.apache.hadoop.security.token.delegation.AbstractDelegationTokenSecretManager;
+import org.apache.hadoop.security.token.delegation.web.DelegationTokenAuthenticationFilter;
+import org.apache.hadoop.yarn.security.client.TimelineDelegationTokenIdentifier;
+
+/**
+ * Timeline authentication filter provides delegation token support for ATSv1
+ * and ATSv2.
+ */
+@Private
+@Unstable
+public class TimelineAuthenticationFilter
+    extends DelegationTokenAuthenticationFilter {
+
+  private static AbstractDelegationTokenSecretManager
+      <TimelineDelegationTokenIdentifier> secretManager;
+
+  @Override
+  public void init(FilterConfig filterConfig) throws ServletException {
+    filterConfig.getServletContext().setAttribute(
+        DelegationTokenAuthenticationFilter.
+            DELEGATION_TOKEN_SECRET_MANAGER_ATTR, secretManager);
+    super.init(filterConfig);
+  }
+
+  public static void setTimelineDelegationTokenSecretManager(
+      AbstractDelegationTokenSecretManager
+          <TimelineDelegationTokenIdentifier> secretMgr) {
+    TimelineAuthenticationFilter.secretManager = secretMgr;
+  }
+}
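
A note on wiring, since the static setter is easy to misuse: init() copies
whatever secret manager is stored in the static field into the servlet
context, so the setter must run before the servlet container initializes the
filter. A minimal sketch, assuming a createSecretManager() helper that is not
part of this patch:

    // Sketch only: set the secret manager before the container calls init(),
    // because init() publishes the static reference into the servlet context.
    AbstractDelegationTokenSecretManager<TimelineDelegationTokenIdentifier>
        secretMgr = createSecretManager();   // assumed helper
    TimelineAuthenticationFilter.setTimelineDelegationTokenSecretManager(
        secretMgr);
    // ...then register TimelineAuthenticationFilter with the HTTP server.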

http://git-wip-us.apache.org/repos/asf/hadoop/blob/879de512/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/timeline/security/TimelineAuthenticationFilterInitializer.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/timeline/security/TimelineAuthenticationFilterInitializer.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/timeline/security/TimelineAuthenticationFilterInitializer.java
new file mode 100644
index 0000000..4e7c29a
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/timeline/security/TimelineAuthenticationFilterInitializer.java
@@ -0,0 +1,129 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.server.timeline.security;
+
+import com.google.common.annotations.VisibleForTesting;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.http.FilterContainer;
+import org.apache.hadoop.http.FilterInitializer;
+import org.apache.hadoop.http.HttpServer2;
+import org.apache.hadoop.security.SecurityUtil;
+import org.apache.hadoop.security.authentication.server.AuthenticationFilter;
+import org.apache.hadoop.security.authentication.server.KerberosAuthenticationHandler;
+import org.apache.hadoop.security.authentication.server.PseudoAuthenticationHandler;
+import org.apache.hadoop.security.authorize.ProxyUsers;
+import org.apache.hadoop.security.token.delegation.web.DelegationTokenAuthenticationHandler;
+import org.apache.hadoop.security.token.delegation.web.KerberosDelegationTokenAuthenticationHandler;
+import org.apache.hadoop.security.token.delegation.web.PseudoDelegationTokenAuthenticationHandler;
+import org.apache.hadoop.yarn.security.client.TimelineDelegationTokenIdentifier;
+
+import java.io.IOException;
+import java.util.HashMap;
+import java.util.Map;
+
+/**
+ * Initializes {@link TimelineAuthenticationFilter} which provides support for
+ * Kerberos HTTP SPNEGO authentication.
+ * <p>
+ * It enables Kerberos HTTP SPNEGO plus delegation token authentication for the
+ * timeline server.
+ * <p>
+ * Refer to the {@code core-default.xml} file, after the comment 'HTTP
+ * Authentication' for details on the configuration options. All related
+ * configuration properties have {@code hadoop.http.authentication.} as prefix.
+ */
+public class TimelineAuthenticationFilterInitializer extends FilterInitializer {
+
+  /**
+   * The configuration prefix of timeline HTTP authentication.
+   */
+  public static final String PREFIX = "yarn.timeline-service.http-authentication.";
+
+  @VisibleForTesting
+  Map<String, String> filterConfig;
+
+  /**
+   * Initializes {@link TimelineAuthenticationFilter}.
+   * <p>
+   * Propagates all YARN configuration properties prefixed with
+   * {@value #PREFIX} to the {@link TimelineAuthenticationFilter} configuration.
+   *
+   * @param container
+   *          The filter container
+   * @param conf
+   *          Configuration for run-time parameters
+   */
+  @Override
+  public void initFilter(FilterContainer container, Configuration conf) {
+    filterConfig = new HashMap<String, String>();
+
+    // setting the cookie path to root '/' so it is used for all resources.
+    filterConfig.put(TimelineAuthenticationFilter.COOKIE_PATH, "/");
+
+    for (Map.Entry<String, String> entry : conf) {
+      String name = entry.getKey();
+      if (name.startsWith(ProxyUsers.CONF_HADOOP_PROXYUSER)) {
+        String value = conf.get(name);
+        name = name.substring("hadoop.".length());
+        filterConfig.put(name, value);
+      }
+    }
+    for (Map.Entry<String, String> entry : conf) {
+      String name = entry.getKey();
+      if (name.startsWith(PREFIX)) {
+        // yarn.timeline-service.http-authentication.proxyuser will override
+        // hadoop.proxyuser
+        String value = conf.get(name);
+        name = name.substring(PREFIX.length());
+        filterConfig.put(name, value);
+      }
+    }
+
+    String authType = filterConfig.get(AuthenticationFilter.AUTH_TYPE);
+    if (authType.equals(PseudoAuthenticationHandler.TYPE)) {
+      filterConfig.put(AuthenticationFilter.AUTH_TYPE,
+          PseudoDelegationTokenAuthenticationHandler.class.getName());
+    } else if (authType.equals(KerberosAuthenticationHandler.TYPE)) {
+      filterConfig.put(AuthenticationFilter.AUTH_TYPE,
+          KerberosDelegationTokenAuthenticationHandler.class.getName());
+
+      // Resolve _HOST into bind address
+      String bindAddress = conf.get(HttpServer2.BIND_ADDRESS);
+      String principal =
+          filterConfig.get(KerberosAuthenticationHandler.PRINCIPAL);
+      if (principal != null) {
+        try {
+          principal = SecurityUtil.getServerPrincipal(principal, bindAddress);
+        } catch (IOException ex) {
+          throw new RuntimeException(
+              "Could not resolve Kerberos principal name: " + ex.toString(), ex);
+        }
+        filterConfig.put(KerberosAuthenticationHandler.PRINCIPAL,
+            principal);
+      }
+    }
+
+    filterConfig.put(DelegationTokenAuthenticationHandler.TOKEN_KIND,
+        TimelineDelegationTokenIdentifier.KIND_NAME.toString());
+
+    container.addGlobalFilter("Timeline Authentication Filter",
+        TimelineAuthenticationFilter.class.getName(),
+        filterConfig);
+  }
+}
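
To make the override order concrete: both copy loops in initFilter() write
into the same stripped keys, and the PREFIX loop runs second, so a
timeline-specific proxyuser setting wins over the hadoop.proxyuser one. A
minimal sketch, assuming a (for instance, mocked) FilterContainer, the default
simple auth type from yarn-default.xml, and code living in the same package
(filterConfig is @VisibleForTesting package-private); the property values are
illustrative only:

    Configuration conf = new YarnConfiguration();
    conf.set("hadoop.proxyuser.foo.hosts", "bar");
    conf.set(TimelineAuthenticationFilterInitializer.PREFIX
        + "proxyuser.foo.hosts", "*");

    TimelineAuthenticationFilterInitializer initializer =
        new TimelineAuthenticationFilterInitializer();
    initializer.initFilter(container, conf);  // container: a FilterContainer
    // Both properties strip to the key "proxyuser.foo.hosts"; the PREFIX-ed
    // value is copied later, so "*" (not "bar") ends up in the filter config.
    assert "*".equals(initializer.filterConfig.get("proxyuser.foo.hosts"));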

http://git-wip-us.apache.org/repos/asf/hadoop/blob/879de512/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/timeline/security/TimelineDelgationTokenSecretManagerService.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/timeline/security/TimelineDelgationTokenSecretManagerService.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/timeline/security/TimelineDelgationTokenSecretManagerService.java
new file mode 100644
index 0000000..2e95af2
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/timeline/security/TimelineDelgationTokenSecretManagerService.java
@@ -0,0 +1,83 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.server.timeline.security;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.security.token.delegation.AbstractDelegationTokenSecretManager;
+import org.apache.hadoop.service.AbstractService;
+import org.apache.hadoop.yarn.conf.YarnConfiguration;
+import org.apache.hadoop.yarn.security.client.TimelineDelegationTokenIdentifier;
+
+/**
+ * Abstract implementation of delegation token manager service for different
+ * versions of timeline service.
+ */
+public abstract class TimelineDelgationTokenSecretManagerService extends
+    AbstractService {
+
+  public TimelineDelgationTokenSecretManagerService(String name) {
+    super(name);
+  }
+
+  private static long delegationTokenRemovalScanInterval = 3600000L;
+
+  private AbstractDelegationTokenSecretManager
+      <TimelineDelegationTokenIdentifier> secretManager = null;
+
+  @Override
+  protected void serviceInit(Configuration conf) throws Exception {
+    long secretKeyInterval =
+        conf.getLong(YarnConfiguration.TIMELINE_DELEGATION_KEY_UPDATE_INTERVAL,
+            YarnConfiguration.DEFAULT_TIMELINE_DELEGATION_KEY_UPDATE_INTERVAL);
+    long tokenMaxLifetime =
+        conf.getLong(YarnConfiguration.TIMELINE_DELEGATION_TOKEN_MAX_LIFETIME,
+            YarnConfiguration.DEFAULT_TIMELINE_DELEGATION_TOKEN_MAX_LIFETIME);
+    long tokenRenewInterval =
+        conf.getLong(YarnConfiguration.TIMELINE_DELEGATION_TOKEN_RENEW_INTERVAL,
+            YarnConfiguration.DEFAULT_TIMELINE_DELEGATION_TOKEN_RENEW_INTERVAL);
+    secretManager = createTimelineDelegationTokenSecretManager(
+        secretKeyInterval, tokenMaxLifetime, tokenRenewInterval,
+        delegationTokenRemovalScanInterval);
+    super.serviceInit(conf);
+  }
+
+  protected abstract
+      AbstractDelegationTokenSecretManager<TimelineDelegationTokenIdentifier>
+          createTimelineDelegationTokenSecretManager(long secretKeyInterval,
+          long tokenMaxLifetime, long tokenRenewInterval,
+          long tokenRemovalScanInterval);
+
+  @Override
+  protected void serviceStart() throws Exception {
+    secretManager.startThreads();
+    super.serviceStart();
+  }
+
+  @Override
+  protected void serviceStop() throws Exception {
+    secretManager.stopThreads();
+    super.serviceStop();
+  }
+
+  public AbstractDelegationTokenSecretManager
+      <TimelineDelegationTokenIdentifier>
+          getTimelineDelegationTokenSecretManager() {
+    return secretManager;
+  }
+}
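
A concrete subclass only has to supply the factory method; reading the three
interval settings and starting/stopping the expiry threads are inherited. A
minimal sketch, where MySecretManager stands in for a real
AbstractDelegationTokenSecretManager<TimelineDelegationTokenIdentifier>
implementation (both names below are hypothetical):

    public class MyTimelineDTSecretManagerService
        extends TimelineDelgationTokenSecretManagerService {

      public MyTimelineDTSecretManagerService() {
        super(MyTimelineDTSecretManagerService.class.getName());
      }

      @Override
      protected AbstractDelegationTokenSecretManager
          <TimelineDelegationTokenIdentifier>
              createTimelineDelegationTokenSecretManager(
                  long secretKeyInterval, long tokenMaxLifetime,
                  long tokenRenewInterval, long tokenRemovalScanInterval) {
        // MySecretManager: assumed concrete secret manager, not in this patch.
        return new MySecretManager(secretKeyInterval, tokenMaxLifetime,
            tokenRenewInterval, tokenRemovalScanInterval);
      }
    }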

http://git-wip-us.apache.org/repos/asf/hadoop/blob/879de512/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/timeline/security/package-info.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/timeline/security/package-info.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/timeline/security/package-info.java
new file mode 100644
index 0000000..14a52e3
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/timeline/security/package-info.java
@@ -0,0 +1,26 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/**
+ * Package org.apache.hadoop.yarn.server.timeline.security contains classes
+ * related to timeline authentication filters and the abstract delegation
+ * token service for ATSv1 and ATSv2.
+ */
+@InterfaceAudience.Private
+package org.apache.hadoop.yarn.server.timeline.security;
+import org.apache.hadoop.classification.InterfaceAudience;




[46/50] [abbrv] hadoop git commit: YARN-7094. Document the current known issue with server-side NM graceful decom (rkanter)

Posted by st...@apache.org.
YARN-7094. Document the current known issue with server-side NM graceful decom (rkanter)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/ce79f7be
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/ce79f7be
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/ce79f7be

Branch: refs/heads/HADOOP-13345
Commit: ce79f7be29dfbad444614cb340c59e4ff8137e89
Parents: cc23514
Author: Robert Kanter <rk...@apache.org>
Authored: Wed Aug 30 18:04:55 2017 -0700
Committer: Robert Kanter <rk...@apache.org>
Committed: Wed Aug 30 18:04:55 2017 -0700

----------------------------------------------------------------------
 .../main/java/org/apache/hadoop/yarn/client/cli/RMAdminCLI.java  | 3 ++-
 .../hadoop-yarn-site/src/site/markdown/YarnCommands.md           | 4 ++--
 2 files changed, 4 insertions(+), 3 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/ce79f7be/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/cli/RMAdminCLI.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/cli/RMAdminCLI.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/cli/RMAdminCLI.java
index ac5f559..24c1da0 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/cli/RMAdminCLI.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/cli/RMAdminCLI.java
@@ -113,7 +113,8 @@ public class RMAdminCLI extends HAAdmin {
               + " be handled by the client or the ResourceManager. The client"
               + "-side tracking is blocking, while the server-side tracking"
               + " is not. Omitting the timeout, or a timeout of -1, indicates"
-              + " an infinite timeout."))
+              + " an infinite timeout. Known Issue: the server-side tracking"
+              + " will immediately decommission if an RM HA failover occurs."))
           .put("-refreshNodesResources", new UsageInfo("",
               "Refresh resources of NodeManagers at the ResourceManager."))
           .put("-refreshSuperUserGroupsConfiguration", new UsageInfo("",

http://git-wip-us.apache.org/repos/asf/hadoop/blob/ce79f7be/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/YarnCommands.md
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/YarnCommands.md b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/YarnCommands.md
index 478377fa..5f430ec 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/YarnCommands.md
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/YarnCommands.md
@@ -189,7 +189,7 @@ Usage:
 ```
   Usage: yarn rmadmin
      -refreshQueues
-     -refreshNodes [-g [timeout in seconds]]
+     -refreshNodes [-g|graceful [timeout in seconds] -client|server]
      -refreshNodesResources
      -refreshSuperUserGroupsConfiguration
      -refreshUserToGroupsMappings
@@ -214,7 +214,7 @@ Usage:
 | COMMAND\_OPTIONS | Description |
 |:---- |:---- |
 | -refreshQueues | Reload the queues' acls, states and scheduler specific properties. ResourceManager will reload the mapred-queues configuration file. |
-| -refreshNodes [-g|graceful [timeout in seconds] -client|server] | Refresh the hosts information at the ResourceManager. -g option indicates graceful decommission of excluded hosts, in which case, the optional timeout indicates maximal time in seconds ResourceManager should wait before forcefully mark the node as decommissioned. |
+| -refreshNodes [-g&#124;graceful [timeout in seconds] -client&#124;server] | Refresh the hosts information at the ResourceManager. Here [-g&#124;graceful [timeout in seconds] -client&#124;server] is optional, if we specify the timeout then ResourceManager will wait for timeout before marking the NodeManager as decommissioned. The -client&#124;server indicates if the timeout tracking should be handled by the client or the ResourceManager. The client-side tracking is blocking, while the server-side tracking is not. Omitting the timeout, or a timeout of -1, indicates an infinite timeout. Known Issue: the server-side tracking will immediately decommission if an RM HA failover occurs. |
 | -refreshNodesResources | Refresh resources of NodeManagers at the ResourceManager. |
 | -refreshSuperUserGroupsConfiguration | Refresh superuser proxy groups mappings. |
 | -refreshUserToGroupsMappings | Refresh user-to-groups mappings. |
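
For example, a graceful decommission with server-side timeout tracking and an
illustrative 600-second timeout would be issued as:

    yarn rmadmin -refreshNodes -g 600 -server

Given the known issue above, an RM HA failover while that timeout is still
running would decommission the excluded nodes immediately instead of waiting
out the timeout.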




[26/50] [abbrv] hadoop git commit: YARN-6801. NPE in RM while setting collectors map in NodeHeartbeatResponse. Contributed by Vrushali C.

Posted by st...@apache.org.
YARN-6801. NPE in RM while setting collectors map in NodeHeartbeatResponse. Contributed by Vrushali C.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/66041316
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/66041316
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/66041316

Branch: refs/heads/HADOOP-13345
Commit: 660413165aa25815bbba66ac2195b0ae17184844
Parents: ac7f52d
Author: Rohith Sharma K S <ro...@apache.org>
Authored: Tue Jul 11 17:59:47 2017 +0530
Committer: Varun Saxena <va...@apache.org>
Committed: Wed Aug 30 11:29:53 2017 +0530

----------------------------------------------------------------------
 .../resourcemanager/ResourceTrackerService.java   | 18 +++++++++++-------
 1 file changed, 11 insertions(+), 7 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/66041316/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ResourceTrackerService.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ResourceTrackerService.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ResourceTrackerService.java
index 112c2dd..cc47e02 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ResourceTrackerService.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ResourceTrackerService.java
@@ -620,13 +620,17 @@ public class ResourceTrackerService extends AbstractService implements
     Map<ApplicationId, RMApp> rmApps = rmContext.getRMApps();
     // Set collectors for all running apps on this node.
     for (ApplicationId appId : runningApps) {
-      AppCollectorData appCollectorData = rmApps.get(appId).getCollectorData();
-      if (appCollectorData != null) {
-        liveAppCollectorsMap.put(appId, appCollectorData);
-      } else {
-        if (LOG.isDebugEnabled()) {
-          LOG.debug("Collector for applicaton: " + appId +
-              " hasn't registered yet!");
+      RMApp app = rmApps.get(appId);
+      if (app != null) {
+        AppCollectorData appCollectorData =
+            app.getCollectorData();
+        if (appCollectorData != null) {
+          liveAppCollectorsMap.put(appId, appCollectorData);
+        } else {
+          if (LOG.isDebugEnabled()) {
+            LOG.debug("Collector for applicaton: " + appId +
+                " hasn't registered yet!");
+          }
         }
       }
     }




[41/50] [abbrv] hadoop git commit: MAPREDUCE-6892. Issues with the count of failed/killed tasks in the jhist file. (Peter Bacsko via Haibo Chen)

Posted by st...@apache.org.
MAPREDUCE-6892. Issues with the count of failed/killed tasks in the jhist file. (Peter Bacsko via Haibo Chen)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/d04f85f3
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/d04f85f3
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/d04f85f3

Branch: refs/heads/HADOOP-13345
Commit: d04f85f387e4a78816bc9966ee2b4a647ee05faf
Parents: a20e710
Author: Haibo Chen <ha...@apache.org>
Authored: Wed Aug 30 10:07:48 2017 -0700
Committer: Haibo Chen <ha...@apache.org>
Committed: Wed Aug 30 10:07:48 2017 -0700

----------------------------------------------------------------------
 .../jobhistory/JobHistoryEventHandler.java      |  78 +++++++++---
 .../hadoop/mapreduce/jobhistory/JobSummary.java |  49 ++++++--
 .../apache/hadoop/mapreduce/v2/app/job/Job.java |   4 +
 .../mapreduce/v2/app/job/impl/JobImpl.java      |  43 ++++++-
 .../jobhistory/TestJobHistoryEventHandler.java  |  41 ++++---
 .../mapreduce/jobhistory/TestJobSummary.java    |   6 +-
 .../hadoop/mapreduce/v2/app/MockJobs.java       |  19 +++
 .../mapreduce/v2/app/TestRuntimeEstimators.java |  20 +++
 .../src/main/avro/Events.avpr                   |  10 +-
 .../mapreduce/jobhistory/HistoryViewer.java     |  18 +--
 .../HumanReadableHistoryViewerPrinter.java      |   4 +-
 .../jobhistory/JSONHistoryViewerPrinter.java    |   4 +-
 .../mapreduce/jobhistory/JobFinishedEvent.java  |  55 ++++++---
 .../mapreduce/jobhistory/JobHistoryParser.java  |  34 ++++--
 .../JobUnsuccessfulCompletionEvent.java         |  78 +++++++++---
 .../jobhistory/TestHistoryViewerPrinter.java    |   4 +-
 .../hadoop/mapreduce/v2/hs/CompletedJob.java    |  55 ++++++++-
 .../hadoop/mapreduce/v2/hs/PartialJob.java      |  19 +++
 .../hadoop/mapreduce/v2/hs/UnparsedJob.java     |  20 +++
 .../mapreduce/v2/hs/TestJobHistoryParsing.java  | 122 +++++++++++++++++--
 .../v2/hs/webapp/TestHsWebServicesAcls.java     |  20 +++
 .../rumen/Job20LineHistoryEventEmitter.java     |   6 +-
 22 files changed, 573 insertions(+), 136 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/d04f85f3/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/jobhistory/JobHistoryEventHandler.java
----------------------------------------------------------------------
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/jobhistory/JobHistoryEventHandler.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/jobhistory/JobHistoryEventHandler.java
index cfa91f5..8fa417b 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/jobhistory/JobHistoryEventHandler.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/jobhistory/JobHistoryEventHandler.java
@@ -431,10 +431,18 @@ public class JobHistoryEventHandler extends AbstractService
             + " to have not been closed. Will close");
           //Create a JobFinishEvent so that it is written to the job history
           final Job job = context.getJob(toClose);
+          int successfulMaps = job.getCompletedMaps() - job.getFailedMaps()
+                  - job.getKilledMaps();
+          int successfulReduces = job.getCompletedReduces()
+                  - job.getFailedReduces() - job.getKilledReduces();
+
           JobUnsuccessfulCompletionEvent jucEvent =
             new JobUnsuccessfulCompletionEvent(TypeConverter.fromYarn(toClose),
-                System.currentTimeMillis(), job.getCompletedMaps(),
-                job.getCompletedReduces(),
+                System.currentTimeMillis(),
+                successfulMaps,
+                successfulReduces,
+                job.getFailedMaps(), job.getFailedReduces(),
+                job.getKilledMaps(), job.getKilledReduces(),
                 createJobStateForJobUnsuccessfulCompletionEvent(
                     mi.getForcedJobStateOnShutDown()),
                 job.getDiagnostics());
@@ -655,9 +663,9 @@ public class JobHistoryEventHandler extends AbstractService
           JobFinishedEvent jFinishedEvent =
               (JobFinishedEvent) event.getHistoryEvent();
           mi.getJobIndexInfo().setFinishTime(jFinishedEvent.getFinishTime());
-          mi.getJobIndexInfo().setNumMaps(jFinishedEvent.getFinishedMaps());
+          mi.getJobIndexInfo().setNumMaps(jFinishedEvent.getSucceededMaps());
           mi.getJobIndexInfo().setNumReduces(
-              jFinishedEvent.getFinishedReduces());
+              jFinishedEvent.getSucceededReduces());
           mi.getJobIndexInfo().setJobStatus(JobState.SUCCEEDED.toString());
           closeEventWriter(event.getJobID());
           processDoneFiles(event.getJobID());
@@ -672,8 +680,8 @@ public class JobHistoryEventHandler extends AbstractService
           JobUnsuccessfulCompletionEvent jucEvent =
               (JobUnsuccessfulCompletionEvent) event.getHistoryEvent();
           mi.getJobIndexInfo().setFinishTime(jucEvent.getFinishTime());
-          mi.getJobIndexInfo().setNumMaps(jucEvent.getFinishedMaps());
-          mi.getJobIndexInfo().setNumReduces(jucEvent.getFinishedReduces());
+          mi.getJobIndexInfo().setNumMaps(jucEvent.getSucceededMaps());
+          mi.getJobIndexInfo().setNumReduces(jucEvent.getSucceededReduces());
           mi.getJobIndexInfo().setJobStatus(jucEvent.getStatus());
           closeEventWriter(event.getJobID());
           if(context.isLastAMRetry())
@@ -690,8 +698,8 @@ public class JobHistoryEventHandler extends AbstractService
               (JobUnsuccessfulCompletionEvent) event
               .getHistoryEvent();
           mi.getJobIndexInfo().setFinishTime(jucEvent.getFinishTime());
-          mi.getJobIndexInfo().setNumMaps(jucEvent.getFinishedMaps());
-          mi.getJobIndexInfo().setNumReduces(jucEvent.getFinishedReduces());
+          mi.getJobIndexInfo().setNumMaps(jucEvent.getSucceededMaps());
+          mi.getJobIndexInfo().setNumReduces(jucEvent.getSucceededReduces());
           mi.getJobIndexInfo().setJobStatus(jucEvent.getStatus());
           closeEventWriter(event.getJobID());
           processDoneFiles(event.getJobID());
@@ -739,10 +747,12 @@ public class JobHistoryEventHandler extends AbstractService
     case JOB_FINISHED:
       JobFinishedEvent jfe = (JobFinishedEvent) event;
       summary.setJobFinishTime(jfe.getFinishTime());
-      summary.setNumFinishedMaps(jfe.getFinishedMaps());
+      summary.setNumSucceededMaps(jfe.getSucceededMaps());
       summary.setNumFailedMaps(jfe.getFailedMaps());
-      summary.setNumFinishedReduces(jfe.getFinishedReduces());
+      summary.setNumSucceededReduces(jfe.getSucceededReduces());
       summary.setNumFailedReduces(jfe.getFailedReduces());
+      summary.setNumKilledMaps(jfe.getKilledMaps());
+      summary.setNumKilledReduces(jfe.getKilledReduces());
       if (summary.getJobStatus() == null)
         summary
             .setJobStatus(org.apache.hadoop.mapreduce.JobStatus.State.SUCCEEDED
@@ -753,11 +763,21 @@ public class JobHistoryEventHandler extends AbstractService
       break;
     case JOB_FAILED:
     case JOB_KILLED:
+      Job job = context.getJob(jobId);
       JobUnsuccessfulCompletionEvent juce = (JobUnsuccessfulCompletionEvent) event;
+      int successfulMaps = job.getCompletedMaps() - job.getFailedMaps()
+          - job.getKilledMaps();
+      int successfulReduces = job.getCompletedReduces()
+          - job.getFailedReduces() - job.getKilledReduces();
+
       summary.setJobStatus(juce.getStatus());
-      summary.setNumFinishedMaps(context.getJob(jobId).getTotalMaps());
-      summary.setNumFinishedReduces(context.getJob(jobId).getTotalReduces());
+      summary.setNumSucceededMaps(successfulMaps);
+      summary.setNumSucceededReduces(successfulReduces);
+      summary.setNumFailedMaps(job.getFailedMaps());
+      summary.setNumFailedReduces(job.getFailedReduces());
       summary.setJobFinishTime(juce.getFinishTime());
+      summary.setNumKilledMaps(juce.getKilledMaps());
+      summary.setNumKilledReduces(juce.getKilledReduces());
       setSummarySlotSeconds(summary, context.getJob(jobId).getAllCounters());
       break;
     default:
@@ -840,12 +860,22 @@ public class JobHistoryEventHandler extends AbstractService
         JobUnsuccessfulCompletionEvent juce =
               (JobUnsuccessfulCompletionEvent) event;
         tEvent.addEventInfo("FINISH_TIME", juce.getFinishTime());
-        tEvent.addEventInfo("NUM_MAPS", juce.getFinishedMaps());
-        tEvent.addEventInfo("NUM_REDUCES", juce.getFinishedReduces());
+        tEvent.addEventInfo("NUM_MAPS",
+            juce.getSucceededMaps() +
+            juce.getFailedMaps() +
+            juce.getKilledMaps());
+        tEvent.addEventInfo("NUM_REDUCES",
+            juce.getSucceededReduces() +
+            juce.getFailedReduces() +
+            juce.getKilledReduces());
         tEvent.addEventInfo("JOB_STATUS", juce.getStatus());
         tEvent.addEventInfo("DIAGNOSTICS", juce.getDiagnostics());
-        tEvent.addEventInfo("FINISHED_MAPS", juce.getFinishedMaps());
-        tEvent.addEventInfo("FINISHED_REDUCES", juce.getFinishedReduces());
+        tEvent.addEventInfo("SUCCESSFUL_MAPS", juce.getSucceededMaps());
+        tEvent.addEventInfo("SUCCESSFUL_REDUCES", juce.getSucceededReduces());
+        tEvent.addEventInfo("FAILED_MAPS", juce.getFailedMaps());
+        tEvent.addEventInfo("FAILED_REDUCES", juce.getFailedReduces());
+        tEvent.addEventInfo("KILLED_MAPS", juce.getKilledMaps());
+        tEvent.addEventInfo("KILLED_REDUCES", juce.getKilledReduces());
         tEntity.addEvent(tEvent);
         tEntity.setEntityId(jobId.toString());
         tEntity.setEntityType(MAPREDUCE_JOB_ENTITY_TYPE);
@@ -853,12 +883,20 @@ public class JobHistoryEventHandler extends AbstractService
       case JOB_FINISHED:
         JobFinishedEvent jfe = (JobFinishedEvent) event;
         tEvent.addEventInfo("FINISH_TIME", jfe.getFinishTime());
-        tEvent.addEventInfo("NUM_MAPS", jfe.getFinishedMaps());
-        tEvent.addEventInfo("NUM_REDUCES", jfe.getFinishedReduces());
+        tEvent.addEventInfo("NUM_MAPS",
+            jfe.getSucceededMaps() +
+            jfe.getFailedMaps() +
+            jfe.getKilledMaps());
+        tEvent.addEventInfo("NUM_REDUCES",
+            jfe.getSucceededReduces() +
+            jfe.getFailedReduces() +
+            jfe.getKilledReduces());
         tEvent.addEventInfo("FAILED_MAPS", jfe.getFailedMaps());
         tEvent.addEventInfo("FAILED_REDUCES", jfe.getFailedReduces());
-        tEvent.addEventInfo("FINISHED_MAPS", jfe.getFinishedMaps());
-        tEvent.addEventInfo("FINISHED_REDUCES", jfe.getFinishedReduces());
+        tEvent.addEventInfo("SUCCESSFUL_MAPS", jfe.getSucceededMaps());
+        tEvent.addEventInfo("SUCCESSFUL_REDUCES", jfe.getSucceededReduces());
+        tEvent.addEventInfo("KILLED_MAPS", jfe.getKilledMaps());
+        tEvent.addEventInfo("KILLED_REDUCES", jfe.getKilledReduces());
         tEvent.addEventInfo("MAP_COUNTERS_GROUPS",
             JobHistoryEventUtils.countersToJSON(jfe.getMapCounters()));
         tEvent.addEventInfo("REDUCE_COUNTERS_GROUPS",

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d04f85f3/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/jobhistory/JobSummary.java
----------------------------------------------------------------------
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/jobhistory/JobSummary.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/jobhistory/JobSummary.java
index abe9518..22ae079 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/jobhistory/JobSummary.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/jobhistory/JobSummary.java
@@ -30,10 +30,12 @@ public class JobSummary {
   private long firstReduceTaskLaunchTime; // ReduceAttemptStarted |
                                           // TaskAttemptStartEvent
   private long jobFinishTime;
-  private int numFinishedMaps;
+  private int numSucceededMaps;
   private int numFailedMaps;
-  private int numFinishedReduces;
+  private int numSucceededReduces;
   private int numFailedReduces;
+  private int numKilledMaps;
+  private int numKilledReduces;
   private int resourcesPerMap; // resources used per map/min resource
   private int resourcesPerReduce; // resources used per reduce/min resource
   // resource models
@@ -98,12 +100,12 @@ public class JobSummary {
     this.jobFinishTime = jobFinishTime;
   }
 
-  public int getNumFinishedMaps() {
-    return numFinishedMaps;
+  public int getNumSucceededMaps() {
+    return numSucceededMaps;
   }
 
-  public void setNumFinishedMaps(int numFinishedMaps) {
-    this.numFinishedMaps = numFinishedMaps;
+  public void setNumSucceededMaps(int numSucceededMaps) {
+    this.numSucceededMaps = numSucceededMaps;
   }
 
   public int getNumFailedMaps() {
@@ -114,6 +116,22 @@ public class JobSummary {
     this.numFailedMaps = numFailedMaps;
   }
 
+  public int getKilledMaps() {
+    return numKilledMaps;
+  }
+
+  public void setNumKilledMaps(int numKilledMaps) {
+    this.numKilledMaps = numKilledMaps;
+  }
+
+  public int getKilledReduces() {
+    return numKilledReduces;
+  }
+
+  public void setNumKilledReduces(int numKilledReduces) {
+    this.numKilledReduces = numKilledReduces;
+  }
+
   public int getResourcesPerMap() {
     return resourcesPerMap;
   }
@@ -122,12 +140,12 @@ public class JobSummary {
     this.resourcesPerMap = resourcesPerMap;
   }
   
-  public int getNumFinishedReduces() {
-    return numFinishedReduces;
+  public int getNumSucceededReduces() {
+    return numSucceededReduces;
   }
 
-  public void setNumFinishedReduces(int numFinishedReduces) {
-    this.numFinishedReduces = numFinishedReduces;
+  public void setNumSucceededReduces(int numSucceededReduces) {
+    this.numSucceededReduces = numSucceededReduces;
   }
 
   public int getNumFailedReduces() {
@@ -204,8 +222,15 @@ public class JobSummary {
       .add("finishTime", jobFinishTime)
       .add("resourcesPerMap", resourcesPerMap)
       .add("resourcesPerReduce", resourcesPerReduce)
-      .add("numMaps", numFinishedMaps + numFailedMaps)
-      .add("numReduces", numFinishedReduces + numFailedReduces)
+      .add("numMaps", numSucceededMaps + numFailedMaps + numKilledMaps)
+      .add("numReduces", numSucceededReduces + numFailedReduces
+          + numKilledReduces)
+      .add("succededMaps", numSucceededMaps)
+      .add("succeededReduces", numSucceededReduces)
+      .add("failedMaps", numFailedMaps)
+      .add("failedReduces", numFailedReduces)
+      .add("killedMaps", numKilledMaps)
+      .add("killedReduces", numKilledReduces)
       .add("user", user)
       .add("queue", queue)
       .add("status", jobStatus)

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d04f85f3/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/job/Job.java
----------------------------------------------------------------------
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/job/Job.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/job/Job.java
index 7738810..437707b 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/job/Job.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/job/Job.java
@@ -65,6 +65,10 @@ public interface Job {
   int getTotalReduces();
   int getCompletedMaps();
   int getCompletedReduces();
+  int getFailedMaps();
+  int getFailedReduces();
+  int getKilledMaps();
+  int getKilledReduces();
   float getProgress();
   boolean isUber();
   String getUserName();

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d04f85f3/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/job/impl/JobImpl.java
----------------------------------------------------------------------
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/job/impl/JobImpl.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/job/impl/JobImpl.java
index 6880b6c..abc3e61 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/job/impl/JobImpl.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/job/impl/JobImpl.java
@@ -1684,6 +1684,10 @@ public class JobImpl implements org.apache.hadoop.mapreduce.v2.app.job.Job,
               finishTime,
               succeededMapTaskCount,
               succeededReduceTaskCount,
+              failedMapTaskCount,
+              failedReduceTaskCount,
+              killedMapTaskCount,
+              killedReduceTaskCount,
               finalState.toString(),
               diagnostics);
       eventHandler.handle(new JobHistoryEvent(jobId,
@@ -1748,6 +1752,7 @@ public class JobImpl implements org.apache.hadoop.mapreduce.v2.app.job.Job,
         job.oldJobId, job.finishTime,
         job.succeededMapTaskCount, job.succeededReduceTaskCount,
         job.failedMapTaskCount, job.failedReduceTaskCount,
+        job.killedMapTaskCount, job.killedReduceTaskCount,
         job.finalMapCounters,
         job.finalReduceCounters,
         job.fullCounters);
@@ -1797,7 +1802,7 @@ public class JobImpl implements org.apache.hadoop.mapreduce.v2.app.job.Job,
       job.setFinishTime();
       JobUnsuccessfulCompletionEvent failedEvent =
           new JobUnsuccessfulCompletionEvent(job.oldJobId,
-              job.finishTime, 0, 0,
+              job.finishTime, 0, 0, 0, 0, 0, 0,
               JobStateInternal.KILLED.toString(), job.diagnostics);
       job.eventHandler.handle(new JobHistoryEvent(job.jobId, failedEvent));
       job.finished(JobStateInternal.KILLED);
@@ -1954,8 +1959,8 @@ public class JobImpl implements org.apache.hadoop.mapreduce.v2.app.job.Job,
     @Override
     public JobStateInternal transition(JobImpl job, JobEvent event) {
       job.completedTaskCount++;
-      LOG.info("Num completed Tasks: " + job.completedTaskCount);
       JobTaskEvent taskEvent = (JobTaskEvent) event;
+      LOG.info("Num completed Tasks: " + job.completedTaskCount);
       Task task = job.tasks.get(taskEvent.getTaskID());
       if (taskEvent.getState() == TaskState.SUCCEEDED) {
         taskSucceeded(job, task);
@@ -1991,11 +1996,15 @@ public class JobImpl implements org.apache.hadoop.mapreduce.v2.app.job.Job,
         job.allowedMapFailuresPercent*job.numMapTasks ||
         job.failedReduceTaskCount*100 > 
         job.allowedReduceFailuresPercent*job.numReduceTasks) {
+
         job.setFinishTime();
 
         String diagnosticMsg = "Job failed as tasks failed. " +
             "failedMaps:" + job.failedMapTaskCount + 
-            " failedReduces:" + job.failedReduceTaskCount;
+            " failedReduces:" + job.failedReduceTaskCount +
+            " killedMaps:" + job.killedMapTaskCount +
+            " killedReduces: " + job.killedReduceTaskCount;
+
         LOG.info(diagnosticMsg);
         job.addDiagnostic(diagnosticMsg);
 
@@ -2226,7 +2235,13 @@ public class JobImpl implements org.apache.hadoop.mapreduce.v2.app.job.Job,
       job.setFinishTime();
       JobUnsuccessfulCompletionEvent failedEvent =
           new JobUnsuccessfulCompletionEvent(job.oldJobId,
-              job.finishTime, 0, 0,
+              job.finishTime,
+              job.succeededMapTaskCount,
+              job.succeededReduceTaskCount,
+              job.failedMapTaskCount,
+              job.failedReduceTaskCount,
+              job.killedMapTaskCount,
+              job.killedReduceTaskCount,
               jobHistoryString, job.diagnostics);
       job.eventHandler.handle(new JobHistoryEvent(job.jobId, failedEvent));
       job.finished(terminationState);
@@ -2266,4 +2281,24 @@ public class JobImpl implements org.apache.hadoop.mapreduce.v2.app.job.Job,
   public void setJobPriority(Priority priority) {
     this.jobPriority = priority;
   }
+
+  @Override
+  public int getFailedMaps() {
+    return failedMapTaskCount;
+  }
+
+  @Override
+  public int getFailedReduces() {
+    return failedReduceTaskCount;
+  }
+
+  @Override
+  public int getKilledMaps() {
+    return killedMapTaskCount;
+  }
+
+  @Override
+  public int getKilledReduces() {
+    return killedReduceTaskCount;
+  }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d04f85f3/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/jobhistory/TestJobHistoryEventHandler.java
----------------------------------------------------------------------
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/jobhistory/TestJobHistoryEventHandler.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/jobhistory/TestJobHistoryEventHandler.java
index e35a84d..47caf44 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/jobhistory/TestJobHistoryEventHandler.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/jobhistory/TestJobHistoryEventHandler.java
@@ -275,7 +275,8 @@ public class TestJobHistoryEventHandler {
             t.taskID, t.taskAttemptID, 0, TaskType.MAP, "", null, 0)));
       }
       queueEvent(jheh, new JobHistoryEvent(t.jobId, new JobFinishedEvent(
-          TypeConverter.fromYarn(t.jobId), 0, 10, 10, 0, 0, null, null, new Counters())));
+          TypeConverter.fromYarn(t.jobId), 0, 10, 10, 0, 0, 0, 0, null, null,
+          new Counters())));
 
       handleNextNEvents(jheh, 29);
       verify(mockWriter, times(0)).flush();
@@ -308,22 +309,22 @@ public class TestJobHistoryEventHandler {
 
       handleEvent(jheh, new JobHistoryEvent(t.jobId,
         new JobUnsuccessfulCompletionEvent(TypeConverter.fromYarn(t.jobId), 0,
-          0, 0, JobStateInternal.ERROR.toString())));
+          0, 0, 0, 0, 0, 0, JobStateInternal.ERROR.toString())));
       verify(jheh, times(1)).processDoneFiles(any(JobId.class));
 
       handleEvent(jheh, new JobHistoryEvent(t.jobId, new JobFinishedEvent(
-        TypeConverter.fromYarn(t.jobId), 0, 0, 0, 0, 0, new Counters(),
-        new Counters(), new Counters())));
+          TypeConverter.fromYarn(t.jobId), 0, 0, 0, 0, 0, 0, 0, new Counters(),
+          new Counters(), new Counters())));
       verify(jheh, times(2)).processDoneFiles(any(JobId.class));
 
       handleEvent(jheh, new JobHistoryEvent(t.jobId,
         new JobUnsuccessfulCompletionEvent(TypeConverter.fromYarn(t.jobId), 0,
-          0, 0, JobStateInternal.FAILED.toString())));
+          0, 0, 0, 0, 0, 0, JobStateInternal.FAILED.toString())));
       verify(jheh, times(3)).processDoneFiles(any(JobId.class));
 
       handleEvent(jheh, new JobHistoryEvent(t.jobId,
         new JobUnsuccessfulCompletionEvent(TypeConverter.fromYarn(t.jobId), 0,
-          0, 0, JobStateInternal.KILLED.toString())));
+          0, 0, 0, 0, 0, 0, JobStateInternal.KILLED.toString())));
       verify(jheh, times(4)).processDoneFiles(any(JobId.class));
 
       mockWriter = jheh.getEventWriter();
@@ -354,22 +355,22 @@ public class TestJobHistoryEventHandler {
       // skip processing done files
       handleEvent(jheh, new JobHistoryEvent(t.jobId,
         new JobUnsuccessfulCompletionEvent(TypeConverter.fromYarn(t.jobId), 0,
-          0, 0, JobStateInternal.ERROR.toString())));
+          0, 0, 0, 0, 0, 0, JobStateInternal.ERROR.toString())));
       verify(jheh, times(0)).processDoneFiles(t.jobId);
 
       handleEvent(jheh, new JobHistoryEvent(t.jobId, new JobFinishedEvent(
-          TypeConverter.fromYarn(t.jobId), 0, 0, 0, 0, 0, new Counters(),
+          TypeConverter.fromYarn(t.jobId), 0, 0, 0, 0, 0, 0, 0, new Counters(),
           new Counters(), new Counters())));
       verify(jheh, times(1)).processDoneFiles(t.jobId);
 
       handleEvent(jheh, new JobHistoryEvent(t.jobId,
         new JobUnsuccessfulCompletionEvent(TypeConverter.fromYarn(t.jobId), 0,
-          0, 0, JobStateInternal.FAILED.toString())));
+          0, 0, 0, 0, 0, 0, JobStateInternal.FAILED.toString())));
       verify(jheh, times(2)).processDoneFiles(t.jobId);
 
       handleEvent(jheh, new JobHistoryEvent(t.jobId,
         new JobUnsuccessfulCompletionEvent(TypeConverter.fromYarn(t.jobId), 0,
-          0, 0, JobStateInternal.KILLED.toString())));
+          0, 0, 0, 0, 0, 0, JobStateInternal.KILLED.toString())));
       verify(jheh, times(3)).processDoneFiles(t.jobId);
 
       mockWriter = jheh.getEventWriter();
@@ -405,7 +406,8 @@ public class TestJobHistoryEventHandler {
               "nmhost", 3000, 4000, -1)));
       handleEvent(jheh, new JobHistoryEvent(params.jobId,
           new JobUnsuccessfulCompletionEvent(TypeConverter.fromYarn(
-              params.jobId), 0, 0, 0, JobStateInternal.FAILED.toString())));
+              params.jobId), 0, 0, 0, 0, 0, 0, 0,
+              JobStateInternal.FAILED.toString())));
 
       // verify the value of the sensitive property in job.xml is restored.
       Assert.assertEquals(sensitivePropertyName + " is modified.",
@@ -476,7 +478,7 @@ public class TestJobHistoryEventHandler {
           t.appAttemptId, 200, t.containerId, "nmhost", 3000, 4000, -1)));
 
       handleEvent(jheh, new JobHistoryEvent(t.jobId, new JobFinishedEvent(
-          TypeConverter.fromYarn(t.jobId), 0, 0, 0, 0, 0, new Counters(),
+          TypeConverter.fromYarn(t.jobId), 0, 0, 0, 0, 0, 0, 0, new Counters(),
           new Counters(), new Counters())));
 
       // If we got here then event handler worked but we don't know with which
@@ -546,7 +548,7 @@ public class TestJobHistoryEventHandler {
 
       handleEvent(jheh, new JobHistoryEvent(t.jobId,
         new JobUnsuccessfulCompletionEvent(TypeConverter.fromYarn(t.jobId), 0,
-          0, 0, JobStateInternal.FAILED.toString())));
+          0, 0, 0, 0, 0, 0, JobStateInternal.FAILED.toString())));
 
       Assert.assertEquals(mi.getJobIndexInfo().getSubmitTime(), 100);
       Assert.assertEquals(mi.getJobIndexInfo().getJobStartTime(), 200);
@@ -642,7 +644,7 @@ public class TestJobHistoryEventHandler {
 
       handleEvent(jheh, new JobHistoryEvent(t.jobId,
               new JobFinishedEvent(TypeConverter.fromYarn(t.jobId), 0, 0, 0, 0,
-              0, new Counters(), new Counters(), new Counters()), currentTime));
+              0, 0, 0, new Counters(), new Counters(), new Counters()), currentTime));
       entities = ts.getEntities("MAPREDUCE_JOB", null, null, null,
               null, null, null, null, null, null);
       Assert.assertEquals(1, entities.getEntities().size());
@@ -668,7 +670,8 @@ public class TestJobHistoryEventHandler {
 
       handleEvent(jheh, new JobHistoryEvent(t.jobId,
             new JobUnsuccessfulCompletionEvent(TypeConverter.fromYarn(t.jobId),
-            0, 0, 0, JobStateInternal.KILLED.toString()), currentTime + 20));
+            0, 0, 0, 0, 0, 0, 0, JobStateInternal.KILLED.toString()),
+            currentTime + 20));
       entities = ts.getEntities("MAPREDUCE_JOB", null, null, null,
               null, null, null, null, null, null);
       Assert.assertEquals(1, entities.getEntities().size());
@@ -944,7 +947,7 @@ public class TestJobHistoryEventHandler {
 
       // Job finishes and successfully writes history
       handleEvent(jheh, new JobHistoryEvent(t.jobId, new JobFinishedEvent(
-          TypeConverter.fromYarn(t.jobId), 0, 0, 0, 0, 0, new Counters(),
+          TypeConverter.fromYarn(t.jobId), 0, 0, 0, 0, 0, 0, 0, new Counters(),
           new Counters(), new Counters())));
 
       verify(jheh, times(1)).processDoneFiles(any(JobId.class));
@@ -978,7 +981,7 @@ public class TestJobHistoryEventHandler {
 
       // Job finishes, but doesn't successfully write history
       handleEvent(jheh, new JobHistoryEvent(t.jobId, new JobFinishedEvent(
-          TypeConverter.fromYarn(t.jobId), 0, 0, 0, 0, 0, new Counters(),
+          TypeConverter.fromYarn(t.jobId), 0, 0, 0, 0, 0, 0, 0, new Counters(),
           new Counters(), new Counters())));
       verify(jheh, times(1)).processDoneFiles(any(JobId.class));
       verify(t.mockAppContext, times(0)).setHistoryUrl(any(String.class));
@@ -1009,8 +1012,8 @@ public class TestJobHistoryEventHandler {
       // Job finishes, but doesn't successfully write history
       try {
         handleEvent(jheh, new JobHistoryEvent(t.jobId, new JobFinishedEvent(
-            TypeConverter.fromYarn(t.jobId), 0, 0, 0, 0, 0, new Counters(),
-            new Counters(), new Counters())));
+            TypeConverter.fromYarn(t.jobId), 0, 0, 0, 0, 0, 0, 0,
+            new Counters(), new Counters(), new Counters())));
         throw new RuntimeException(
             "processDoneFiles didn't throw, but should have");
       } catch (YarnRuntimeException yre) {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d04f85f3/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/jobhistory/TestJobSummary.java
----------------------------------------------------------------------
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/jobhistory/TestJobSummary.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/jobhistory/TestJobSummary.java
index 1bea5c8..b6d8bbf 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/jobhistory/TestJobSummary.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/jobhistory/TestJobSummary.java
@@ -43,10 +43,12 @@ public class TestJobSummary {
     summary.setFirstMapTaskLaunchTime(4L);
     summary.setFirstReduceTaskLaunchTime(5L);
     summary.setJobFinishTime(6L);
-    summary.setNumFinishedMaps(1);
+    summary.setNumSucceededMaps(1);
     summary.setNumFailedMaps(0);
-    summary.setNumFinishedReduces(1);
+    summary.setNumSucceededReduces(1);
     summary.setNumFailedReduces(0);
+    summary.setNumKilledMaps(0);
+    summary.setNumKilledReduces(0);
     summary.setUser("testUser");
     summary.setQueue("testQueue");
     summary.setJobStatus("testJobStatus");

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d04f85f3/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/MockJobs.java
----------------------------------------------------------------------
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/MockJobs.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/MockJobs.java
index ccacf1c..bfb8d79 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/MockJobs.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/MockJobs.java
@@ -640,6 +640,25 @@ public class MockJobs extends MockApps {
       public void setJobPriority(Priority priority) {
         // do nothing
       }
+
+      @Override
+      public int getFailedMaps() {
+        return 0;
+      }
+
+      @Override
+      public int getFailedReduces() {
+        return 0;
+      }
+
+      @Override
+      public int getKilledMaps() {
+        return 0;
+      }
+
+      @Override
+      public int getKilledReduces() {
+        return 0;
+      }
     };
   }
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d04f85f3/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/TestRuntimeEstimators.java
----------------------------------------------------------------------
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/TestRuntimeEstimators.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/TestRuntimeEstimators.java
index 301d498..e1fa198 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/TestRuntimeEstimators.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/TestRuntimeEstimators.java
@@ -533,6 +533,26 @@ public class TestRuntimeEstimators {
     public void setJobPriority(Priority priority) {
       // do nothing
     }
+
+    @Override
+    public int getFailedMaps() {
+      return 0;
+    }
+
+    @Override
+    public int getFailedReduces() {
+      return 0;
+    }
+
+    @Override
+    public int getKilledMaps() {
+      return 0;
+    }
+
+    @Override
+    public int getKilledReduces() {
+      return 0;
+    }
   }
 
   /*

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d04f85f3/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/avro/Events.avpr
----------------------------------------------------------------------
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/avro/Events.avpr b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/avro/Events.avpr
index c7b3eb8..b5a4d87 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/avro/Events.avpr
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/avro/Events.avpr
@@ -54,7 +54,9 @@
           {"name": "failedReduces", "type": "int"},
           {"name": "totalCounters", "type": "JhCounters"},
           {"name": "mapCounters", "type": "JhCounters"},
-          {"name": "reduceCounters", "type": "JhCounters"}
+          {"name": "reduceCounters", "type": "JhCounters"},
+          {"name": "killedMaps", "type": "int", "default": -1},
+          {"name": "killedReduces", "type": "int", "default": -1}
       ]
      },
 
@@ -136,7 +138,11 @@
           {"name": "finishedMaps", "type": "int"},
           {"name": "finishedReduces", "type": "int"},
           {"name": "jobStatus", "type": "string"},
-          {"name": "diagnostics", "type": ["null","string"], "default": null}
+          {"name": "diagnostics", "type": ["null","string"], "default": null},
+          {"name": "failedMaps", "type": "int", "default": -1},
+          {"name": "failedReduces", "type": "int",  "default": -1},
+          {"name": "killedMaps", "type": "int",  "default": -1},
+          {"name": "killedReduces", "type": "int",  "default": -1}
       ]
      },
 
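The "default": -1 entries above are what keep job history files written before this change parseable: when a record encoded with the old schema is resolved against the new one, the absent fields take their defaults instead of failing the read. A standalone sketch of this Avro schema-resolution rule, using simplified stand-in schemas rather than the real JobFinished record (binary encoding is chosen here only for brevity):

  import java.io.ByteArrayOutputStream;

  import org.apache.avro.Schema;
  import org.apache.avro.generic.GenericData;
  import org.apache.avro.generic.GenericDatumReader;
  import org.apache.avro.generic.GenericDatumWriter;
  import org.apache.avro.generic.GenericRecord;
  import org.apache.avro.io.BinaryEncoder;
  import org.apache.avro.io.Decoder;
  import org.apache.avro.io.DecoderFactory;
  import org.apache.avro.io.EncoderFactory;

  public class AvroDefaultDemo {
    public static void main(String[] args) throws Exception {
      // Old (writer) schema: no killedMaps field, like a pre-upgrade file.
      Schema writer = new Schema.Parser().parse(
          "{\"type\":\"record\",\"name\":\"JobFinished\",\"fields\":["
          + "{\"name\":\"finishedMaps\",\"type\":\"int\"}]}");
      // New (reader) schema: killedMaps added with a default of -1.
      Schema reader = new Schema.Parser().parse(
          "{\"type\":\"record\",\"name\":\"JobFinished\",\"fields\":["
          + "{\"name\":\"finishedMaps\",\"type\":\"int\"},"
          + "{\"name\":\"killedMaps\",\"type\":\"int\",\"default\":-1}]}");

      // Serialize a record with the old schema only.
      GenericRecord rec = new GenericData.Record(writer);
      rec.put("finishedMaps", 5);
      ByteArrayOutputStream out = new ByteArrayOutputStream();
      BinaryEncoder enc = EncoderFactory.get().binaryEncoder(out, null);
      new GenericDatumWriter<GenericRecord>(writer).write(rec, enc);
      enc.flush();

      // Decode with writer + reader schemas: the absent field takes its
      // default rather than raising a resolution error.
      Decoder dec = DecoderFactory.get().binaryDecoder(out.toByteArray(), null);
      GenericRecord decoded =
          new GenericDatumReader<GenericRecord>(writer, reader).read(null, dec);
      System.out.println(decoded.get("killedMaps"));  // prints -1
    }
  }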

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d04f85f3/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/jobhistory/HistoryViewer.java
----------------------------------------------------------------------
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/jobhistory/HistoryViewer.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/jobhistory/HistoryViewer.java
index 5f10fdf..c25c73e 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/jobhistory/HistoryViewer.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/jobhistory/HistoryViewer.java
@@ -327,12 +327,12 @@ public class HistoryViewer {
     /** Generate analysis information for the parsed job */
     public AnalyzedJob (JobInfo job) {
       Map<TaskID, JobHistoryParser.TaskInfo> tasks = job.getAllTasks();
-      int finishedMaps = (int) job.getFinishedMaps();
-      int finishedReduces = (int) job.getFinishedReduces();
+      int succeededMaps = (int) job.getSucceededMaps();
+      int succeededReduces = (int) job.getSucceededReduces();
       mapTasks = 
-        new JobHistoryParser.TaskAttemptInfo[finishedMaps]; 
+        new JobHistoryParser.TaskAttemptInfo[succeededMaps];
       reduceTasks = 
-        new JobHistoryParser.TaskAttemptInfo[finishedReduces]; 
+        new JobHistoryParser.TaskAttemptInfo[succeededReduces];
       int mapIndex = 0 , reduceIndex=0; 
       avgMapTime = 0;
       avgReduceTime = 0;
@@ -360,12 +360,12 @@ public class HistoryViewer {
           }
         }
       }
-      if (finishedMaps > 0) {
-        avgMapTime /= finishedMaps;
+      if (succeededMaps > 0) {
+        avgMapTime /= succeededMaps;
       }
-      if (finishedReduces > 0) {
-        avgReduceTime /= finishedReduces;
-        avgShuffleTime /= finishedReduces;
+      if (succeededReduces > 0) {
+        avgReduceTime /= succeededReduces;
+        avgShuffleTime /= succeededReduces;
       }
     }
   }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d04f85f3/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/jobhistory/HumanReadableHistoryViewerPrinter.java
----------------------------------------------------------------------
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/jobhistory/HumanReadableHistoryViewerPrinter.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/jobhistory/HumanReadableHistoryViewerPrinter.java
index d3da9f4..685fa05 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/jobhistory/HumanReadableHistoryViewerPrinter.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/jobhistory/HumanReadableHistoryViewerPrinter.java
@@ -236,7 +236,7 @@ class HumanReadableHistoryViewerPrinter implements HistoryViewerPrinter {
     taskSummary.append("\t").append(StringUtils.getFormattedTimeWithDiff(
         dateFormat, ts.setupFinished, ts.setupStarted));
     taskSummary.append("\nMap\t").append(ts.totalMaps);
-    taskSummary.append("\t").append(job.getFinishedMaps());
+    taskSummary.append("\t").append(job.getSucceededMaps());
     taskSummary.append("\t\t").append(ts.numFailedMaps);
     taskSummary.append("\t").append(ts.numKilledMaps);
     taskSummary.append("\t").append(StringUtils.getFormattedTimeWithDiff(
@@ -244,7 +244,7 @@ class HumanReadableHistoryViewerPrinter implements HistoryViewerPrinter {
     taskSummary.append("\t").append(StringUtils.getFormattedTimeWithDiff(
         dateFormat, ts.mapFinished, ts.mapStarted));
     taskSummary.append("\nReduce\t").append(ts.totalReduces);
-    taskSummary.append("\t").append(job.getFinishedReduces());
+    taskSummary.append("\t").append(job.getSucceededReduces());
     taskSummary.append("\t\t").append(ts.numFailedReduces);
     taskSummary.append("\t").append(ts.numKilledReduces);
     taskSummary.append("\t").append(StringUtils.getFormattedTimeWithDiff(

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d04f85f3/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/jobhistory/JSONHistoryViewerPrinter.java
----------------------------------------------------------------------
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/jobhistory/JSONHistoryViewerPrinter.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/jobhistory/JSONHistoryViewerPrinter.java
index 456dcf7..cfb6641 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/jobhistory/JSONHistoryViewerPrinter.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/jobhistory/JSONHistoryViewerPrinter.java
@@ -145,7 +145,7 @@ class JSONHistoryViewerPrinter implements HistoryViewerPrinter {
     jSums.put("setup", jSumSetup);
     JSONObject jSumMap = new JSONObject();
     jSumMap.put("total", ts.totalMaps);
-    jSumMap.put("successful", job.getFinishedMaps());
+    jSumMap.put("successful", job.getSucceededMaps());
     jSumMap.put("failed", ts.numFailedMaps);
     jSumMap.put("killed", ts.numKilledMaps);
     jSumMap.put("startTime", ts.mapStarted);
@@ -153,7 +153,7 @@ class JSONHistoryViewerPrinter implements HistoryViewerPrinter {
     jSums.put("map", jSumMap);
     JSONObject jSumReduce = new JSONObject();
     jSumReduce.put("total", ts.totalReduces);
-    jSumReduce.put("successful", job.getFinishedReduces());
+    jSumReduce.put("successful", job.getSucceededReduces());
     jSumReduce.put("failed", ts.numFailedReduces);
     jSumReduce.put("killed", ts.numKilledReduces);
     jSumReduce.put("startTime", ts.reduceStarted);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d04f85f3/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/jobhistory/JobFinishedEvent.java
----------------------------------------------------------------------
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/jobhistory/JobFinishedEvent.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/jobhistory/JobFinishedEvent.java
index ea21f60..34e4b2c 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/jobhistory/JobFinishedEvent.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/jobhistory/JobFinishedEvent.java
@@ -36,16 +36,18 @@ import org.apache.hadoop.yarn.api.records.timelineservice.TimelineMetric;
  */
 @InterfaceAudience.Private
 @InterfaceStability.Unstable
-public class JobFinishedEvent  implements HistoryEvent {
+public class JobFinishedEvent implements HistoryEvent {
 
   private JobFinished datum = null;
 
   private JobID jobId;
   private long finishTime;
-  private int finishedMaps;
-  private int finishedReduces;
+  private int succeededMaps;
+  private int succeededReduces;
   private int failedMaps;
   private int failedReduces;
+  private int killedMaps;
+  private int killedReduces;
   private Counters mapCounters;
   private Counters reduceCounters;
   private Counters totalCounters;
@@ -54,8 +56,8 @@ public class JobFinishedEvent  implements HistoryEvent {
    * Create an event to record successful job completion
    * @param id Job ID
    * @param finishTime Finish time of the job
-   * @param finishedMaps The number of finished maps
-   * @param finishedReduces The number of finished reduces
+   * @param succeededMaps The number of succeeded maps
+   * @param succeededReduces The number of succeeded reduces
    * @param failedMaps The number of failed maps
    * @param failedReduces The number of failed reduces
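+   * @param killedMaps The number of killed maps
+   * @param killedReduces The number of killed reduces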
    * @param mapCounters Map Counters for the job
@@ -63,16 +65,19 @@ public class JobFinishedEvent  implements HistoryEvent {
    * @param totalCounters Total Counters for the job
    */
   public JobFinishedEvent(JobID id, long finishTime,
-      int finishedMaps, int finishedReduces,
+      int succeededMaps, int succeededReduces,
       int failedMaps, int failedReduces,
+      int killedMaps, int killedReduces,
       Counters mapCounters, Counters reduceCounters,
       Counters totalCounters) {
     this.jobId = id;
     this.finishTime = finishTime;
-    this.finishedMaps = finishedMaps;
-    this.finishedReduces = finishedReduces;
+    this.succeededMaps = succeededMaps;
+    this.succeededReduces = succeededReduces;
     this.failedMaps = failedMaps;
     this.failedReduces = failedReduces;
+    this.killedMaps = killedMaps;
+    this.killedReduces = killedReduces;
     this.mapCounters = mapCounters;
     this.reduceCounters = reduceCounters;
     this.totalCounters = totalCounters;
@@ -85,10 +90,14 @@ public class JobFinishedEvent  implements HistoryEvent {
       datum = new JobFinished();
       datum.setJobid(new Utf8(jobId.toString()));
       datum.setFinishTime(finishTime);
-      datum.setFinishedMaps(finishedMaps);
-      datum.setFinishedReduces(finishedReduces);
+      // using finishedMaps & finishedReduces in the Avro schema for backward
+      // compatibility
+      datum.setFinishedMaps(succeededMaps);
+      datum.setFinishedReduces(succeededReduces);
       datum.setFailedMaps(failedMaps);
       datum.setFailedReduces(failedReduces);
+      datum.setKilledMaps(killedMaps);
+      datum.setKilledReduces(killedReduces);
       datum.setMapCounters(EventWriter.toAvro(mapCounters, "MAP_COUNTERS"));
       datum.setReduceCounters(EventWriter.toAvro(reduceCounters,
           "REDUCE_COUNTERS"));
@@ -102,10 +111,12 @@ public class JobFinishedEvent  implements HistoryEvent {
     this.datum = (JobFinished) oDatum;
     this.jobId = JobID.forName(datum.getJobid().toString());
     this.finishTime = datum.getFinishTime();
-    this.finishedMaps = datum.getFinishedMaps();
-    this.finishedReduces = datum.getFinishedReduces();
+    this.succeededMaps = datum.getFinishedMaps();
+    this.succeededReduces = datum.getFinishedReduces();
     this.failedMaps = datum.getFailedMaps();
     this.failedReduces = datum.getFailedReduces();
+    this.killedMaps = datum.getKilledMaps();
+    this.killedReduces = datum.getKilledReduces();
     this.mapCounters = EventReader.fromAvro(datum.getMapCounters());
     this.reduceCounters = EventReader.fromAvro(datum.getReduceCounters());
     this.totalCounters = EventReader.fromAvro(datum.getTotalCounters());
@@ -120,13 +131,17 @@ public class JobFinishedEvent  implements HistoryEvent {
   /** Get the job finish time */
   public long getFinishTime() { return finishTime; }
   /** Get the number of succeeded maps for the job */
-  public int getFinishedMaps() { return finishedMaps; }
+  public int getSucceededMaps() { return succeededMaps; }
   /** Get the number of succeeded reducers for the job */
-  public int getFinishedReduces() { return finishedReduces; }
+  public int getSucceededReduces() { return succeededReduces; }
   /** Get the number of failed maps for the job */
   public int getFailedMaps() { return failedMaps; }
   /** Get the number of failed reducers for the job */
   public int getFailedReduces() { return failedReduces; }
+  /** Get the number of killed maps */
+  public int getKilledMaps() { return killedMaps; }
+  /** Get the number of killed reduces */
+  public int getKilledReduces() { return killedReduces; }
   /** Get the counters for the job */
   public Counters getTotalCounters() {
     return totalCounters;
@@ -145,12 +160,16 @@ public class JobFinishedEvent  implements HistoryEvent {
     TimelineEvent tEvent = new TimelineEvent();
     tEvent.setId(StringUtils.toUpperCase(getEventType().name()));
     tEvent.addInfo("FINISH_TIME", getFinishTime());
-    tEvent.addInfo("NUM_MAPS", getFinishedMaps());
-    tEvent.addInfo("NUM_REDUCES", getFinishedReduces());
+    tEvent.addInfo("NUM_MAPS", getSucceededMaps() + getFailedMaps()
+        + getKilledMaps());
+    tEvent.addInfo("NUM_REDUCES", getSucceededReduces() + getFailedReduces()
+        + getKilledReduces());
     tEvent.addInfo("FAILED_MAPS", getFailedMaps());
     tEvent.addInfo("FAILED_REDUCES", getFailedReduces());
-    tEvent.addInfo("FINISHED_MAPS", getFinishedMaps());
-    tEvent.addInfo("FINISHED_REDUCES", getFinishedReduces());
+    tEvent.addInfo("SUCCESSFUL_MAPS", getSucceededMaps());
+    tEvent.addInfo("SUCCESSFUL_REDUCES", getSucceededReduces());
+    tEvent.addInfo("KILLED_MAPS", getKilledMaps());
+    tEvent.addInfo("KILLED_REDUCES", getKilledReduces());
     // TODO replace SUCCEEDED with JobState.SUCCEEDED.toString()
     tEvent.addInfo("JOB_STATUS", "SUCCEEDED");
     return tEvent;

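Most of the test churn earlier in this patch comes from threading two extra counts through an already int-heavy signature, where the positional order is easy to misread. A hedged, standalone sketch of the new argument order (the demo class, job id, and all values are placeholders):

  import org.apache.hadoop.mapreduce.Counters;
  import org.apache.hadoop.mapreduce.JobID;
  import org.apache.hadoop.mapreduce.jobhistory.JobFinishedEvent;

  public class JobFinishedEventDemo {
    public static void main(String[] args) {
      // Positional arguments of the widened constructor, labeled one by one.
      JobFinishedEvent event = new JobFinishedEvent(
          JobID.forName("job_1111111111111_0001"),
          6L,   // finishTime
          4,    // succeededMaps
          2,    // succeededReduces
          1,    // failedMaps
          0,    // failedReduces
          1,    // killedMaps
          0,    // killedReduces
          new Counters(),   // mapCounters
          new Counters(),   // reduceCounters
          new Counters());  // totalCounters
      // NUM_MAPS in the emitted timeline event is now the sum across all
      // outcomes: 4 succeeded + 1 failed + 1 killed = 6.
      System.out.println(event.getSucceededMaps() + event.getFailedMaps()
          + event.getKilledMaps());  // prints 6
    }
  }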
http://git-wip-us.apache.org/repos/asf/hadoop/blob/d04f85f3/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/jobhistory/JobHistoryParser.java
----------------------------------------------------------------------
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/jobhistory/JobHistoryParser.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/jobhistory/JobHistoryParser.java
index 07699fd..28fcc92 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/jobhistory/JobHistoryParser.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/jobhistory/JobHistoryParser.java
@@ -376,18 +376,24 @@ public class JobHistoryParser implements HistoryEventHandler {
 
   private void handleJobFailedEvent(JobUnsuccessfulCompletionEvent event) {
     info.finishTime = event.getFinishTime();
-    info.finishedMaps = event.getFinishedMaps();
-    info.finishedReduces = event.getFinishedReduces();
+    info.succeededMaps = event.getSucceededMaps();
+    info.succeededReduces = event.getSucceededReduces();
+    info.failedMaps = event.getFailedMaps();
+    info.failedReduces = event.getFailedReduces();
+    info.killedMaps = event.getKilledMaps();
+    info.killedReduces = event.getKilledReduces();
     info.jobStatus = StringInterner.weakIntern(event.getStatus());
     info.errorInfo = StringInterner.weakIntern(event.getDiagnostics());
   }
 
   private void handleJobFinishedEvent(JobFinishedEvent event) {
     info.finishTime = event.getFinishTime();
-    info.finishedMaps = event.getFinishedMaps();
-    info.finishedReduces = event.getFinishedReduces();
+    info.succeededMaps = event.getSucceededMaps();
+    info.succeededReduces = event.getSucceededReduces();
     info.failedMaps = event.getFailedMaps();
     info.failedReduces = event.getFailedReduces();
+    info.killedMaps = event.getKilledMaps();
+    info.killedReduces = event.getKilledReduces();
     info.totalCounters = event.getTotalCounters();
     info.mapCounters = event.getMapCounters();
     info.reduceCounters = event.getReduceCounters();
@@ -456,8 +462,10 @@ public class JobHistoryParser implements HistoryEventHandler {
     int totalReduces;
     int failedMaps;
     int failedReduces;
-    int finishedMaps;
-    int finishedReduces;
+    int succeededMaps;
+    int succeededReduces;
+    int killedMaps;
+    int killedReduces;
     String jobStatus;
     Counters totalCounters;
     Counters mapCounters;
@@ -477,7 +485,7 @@ public class JobHistoryParser implements HistoryEventHandler {
     public JobInfo() {
       submitTime = launchTime = finishTime = -1;
       totalMaps = totalReduces = failedMaps = failedReduces = 0;
-      finishedMaps = finishedReduces = 0;
+      succeededMaps = succeededReduces = 0;
       username = jobname = jobConfPath = jobQueueName = "";
       tasksMap = new HashMap<TaskID, TaskInfo>();
       completedTaskAttemptsMap = new HashMap<TaskAttemptID, TaskAttemptInfo>();
@@ -540,10 +548,14 @@ public class JobHistoryParser implements HistoryEventHandler {
     public long getFailedMaps() { return failedMaps; }
     /** @return the number of failed reduces */
     public long getFailedReduces() { return failedReduces; }
-    /** @return the number of finished maps */
-    public long getFinishedMaps() { return finishedMaps; }
-    /** @return the number of finished reduces */
-    public long getFinishedReduces() { return finishedReduces; }
+    /** @return the number of killed maps */
+    public long getKilledMaps() { return killedMaps; }
+    /** @return the number of killed reduces */
+    public long getKilledReduces() { return killedReduces; }
+    /** @return the number of succeeded maps */
+    public long getSucceededMaps() { return succeededMaps; }
+    /** @return the number of succeeded reduces */
+    public long getSucceededReduces() { return succeededReduces; }
     /** @return the job status */
     public String getJobStatus() { return jobStatus; }
     public String getErrorInfo() { return errorInfo; }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d04f85f3/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/jobhistory/JobUnsuccessfulCompletionEvent.java
----------------------------------------------------------------------
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/jobhistory/JobUnsuccessfulCompletionEvent.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/jobhistory/JobUnsuccessfulCompletionEvent.java
index ce6fa32..da31591 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/jobhistory/JobUnsuccessfulCompletionEvent.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/jobhistory/JobUnsuccessfulCompletionEvent.java
@@ -49,34 +49,58 @@ public class JobUnsuccessfulCompletionEvent implements HistoryEvent {
    * Create an event to record unsuccessful completion (killed/failed) of jobs
    * @param id Job ID
    * @param finishTime Finish time of the job
-   * @param finishedMaps Number of finished maps
-   * @param finishedReduces Number of finished reduces
+   * @param succeededMaps Number of succeeded maps
+   * @param succeededReduces Number of succeeded reduces
+   * @param failedMaps Number of failed maps
+   * @param failedReduces Number of failed reduces
+   * @param killedMaps Number of killed maps
+   * @param killedReduces Number of killed reduces
    * @param status Status of the job
    */
   public JobUnsuccessfulCompletionEvent(JobID id, long finishTime,
-      int finishedMaps,
-      int finishedReduces, String status) {
-    this(id, finishTime, finishedMaps, finishedReduces, status, NODIAGS_LIST);
+      int succeededMaps,
+      int succeededReduces,
+      int failedMaps,
+      int failedReduces,
+      int killedMaps,
+      int killedReduces,
+      String status) {
+    this(id, finishTime, succeededMaps, succeededReduces, failedMaps,
+            failedReduces, killedMaps, killedReduces, status, NODIAGS_LIST);
   }
 
   /**
    * Create an event to record unsuccessful completion (killed/failed) of jobs
    * @param id Job ID
    * @param finishTime Finish time of the job
-   * @param finishedMaps Number of finished maps
-   * @param finishedReduces Number of finished reduces
+   * @param succeededMaps Number of succeeded maps
+   * @param succeededReduces Number of succeeded reduces
+   * @param failedMaps Number of failed maps
+   * @param failedReduces Number of failed reduces
+   * @param killedMaps Number of killed maps
+   * @param killedReduces Number of killed reduces
    * @param status Status of the job
    * @param diagnostics job runtime diagnostics
    */
   public JobUnsuccessfulCompletionEvent(JobID id, long finishTime,
-      int finishedMaps,
-      int finishedReduces,
+      int succeededMaps,
+      int succeededReduces,
+      int failedMaps,
+      int failedReduces,
+      int killedMaps,
+      int killedReduces,
       String status,
       Iterable<String> diagnostics) {
     datum.setJobid(new Utf8(id.toString()));
     datum.setFinishTime(finishTime);
-    datum.setFinishedMaps(finishedMaps);
-    datum.setFinishedReduces(finishedReduces);
+    // using finishedMaps & finishedReduces in the Avro schema for backward
+    // compatibility
+    datum.setFinishedMaps(succeededMaps);
+    datum.setFinishedReduces(succeededReduces);
+    datum.setFailedMaps(failedMaps);
+    datum.setFailedReduces(failedReduces);
+    datum.setKilledMaps(killedMaps);
+    datum.setKilledReduces(killedReduces);
     datum.setJobStatus(new Utf8(status));
     if (diagnostics == null) {
       diagnostics = NODIAGS_LIST;
@@ -98,10 +122,19 @@ public class JobUnsuccessfulCompletionEvent implements HistoryEvent {
   }
   /** Get the job finish time */
   public long getFinishTime() { return datum.getFinishTime(); }
-  /** Get the number of finished maps */
-  public int getFinishedMaps() { return datum.getFinishedMaps(); }
-  /** Get the number of finished reduces */
-  public int getFinishedReduces() { return datum.getFinishedReduces(); }
+  /** Get the number of succeeded maps */
+  public int getSucceededMaps() { return datum.getFinishedMaps(); }
+  /** Get the number of succeeded reduces */
+  public int getSucceededReduces() { return datum.getFinishedReduces(); }
+  /** Get the number of failed maps */
+  public int getFailedMaps() { return datum.getFailedMaps(); }
+  /** Get the number of failed reduces */
+  public int getFailedReduces() { return datum.getFailedReduces(); }
+  /** Get the number of killed maps */
+  public int getKilledMaps() { return datum.getKilledMaps(); }
+  /** Get the number of killed reduces */
+  public int getKilledReduces() { return datum.getKilledReduces(); }
+
   /** Get the status */
   public String getStatus() { return datum.getJobStatus().toString(); }
   /** Get the event type */
@@ -129,12 +162,19 @@ public class JobUnsuccessfulCompletionEvent implements HistoryEvent {
     TimelineEvent tEvent = new TimelineEvent();
     tEvent.setId(StringUtils.toUpperCase(getEventType().name()));
     tEvent.addInfo("FINISH_TIME", getFinishTime());
-    tEvent.addInfo("NUM_MAPS", getFinishedMaps());
-    tEvent.addInfo("NUM_REDUCES", getFinishedReduces());
+    tEvent.addInfo("NUM_MAPS", getSucceededMaps() + getFailedMaps()
+        + getKilledMaps());
+    tEvent.addInfo("NUM_REDUCES", getSucceededReduces() + getFailedReduces()
+        + getKilledReduces());
     tEvent.addInfo("JOB_STATUS", getStatus());
     tEvent.addInfo("DIAGNOSTICS", getDiagnostics());
-    tEvent.addInfo("FINISHED_MAPS", getFinishedMaps());
-    tEvent.addInfo("FINISHED_REDUCES", getFinishedReduces());
+    tEvent.addInfo("SUCCESSFUL_MAPS", getSucceededMaps());
+    tEvent.addInfo("SUCCESSFUL_REDUCES", getSucceededReduces());
+    tEvent.addInfo("FAILED_MAPS", getFailedMaps());
+    tEvent.addInfo("FAILED_REDUCES", getFailedReduces());
+    tEvent.addInfo("KILLED_MAPS", getKilledMaps());
+    tEvent.addInfo("KILLED_REDUCES", getKilledReduces());
+
     return tEvent;
   }
 

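The unsuccessful-completion constructor is widened the same way, which is why the test call sites at the top of this patch gained runs of zeros. A small illustrative sketch of the new order, ending with status and diagnostics (all values here are placeholders):

  import java.util.Collections;

  import org.apache.hadoop.mapreduce.JobID;
  import org.apache.hadoop.mapreduce.jobhistory.JobUnsuccessfulCompletionEvent;

  public class JobUnsuccessfulCompletionEventDemo {
    public static void main(String[] args) {
      JobUnsuccessfulCompletionEvent event =
          new JobUnsuccessfulCompletionEvent(
              JobID.forName("job_1111111111111_0002"),
              6L,   // finishTime
              3,    // succeededMaps
              0,    // succeededReduces
              2,    // failedMaps
              1,    // failedReduces
              1,    // killedMaps
              0,    // killedReduces
              "FAILED",
              Collections.singletonList("Task failed: <placeholder task id>"));
      System.out.println(event.getStatus());      // FAILED
      System.out.println(event.getKilledMaps());  // 1
    }
  }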
http://git-wip-us.apache.org/repos/asf/hadoop/blob/d04f85f3/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/test/java/org/apache/hadoop/mapreduce/jobhistory/TestHistoryViewerPrinter.java
----------------------------------------------------------------------
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/test/java/org/apache/hadoop/mapreduce/jobhistory/TestHistoryViewerPrinter.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/test/java/org/apache/hadoop/mapreduce/jobhistory/TestHistoryViewerPrinter.java
index 358e9b2..2e2dbe1 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/test/java/org/apache/hadoop/mapreduce/jobhistory/TestHistoryViewerPrinter.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/test/java/org/apache/hadoop/mapreduce/jobhistory/TestHistoryViewerPrinter.java
@@ -883,8 +883,8 @@ public class TestHistoryViewerPrinter {
     job.totalReduces = 1;
     job.failedMaps = 1;
     job.failedReduces = 0;
-    job.finishedMaps = 5;
-    job.finishedReduces = 1;
+    job.succeededMaps = 5;
+    job.succeededReduces = 1;
     job.jobStatus = JobStatus.State.SUCCEEDED.name();
     job.totalCounters = createCounters();
     job.mapCounters = createCounters();

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d04f85f3/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/CompletedJob.java
----------------------------------------------------------------------
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/CompletedJob.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/CompletedJob.java
index bbb126d..5afb645 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/CompletedJob.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/CompletedJob.java
@@ -56,7 +56,6 @@ import org.apache.hadoop.mapreduce.v2.api.records.TaskType;
 import org.apache.hadoop.mapreduce.v2.app.job.Task;
 import org.apache.hadoop.mapreduce.v2.app.job.TaskAttempt;
 import org.apache.hadoop.mapreduce.v2.hs.HistoryFileManager.HistoryFileInfo;
-import org.apache.hadoop.mapreduce.v2.jobhistory.JobHistoryUtils;
 import org.apache.hadoop.mapreduce.v2.util.MRBuilderUtils;
 import org.apache.hadoop.mapreduce.v2.util.MRWebAppUtil;
 import org.apache.hadoop.security.UserGroupInformation;
@@ -71,7 +70,11 @@ import org.apache.hadoop.yarn.util.Records;
  * Data from job history file is loaded lazily.
  */
 public class CompletedJob implements org.apache.hadoop.mapreduce.v2.app.job.Job {
-  
+  // Backward compatibility: if the failed or killed map/reduce
+  // count is -1, the value was not recorded, so
+  // we count it as 0.
+  private static final int UNDEFINED_VALUE = -1;
+
   static final Log LOG = LogFactory.getLog(CompletedJob.class);
   private final Configuration conf;
   private final JobId jobId; //Can be picked from JobInfo with a conversion.
@@ -104,12 +107,36 @@ public class CompletedJob implements org.apache.hadoop.mapreduce.v2.app.job.Job
 
   @Override
   public int getCompletedMaps() {
-    return (int) jobInfo.getFinishedMaps();
+    int killedMaps = (int) jobInfo.getKilledMaps();
+    int failedMaps = (int) jobInfo.getFailedMaps();
+
+    if (killedMaps == UNDEFINED_VALUE) {
+      killedMaps = 0;
+    }
+
+    if (failedMaps == UNDEFINED_VALUE) {
+      failedMaps = 0;
+    }
+
+    return (int) (jobInfo.getSucceededMaps() +
+        killedMaps + failedMaps);
   }
 
   @Override
   public int getCompletedReduces() {
-    return (int) jobInfo.getFinishedReduces();
+    int killedReduces = (int) jobInfo.getKilledReduces();
+    int failedReduces = (int) jobInfo.getFailedReduces();
+
+    if (killedReduces == UNDEFINED_VALUE) {
+      killedReduces = 0;
+    }
+
+    if (failedReduces == UNDEFINED_VALUE) {
+      failedReduces = 0;
+    }
+
+    return (int) (jobInfo.getSucceededReduces() +
+        killedReduces + failedReduces);
   }
 
   @Override
@@ -481,4 +508,24 @@ public class CompletedJob implements org.apache.hadoop.mapreduce.v2.app.job.Job
     throw new UnsupportedOperationException(
         "Can't set job's priority in history");
   }
+
+  @Override
+  public int getFailedMaps() {
+    return (int) jobInfo.getFailedMaps();
+  }
+
+  @Override
+  public int getFailedReduces() {
+    return (int) jobInfo.getFailedReduces();
+  }
+
+  @Override
+  public int getKilledMaps() {
+    return (int) jobInfo.getKilledMaps();
+  }
+
+  @Override
+  public int getKilledReduces() {
+    return (int) jobInfo.getKilledReduces();
+  }
 }

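getCompletedMaps()/getCompletedReduces() above fold the -1 sentinel back to zero before summing, while PartialJob and UnparsedJob below hand -1 straight to callers, so any aggregation over history jobs needs the same guard. A standalone sketch of that normalization (the JobCountUtil class and its names are illustrative, not part of the patch):

  public final class JobCountUtil {
    // Mirrors the CompletedJob convention: -1 means the count was not
    // recorded (history file written before this change).
    private static final int UNDEFINED_VALUE = -1;

    private JobCountUtil() {
    }

    /** Treats the -1 "not recorded" sentinel as zero when aggregating. */
    public static int orZero(int count) {
      return count == UNDEFINED_VALUE ? 0 : count;
    }

    public static void main(String[] args) {
      // Old history file: killed/failed counts absent, surfaced as -1.
      int succeeded = 5, failed = -1, killed = -1;
      System.out.println(succeeded + orZero(failed) + orZero(killed));
      // prints 5, not 3
    }
  }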
http://git-wip-us.apache.org/repos/asf/hadoop/blob/d04f85f3/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/PartialJob.java
----------------------------------------------------------------------
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/PartialJob.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/PartialJob.java
index b3b181c..b14f0c3 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/PartialJob.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/PartialJob.java
@@ -203,4 +203,23 @@ public class PartialJob implements org.apache.hadoop.mapreduce.v2.app.job.Job {
         "Can't set job's priority in history");
   }
 
+  @Override
+  public int getFailedMaps() {
+    return -1;
+  }
+
+  @Override
+  public int getFailedReduces() {
+    return -1;
+  }
+
+  @Override
+  public int getKilledMaps() {
+    return -1;
+  }
+
+  @Override
+  public int getKilledReduces() {
+    return -1;
+  }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d04f85f3/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/UnparsedJob.java
----------------------------------------------------------------------
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/UnparsedJob.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/UnparsedJob.java
index cea336c..ecc4945 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/UnparsedJob.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/UnparsedJob.java
@@ -208,4 +208,24 @@ public class UnparsedJob implements org.apache.hadoop.mapreduce.v2.app.job.Job {
     throw new UnsupportedOperationException(
         "Can't set job's priority in history");
   }
+
+  @Override
+  public int getFailedMaps() {
+    return -1;
+  }
+
+  @Override
+  public int getFailedReduces() {
+    return -1;
+  }
+
+  @Override
+  public int getKilledMaps() {
+    return -1;
+  }
+
+  @Override
+  public int getKilledReduces() {
+    return -1;
+  }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d04f85f3/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/test/java/org/apache/hadoop/mapreduce/v2/hs/TestJobHistoryParsing.java
----------------------------------------------------------------------
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/test/java/org/apache/hadoop/mapreduce/v2/hs/TestJobHistoryParsing.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/test/java/org/apache/hadoop/mapreduce/v2/hs/TestJobHistoryParsing.java
index 84b1c6d..e881b37 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/test/java/org/apache/hadoop/mapreduce/v2/hs/TestJobHistoryParsing.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/test/java/org/apache/hadoop/mapreduce/v2/hs/TestJobHistoryParsing.java
@@ -79,9 +79,12 @@ import org.apache.hadoop.mapreduce.v2.app.job.event.JobEvent;
 import org.apache.hadoop.mapreduce.v2.app.job.event.JobEventType;
 import org.apache.hadoop.mapreduce.v2.app.job.event.TaskAttemptEvent;
 import org.apache.hadoop.mapreduce.v2.app.job.event.TaskAttemptEventType;
+import org.apache.hadoop.mapreduce.v2.app.job.event.TaskEvent;
+import org.apache.hadoop.mapreduce.v2.app.job.event.TaskEventType;
 import org.apache.hadoop.mapreduce.v2.hs.HistoryFileManager.HistoryFileInfo;
 import org.apache.hadoop.mapreduce.v2.hs.TestJobHistoryEvents.MRAppWithHistory;
 import org.apache.hadoop.mapreduce.v2.hs.webapp.dao.JobsInfo;
+import org.apache.hadoop.mapreduce.v2.jobhistory.JHAdminConfig;
 import org.apache.hadoop.mapreduce.v2.jobhistory.JobHistoryUtils;
 import org.apache.hadoop.mapreduce.v2.jobhistory.JobIndexInfo;
 import org.apache.hadoop.net.DNSToSwitchMapping;
@@ -292,7 +295,7 @@ public class TestJobHistoryParsing {
     Assert.assertEquals("incorrect finishedMap ", numSuccessfulMaps,
         numFinishedMaps);
     Assert.assertEquals("incorrect finishedReduces ", numReduces,
-        jobInfo.getFinishedReduces());
+        jobInfo.getSucceededReduces());
     Assert.assertEquals("incorrect uberized ", job.isUber(),
         jobInfo.getUberized());
     Map<TaskID, TaskInfo> allTasks = jobInfo.getAllTasks();
@@ -379,7 +382,7 @@ public class TestJobHistoryParsing {
   private long computeFinishedMaps(JobInfo jobInfo, int numMaps,
       int numSuccessfulMaps) {
     if (numMaps == numSuccessfulMaps) {
-      return jobInfo.getFinishedMaps();
+      return jobInfo.getSucceededMaps();
     }
 
     long numFinishedMaps = 0;
@@ -458,6 +461,76 @@ public class TestJobHistoryParsing {
     }
   }
 
+  @Test(timeout = 30000)
+  public void testHistoryParsingForKilledAndFailedAttempts() throws Exception {
+    MRApp app = null;
+    JobHistory jobHistory = null;
+    LOG.info("STARTING testHistoryParsingForKilledAndFailedAttempts");
+    try {
+      Configuration conf = new Configuration();
+      conf.setClass(
+          NET_TOPOLOGY_NODE_SWITCH_MAPPING_IMPL_KEY,
+          MyResolver.class, DNSToSwitchMapping.class);
+      conf.set(JHAdminConfig.MR_HS_JHIST_FORMAT, "json");
+      // "CommitterEventHandler" thread could be slower in some cases,
+      // which might cause a failed map/reduce task to fail the job
+      // immediately (see JobImpl.checkJobAfterTaskCompletion()). If there are
+      // killed events in progress, those will not be counted. Instead,
+      // we allow a 50% failure rate, so the job will always succeed and kill
+      // events will not be ignored.
+      conf.setInt(MRJobConfig.MAP_FAILURES_MAX_PERCENT, 50);
+      conf.setInt(MRJobConfig.REDUCE_FAILURES_MAXPERCENT, 50);
+      RackResolver.init(conf);
+      app = new MRAppWithHistoryWithFailedAndKilledTask(3, 3, true, this
+          .getClass().getName(), true);
+      app.submit(conf);
+      Job job = app.getContext().getAllJobs().values().iterator().next();
+      JobId jobId = job.getID();
+      app.waitForState(job, JobState.SUCCEEDED);
+
+      // make sure all events are flushed
+      app.waitForState(Service.STATE.STOPPED);
+
+      jobHistory = new JobHistory();
+      jobHistory.init(conf);
+      HistoryFileInfo fileInfo = jobHistory.getJobFileInfo(jobId);
+
+      JobHistoryParser parser;
+      JobInfo jobInfo;
+      synchronized (fileInfo) {
+        Path historyFilePath = fileInfo.getHistoryFile();
+        FSDataInputStream in = null;
+        FileContext fc = null;
+        try {
+          fc = FileContext.getFileContext(conf);
+          in = fc.open(fc.makeQualified(historyFilePath));
+        } catch (IOException ioe) {
+          LOG.info("Can not open history file: " + historyFilePath, ioe);
+          throw (new Exception("Can not open History File"));
+        }
+
+        parser = new JobHistoryParser(in);
+        jobInfo = parser.parse();
+      }
+      Exception parseException = parser.getParseException();
+      Assert.assertNull("Caught an expected exception " + parseException,
+          parseException);
+
+      assertEquals("FailedMaps", 1, jobInfo.getFailedMaps());
+      assertEquals("KilledMaps", 1, jobInfo.getKilledMaps());
+      assertEquals("FailedReduces", 1, jobInfo.getFailedReduces());
+      assertEquals("KilledReduces", 1, jobInfo.getKilledReduces());
+    } finally {
+      LOG.info("FINISHED testHistoryParsingForKilledAndFailedAttempts");
+      if (app != null) {
+        app.close();
+      }
+      if (jobHistory != null) {
+        jobHistory.close();
+      }
+    }
+  }
+
   @Test(timeout = 60000)
   public void testCountersForFailedTask() throws Exception {
     LOG.info("STARTING testCountersForFailedTask");
@@ -666,6 +739,40 @@ public class TestJobHistoryParsing {
     }
   }
 
+  static class MRAppWithHistoryWithFailedAndKilledTask
+    extends MRAppWithHistory {
+
+    MRAppWithHistoryWithFailedAndKilledTask(int maps, int reduces,
+        boolean autoComplete, String testName, boolean cleanOnStart) {
+      super(maps, reduces, autoComplete, testName, cleanOnStart);
+    }
+
+    @Override
+    protected void attemptLaunched(TaskAttemptId attemptID) {
+      final int taskId = attemptID.getTaskId().getId();
+      final TaskType taskType = attemptID.getTaskId().getTaskType();
+
+      // map #0 --> kill, map #1 --> fail
+      // reduce #0 --> fail, reduce #1 --> kill
+      if (taskType == TaskType.MAP && taskId == 0) {
+        getContext().getEventHandler().handle(
+            new TaskEvent(attemptID.getTaskId(), TaskEventType.T_KILL));
+      } else if (taskType == TaskType.MAP && taskId == 1) {
+        getContext().getEventHandler().handle(
+            new TaskAttemptEvent(attemptID, TaskAttemptEventType.TA_FAILMSG));
+      } else if (taskType == TaskType.REDUCE && taskId == 0) {
+        getContext().getEventHandler().handle(
+            new TaskAttemptEvent(attemptID, TaskAttemptEventType.TA_FAILMSG));
+      } else if (taskType == TaskType.REDUCE && taskId == 1) {
+        getContext().getEventHandler().handle(
+            new TaskEvent(attemptID.getTaskId(), TaskEventType.T_KILL));
+      } else {
+        getContext().getEventHandler().handle(
+            new TaskAttemptEvent(attemptID, TaskAttemptEventType.TA_DONE));
+      }
+    }
+  }
+
   static class MRAppWithHistoryWithJobKilled extends MRAppWithHistory {
 
     public MRAppWithHistoryWithJobKilled(int maps, int reduces,
@@ -864,6 +971,7 @@ public class TestJobHistoryParsing {
             if (eventId < 5) {
               JobUnsuccessfulCompletionEvent juce =
                   new JobUnsuccessfulCompletionEvent(jid, 100L, 2, 0,
+                          0, 0, 0, 0,
                       "JOB_FAILED", Collections.singletonList(
                           "Task failed: " + tids[0].toString()));
               return juce;
@@ -907,9 +1015,9 @@ public class TestJobHistoryParsing {
           (new Configuration()), histPath);
       JobInfo jobInfo = parser.parse(); 
       LOG.info(" job info: " + jobInfo.getJobname() + " "
-        + jobInfo.getFinishedMaps() + " " 
-        + jobInfo.getTotalMaps() + " " 
-        + jobInfo.getJobId() ) ;
+          + jobInfo.getSucceededMaps() + " "
+          + jobInfo.getTotalMaps() + " "
+          + jobInfo.getJobId() ) ;
     }
   
   /**
@@ -925,7 +1033,7 @@ public class TestJobHistoryParsing {
           (new Configuration()), histPath);
       JobInfo jobInfo = parser.parse(); 
       LOG.info(" job info: " + jobInfo.getJobname() + " "
-        + jobInfo.getFinishedMaps() + " "
+        + jobInfo.getSucceededMaps() + " "
         + jobInfo.getTotalMaps() + " "
         + jobInfo.getJobId() );
     }
@@ -943,7 +1051,7 @@ public class TestJobHistoryParsing {
           (new Configuration()), histPath);
       JobInfo jobInfo = parser.parse(); 
       LOG.info(" job info: " + jobInfo.getJobname() + " "
-        + jobInfo.getFinishedMaps() + " " 
+        + jobInfo.getSucceededMaps() + " "
         + jobInfo.getTotalMaps() + " " 
         + jobInfo.getJobId() ) ;
       }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d04f85f3/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/test/java/org/apache/hadoop/mapreduce/v2/hs/webapp/TestHsWebServicesAcls.java
----------------------------------------------------------------------
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/test/java/org/apache/hadoop/mapreduce/v2/hs/webapp/TestHsWebServicesAcls.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/test/java/org/apache/hadoop/mapreduce/v2/hs/webapp/TestHsWebServicesAcls.java
index 14961d2..867c661 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/test/java/org/apache/hadoop/mapreduce/v2/hs/webapp/TestHsWebServicesAcls.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/test/java/org/apache/hadoop/mapreduce/v2/hs/webapp/TestHsWebServicesAcls.java
@@ -424,5 +424,25 @@ public class TestHsWebServicesAcls {
     @Override
     public void setJobPriority(Priority priority) {
     }
+
+    @Override
+    public int getFailedMaps() {
+      return mockJob.getFailedMaps();
+    }
+
+    @Override
+    public int getFailedReduces() {
+      return mockJob.getFailedReduces();
+    }
+
+    @Override
+    public int getKilledMaps() {
+      return mockJob.getKilledMaps();
+    }
+
+    @Override
+    public int getKilledReduces() {
+      return mockJob.getKilledReduces();
+    }
   }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d04f85f3/hadoop-tools/hadoop-rumen/src/main/java/org/apache/hadoop/tools/rumen/Job20LineHistoryEventEmitter.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-rumen/src/main/java/org/apache/hadoop/tools/rumen/Job20LineHistoryEventEmitter.java b/hadoop-tools/hadoop-rumen/src/main/java/org/apache/hadoop/tools/rumen/Job20LineHistoryEventEmitter.java
index a8497f4..ef32db4 100644
--- a/hadoop-tools/hadoop-rumen/src/main/java/org/apache/hadoop/tools/rumen/Job20LineHistoryEventEmitter.java
+++ b/hadoop-tools/hadoop-rumen/src/main/java/org/apache/hadoop/tools/rumen/Job20LineHistoryEventEmitter.java
@@ -223,7 +223,7 @@ public class Job20LineHistoryEventEmitter extends HistoryEventEmitter {
           && finishedReduces != null) {
         return new JobUnsuccessfulCompletionEvent(jobID, Long
             .parseLong(finishTime), Integer.parseInt(finishedMaps), Integer
-            .parseInt(finishedReduces), status);
+            .parseInt(finishedReduces), -1, -1, -1, -1, status);
       }
 
       return null;
@@ -256,8 +256,8 @@ public class Job20LineHistoryEventEmitter extends HistoryEventEmitter {
           && finishedReduces != null) {
         return new JobFinishedEvent(jobID, Long.parseLong(finishTime), Integer
             .parseInt(finishedMaps), Integer.parseInt(finishedReduces), Integer
-            .parseInt(failedMaps), Integer.parseInt(failedReduces), null, null,
-            maybeParseCounters(counters));
+            .parseInt(failedMaps), Integer.parseInt(failedReduces), -1, -1,
+            null, null, maybeParseCounters(counters));
       }
 
       return null;



[36/50] [abbrv] hadoop git commit: MAPREDUCE-6838. Addendum to fix code comment

Posted by st...@apache.org.
MAPREDUCE-6838. Addendum to fix code comment


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/16ba4f54
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/16ba4f54
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/16ba4f54

Branch: refs/heads/HADOOP-13345
Commit: 16ba4f544f13d614c1ebd6101ee14f7714e0fc54
Parents: 08f40bc
Author: Varun Saxena <va...@apache.org>
Authored: Tue Aug 22 18:36:21 2017 +0530
Committer: Varun Saxena <va...@apache.org>
Committed: Wed Aug 30 11:29:54 2017 +0530

----------------------------------------------------------------------
 .../apache/hadoop/yarn/client/api/impl/TimelineV2ClientImpl.java  | 3 +--
 1 file changed, 1 insertion(+), 2 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/16ba4f54/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/client/api/impl/TimelineV2ClientImpl.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/client/api/impl/TimelineV2ClientImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/client/api/impl/TimelineV2ClientImpl.java
index 97d1364..220d6af 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/client/api/impl/TimelineV2ClientImpl.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/client/api/impl/TimelineV2ClientImpl.java
@@ -187,8 +187,7 @@ public class TimelineV2ClientImpl extends TimelineV2Client {
     if (collectorAddr == null || collectorAddr.isEmpty()) {
       collectorAddr = timelineServiceAddress;
     }
-    // Token need not be updated if either address or token service does not
-    // exist.
+    // Token need not be updated if both address and token service do not exist.
     String service = delegationToken.getService();
     if ((service == null || service.isEmpty()) &&
         (collectorAddr == null || collectorAddr.isEmpty())) {
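
The corrected comment matches the conjunction in the code: the update is skipped only when both the token's service and the collector address are absent. A minimal restatement of the guard (class and method names are invented for the example):

  public class TokenUpdateGuardDemo {
    // Restates the guard above: skip the token update only when BOTH the
    // token's service and the collector address are absent.
    static boolean skipTokenUpdate(String service, String collectorAddr) {
      boolean serviceMissing = service == null || service.isEmpty();
      boolean addrMissing = collectorAddr == null || collectorAddr.isEmpty();
      return serviceMissing && addrMissing;
    }

    public static void main(String[] args) {
      System.out.println(skipTokenUpdate(null, null));           // true: skip
      System.out.println(skipTokenUpdate(null, "host:1234"));    // false: update
      System.out.println(skipTokenUpdate("tok-service", null));  // false: update
    }
  }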



[35/50] [abbrv] hadoop git commit: YARN-6861. Reader API for sub application entities (Rohith Sharma K S via Varun Saxena)

Posted by st...@apache.org.
YARN-6861. Reader API for sub application entities (Rohith Sharma K S via Varun Saxena)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/b2efebdd
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/b2efebdd
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/b2efebdd

Branch: refs/heads/HADOOP-13345
Commit: b2efebdd077ecb7b6ffe7fb8a957dadb0e78290f
Parents: 7fd6ae2
Author: Varun Saxena <va...@apache.org>
Authored: Sun Aug 20 00:35:14 2017 +0530
Committer: Varun Saxena <va...@apache.org>
Committed: Wed Aug 30 11:29:54 2017 +0530

----------------------------------------------------------------------
 ...stTimelineReaderWebServicesHBaseStorage.java | 104 ++--
 .../storage/reader/GenericEntityReader.java     |   4 +-
 .../reader/SubApplicationEntityReader.java      | 488 +++++++++++++++++++
 .../reader/TimelineEntityReaderFactory.java     |   3 +
 .../reader/TimelineReaderContext.java           |  19 +-
 .../reader/TimelineReaderManager.java           |   9 +-
 .../reader/TimelineReaderWebServices.java       | 158 ++++++
 .../reader/TimelineReaderWebServicesUtils.java  |  10 +
 .../reader/TimelineUIDConverter.java            |  35 ++
 .../reader/TestTimelineUIDConverter.java        |   9 +
 10 files changed, 787 insertions(+), 52 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/b2efebdd/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase-tests/src/test/java/org/apache/hadoop/yarn/server/timelineservice/reader/TestTimelineReaderWebServicesHBaseStorage.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase-tests/src/test/java/org/apache/hadoop/yarn/server/timelineservice/reader/TestTimelineReaderWebServicesHBaseStorage.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase-tests/src/test/java/org/apache/hadoop/yarn/server/timelineservice/reader/TestTimelineReaderWebServicesHBaseStorage.java
index b589206..5acf1f4 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase-tests/src/test/java/org/apache/hadoop/yarn/server/timelineservice/reader/TestTimelineReaderWebServicesHBaseStorage.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase-tests/src/test/java/org/apache/hadoop/yarn/server/timelineservice/reader/TestTimelineReaderWebServicesHBaseStorage.java
@@ -71,6 +71,7 @@ public class TestTimelineReaderWebServicesHBaseStorage
   private static long ts = System.currentTimeMillis();
   private static long dayTs =
       HBaseTimelineStorageUtils.getTopOfTheDayTimestamp(ts);
+  private static String doAsUser = "remoteuser";
 
   @BeforeClass
   public static void setupBeforeClass() throws Exception {
@@ -337,7 +338,7 @@ public class TestTimelineReaderWebServicesHBaseStorage
     HBaseTimelineWriterImpl hbi = null;
     Configuration c1 = getHBaseTestingUtility().getConfiguration();
     UserGroupInformation remoteUser =
-        UserGroupInformation.createRemoteUser(user);
+        UserGroupInformation.createRemoteUser(doAsUser);
     try {
       hbi = new HBaseTimelineWriterImpl();
       hbi.init(c1);
@@ -2263,60 +2264,69 @@ public class TestTimelineReaderWebServicesHBaseStorage
   public void testGenericEntitiesForPagination() throws Exception {
     Client client = createClient();
     try {
-      int limit = 10;
-      String queryParam = "?limit=" + limit;
       String resourceUri = "http://localhost:" + getServerPort() + "/ws/v2/"
           + "timeline/clusters/cluster1/apps/application_1111111111_1111/"
           + "entities/entitytype";
-      URI uri = URI.create(resourceUri + queryParam);
-
-      ClientResponse resp = getResponse(client, uri);
-      List<TimelineEntity> entities =
-          resp.getEntity(new GenericType<List<TimelineEntity>>() {
-          });
-      // verify for entity-10 to entity-1 in descending order.
-      verifyPaginatedEntites(entities, limit, limit);
-
-      limit = 4;
-      queryParam = "?limit=" + limit;
-      uri = URI.create(resourceUri + queryParam);
-      resp = getResponse(client, uri);
-      entities = resp.getEntity(new GenericType<List<TimelineEntity>>() {
-      });
-      // verify for entity-10 to entity-7 in descending order.
-      TimelineEntity entity = verifyPaginatedEntites(entities, limit, 10);
-
-      queryParam = "?limit=" + limit + "&fromid="
-          + entity.getInfo().get(TimelineReaderUtils.FROMID_KEY);
-      uri = URI.create(resourceUri + queryParam);
-      resp = getResponse(client, uri);
-      entities = resp.getEntity(new GenericType<List<TimelineEntity>>() {
-      });
-      // verify for entity-7 to entity-4 in descending order.
-      entity = verifyPaginatedEntites(entities, limit, 7);
-
-      queryParam = "?limit=" + limit + "&fromid="
-          + entity.getInfo().get(TimelineReaderUtils.FROMID_KEY);
-      uri = URI.create(resourceUri + queryParam);
-      resp = getResponse(client, uri);
-      entities = resp.getEntity(new GenericType<List<TimelineEntity>>() {
-      });
-      // verify for entity-4 to entity-1 in descending order.
-      entity = verifyPaginatedEntites(entities, limit, 4);
-
-      queryParam = "?limit=" + limit + "&fromid="
-          + entity.getInfo().get(TimelineReaderUtils.FROMID_KEY);
-      uri = URI.create(resourceUri + queryParam);
-      resp = getResponse(client, uri);
-      entities = resp.getEntity(new GenericType<List<TimelineEntity>>() {
-      });
-      // always entity-1 will be retrieved
-      entity = verifyPaginatedEntites(entities, 1, 1);
+      verifyEntitiesForPagination(client, resourceUri);
+      resourceUri = "http://localhost:" + getServerPort() + "/ws/v2/"
+          + "timeline/clusters/cluster1/users/" + doAsUser
+          + "/entities/entitytype";
+      verifyEntitiesForPagination(client, resourceUri);
     } finally {
       client.destroy();
     }
   }
 
+  private void verifyEntitiesForPagination(Client client, String resourceUri)
+      throws Exception {
+    int limit = 10;
+    String queryParam = "?limit=" + limit;
+    URI uri = URI.create(resourceUri + queryParam);
+
+    ClientResponse resp = getResponse(client, uri);
+    List<TimelineEntity> entities =
+        resp.getEntity(new GenericType<List<TimelineEntity>>() {
+        });
+    // verify for entity-10 to entity-1 in descending order.
+    verifyPaginatedEntites(entities, limit, limit);
+
+    limit = 4;
+    queryParam = "?limit=" + limit;
+    uri = URI.create(resourceUri + queryParam);
+    resp = getResponse(client, uri);
+    entities = resp.getEntity(new GenericType<List<TimelineEntity>>() {
+    });
+    // verify for entity-10 to entity-7 in descending order.
+    TimelineEntity entity = verifyPaginatedEntites(entities, limit, 10);
+
+    queryParam = "?limit=" + limit + "&fromid="
+        + entity.getInfo().get(TimelineReaderUtils.FROMID_KEY);
+    uri = URI.create(resourceUri + queryParam);
+    resp = getResponse(client, uri);
+    entities = resp.getEntity(new GenericType<List<TimelineEntity>>() {
+    });
+    // verify for entity-7 to entity-4 in descending order.
+    entity = verifyPaginatedEntites(entities, limit, 7);
+
+    queryParam = "?limit=" + limit + "&fromid="
+        + entity.getInfo().get(TimelineReaderUtils.FROMID_KEY);
+    uri = URI.create(resourceUri + queryParam);
+    resp = getResponse(client, uri);
+    entities = resp.getEntity(new GenericType<List<TimelineEntity>>() {
+    });
+    // verify for entity-4 to entity-1 in descending order.
+    entity = verifyPaginatedEntites(entities, limit, 4);
+
+    queryParam = "?limit=" + limit + "&fromid="
+        + entity.getInfo().get(TimelineReaderUtils.FROMID_KEY);
+    uri = URI.create(resourceUri + queryParam);
+    resp = getResponse(client, uri);
+    entities = resp.getEntity(new GenericType<List<TimelineEntity>>() {
+    });
+    // always entity-1 will be retrieved
+    entity = verifyPaginatedEntites(entities, 1, 1);
+  }
+
   private TimelineEntity verifyPaginatedEntites(List<TimelineEntity> entities,
       int limit, int startFrom) {
     assertNotNull(entities);

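For readers following the pagination change above: a minimal client-side sketch
of the limit/fromid paging loop that the refactored test exercises. This is not
part of the commit; the host, port, cluster, user and entity type are
illustrative, and the "FROM_ID" info key is assumed to match the value of
TimelineReaderUtils.FROMID_KEY.

    import java.net.URI;
    import java.util.List;

    import com.sun.jersey.api.client.Client;
    import com.sun.jersey.api.client.ClientResponse;
    import com.sun.jersey.api.client.GenericType;

    import org.apache.hadoop.yarn.api.records.timelineservice.TimelineEntity;

    public final class SubAppPaginationSketch {
      public static void main(String[] args) {
        Client client = Client.create();
        // All values below are illustrative.
        String base = "http://localhost:8188/ws/v2/timeline/clusters/cluster1"
            + "/users/remoteuser/entities/entitytype";
        int limit = 4;
        String fromId = null;
        while (true) {
          String query = "?limit=" + limit
              + (fromId == null ? "" : "&fromid=" + fromId);
          ClientResponse resp = client.resource(URI.create(base + query))
              .accept("application/json").get(ClientResponse.class);
          List<TimelineEntity> page =
              resp.getEntity(new GenericType<List<TimelineEntity>>() { });
          if (page == null || page.isEmpty()) {
            break;
          }
          // The FROM_ID of the last entity on a page is the cursor for the
          // next page; that entity is returned again as the first result of
          // the following page, as the test above verifies.
          fromId = (String) page.get(page.size() - 1).getInfo().get("FROM_ID");
          if (page.size() < limit) {
            break; // short page: nothing further to fetch
          }
        }
        client.destroy();
      }
    }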
http://git-wip-us.apache.org/repos/asf/hadoop/blob/b2efebdd/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/reader/GenericEntityReader.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/reader/GenericEntityReader.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/reader/GenericEntityReader.java
index d7aca74..3a44445 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/reader/GenericEntityReader.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/reader/GenericEntityReader.java
@@ -135,7 +135,7 @@ class GenericEntityReader extends TimelineEntityReader {
    *
    * @return true if we need to fetch some of the columns, false otherwise.
    */
-  private boolean fetchPartialEventCols(TimelineFilterList eventFilters,
+  protected boolean fetchPartialEventCols(TimelineFilterList eventFilters,
       EnumSet<Field> fieldsToRetrieve) {
     return (eventFilters != null && !eventFilters.getFilterList().isEmpty() &&
         !hasField(fieldsToRetrieve, Field.EVENTS));
@@ -146,7 +146,7 @@ class GenericEntityReader extends TimelineEntityReader {
    *
    * @return true if we need to fetch some of the columns, false otherwise.
    */
-  private boolean fetchPartialRelatesToCols(TimelineFilterList relatesTo,
+  protected boolean fetchPartialRelatesToCols(TimelineFilterList relatesTo,
       EnumSet<Field> fieldsToRetrieve) {
     return (relatesTo != null && !relatesTo.getFilterList().isEmpty() &&
         !hasField(fieldsToRetrieve, Field.RELATES_TO));

http://git-wip-us.apache.org/repos/asf/hadoop/blob/b2efebdd/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/reader/SubApplicationEntityReader.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/reader/SubApplicationEntityReader.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/reader/SubApplicationEntityReader.java
new file mode 100644
index 0000000..e780dcc
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/reader/SubApplicationEntityReader.java
@@ -0,0 +1,488 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.yarn.server.timelineservice.storage.reader;
+
+import java.io.IOException;
+import java.util.EnumSet;
+import java.util.Set;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hbase.client.Connection;
+import org.apache.hadoop.hbase.client.Query;
+import org.apache.hadoop.hbase.client.Result;
+import org.apache.hadoop.hbase.client.ResultScanner;
+import org.apache.hadoop.hbase.client.Scan;
+import org.apache.hadoop.hbase.filter.BinaryComparator;
+import org.apache.hadoop.hbase.filter.CompareFilter.CompareOp;
+import org.apache.hadoop.hbase.filter.FamilyFilter;
+import org.apache.hadoop.hbase.filter.FilterList;
+import org.apache.hadoop.hbase.filter.FilterList.Operator;
+import org.apache.hadoop.hbase.filter.PageFilter;
+import org.apache.hadoop.hbase.filter.QualifierFilter;
+import org.apache.hadoop.yarn.api.records.timelineservice.TimelineEntity;
+import org.apache.hadoop.yarn.server.timelineservice.reader.TimelineDataToRetrieve;
+import org.apache.hadoop.yarn.server.timelineservice.reader.TimelineEntityFilters;
+import org.apache.hadoop.yarn.server.timelineservice.reader.TimelineReaderContext;
+import org.apache.hadoop.yarn.server.timelineservice.reader.TimelineReaderUtils;
+import org.apache.hadoop.yarn.server.timelineservice.reader.filter.TimelineFilterList;
+import org.apache.hadoop.yarn.server.timelineservice.reader.filter.TimelineFilterUtils;
+import org.apache.hadoop.yarn.server.timelineservice.storage.TimelineReader.Field;
+import org.apache.hadoop.yarn.server.timelineservice.storage.common.BaseTable;
+import org.apache.hadoop.yarn.server.timelineservice.storage.common.HBaseTimelineStorageUtils;
+import org.apache.hadoop.yarn.server.timelineservice.storage.common.RowKeyPrefix;
+import org.apache.hadoop.yarn.server.timelineservice.storage.common.TimelineStorageUtils;
+import org.apache.hadoop.yarn.server.timelineservice.storage.subapplication.SubApplicationColumn;
+import org.apache.hadoop.yarn.server.timelineservice.storage.subapplication.SubApplicationColumnFamily;
+import org.apache.hadoop.yarn.server.timelineservice.storage.subapplication.SubApplicationColumnPrefix;
+import org.apache.hadoop.yarn.server.timelineservice.storage.subapplication.SubApplicationRowKey;
+import org.apache.hadoop.yarn.server.timelineservice.storage.subapplication.SubApplicationRowKeyPrefix;
+import org.apache.hadoop.yarn.server.timelineservice.storage.subapplication.SubApplicationTable;
+import org.apache.hadoop.yarn.webapp.BadRequestException;
+
+import com.google.common.base.Preconditions;
+
+class SubApplicationEntityReader extends GenericEntityReader {
+  private static final SubApplicationTable SUB_APPLICATION_TABLE =
+      new SubApplicationTable();
+
+  SubApplicationEntityReader(TimelineReaderContext ctxt,
+      TimelineEntityFilters entityFilters, TimelineDataToRetrieve toRetrieve) {
+    super(ctxt, entityFilters, toRetrieve);
+  }
+
+  SubApplicationEntityReader(TimelineReaderContext ctxt,
+      TimelineDataToRetrieve toRetrieve) {
+    super(ctxt, toRetrieve);
+  }
+
+  /**
+   * Uses the {@link SubApplicationTable}.
+   */
+  protected BaseTable<?> getTable() {
+    return SUB_APPLICATION_TABLE;
+  }
+
+  @Override
+  protected FilterList constructFilterListBasedOnFilters() throws IOException {
+    // Filters here cannot be null for multiple entity reads as they are set in
+    // augmentParams if null.
+    FilterList listBasedOnFilters = new FilterList();
+    TimelineEntityFilters filters = getFilters();
+    // Create filter list based on created time range and add it to
+    // listBasedOnFilters.
+    long createdTimeBegin = filters.getCreatedTimeBegin();
+    long createdTimeEnd = filters.getCreatedTimeEnd();
+    if (createdTimeBegin != 0 || createdTimeEnd != Long.MAX_VALUE) {
+      listBasedOnFilters.addFilter(TimelineFilterUtils
+          .createSingleColValueFiltersByRange(SubApplicationColumn.CREATED_TIME,
+              createdTimeBegin, createdTimeEnd));
+    }
+    // Create filter list based on metric filters and add it to
+    // listBasedOnFilters.
+    TimelineFilterList metricFilters = filters.getMetricFilters();
+    if (metricFilters != null && !metricFilters.getFilterList().isEmpty()) {
+      listBasedOnFilters.addFilter(TimelineFilterUtils.createHBaseFilterList(
+          SubApplicationColumnPrefix.METRIC, metricFilters));
+    }
+    // Create filter list based on config filters and add it to
+    // listBasedOnFilters.
+    TimelineFilterList configFilters = filters.getConfigFilters();
+    if (configFilters != null && !configFilters.getFilterList().isEmpty()) {
+      listBasedOnFilters.addFilter(TimelineFilterUtils.createHBaseFilterList(
+          SubApplicationColumnPrefix.CONFIG, configFilters));
+    }
+    // Create filter list based on info filters and add it to
+    // listBasedOnFilters.
+    TimelineFilterList infoFilters = filters.getInfoFilters();
+    if (infoFilters != null && !infoFilters.getFilterList().isEmpty()) {
+      listBasedOnFilters.addFilter(TimelineFilterUtils
+          .createHBaseFilterList(SubApplicationColumnPrefix.INFO, infoFilters));
+    }
+    return listBasedOnFilters;
+  }
+
+  /**
+   * Add {@link QualifierFilter} filters to filter list for each column of
+   * sub application table.
+   *
+   * @param list filter list to which qualifier filters have to be added.
+   */
+  protected void updateFixedColumns(FilterList list) {
+    for (SubApplicationColumn column : SubApplicationColumn.values()) {
+      list.addFilter(new QualifierFilter(CompareOp.EQUAL,
+          new BinaryComparator(column.getColumnQualifierBytes())));
+    }
+  }
+
+  /**
+   * Creates a filter list which indicates that only some of the column
+   * qualifiers in the info column family will be returned in the result.
+   *
+   * @return filter list.
+   * @throws IOException if any problem occurs while creating filter list.
+   */
+  private FilterList createFilterListForColsOfInfoFamily() throws IOException {
+    FilterList infoFamilyColsFilter = new FilterList(Operator.MUST_PASS_ONE);
+    // Add filters for each column in the sub application table.
+    updateFixedColumns(infoFamilyColsFilter);
+    EnumSet<Field> fieldsToRetrieve = getDataToRetrieve().getFieldsToRetrieve();
+    // If INFO field has to be retrieved, add a filter for fetching columns
+    // with INFO column prefix.
+    if (hasField(fieldsToRetrieve, Field.INFO)) {
+      infoFamilyColsFilter.addFilter(
+          TimelineFilterUtils.createHBaseQualifierFilter(CompareOp.EQUAL,
+              SubApplicationColumnPrefix.INFO));
+    }
+    TimelineFilterList relatesTo = getFilters().getRelatesTo();
+    if (hasField(fieldsToRetrieve, Field.RELATES_TO)) {
+      // If RELATES_TO field has to be retrieved, add a filter for fetching
+      // columns with RELATES_TO column prefix.
+      infoFamilyColsFilter.addFilter(
+          TimelineFilterUtils.createHBaseQualifierFilter(CompareOp.EQUAL,
+              SubApplicationColumnPrefix.RELATES_TO));
+    } else if (relatesTo != null && !relatesTo.getFilterList().isEmpty()) {
+      // Even if fields to retrieve does not contain RELATES_TO, we still
+      // need to have a filter to fetch some of the column qualifiers if
+      // relatesTo filters are specified. relatesTo filters will then be
+      // matched after fetching rows from HBase.
+      Set<String> relatesToCols =
+          TimelineFilterUtils.fetchColumnsFromFilterList(relatesTo);
+      infoFamilyColsFilter.addFilter(createFiltersFromColumnQualifiers(
+          SubApplicationColumnPrefix.RELATES_TO, relatesToCols));
+    }
+    TimelineFilterList isRelatedTo = getFilters().getIsRelatedTo();
+    if (hasField(fieldsToRetrieve, Field.IS_RELATED_TO)) {
+      // If IS_RELATED_TO field has to be retrieved, add a filter for fetching
+      // columns with IS_RELATED_TO column prefix.
+      infoFamilyColsFilter.addFilter(
+          TimelineFilterUtils.createHBaseQualifierFilter(CompareOp.EQUAL,
+              SubApplicationColumnPrefix.IS_RELATED_TO));
+    } else if (isRelatedTo != null && !isRelatedTo.getFilterList().isEmpty()) {
+      // Even if fields to retrieve does not contain IS_RELATED_TO, we still
+      // need to have a filter to fetch some of the column qualifiers if
+      // isRelatedTo filters are specified. isRelatedTo filters will then be
+      // matched after fetching rows from HBase.
+      Set<String> isRelatedToCols =
+          TimelineFilterUtils.fetchColumnsFromFilterList(isRelatedTo);
+      infoFamilyColsFilter.addFilter(createFiltersFromColumnQualifiers(
+          SubApplicationColumnPrefix.IS_RELATED_TO, isRelatedToCols));
+    }
+    TimelineFilterList eventFilters = getFilters().getEventFilters();
+    if (hasField(fieldsToRetrieve, Field.EVENTS)) {
+      // If EVENTS field has to be retrieved, add a filter for fetching columns
+      // with EVENT column prefix.
+      infoFamilyColsFilter.addFilter(
+          TimelineFilterUtils.createHBaseQualifierFilter(CompareOp.EQUAL,
+              SubApplicationColumnPrefix.EVENT));
+    } else if (eventFilters != null
+        && !eventFilters.getFilterList().isEmpty()) {
+      // Even if fields to retrieve does not contain EVENTS, we still need to
+      // have a filter to fetch some of the column qualifiers on the basis of
+      // event filters specified. Event filters will then be matched after
+      // fetching rows from HBase.
+      Set<String> eventCols =
+          TimelineFilterUtils.fetchColumnsFromFilterList(eventFilters);
+      infoFamilyColsFilter.addFilter(createFiltersFromColumnQualifiers(
+          SubApplicationColumnPrefix.EVENT, eventCols));
+    }
+    return infoFamilyColsFilter;
+  }
+
+  /**
+   * Exclude column prefixes via filters which are not required (based on
+   * fields to retrieve) from the info column family. These filters are added
+   * to the filter list which contains a filter for getting the info column
+   * family.
+   *
+   * @param infoColFamilyList filter list for info column family.
+   */
+  private void excludeFieldsFromInfoColFamily(FilterList infoColFamilyList) {
+    EnumSet<Field> fieldsToRetrieve = getDataToRetrieve().getFieldsToRetrieve();
+    // Events not required.
+    if (!hasField(fieldsToRetrieve, Field.EVENTS)) {
+      infoColFamilyList.addFilter(
+          TimelineFilterUtils.createHBaseQualifierFilter(CompareOp.NOT_EQUAL,
+              SubApplicationColumnPrefix.EVENT));
+    }
+    // info not required.
+    if (!hasField(fieldsToRetrieve, Field.INFO)) {
+      infoColFamilyList.addFilter(
+          TimelineFilterUtils.createHBaseQualifierFilter(CompareOp.NOT_EQUAL,
+              SubApplicationColumnPrefix.INFO));
+    }
+    // is related to not required.
+    if (!hasField(fieldsToRetrieve, Field.IS_RELATED_TO)) {
+      infoColFamilyList.addFilter(
+          TimelineFilterUtils.createHBaseQualifierFilter(CompareOp.NOT_EQUAL,
+              SubApplicationColumnPrefix.IS_RELATED_TO));
+    }
+    // relates to not required.
+    if (!hasField(fieldsToRetrieve, Field.RELATES_TO)) {
+      infoColFamilyList.addFilter(
+          TimelineFilterUtils.createHBaseQualifierFilter(CompareOp.NOT_EQUAL,
+              SubApplicationColumnPrefix.RELATES_TO));
+    }
+  }
+
+  /**
+   * Updates filter list based on fields for confs and metrics to retrieve.
+   *
+   * @param listBasedOnFields filter list based on fields.
+   * @throws IOException if any problem occurs while updating filter list.
+   */
+  private void updateFilterForConfsAndMetricsToRetrieve(
+      FilterList listBasedOnFields) throws IOException {
+    TimelineDataToRetrieve dataToRetrieve = getDataToRetrieve();
+    // Please note that if confsToRetrieve is specified, we would have added
+    // CONFS to fields to retrieve in augmentParams() even if not specified.
+    if (dataToRetrieve.getFieldsToRetrieve().contains(Field.CONFIGS)) {
+      // Create a filter list for configs.
+      listBasedOnFields.addFilter(
+          TimelineFilterUtils.createFilterForConfsOrMetricsToRetrieve(
+              dataToRetrieve.getConfsToRetrieve(),
+              SubApplicationColumnFamily.CONFIGS,
+              SubApplicationColumnPrefix.CONFIG));
+    }
+
+    // Please note that if metricsToRetrieve is specified, we would have added
+    // METRICS to fields to retrieve in augmentParams() even if not specified.
+    if (dataToRetrieve.getFieldsToRetrieve().contains(Field.METRICS)) {
+      // Create a filter list for metrics.
+      listBasedOnFields.addFilter(
+          TimelineFilterUtils.createFilterForConfsOrMetricsToRetrieve(
+              dataToRetrieve.getMetricsToRetrieve(),
+              SubApplicationColumnFamily.METRICS,
+              SubApplicationColumnPrefix.METRIC));
+    }
+  }
+
+  @Override
+  protected FilterList constructFilterListBasedOnFields() throws IOException {
+    if (!needCreateFilterListBasedOnFields()) {
+      // Fetch all the columns. No need of a filter.
+      return null;
+    }
+    FilterList listBasedOnFields = new FilterList(Operator.MUST_PASS_ONE);
+    FilterList infoColFamilyList = new FilterList();
+    // By default fetch everything in INFO column family.
+    FamilyFilter infoColumnFamily = new FamilyFilter(CompareOp.EQUAL,
+        new BinaryComparator(SubApplicationColumnFamily.INFO.getBytes()));
+    infoColFamilyList.addFilter(infoColumnFamily);
+    if (fetchPartialColsFromInfoFamily()) {
+      // We can fetch only some of the columns from info family.
+      infoColFamilyList.addFilter(createFilterListForColsOfInfoFamily());
+    } else {
+      // Exclude column prefixes in info column family which are not required
+      // based on fields to retrieve.
+      excludeFieldsFromInfoColFamily(infoColFamilyList);
+    }
+    listBasedOnFields.addFilter(infoColFamilyList);
+    updateFilterForConfsAndMetricsToRetrieve(listBasedOnFields);
+    return listBasedOnFields;
+  }
+
+  @Override
+  protected void validateParams() {
+    Preconditions.checkNotNull(getContext(), "context shouldn't be null");
+    Preconditions.checkNotNull(getDataToRetrieve(),
+        "data to retrieve shouldn't be null");
+    Preconditions.checkNotNull(getContext().getClusterId(),
+        "clusterId shouldn't be null");
+    Preconditions.checkNotNull(getContext().getDoAsUser(),
+        "DoAsUser shouldn't be null");
+    Preconditions.checkNotNull(getContext().getEntityType(),
+        "entityType shouldn't be null");
+  }
+
+  @Override
+  protected void augmentParams(Configuration hbaseConf, Connection conn)
+      throws IOException {
+    getDataToRetrieve().addFieldsBasedOnConfsAndMetricsToRetrieve();
+    createFiltersIfNull();
+  }
+
+  private void setMetricsTimeRange(Query query) {
+    // Set time range for metric values.
+    HBaseTimelineStorageUtils.setMetricsTimeRange(query,
+        SubApplicationColumnFamily.METRICS.getBytes(),
+        getDataToRetrieve().getMetricsTimeBegin(),
+        getDataToRetrieve().getMetricsTimeEnd());
+  }
+
+  @Override
+  protected ResultScanner getResults(Configuration hbaseConf, Connection conn,
+      FilterList filterList) throws IOException {
+
+    // Scan through part of the table to find the entities that belong to
+    // one user and one entity type.
+    Scan scan = new Scan();
+    TimelineReaderContext context = getContext();
+    if (context.getDoAsUser() == null) {
+      throw new BadRequestException("Invalid user!");
+    }
+
+    RowKeyPrefix<SubApplicationRowKey> subApplicationRowKeyPrefix = null;
+    // default mode, will always scan from the beginning of the entity type.
+    if (getFilters() == null || getFilters().getFromId() == null) {
+      subApplicationRowKeyPrefix = new SubApplicationRowKeyPrefix(
+          context.getDoAsUser(), context.getClusterId(),
+          context.getEntityType(), null, null, null);
+      scan.setRowPrefixFilter(subApplicationRowKeyPrefix.getRowKeyPrefix());
+    } else { // pagination mode, will scan from given entityIdPrefix!entityId
+
+      SubApplicationRowKey entityRowKey = null;
+      try {
+        entityRowKey = SubApplicationRowKey
+            .parseRowKeyFromString(getFilters().getFromId());
+      } catch (IllegalArgumentException e) {
+        throw new BadRequestException("Invalid filter fromid is provided.");
+      }
+      if (!context.getClusterId().equals(entityRowKey.getClusterId())) {
+        throw new BadRequestException(
+            "fromid doesn't belong to clusterId=" + context.getClusterId());
+      }
+
+      // set start row
+      scan.setStartRow(entityRowKey.getRowKey());
+
+      // get the bytes for stop row
+      subApplicationRowKeyPrefix = new SubApplicationRowKeyPrefix(
+          context.getDoAsUser(), context.getClusterId(),
+          context.getEntityType(), null, null, null);
+
+      // set stop row
+      scan.setStopRow(
+          HBaseTimelineStorageUtils.calculateTheClosestNextRowKeyForPrefix(
+              subApplicationRowKeyPrefix.getRowKeyPrefix()));
+
+      // set page filter to limit. This filter has to be set only in
+      // pagination mode.
+      filterList.addFilter(new PageFilter(getFilters().getLimit()));
+    }
+    setMetricsTimeRange(scan);
+    scan.setMaxVersions(getDataToRetrieve().getMetricsLimit());
+    if (filterList != null && !filterList.getFilters().isEmpty()) {
+      scan.setFilter(filterList);
+    }
+    return getTable().getResultScanner(hbaseConf, conn, scan);
+  }
+
+  @Override
+  protected Result getResult(Configuration hbaseConf, Connection conn,
+      FilterList filterList) throws IOException {
+    throw new UnsupportedOperationException(
+        "we don't support a single entity query");
+  }
+
+  @Override
+  protected TimelineEntity parseEntity(Result result) throws IOException {
+    if (result == null || result.isEmpty()) {
+      return null;
+    }
+    TimelineEntity entity = new TimelineEntity();
+    SubApplicationRowKey parseRowKey =
+        SubApplicationRowKey.parseRowKey(result.getRow());
+    entity.setType(parseRowKey.getEntityType());
+    entity.setId(parseRowKey.getEntityId());
+    entity.setIdPrefix(parseRowKey.getEntityIdPrefix().longValue());
+
+    TimelineEntityFilters filters = getFilters();
+    // fetch created time
+    Long createdTime =
+        (Long) SubApplicationColumn.CREATED_TIME.readResult(result);
+    entity.setCreatedTime(createdTime);
+
+    EnumSet<Field> fieldsToRetrieve = getDataToRetrieve().getFieldsToRetrieve();
+    // fetch is-related-to entities and match the isRelatedTo filter. If the
+    // isRelatedTo filters do not match, the entity would be dropped. We have
+    // to match filters locally as relevant HBase filters to filter out rows
+    // on the basis of isRelatedTo are not set in the HBase scan.
+    boolean checkIsRelatedTo =
+        filters.getIsRelatedTo() != null
+            && filters.getIsRelatedTo().getFilterList().size() > 0;
+    if (hasField(fieldsToRetrieve, Field.IS_RELATED_TO) || checkIsRelatedTo) {
+      readRelationship(entity, result, SubApplicationColumnPrefix.IS_RELATED_TO,
+          true);
+      if (checkIsRelatedTo && !TimelineStorageUtils.matchIsRelatedTo(entity,
+          filters.getIsRelatedTo())) {
+        return null;
+      }
+      if (!hasField(fieldsToRetrieve, Field.IS_RELATED_TO)) {
+        entity.getIsRelatedToEntities().clear();
+      }
+    }
+
+    // fetch relates-to entities and match the relatesTo filter. If the
+    // relatesTo filters do not match, the entity would be dropped. We have
+    // to match filters locally as relevant HBase filters to filter out rows
+    // on the basis of relatesTo are not set in the HBase scan.
+    boolean checkRelatesTo =
+        !isSingleEntityRead() && filters.getRelatesTo() != null
+            && filters.getRelatesTo().getFilterList().size() > 0;
+    if (hasField(fieldsToRetrieve, Field.RELATES_TO) || checkRelatesTo) {
+      readRelationship(entity, result, SubApplicationColumnPrefix.RELATES_TO,
+          false);
+      if (checkRelatesTo && !TimelineStorageUtils.matchRelatesTo(entity,
+          filters.getRelatesTo())) {
+        return null;
+      }
+      if (!hasField(fieldsToRetrieve, Field.RELATES_TO)) {
+        entity.getRelatesToEntities().clear();
+      }
+    }
+
+    // fetch info if fieldsToRetrieve contains INFO or ALL.
+    if (hasField(fieldsToRetrieve, Field.INFO)) {
+      readKeyValuePairs(entity, result, SubApplicationColumnPrefix.INFO, false);
+    }
+
+    // fetch configs if fieldsToRetrieve contains CONFIGS or ALL.
+    if (hasField(fieldsToRetrieve, Field.CONFIGS)) {
+      readKeyValuePairs(entity, result, SubApplicationColumnPrefix.CONFIG,
+          true);
+    }
+
+    // fetch events and match event filters if they exist. If the event
+    // filters do not match, the entity would be dropped. We have to match
+    // filters locally as relevant HBase filters to filter out rows on the
+    // basis of events are not set in the HBase scan.
+    boolean checkEvents =
+        !isSingleEntityRead() && filters.getEventFilters() != null
+            && filters.getEventFilters().getFilterList().size() > 0;
+    if (hasField(fieldsToRetrieve, Field.EVENTS) || checkEvents) {
+      readEvents(entity, result, SubApplicationColumnPrefix.EVENT);
+      if (checkEvents && !TimelineStorageUtils.matchEventFilters(entity,
+          filters.getEventFilters())) {
+        return null;
+      }
+      if (!hasField(fieldsToRetrieve, Field.EVENTS)) {
+        entity.getEvents().clear();
+      }
+    }
+
+    // fetch metrics if fieldsToRetrieve contains METRICS or ALL.
+    if (hasField(fieldsToRetrieve, Field.METRICS)) {
+      readMetrics(entity, result, SubApplicationColumnPrefix.METRIC);
+    }
+
+    entity.getInfo().put(TimelineReaderUtils.FROMID_KEY,
+        parseRowKey.getRowKeyAsString());
+    return entity;
+  }
+
+}

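A note on the filter composition used throughout the new reader: an outer
MUST_PASS_ONE FilterList holds one branch per column family, and each branch
pairs a FamilyFilter with qualifier filters combined under the default
MUST_PASS_ALL operator. Below is a self-contained sketch of that shape; the
family bytes and qualifier prefix are made up (the real ones come from
SubApplicationColumnFamily, SubApplicationColumnPrefix and
TimelineFilterUtils).

    import org.apache.hadoop.hbase.filter.BinaryComparator;
    import org.apache.hadoop.hbase.filter.BinaryPrefixComparator;
    import org.apache.hadoop.hbase.filter.CompareFilter.CompareOp;
    import org.apache.hadoop.hbase.filter.FamilyFilter;
    import org.apache.hadoop.hbase.filter.FilterList;
    import org.apache.hadoop.hbase.filter.FilterList.Operator;
    import org.apache.hadoop.hbase.filter.QualifierFilter;
    import org.apache.hadoop.hbase.util.Bytes;

    public final class FieldFilterSketch {
      public static FilterList buildListBasedOnFields() {
        // Outer list: a cell is kept if ANY branch accepts it.
        FilterList listBasedOnFields = new FilterList(Operator.MUST_PASS_ONE);

        // Branch 1: the info family, minus an excluded qualifier prefix.
        // The no-arg FilterList constructor defaults to MUST_PASS_ALL, so
        // both conditions must hold.
        FilterList infoBranch = new FilterList();
        infoBranch.addFilter(new FamilyFilter(CompareOp.EQUAL,
            new BinaryComparator(Bytes.toBytes("i"))));
        infoBranch.addFilter(new QualifierFilter(CompareOp.NOT_EQUAL,
            new BinaryPrefixComparator(Bytes.toBytes("e!"))));
        listBasedOnFields.addFilter(infoBranch);

        // Branch 2: everything in the metrics family.
        listBasedOnFields.addFilter(new FamilyFilter(CompareOp.EQUAL,
            new BinaryComparator(Bytes.toBytes("m"))));
        return listBasedOnFields;
      }
    }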
http://git-wip-us.apache.org/repos/asf/hadoop/blob/b2efebdd/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/reader/TimelineEntityReaderFactory.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/reader/TimelineEntityReaderFactory.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/reader/TimelineEntityReaderFactory.java
index 16fffa4..fa16077 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/reader/TimelineEntityReaderFactory.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/reader/TimelineEntityReaderFactory.java
@@ -82,6 +82,9 @@ public final class TimelineEntityReaderFactory {
         YARN_FLOW_RUN.matches(context.getEntityType())) {
       return new FlowRunEntityReader(context, filters, dataToRetrieve);
     } else {
+      if (context.getDoAsUser() != null) {
+        return new SubApplicationEntityReader(context, filters, dataToRetrieve);
+      }
       // assume we're dealing with a generic entity read
       return new GenericEntityReader(context, filters, dataToRetrieve);
     }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/b2efebdd/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/reader/TimelineReaderContext.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/reader/TimelineReaderContext.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/reader/TimelineReaderContext.java
index 5f308cb..67c3d29 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/reader/TimelineReaderContext.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/reader/TimelineReaderContext.java
@@ -32,6 +32,7 @@ public class TimelineReaderContext extends TimelineContext {
   private String entityType;
   private String entityId;
   private Long entityIdPrefix;
+  private String doAsUser;
   public TimelineReaderContext(String clusterId, String userId, String flowName,
       Long flowRunId, String appId, String entityType, String entityId) {
     super(clusterId, userId, flowName, flowRunId, appId);
@@ -46,10 +47,18 @@ public class TimelineReaderContext extends TimelineContext {
     this.entityIdPrefix = entityIdPrefix;
   }
 
+  public TimelineReaderContext(String clusterId, String userId, String flowName,
+      Long flowRunId, String appId, String entityType, Long entityIdPrefix,
+      String entityId, String doasUser) {
+    this(clusterId, userId, flowName, flowRunId, appId, entityType, entityId);
+    this.entityIdPrefix = entityIdPrefix;
+    this.doAsUser = doasUser;
+  }
+
   public TimelineReaderContext(TimelineReaderContext other) {
     this(other.getClusterId(), other.getUserId(), other.getFlowName(),
         other.getFlowRunId(), other.getAppId(), other.getEntityType(),
-        other.getEntityIdPrefix(), other.getEntityId());
+        other.getEntityIdPrefix(), other.getEntityId(), other.getDoAsUser());
   }
 
   @Override
@@ -113,4 +122,12 @@ public class TimelineReaderContext extends TimelineContext {
   public void setEntityIdPrefix(Long entityIdPrefix) {
     this.entityIdPrefix = entityIdPrefix;
   }
+
+  public String getDoAsUser() {
+    return doAsUser;
+  }
+
+  public void setDoAsUser(String doAsUser) {
+    this.doAsUser = doAsUser;
+  }
 }
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hadoop/blob/b2efebdd/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/reader/TimelineReaderManager.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/reader/TimelineReaderManager.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/reader/TimelineReaderManager.java
index ee827da..67e5849 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/reader/TimelineReaderManager.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/reader/TimelineReaderManager.java
@@ -111,8 +111,13 @@ public class TimelineReaderManager extends AbstractService {
     context.setEntityType(entity.getType());
     context.setEntityIdPrefix(entity.getIdPrefix());
     context.setEntityId(entity.getId());
-    entity.setUID(TimelineReaderUtils.UID_KEY,
-        TimelineUIDConverter.GENERIC_ENTITY_UID.encodeUID(context));
+    if (context.getDoAsUser() != null) {
+      entity.setUID(TimelineReaderUtils.UID_KEY,
+          TimelineUIDConverter.SUB_APPLICATION_ENTITY_UID.encodeUID(context));
+    } else {
+      entity.setUID(TimelineReaderUtils.UID_KEY,
+          TimelineUIDConverter.GENERIC_ENTITY_UID.encodeUID(context));
+    }
   }
 
   /**

http://git-wip-us.apache.org/repos/asf/hadoop/blob/b2efebdd/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/reader/TimelineReaderWebServices.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/reader/TimelineReaderWebServices.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/reader/TimelineReaderWebServices.java
index d67de71..dfe04f9 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/reader/TimelineReaderWebServices.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/reader/TimelineReaderWebServices.java
@@ -3245,4 +3245,162 @@ public class TimelineReaderWebServices {
         " (Took " + (endTime - startTime) + " ms.)");
     return results;
   }
+
+  @GET
+  @Path("/users/{userid}/entities/{entitytype}")
+  @Produces(MediaType.APPLICATION_JSON + "; " + JettyUtils.UTF_8)
+  public Set<TimelineEntity> getSubAppEntities(
+      @Context HttpServletRequest req,
+      @Context HttpServletResponse res,
+      @PathParam("userid") String userId,
+      @PathParam("entitytype") String entityType,
+      @QueryParam("limit") String limit,
+      @QueryParam("createdtimestart") String createdTimeStart,
+      @QueryParam("createdtimeend") String createdTimeEnd,
+      @QueryParam("relatesto") String relatesTo,
+      @QueryParam("isrelatedto") String isRelatedTo,
+      @QueryParam("infofilters") String infofilters,
+      @QueryParam("conffilters") String conffilters,
+      @QueryParam("metricfilters") String metricfilters,
+      @QueryParam("eventfilters") String eventfilters,
+      @QueryParam("confstoretrieve") String confsToRetrieve,
+      @QueryParam("metricstoretrieve") String metricsToRetrieve,
+      @QueryParam("fields") String fields,
+      @QueryParam("metricslimit") String metricsLimit,
+      @QueryParam("metricstimestart") String metricsTimeStart,
+      @QueryParam("metricstimeend") String metricsTimeEnd,
+      @QueryParam("fromid") String fromId) {
+    return getSubAppEntities(req, res, null, userId, entityType, limit,
+        createdTimeStart, createdTimeEnd, relatesTo, isRelatedTo, infofilters,
+        conffilters, metricfilters, eventfilters, confsToRetrieve,
+        metricsToRetrieve, fields, metricsLimit, metricsTimeStart,
+        metricsTimeEnd, fromId);
+  }
+
+  @GET
+  @Path("/clusters/{clusterid}/users/{userid}/entities/{entitytype}")
+  @Produces(MediaType.APPLICATION_JSON + "; " + JettyUtils.UTF_8)
+  public Set<TimelineEntity> getSubAppEntities(
+      @Context HttpServletRequest req,
+      @Context HttpServletResponse res,
+      @PathParam("clusterid") String clusterId,
+      @PathParam("userid") String userId,
+      @PathParam("entitytype") String entityType,
+      @QueryParam("limit") String limit,
+      @QueryParam("createdtimestart") String createdTimeStart,
+      @QueryParam("createdtimeend") String createdTimeEnd,
+      @QueryParam("relatesto") String relatesTo,
+      @QueryParam("isrelatedto") String isRelatedTo,
+      @QueryParam("infofilters") String infofilters,
+      @QueryParam("conffilters") String conffilters,
+      @QueryParam("metricfilters") String metricfilters,
+      @QueryParam("eventfilters") String eventfilters,
+      @QueryParam("confstoretrieve") String confsToRetrieve,
+      @QueryParam("metricstoretrieve") String metricsToRetrieve,
+      @QueryParam("fields") String fields,
+      @QueryParam("metricslimit") String metricsLimit,
+      @QueryParam("metricstimestart") String metricsTimeStart,
+      @QueryParam("metricstimeend") String metricsTimeEnd,
+      @QueryParam("fromid") String fromId) {
+    String url = req.getRequestURI() +
+        (req.getQueryString() == null ? "" :
+            QUERY_STRING_SEP + req.getQueryString());
+    UserGroupInformation callerUGI =
+        TimelineReaderWebServicesUtils.getUser(req);
+    LOG.info("Received URL " + url + " from user " +
+        TimelineReaderWebServicesUtils.getUserName(callerUGI));
+    long startTime = Time.monotonicNow();
+    init(res);
+    TimelineReaderManager timelineReaderManager = getTimelineReaderManager();
+    Set<TimelineEntity> entities = null;
+    try {
+      TimelineReaderContext context =
+          TimelineReaderWebServicesUtils.createTimelineReaderContext(clusterId,
+              null, null, null, null, entityType, null, null, userId);
+      entities = timelineReaderManager.getEntities(context,
+          TimelineReaderWebServicesUtils.createTimelineEntityFilters(
+          limit, createdTimeStart, createdTimeEnd, relatesTo, isRelatedTo,
+              infofilters, conffilters, metricfilters, eventfilters,
+              fromId),
+          TimelineReaderWebServicesUtils.createTimelineDataToRetrieve(
+          confsToRetrieve, metricsToRetrieve, fields, metricsLimit,
+          metricsTimeStart, metricsTimeEnd));
+    } catch (Exception e) {
+      handleException(e, url, startTime,
+          "createdTime start/end or limit");
+    }
+    long endTime = Time.monotonicNow();
+    if (entities == null) {
+      entities = Collections.emptySet();
+    }
+    LOG.info("Processed URL " + url +
+        " (Took " + (endTime - startTime) + " ms.)");
+    return entities;
+  }
+
+  @GET
+  @Path("/users/{userid}/entities/{entitytype}/{entityid}")
+  @Produces(MediaType.APPLICATION_JSON + "; " + JettyUtils.UTF_8)
+  public Set<TimelineEntity> getSubAppEntities(@Context HttpServletRequest req,
+      @Context HttpServletResponse res, @PathParam("userid") String userId,
+      @PathParam("entitytype") String entityType,
+      @PathParam("entityid") String entityId,
+      @QueryParam("confstoretrieve") String confsToRetrieve,
+      @QueryParam("metricstoretrieve") String metricsToRetrieve,
+      @QueryParam("fields") String fields,
+      @QueryParam("metricslimit") String metricsLimit,
+      @QueryParam("metricstimestart") String metricsTimeStart,
+      @QueryParam("metricstimeend") String metricsTimeEnd,
+      @QueryParam("entityidprefix") String entityIdPrefix) {
+    return getSubAppEntities(req, res, null, userId, entityType, entityId,
+        confsToRetrieve, metricsToRetrieve, fields, metricsLimit,
+        metricsTimeStart, metricsTimeEnd, entityIdPrefix);
+  }
+
+  @GET
+  @Path("/clusters/{clusterid}/users/{userid}/entities/{entitytype}/{entityid}")
+  @Produces(MediaType.APPLICATION_JSON + "; " + JettyUtils.UTF_8)
+  public Set<TimelineEntity> getSubAppEntities(@Context HttpServletRequest req,
+      @Context HttpServletResponse res,
+      @PathParam("clusterid") String clusterId,
+      @PathParam("userid") String userId,
+      @PathParam("entitytype") String entityType,
+      @PathParam("entityid") String entityId,
+      @QueryParam("confstoretrieve") String confsToRetrieve,
+      @QueryParam("metricstoretrieve") String metricsToRetrieve,
+      @QueryParam("fields") String fields,
+      @QueryParam("metricslimit") String metricsLimit,
+      @QueryParam("metricstimestart") String metricsTimeStart,
+      @QueryParam("metricstimeend") String metricsTimeEnd,
+      @QueryParam("entityidprefix") String entityIdPrefix) {
+    String url = req.getRequestURI() + (req.getQueryString() == null ? ""
+        : QUERY_STRING_SEP + req.getQueryString());
+    UserGroupInformation callerUGI =
+        TimelineReaderWebServicesUtils.getUser(req);
+    LOG.info("Received URL " + url + " from user "
+        + TimelineReaderWebServicesUtils.getUserName(callerUGI));
+    long startTime = Time.monotonicNow();
+    init(res);
+    TimelineReaderManager timelineReaderManager = getTimelineReaderManager();
+    Set<TimelineEntity> entities = null;
+    try {
+      TimelineReaderContext context = TimelineReaderWebServicesUtils
+          .createTimelineReaderContext(clusterId, null, null, null, null,
+              entityType, entityIdPrefix, entityId, userId);
+      entities = timelineReaderManager.getEntities(context,
+          new TimelineEntityFilters.Builder().build(),
+          TimelineReaderWebServicesUtils.createTimelineDataToRetrieve(
+              confsToRetrieve, metricsToRetrieve, fields, metricsLimit,
+              metricsTimeStart, metricsTimeEnd));
+    } catch (Exception e) {
+      handleException(e, url, startTime, "");
+    }
+    long endTime = Time.monotonicNow();
+    if (entities == null) {
+      entities = Collections.emptySet();
+    }
+    LOG.info(
+        "Processed URL " + url + " (Took " + (endTime - startTime) + " ms.)");
+    return entities;
+  }
 }

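For reference, the shapes of the requests the four new endpoints accept. The
hosts, path values and the fromid cursor below are illustrative:

    GET /ws/v2/timeline/users/user1/entities/YARN_CONTAINER?limit=10
    GET /ws/v2/timeline/clusters/cluster1/users/user1/entities/YARN_CONTAINER?limit=4&fromid=<FROM_ID of last entity>
    GET /ws/v2/timeline/users/user1/entities/YARN_CONTAINER/container_1111111111_1111_01_000001?entityidprefix=54321
    GET /ws/v2/timeline/clusters/cluster1/users/user1/entities/YARN_CONTAINER/container_1111111111_1111_01_000001?entityidprefix=54321

Note that the userid path segment is the doAs user under which the entities
were written, not the flow user; the reader routes these requests to
SubApplicationEntityReader via the doAsUser field added to
TimelineReaderContext above.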
http://git-wip-us.apache.org/repos/asf/hadoop/blob/b2efebdd/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/reader/TimelineReaderWebServicesUtils.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/reader/TimelineReaderWebServicesUtils.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/reader/TimelineReaderWebServicesUtils.java
index d613eab..f83c1ac 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/reader/TimelineReaderWebServicesUtils.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/reader/TimelineReaderWebServicesUtils.java
@@ -57,6 +57,16 @@ public final class TimelineReaderWebServicesUtils {
         parseStr(entityType), parseLongStr(entityIdPrefix), parseStr(entityId));
   }
 
+  static TimelineReaderContext createTimelineReaderContext(String clusterId,
+      String userId, String flowName, String flowRunId, String appId,
+      String entityType, String entityIdPrefix, String entityId,
+      String doAsUser) {
+    return new TimelineReaderContext(parseStr(clusterId), parseStr(userId),
+        parseStr(flowName), parseLongStr(flowRunId), parseStr(appId),
+        parseStr(entityType), parseLongStr(entityIdPrefix), parseStr(entityId),
+        parseStr(doAsUser));
+  }
+
   /**
    * Parse the passed filters represented as strings and convert them into a
    * {@link TimelineEntityFilters} object.

http://git-wip-us.apache.org/repos/asf/hadoop/blob/b2efebdd/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/reader/TimelineUIDConverter.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/reader/TimelineUIDConverter.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/reader/TimelineUIDConverter.java
index 52e24e1..b875828 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/reader/TimelineUIDConverter.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/reader/TimelineUIDConverter.java
@@ -137,6 +137,41 @@ enum TimelineUIDConverter {
     }
   },
 
+  // Sub Application Entity UID should contain cluster, user, entity type,
+  // entity id prefix and entity id.
+  SUB_APPLICATION_ENTITY_UID {
+    @Override
+    String encodeUID(TimelineReaderContext context) {
+      if (context == null) {
+        return null;
+      }
+      if (context.getClusterId() == null || context.getDoAsUser() == null
+          || context.getEntityType() == null || context.getEntityId() == null) {
+        return null;
+      }
+      String[] entityTupleArr = {context.getClusterId(), context.getDoAsUser(),
+          context.getEntityType(), context.getEntityIdPrefix().toString(),
+          context.getEntityId()};
+      return joinAndEscapeUIDParts(entityTupleArr);
+    }
+
+    @Override
+    TimelineReaderContext decodeUID(String uId) throws Exception {
+      if (uId == null) {
+        return null;
+      }
+      List<String> entityTupleList = splitUID(uId);
+      if (entityTupleList.size() == 5) {
+        // All of cluster, user, entity type, id prefix and id exist.
+        return new TimelineReaderContext(entityTupleList.get(0), null, null,
+            null, null, entityTupleList.get(2),
+            Long.parseLong(entityTupleList.get(3)), entityTupleList.get(4),
+            entityTupleList.get(1));
+      }
+      return null;
+    }
+  },
+
   // Generic Entity UID should contain cluster, user, flow name, flowrun id,
   // app id, entity type and entity id OR should contain cluster, appid, entity
   // type and entity id(i.e.without flow context info).

http://git-wip-us.apache.org/repos/asf/hadoop/blob/b2efebdd/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/test/java/org/apache/hadoop/yarn/server/timelineservice/reader/TestTimelineUIDConverter.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/test/java/org/apache/hadoop/yarn/server/timelineservice/reader/TestTimelineUIDConverter.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/test/java/org/apache/hadoop/yarn/server/timelineservice/reader/TestTimelineUIDConverter.java
index 11dc913..12b3fc0 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/test/java/org/apache/hadoop/yarn/server/timelineservice/reader/TestTimelineUIDConverter.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/test/java/org/apache/hadoop/yarn/server/timelineservice/reader/TestTimelineUIDConverter.java
@@ -68,6 +68,15 @@ public class TestTimelineUIDConverter {
         "54321!container_1111111111_1111_01_000001", uid);
     assertEquals(
         context, TimelineUIDConverter.GENERIC_ENTITY_UID.decodeUID(uid));
+
+    context = new TimelineReaderContext("yarn_cluster", null, null, null, null,
+        "YARN_CONTAINER", 54321L, "container_1111111111_1111_01_000001",
+        "user1");
+    uid = TimelineUIDConverter.SUB_APPLICATION_ENTITY_UID.encodeUID(context);
+    assertEquals("yarn_cluster!user1!YARN_CONTAINER!"
+        + "54321!container_1111111111_1111_01_000001", uid);
+    assertEquals(context,
+        TimelineUIDConverter.SUB_APPLICATION_ENTITY_UID.decodeUID(uid));
   }
 
   @Test




[23/50] [abbrv] hadoop git commit: YARN-6638. [ATSv2 Security] Timeline reader side changes for loading auth filters and principals. Contributed by Varun Saxena

Posted by st...@apache.org.
YARN-6638. [ATSv2 Security] Timeline reader side changes for loading auth filters and principals. Contributed by Varun Saxena


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/d3f11e3f
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/d3f11e3f
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/d3f11e3f

Branch: refs/heads/HADOOP-13345
Commit: d3f11e3f13ed5efc7f0b7f19567d142e554c35ed
Parents: 879de51
Author: Jian He <ji...@apache.org>
Authored: Fri Jun 9 13:42:38 2017 -0700
Committer: Varun Saxena <va...@apache.org>
Committed: Wed Aug 30 11:29:53 2017 +0530

----------------------------------------------------------------------
 ...TimelineAuthenticationFilterInitializer.java | 69 ++++++++++---------
 .../AbstractTimelineReaderHBaseTestBase.java    | 11 +--
 .../reader/TimelineReaderServer.java            | 70 +++++++++++---------
 ...neReaderAuthenticationFilterInitializer.java | 53 +++++++++++++++
 .../reader/security/package-info.java           | 25 +++++++
 5 files changed, 160 insertions(+), 68 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/d3f11e3f/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/timeline/security/TimelineAuthenticationFilterInitializer.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/timeline/security/TimelineAuthenticationFilterInitializer.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/timeline/security/TimelineAuthenticationFilterInitializer.java
index 4e7c29a..06f9868e 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/timeline/security/TimelineAuthenticationFilterInitializer.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/timeline/security/TimelineAuthenticationFilterInitializer.java
@@ -51,30 +51,18 @@ import java.util.Map;
 public class TimelineAuthenticationFilterInitializer extends FilterInitializer {
 
   /**
-   * The configuration prefix of timeline HTTP authentication
+   * The configuration prefix of timeline HTTP authentication.
    */
   public static final String PREFIX = "yarn.timeline-service.http-authentication.";
 
   @VisibleForTesting
   Map<String, String> filterConfig;
 
-  /**
-   * Initializes {@link TimelineAuthenticationFilter}
-   * <p>
-   * Propagates to {@link TimelineAuthenticationFilter} configuration all YARN
-   * configuration properties prefixed with {@value #PREFIX}
-   *
-   * @param container
-   *          The filter container
-   * @param conf
-   *          Configuration for run-time parameters
-   */
-  @Override
-  public void initFilter(FilterContainer container, Configuration conf) {
+  protected void setAuthFilterConfig(Configuration conf) {
     filterConfig = new HashMap<String, String>();
 
     // setting the cookie path to root '/' so it is used for all resources.
-    filterConfig.put(TimelineAuthenticationFilter.COOKIE_PATH, "/");
+    filterConfig.put(AuthenticationFilter.COOKIE_PATH, "/");
 
     for (Map.Entry<String, String> entry : conf) {
       String name = entry.getKey();
@@ -95,6 +83,41 @@ public class TimelineAuthenticationFilterInitializer extends FilterInitializer {
       }
     }
 
+    // Resolve _HOST into bind address
+    String bindAddress = conf.get(HttpServer2.BIND_ADDRESS);
+    String principal =
+        filterConfig.get(KerberosAuthenticationHandler.PRINCIPAL);
+    if (principal != null) {
+      try {
+        principal = SecurityUtil.getServerPrincipal(principal, bindAddress);
+      } catch (IOException ex) {
+        throw new RuntimeException("Could not resolve Kerberos principal " +
+            "name: " + ex.toString(), ex);
+      }
+      filterConfig.put(KerberosAuthenticationHandler.PRINCIPAL,
+          principal);
+    }
+  }
+
+  protected Map<String, String> getFilterConfig() {
+    return filterConfig;
+  }
+
+  /**
+   * Initializes {@link TimelineAuthenticationFilter}
+   * <p>
+   * Propagates to {@link TimelineAuthenticationFilter} configuration all YARN
+   * configuration properties prefixed with {@value #PREFIX}
+   *
+   * @param container
+   *          The filter container
+   * @param conf
+   *          Configuration for run-time parameters
+   */
+  @Override
+  public void initFilter(FilterContainer container, Configuration conf) {
+    setAuthFilterConfig(conf);
+
     String authType = filterConfig.get(AuthenticationFilter.AUTH_TYPE);
     if (authType.equals(PseudoAuthenticationHandler.TYPE)) {
       filterConfig.put(AuthenticationFilter.AUTH_TYPE,
@@ -102,23 +125,7 @@ public class TimelineAuthenticationFilterInitializer extends FilterInitializer {
     } else if (authType.equals(KerberosAuthenticationHandler.TYPE)) {
       filterConfig.put(AuthenticationFilter.AUTH_TYPE,
           KerberosDelegationTokenAuthenticationHandler.class.getName());
-
-      // Resolve _HOST into bind address
-      String bindAddress = conf.get(HttpServer2.BIND_ADDRESS);
-      String principal =
-          filterConfig.get(KerberosAuthenticationHandler.PRINCIPAL);
-      if (principal != null) {
-        try {
-          principal = SecurityUtil.getServerPrincipal(principal, bindAddress);
-        } catch (IOException ex) {
-          throw new RuntimeException(
-              "Could not resolve Kerberos principal name: " + ex.toString(), ex);
-        }
-        filterConfig.put(KerberosAuthenticationHandler.PRINCIPAL,
-            principal);
-      }
     }
-
     filterConfig.put(DelegationTokenAuthenticationHandler.TOKEN_KIND,
         TimelineDelegationTokenIdentifier.KIND_NAME.toString());
 

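The _HOST substitution moved into setAuthFilterConfig() above is performed by
SecurityUtil.getServerPrincipal(). A minimal sketch of that call in
isolation, with a hypothetical principal and host name:

    import java.io.IOException;
    import org.apache.hadoop.security.SecurityUtil;

    public class HostResolutionSketch {
      public static void main(String[] args) throws IOException {
        // "_HOST" in the configured principal is replaced with the host name.
        String configured = "HTTP/_HOST@EXAMPLE.COM";  // hypothetical value
        String resolved =
            SecurityUtil.getServerPrincipal(configured, "reader.example.com");
        System.out.println(resolved); // HTTP/reader.example.com@EXAMPLE.COM
      }
    }
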
http://git-wip-us.apache.org/repos/asf/hadoop/blob/d3f11e3f/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase-tests/src/test/java/org/apache/hadoop/yarn/server/timelineservice/reader/AbstractTimelineReaderHBaseTestBase.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase-tests/src/test/java/org/apache/hadoop/yarn/server/timelineservice/reader/AbstractTimelineReaderHBaseTestBase.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase-tests/src/test/java/org/apache/hadoop/yarn/server/timelineservice/reader/AbstractTimelineReaderHBaseTestBase.java
index 7853c94..3519c3f 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase-tests/src/test/java/org/apache/hadoop/yarn/server/timelineservice/reader/AbstractTimelineReaderHBaseTestBase.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase-tests/src/test/java/org/apache/hadoop/yarn/server/timelineservice/reader/AbstractTimelineReaderHBaseTestBase.java
@@ -88,13 +88,14 @@ public abstract class AbstractTimelineReaderHBaseTestBase {
       config.setInt("hfile.format.version", 3);
       server = new TimelineReaderServer() {
         @Override
-        protected void setupOptions(Configuration conf) {
-          // The parent code tries to use HttpServer2 from this version of
-          // Hadoop, but the tests are loading in HttpServer2 from
-          // ${hbase-compatible-hadoop.version}. This version uses Jetty 9
+        protected void addFilters(Configuration conf) {
+          // The parent code uses hadoop-common jar from this version of
+          // Hadoop, but the tests are using hadoop-common jar from
+          // ${hbase-compatible-hadoop.version}.  This version uses Jetty 9
           // while ${hbase-compatible-hadoop.version} uses Jetty 6, and there
           // are many differences, including classnames and packages.
-          // We do nothing here, so that we don't cause a NoSuchMethodError.
+          // We do nothing here, so that we don't cause a NoSuchMethodError or
+          // NoClassDefFoundError.
           // Once ${hbase-compatible-hadoop.version} is changed to Hadoop 3,
           // we should be able to remove this @Override.
         }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d3f11e3f/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/reader/TimelineReaderServer.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/reader/TimelineReaderServer.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/reader/TimelineReaderServer.java
index 6cdf937..61f2425 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/reader/TimelineReaderServer.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/reader/TimelineReaderServer.java
@@ -18,19 +18,18 @@
 
 package org.apache.hadoop.yarn.server.timelineservice.reader;
 
-import static org.apache.hadoop.fs.CommonConfigurationKeys.DEFAULT_HADOOP_HTTP_STATIC_USER;
-import static org.apache.hadoop.fs.CommonConfigurationKeys.HADOOP_HTTP_STATIC_USER;
-
+import java.io.IOException;
+import java.net.InetSocketAddress;
 import java.net.URI;
-import java.util.HashMap;
-import java.util.Map;
+import java.util.LinkedHashSet;
+import java.util.Set;
 
 import org.apache.hadoop.classification.InterfaceAudience.Private;
 import org.apache.hadoop.classification.InterfaceStability.Unstable;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.http.HttpServer2;
-import org.apache.hadoop.http.lib.StaticUserWebFilter;
 import org.apache.hadoop.security.HttpCrossOriginFilterInitializer;
+import org.apache.hadoop.security.SecurityUtil;
 import org.apache.hadoop.service.CompositeService;
 import org.apache.hadoop.util.ExitUtil;
 import org.apache.hadoop.util.ReflectionUtils;
@@ -40,7 +39,9 @@ import org.apache.hadoop.yarn.YarnUncaughtExceptionHandler;
 import org.apache.hadoop.yarn.conf.YarnConfiguration;
 import org.apache.hadoop.yarn.exceptions.YarnException;
 import org.apache.hadoop.yarn.exceptions.YarnRuntimeException;
+import org.apache.hadoop.yarn.server.timelineservice.reader.security.TimelineReaderAuthenticationFilterInitializer;
 import org.apache.hadoop.yarn.server.timelineservice.storage.TimelineReader;
+import org.apache.hadoop.yarn.server.util.timeline.TimelineServerUtils;
 import org.apache.hadoop.yarn.webapp.GenericExceptionHandler;
 import org.apache.hadoop.yarn.webapp.YarnJacksonJaxbJsonProvider;
 import org.apache.hadoop.yarn.webapp.util.WebAppUtils;
@@ -71,6 +72,17 @@ public class TimelineReaderServer extends CompositeService {
     if (!YarnConfiguration.timelineServiceV2Enabled(conf)) {
       throw new YarnException("timeline service v.2 is not enabled");
     }
+    InetSocketAddress bindAddr = conf.getSocketAddr(
+        YarnConfiguration.TIMELINE_SERVICE_ADDRESS,
+            YarnConfiguration.DEFAULT_TIMELINE_SERVICE_ADDRESS,
+                YarnConfiguration.DEFAULT_TIMELINE_SERVICE_PORT);
+    // Login from keytab if security is enabled.
+    try {
+      SecurityUtil.login(conf, YarnConfiguration.TIMELINE_SERVICE_KEYTAB,
+          YarnConfiguration.TIMELINE_SERVICE_PRINCIPAL, bindAddr.getHostName());
+    } catch(IOException e) {
+      throw new YarnRuntimeException("Failed to login from keytab", e);
+    }
 
     TimelineReader timelineReaderStore = createTimelineReaderStore(conf);
     timelineReaderStore.init(conf);
@@ -130,29 +142,39 @@ public class TimelineReaderServer extends CompositeService {
     super.serviceStop();
   }
 
-  private void startTimelineReaderWebApp() {
-    Configuration conf = getConfig();
-    String bindAddress = WebAppUtils.getWebAppBindURL(conf,
-        YarnConfiguration.TIMELINE_SERVICE_BIND_HOST,
-        WebAppUtils.getTimelineReaderWebAppURL(conf));
-    LOG.info("Instantiating TimelineReaderWebApp at " + bindAddress);
+  protected void addFilters(Configuration conf) {
     boolean enableCorsFilter = conf.getBoolean(
         YarnConfiguration.TIMELINE_SERVICE_HTTP_CROSS_ORIGIN_ENABLED,
         YarnConfiguration.TIMELINE_SERVICE_HTTP_CROSS_ORIGIN_ENABLED_DEFAULT);
-    // setup CORS
+    // Setup CORS
     if (enableCorsFilter) {
       conf.setBoolean(HttpCrossOriginFilterInitializer.PREFIX
           + HttpCrossOriginFilterInitializer.ENABLED_SUFFIX, true);
     }
+    String initializers = conf.get("hadoop.http.filter.initializers", "");
+    Set<String> defaultInitializers = new LinkedHashSet<String>();
+    if (!initializers.contains(
+        TimelineReaderAuthenticationFilterInitializer.class.getName())) {
+      defaultInitializers.add(
+          TimelineReaderAuthenticationFilterInitializer.class.getName());
+    }
+    TimelineServerUtils.setTimelineFilters(
+        conf, initializers, defaultInitializers);
+  }
+
+  private void startTimelineReaderWebApp() {
+    Configuration conf = getConfig();
+    addFilters(conf);
+    String bindAddress = WebAppUtils.getWebAppBindURL(conf,
+        YarnConfiguration.TIMELINE_SERVICE_BIND_HOST,
+        WebAppUtils.getTimelineReaderWebAppURL(conf));
+    LOG.info("Instantiating TimelineReaderWebApp at " + bindAddress);
     try {
       HttpServer2.Builder builder = new HttpServer2.Builder()
             .setName("timeline")
             .setConf(conf)
             .addEndpoint(URI.create("http://" + bindAddress));
       readerWebServer = builder.build();
-
-      setupOptions(conf);
-
       readerWebServer.addJerseyResourcePackage(
           TimelineReaderWebServices.class.getPackage().getName() + ";"
               + GenericExceptionHandler.class.getPackage().getName() + ";"
@@ -168,22 +190,6 @@ public class TimelineReaderServer extends CompositeService {
     }
   }
 
-  /**
-   * Sets up some options and filters.
-   *
-   * @param conf Configuration
-   */
-  protected void setupOptions(Configuration conf) {
-    Map<String, String> options = new HashMap<>();
-    String username = conf.get(HADOOP_HTTP_STATIC_USER,
-        DEFAULT_HADOOP_HTTP_STATIC_USER);
-    options.put(HADOOP_HTTP_STATIC_USER, username);
-    HttpServer2.defineFilter(readerWebServer.getWebAppContext(),
-        "static_user_filter_timeline",
-        StaticUserWebFilter.StaticUserFilter.class.getName(),
-        options, new String[] {"/*"});
-  }
-
   @VisibleForTesting
   public int getWebServerPort() {
     return readerWebServer.getConnectorAddress(0).getPort();

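As the addFilters() hunk above shows, the reader now appends
TimelineReaderAuthenticationFilterInitializer to
hadoop.http.filter.initializers when it is not already listed, so no extra
configuration is needed; a deployment that prefers to be explicit can still
list it itself, sketched here:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.yarn.conf.YarnConfiguration;
    import org.apache.hadoop.yarn.server.timelineservice.reader.security.TimelineReaderAuthenticationFilterInitializer;

    public class FilterInitializerConfigSketch {
      public static void main(String[] args) {
        Configuration conf = new YarnConfiguration();
        // Equivalent to what addFilters() arranges when the entry is absent.
        conf.set("hadoop.http.filter.initializers",
            TimelineReaderAuthenticationFilterInitializer.class.getName());
      }
    }
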
http://git-wip-us.apache.org/repos/asf/hadoop/blob/d3f11e3f/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/reader/security/TimelineReaderAuthenticationFilterInitializer.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/reader/security/TimelineReaderAuthenticationFilterInitializer.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/reader/security/TimelineReaderAuthenticationFilterInitializer.java
new file mode 100644
index 0000000..e0e1f4d
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/reader/security/TimelineReaderAuthenticationFilterInitializer.java
@@ -0,0 +1,53 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.server.timelineservice.reader.security;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.http.FilterContainer;
+import org.apache.hadoop.security.AuthenticationWithProxyUserFilter;
+import org.apache.hadoop.yarn.server.timeline.security.TimelineAuthenticationFilterInitializer;
+
+/**
+ * Filter initializer to initialize {@link AuthenticationWithProxyUserFilter}
+ * for ATSv2 timeline reader server with timeline service specific
+ * configurations.
+ */
+public class TimelineReaderAuthenticationFilterInitializer extends
+    TimelineAuthenticationFilterInitializer {
+
+  /**
+   * Initializes {@link AuthenticationWithProxyUserFilter}
+   * <p>
+   * Propagates to {@link AuthenticationWithProxyUserFilter} configuration all
+   * YARN configuration properties prefixed with
+   * {@value TimelineAuthenticationFilterInitializer#PREFIX}.
+   *
+   * @param container
+   *          The filter container
+   * @param conf
+   *          Configuration for run-time parameters
+   */
+  @Override
+  public void initFilter(FilterContainer container, Configuration conf) {
+    setAuthFilterConfig(conf);
+    container.addGlobalFilter("Timeline Reader Authentication Filter",
+        AuthenticationWithProxyUserFilter.class.getName(),
+        getFilterConfig());
+  }
+}

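The setAuthFilterConfig()/getFilterConfig() split in the base class is what
keeps this subclass so small. A hypothetical further subclass, shown only to
illustrate the intended extension pattern (the filter class name is made up):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.http.FilterContainer;
    import org.apache.hadoop.yarn.server.timeline.security.TimelineAuthenticationFilterInitializer;

    public class CustomTimelineFilterInitializer
        extends TimelineAuthenticationFilterInitializer {
      @Override
      public void initFilter(FilterContainer container, Configuration conf) {
        // Gather all yarn.timeline-service.http-authentication.* properties,
        // then register whichever filter class this initializer is for.
        setAuthFilterConfig(conf);
        container.addGlobalFilter("Custom Timeline Filter",
            "org.example.CustomAuthFilter",  // hypothetical filter class
            getFilterConfig());
      }
    }
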
http://git-wip-us.apache.org/repos/asf/hadoop/blob/d3f11e3f/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/reader/security/package-info.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/reader/security/package-info.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/reader/security/package-info.java
new file mode 100644
index 0000000..5888c98
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/reader/security/package-info.java
@@ -0,0 +1,25 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/**
+ * Package org.apache.hadoop.yarn.server.timelineservice.reader.security
+ * contains classes that support SPNEGO authentication for the timeline
+ * reader.
+ */
+@InterfaceAudience.Private
+package org.apache.hadoop.yarn.server.timelineservice.reader.security;
+import org.apache.hadoop.classification.InterfaceAudience;




[45/50] [abbrv] hadoop git commit: YARN-7115. Move BoundedAppender to org.hadoop.yarn.util package (Contributed by Jian He via Daniel Templeton)

Posted by st...@apache.org.
YARN-7115. Move BoundedAppender to org.hadoop.yarn.util package
(Contributed by Jian He via Daniel Templeton)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/cc23514a
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/cc23514a
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/cc23514a

Branch: refs/heads/HADOOP-13345
Commit: cc23514abacb4d6589e731cc5ce5d8e6f19c955d
Parents: 3e0e203
Author: Daniel Templeton <te...@apache.org>
Authored: Wed Aug 30 17:26:13 2017 -0700
Committer: Daniel Templeton <te...@apache.org>
Committed: Wed Aug 30 17:26:13 2017 -0700

----------------------------------------------------------------------
 .../hadoop-yarn/hadoop-yarn-common/pom.xml      |  14 +-
 .../hadoop/yarn/util/BoundedAppender.java       | 142 +++++++++++++++++++
 .../hadoop/yarn/util/TestBoundedAppender.java   | 115 +++++++++++++++
 .../rmapp/attempt/RMAppAttemptImpl.java         | 114 +--------------
 .../rmapp/attempt/TestBoundedAppender.java      | 116 ---------------
 .../TestRMAppAttemptImplDiagnostics.java        |   3 +-
 6 files changed, 270 insertions(+), 234 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/cc23514a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/pom.xml
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/pom.xml b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/pom.xml
index f17cf8c..c2a5c67 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/pom.xml
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/pom.xml
@@ -108,6 +108,15 @@
       <groupId>org.apache.hadoop</groupId>
       <artifactId>hadoop-annotations</artifactId>
     </dependency>
+    <!--
+    junit must be before mockito-all on the classpath.  mockito-all bundles its
+    own copy of the hamcrest classes, but they don't match our junit version.
+    -->
+    <dependency>
+      <groupId>junit</groupId>
+      <artifactId>junit</artifactId>
+      <scope>test</scope>
+    </dependency>
     <dependency>
       <groupId>org.mockito</groupId>
       <artifactId>mockito-all</artifactId>
@@ -129,11 +138,6 @@
       <artifactId>protobuf-java</artifactId>
     </dependency>
     <dependency>
-      <groupId>junit</groupId>
-      <artifactId>junit</artifactId>
-      <scope>test</scope>
-    </dependency>
-    <dependency>
       <groupId>org.bouncycastle</groupId>
       <artifactId>bcprov-jdk16</artifactId>
       <scope>test</scope>

http://git-wip-us.apache.org/repos/asf/hadoop/blob/cc23514a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/BoundedAppender.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/BoundedAppender.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/BoundedAppender.java
new file mode 100644
index 0000000..917d696
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/BoundedAppender.java
@@ -0,0 +1,142 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.util;
+
+import com.google.common.annotations.VisibleForTesting;
+import com.google.common.base.Preconditions;
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
+
+/**
+ * A {@link CharSequence} appender that considers its {@link #limit} as upper
+ * bound.
+ * <p>
+ * When {@link #limit} would be reached on append, past messages will be
+ * truncated from head, and a header telling the user about truncation will be
+ * prepended, with ellipses in between header and messages.
+ * <p>
+ * Note that header and ellipses are not counted against {@link #limit}.
+ * <p>
+ * An example:
+ *
+ * <pre>
+ * {@code
+ *   // At the beginning it's an empty string
+ *   final Appendable shortAppender = new BoundedAppender(80);
+ *   // The whole message fits into limit
+ *   shortAppender.append(
+ *       "message1 this is a very long message but fitting into limit\n");
+ *   // The first message is truncated, the second not
+ *   shortAppender.append("message2 this is shorter than the previous one\n");
+ *   // The first message is deleted, the second truncated, the third
+ *   // preserved
+ *   shortAppender.append("message3 this is even shorter message, maybe.\n");
+ *   // The first two are deleted, the third one truncated, the last preserved
+ *   shortAppender.append("message4 the shortest one, yet the greatest :)");
+ *   // Current contents are like this:
+ *   // Diagnostic messages truncated, showing last 80 chars out of 199:
+ *   // ...s is even shorter message, maybe.
+ *   // message4 the shortest one, yet the greatest :)
+ * }
+ * </pre>
+ * <p>
+ * Note that <tt>null</tt> values are {@link #append(CharSequence) append}ed
+ * just like in {@link StringBuilder#append(CharSequence) original
+ * implementation}.
+ * <p>
+ * Note that this class is not thread safe.
+ */
+
+@InterfaceAudience.Public
+@InterfaceStability.Unstable
+@VisibleForTesting
+public class BoundedAppender {
+  @VisibleForTesting
+  public static final String TRUNCATED_MESSAGES_TEMPLATE =
+      "Diagnostic messages truncated, showing last "
+          + "%d chars out of %d:%n...%s";
+
+  private final int limit;
+  private final StringBuilder messages = new StringBuilder();
+  private int totalCharacterCount = 0;
+
+  public BoundedAppender(final int limit) {
+    Preconditions.checkArgument(limit > 0, "limit should be positive");
+
+    this.limit = limit;
+  }
+
+  /**
+   * Append a {@link CharSequence} considering {@link #limit}, truncating
+   * from the head of {@code csq} or {@link #messages} when necessary.
+   *
+   * @param csq the {@link CharSequence} to append
+   * @return this
+   */
+  public BoundedAppender append(final CharSequence csq) {
+    appendAndCount(csq);
+    checkAndCut();
+
+    return this;
+  }
+
+  private void appendAndCount(final CharSequence csq) {
+    final int before = messages.length();
+    messages.append(csq);
+    final int after = messages.length();
+    totalCharacterCount += after - before;
+  }
+
+  private void checkAndCut() {
+    if (messages.length() > limit) {
+      final int newStart = messages.length() - limit;
+      messages.delete(0, newStart);
+    }
+  }
+
+  /**
+   * Get current length of messages considering truncates
+   * without header and ellipses.
+   *
+   * @return current length
+   */
+  public int length() {
+    return messages.length();
+  }
+
+  public int getLimit() {
+    return limit;
+  }
+
+  /**
+   * Get a string representation of the actual contents, displaying also a
+   * header and ellipses when there was a truncate.
+   *
+   * @return String representation of the {@link #messages}
+   */
+  @Override
+  public String toString() {
+    if (messages.length() < totalCharacterCount) {
+      return String.format(TRUNCATED_MESSAGES_TEMPLATE, messages.length(),
+          totalCharacterCount, messages.toString());
+    }
+
+    return messages.toString();
+  }
+}

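With BoundedAppender now public in hadoop-yarn-common, a short usage sketch
mirroring the javadoc example above (the 80-character limit is simply the
value that example uses):

    import org.apache.hadoop.yarn.util.BoundedAppender;

    public class BoundedAppenderSketch {
      public static void main(String[] args) {
        // Keep only the last 80 characters; older text is truncated from the
        // head and toString() prepends a truncation header with ellipses.
        BoundedAppender diagnostics = new BoundedAppender(80);
        diagnostics.append(
            "message1 this is a very long message but fitting into limit\n");
        diagnostics.append("message2 this is shorter than the previous one\n");
        System.out.println(diagnostics.toString());
      }
    }
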
http://git-wip-us.apache.org/repos/asf/hadoop/blob/cc23514a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/util/TestBoundedAppender.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/util/TestBoundedAppender.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/util/TestBoundedAppender.java
new file mode 100644
index 0000000..2b9cfce
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/util/TestBoundedAppender.java
@@ -0,0 +1,115 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.util;
+
+import org.junit.Rule;
+import org.junit.Test;
+import org.junit.rules.ExpectedException;
+
+import static org.junit.Assert.assertEquals;
+
+/**
+ * Test class for {@link BoundedAppender}.
+ */
+public class TestBoundedAppender {
+  @Rule
+  public ExpectedException expected = ExpectedException.none();
+
+  @Test
+  public void initWithZeroLimitThrowsException() {
+    expected.expect(IllegalArgumentException.class);
+    expected.expectMessage("limit should be positive");
+
+    new BoundedAppender(0);
+  }
+
+  @Test
+  public void nullAppendedNullStringRead() {
+    final BoundedAppender boundedAppender = new BoundedAppender(4);
+    boundedAppender.append(null);
+
+    assertEquals("null appended, \"null\" read", "null",
+        boundedAppender.toString());
+  }
+
+  @Test
+  public void appendBelowLimitOnceValueIsReadCorrectly() {
+    final BoundedAppender boundedAppender = new BoundedAppender(2);
+
+    boundedAppender.append("ab");
+
+    assertEquals("value appended is read correctly", "ab",
+        boundedAppender.toString());
+  }
+
+  @Test
+  public void appendValuesBelowLimitAreReadCorrectlyInFifoOrder() {
+    final BoundedAppender boundedAppender = new BoundedAppender(3);
+
+    boundedAppender.append("ab");
+    boundedAppender.append("cd");
+    boundedAppender.append("e");
+    boundedAppender.append("fg");
+
+    assertEquals("last values appended fitting limit are read correctly",
+        String.format(BoundedAppender.TRUNCATED_MESSAGES_TEMPLATE, 3, 7, "efg"),
+        boundedAppender.toString());
+  }
+
+  @Test
+  public void appendLastAboveLimitPreservesLastMessagePostfix() {
+    final BoundedAppender boundedAppender = new BoundedAppender(3);
+
+    boundedAppender.append("ab");
+    boundedAppender.append("cde");
+    boundedAppender.append("fghij");
+
+    assertEquals(
+        "last value appended above limit postfix is read correctly", String
+            .format(BoundedAppender.TRUNCATED_MESSAGES_TEMPLATE, 3, 10, "hij"),
+        boundedAppender.toString());
+  }
+
+  @Test
+  public void appendMiddleAboveLimitPreservesLastMessageAndMiddlePostfix() {
+    final BoundedAppender boundedAppender = new BoundedAppender(3);
+
+    boundedAppender.append("ab");
+    boundedAppender.append("cde");
+
+    assertEquals("last value appended above limit postfix is read correctly",
+        String.format(BoundedAppender.TRUNCATED_MESSAGES_TEMPLATE, 3, 5, "cde"),
+        boundedAppender.toString());
+
+    boundedAppender.append("fg");
+
+    assertEquals(
+        "middle value appended above limit postfix and last value are "
+            + "read correctly",
+        String.format(BoundedAppender.TRUNCATED_MESSAGES_TEMPLATE, 3, 7, "efg"),
+        boundedAppender.toString());
+
+    boundedAppender.append("hijkl");
+
+    assertEquals(
+        "last value appended above limit postfix is read correctly", String
+            .format(BoundedAppender.TRUNCATED_MESSAGES_TEMPLATE, 3, 12, "jkl"),
+        boundedAppender.toString());
+  }
+}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hadoop/blob/cc23514a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/attempt/RMAppAttemptImpl.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/attempt/RMAppAttemptImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/attempt/RMAppAttemptImpl.java
index d748860..65412df 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/attempt/RMAppAttemptImpl.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/attempt/RMAppAttemptImpl.java
@@ -38,7 +38,6 @@ import java.util.concurrent.locks.ReentrantReadWriteLock.WriteLock;
 
 import javax.crypto.SecretKey;
 
-import com.google.common.base.Preconditions;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.classification.InterfaceAudience;
@@ -110,6 +109,7 @@ import org.apache.hadoop.yarn.state.MultipleArcTransition;
 import org.apache.hadoop.yarn.state.SingleArcTransition;
 import org.apache.hadoop.yarn.state.StateMachine;
 import org.apache.hadoop.yarn.state.StateMachineFactory;
+import org.apache.hadoop.yarn.util.BoundedAppender;
 import org.apache.hadoop.yarn.webapp.util.WebAppUtils;
 
 import com.google.common.annotations.VisibleForTesting;
@@ -1326,7 +1326,7 @@ public class RMAppAttemptImpl implements RMAppAttempt, Recoverable {
     // AFTER the initial saving on app-attempt-start
     // These fields can be visible from outside only after they are saved in
     // StateStore
-    BoundedAppender diags = new BoundedAppender(diagnostics.limit);
+    BoundedAppender diags = new BoundedAppender(diagnostics.getLimit());
 
     // don't leave the tracking URL pointing to a non-existent AM
     if (conf.getBoolean(YarnConfiguration.APPLICATION_HISTORY_ENABLED,
@@ -2295,114 +2295,4 @@ public class RMAppAttemptImpl implements RMAppAttempt, Recoverable {
     return Collections.EMPTY_SET;
   }
 
-  /**
-   * A {@link CharSequence} appender that considers its {@link #limit} as upper
-   * bound.
-   * <p>
-   * When {@link #limit} would be reached on append, past messages will be
-   * truncated from head, and a header telling the user about truncation will be
-   * prepended, with ellipses in between header and messages.
-   * <p>
-   * Note that header and ellipses are not counted against {@link #limit}.
-   * <p>
-   * An example:
-   *
-   * <pre>
-   * {@code
-   *   // At the beginning it's an empty string
-   *   final Appendable shortAppender = new BoundedAppender(80);
-   *   // The whole message fits into limit
-   *   shortAppender.append(
-   *       "message1 this is a very long message but fitting into limit\n");
-   *   // The first message is truncated, the second not
-   *   shortAppender.append("message2 this is shorter than the previous one\n");
-   *   // The first message is deleted, the second truncated, the third
-   *   // preserved
-   *   shortAppender.append("message3 this is even shorter message, maybe.\n");
-   *   // The first two are deleted, the third one truncated, the last preserved
-   *   shortAppender.append("message4 the shortest one, yet the greatest :)");
-   *   // Current contents are like this:
-   *   // Diagnostic messages truncated, showing last 80 chars out of 199:
-   *   // ...s is even shorter message, maybe.
-   *   // message4 the shortest one, yet the greatest :)
-   * }
-   * </pre>
-   * <p>
-   * Note that <tt>null</tt> values are {@link #append(CharSequence) append}ed
-   * just like in {@link StringBuilder#append(CharSequence) original
-   * implementation}.
-   * <p>
-   * Note that this class is not thread safe.
-   */
-  @VisibleForTesting
-  static class BoundedAppender {
-    @VisibleForTesting
-    static final String TRUNCATED_MESSAGES_TEMPLATE =
-        "Diagnostic messages truncated, showing last "
-            + "%d chars out of %d:%n...%s";
-
-    private final int limit;
-    private final StringBuilder messages = new StringBuilder();
-    private int totalCharacterCount = 0;
-
-    BoundedAppender(final int limit) {
-      Preconditions.checkArgument(limit > 0, "limit should be positive");
-
-      this.limit = limit;
-    }
-
-    /**
-     * Append a {@link CharSequence} considering {@link #limit}, truncating
-     * from the head of {@code csq} or {@link #messages} when necessary.
-     *
-     * @param csq the {@link CharSequence} to append
-     * @return this
-     */
-    BoundedAppender append(final CharSequence csq) {
-      appendAndCount(csq);
-      checkAndCut();
-
-      return this;
-    }
-
-    private void appendAndCount(final CharSequence csq) {
-      final int before = messages.length();
-      messages.append(csq);
-      final int after = messages.length();
-      totalCharacterCount += after - before;
-    }
-
-    private void checkAndCut() {
-      if (messages.length() > limit) {
-        final int newStart = messages.length() - limit;
-        messages.delete(0, newStart);
-      }
-    }
-
-    /**
-     * Get current length of messages considering truncates
-     * without header and ellipses.
-     *
-     * @return current length
-     */
-    int length() {
-      return messages.length();
-    }
-
-    /**
-     * Get a string representation of the actual contents, displaying also a
-     * header and ellipses when there was a truncate.
-     *
-     * @return String representation of the {@link #messages}
-     */
-    @Override
-    public String toString() {
-      if (messages.length() < totalCharacterCount) {
-        return String.format(TRUNCATED_MESSAGES_TEMPLATE, messages.length(),
-            totalCharacterCount, messages.toString());
-      }
-
-      return messages.toString();
-    }
-  }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/cc23514a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/attempt/TestBoundedAppender.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/attempt/TestBoundedAppender.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/attempt/TestBoundedAppender.java
deleted file mode 100644
index 9cb1e04..0000000
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/attempt/TestBoundedAppender.java
+++ /dev/null
@@ -1,116 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt;
-
-import org.junit.Rule;
-import org.junit.Test;
-import org.junit.rules.ExpectedException;
-
-import static org.junit.Assert.assertEquals;
-import static org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.RMAppAttemptImpl.BoundedAppender;
-
-/**
- * Test class for {@link BoundedAppender}.
- */
-public class TestBoundedAppender {
-  @Rule
-  public ExpectedException expected = ExpectedException.none();
-
-  @Test
-  public void initWithZeroLimitThrowsException() {
-    expected.expect(IllegalArgumentException.class);
-    expected.expectMessage("limit should be positive");
-
-    new BoundedAppender(0);
-  }
-
-  @Test
-  public void nullAppendedNullStringRead() {
-    final BoundedAppender boundedAppender = new BoundedAppender(4);
-    boundedAppender.append(null);
-
-    assertEquals("null appended, \"null\" read", "null",
-        boundedAppender.toString());
-  }
-
-  @Test
-  public void appendBelowLimitOnceValueIsReadCorrectly() {
-    final BoundedAppender boundedAppender = new BoundedAppender(2);
-
-    boundedAppender.append("ab");
-
-    assertEquals("value appended is read correctly", "ab",
-        boundedAppender.toString());
-  }
-
-  @Test
-  public void appendValuesBelowLimitAreReadCorrectlyInFifoOrder() {
-    final BoundedAppender boundedAppender = new BoundedAppender(3);
-
-    boundedAppender.append("ab");
-    boundedAppender.append("cd");
-    boundedAppender.append("e");
-    boundedAppender.append("fg");
-
-    assertEquals("last values appended fitting limit are read correctly",
-        String.format(BoundedAppender.TRUNCATED_MESSAGES_TEMPLATE, 3, 7, "efg"),
-        boundedAppender.toString());
-  }
-
-  @Test
-  public void appendLastAboveLimitPreservesLastMessagePostfix() {
-    final BoundedAppender boundedAppender = new BoundedAppender(3);
-
-    boundedAppender.append("ab");
-    boundedAppender.append("cde");
-    boundedAppender.append("fghij");
-
-    assertEquals(
-        "last value appended above limit postfix is read correctly", String
-            .format(BoundedAppender.TRUNCATED_MESSAGES_TEMPLATE, 3, 10, "hij"),
-        boundedAppender.toString());
-  }
-
-  @Test
-  public void appendMiddleAboveLimitPreservesLastMessageAndMiddlePostfix() {
-    final BoundedAppender boundedAppender = new BoundedAppender(3);
-
-    boundedAppender.append("ab");
-    boundedAppender.append("cde");
-
-    assertEquals("last value appended above limit postfix is read correctly",
-        String.format(BoundedAppender.TRUNCATED_MESSAGES_TEMPLATE, 3, 5, "cde"),
-        boundedAppender.toString());
-
-    boundedAppender.append("fg");
-
-    assertEquals(
-        "middle value appended above limit postfix and last value are "
-            + "read correctly",
-        String.format(BoundedAppender.TRUNCATED_MESSAGES_TEMPLATE, 3, 7, "efg"),
-        boundedAppender.toString());
-
-    boundedAppender.append("hijkl");
-
-    assertEquals(
-        "last value appended above limit postfix is read correctly", String
-            .format(BoundedAppender.TRUNCATED_MESSAGES_TEMPLATE, 3, 12, "jkl"),
-        boundedAppender.toString());
-  }
-}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hadoop/blob/cc23514a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/attempt/TestRMAppAttemptImplDiagnostics.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/attempt/TestRMAppAttemptImplDiagnostics.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/attempt/TestRMAppAttemptImplDiagnostics.java
index 19b5dd9..295b59f 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/attempt/TestRMAppAttemptImplDiagnostics.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/attempt/TestRMAppAttemptImplDiagnostics.java
@@ -26,6 +26,7 @@ import org.apache.hadoop.yarn.conf.YarnConfiguration;
 import org.apache.hadoop.yarn.event.Dispatcher;
 import org.apache.hadoop.yarn.exceptions.YarnRuntimeException;
 import org.apache.hadoop.yarn.server.resourcemanager.RMContext;
+import org.apache.hadoop.yarn.util.BoundedAppender;
 import org.junit.Rule;
 import org.junit.Test;
 import org.junit.rules.ExpectedException;
@@ -86,7 +87,7 @@ public class TestRMAppAttemptImplDiagnostics {
     appAttempt.appendDiagnostics(beyondLimit);
 
     final String truncated = String.format(
-        RMAppAttemptImpl.BoundedAppender.TRUNCATED_MESSAGES_TEMPLATE, 1024,
+        BoundedAppender.TRUNCATED_MESSAGES_TEMPLATE, 1024,
         1025, beyondLimit.substring(1));
 
     assertEquals("messages beyond limit should be truncated", truncated,




[07/50] [abbrv] hadoop git commit: YARN-6146. Add Builder methods for TimelineEntityFilters (Haibo Chen via Varun Saxena)

Posted by st...@apache.org.
http://git-wip-us.apache.org/repos/asf/hadoop/blob/b87b72b4/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/reader/TimelineReaderWebServicesUtils.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/reader/TimelineReaderWebServicesUtils.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/reader/TimelineReaderWebServicesUtils.java
index 4a9e53e..4d3e769 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/reader/TimelineReaderWebServicesUtils.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/reader/TimelineReaderWebServicesUtils.java
@@ -76,12 +76,44 @@ final class TimelineReaderWebServicesUtils {
       String isRelatedTo, String infofilters, String conffilters,
       String metricfilters, String eventfilters,
       String fromid) throws TimelineParseException {
-    return new TimelineEntityFilters(parseLongStr(limit),
-        parseLongStr(createdTimeStart), parseLongStr(createdTimeEnd),
-        parseRelationFilters(relatesTo), parseRelationFilters(isRelatedTo),
-        parseKVFilters(infofilters, false), parseKVFilters(conffilters, true),
-        parseMetricFilters(metricfilters), parseEventFilters(eventfilters),
-        parseStr(fromid));
+    return createTimelineEntityFilters(
+        limit, parseLongStr(createdTimeStart),
+        parseLongStr(createdTimeEnd),
+        relatesTo, isRelatedTo, infofilters,
+        conffilters, metricfilters, eventfilters, fromid);
+  }
+
+  /**
+   * Parse the passed filters represented as strings and convert them into a
+   * {@link TimelineEntityFilters} object.
+   * @param limit Limit to number of entities to return.
+   * @param createdTimeStart Created time start for the entities to return.
+   * @param createdTimeEnd Created time end for the entities to return.
+   * @param relatesTo Entities to return must match relatesTo.
+   * @param isRelatedTo Entities to return must match isRelatedTo.
+   * @param infofilters Entities to return must match these info filters.
+   * @param conffilters Entities to return must match these config filters.
+   * @param metricfilters Entities to return must match these metric filters.
+   * @param eventfilters Entities to return must match these event filters.
+   * @param fromid Entity id from which to fetch the next set of entities,
+   *          used for pagination.
+   * @return a {@link TimelineEntityFilters} object.
+   * @throws TimelineParseException if any problem occurs during parsing.
+   */
+  static TimelineEntityFilters createTimelineEntityFilters(String limit,
+      Long createdTimeStart, Long createdTimeEnd, String relatesTo,
+      String isRelatedTo, String infofilters, String conffilters,
+      String metricfilters, String eventfilters,
+      String fromid) throws TimelineParseException {
+    return new TimelineEntityFilters.Builder()
+        .entityLimit(parseLongStr(limit))
+        .createdTimeBegin(createdTimeStart)
+        .createTimeEnd(createdTimeEnd)
+        .relatesTo(parseRelationFilters(relatesTo))
+        .isRelatedTo(parseRelationFilters(isRelatedTo))
+        .infoFilters(parseKVFilters(infofilters, false))
+        .configFilters(parseKVFilters(conffilters, true))
+        .metricFilters(parseMetricFilters(metricfilters))
+        .eventFilters(parseEventFilters(eventfilters))
+        .fromId(parseStr(fromid)).build();
   }
 
   /**

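For contrast with the old positional constructor and its rows of nulls, a
minimal sketch of a builder-based call site; the limit and time window are
hypothetical values:

    import org.apache.hadoop.yarn.server.timelineservice.reader.TimelineEntityFilters;

    public class EntityFiltersBuilderSketch {
      public static void main(String[] args) {
        // Only the filters the caller cares about are set; everything else
        // keeps its default instead of being passed as null.
        TimelineEntityFilters filters = new TimelineEntityFilters.Builder()
            .entityLimit(2L)                   // hypothetical limit
            .createdTimeBegin(1425016502030L)  // hypothetical window start
            .createTimeEnd(1425016502060L)     // hypothetical window end
            .build();
        System.out.println(filters);
      }
    }
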
http://git-wip-us.apache.org/repos/asf/hadoop/blob/b87b72b4/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/test/java/org/apache/hadoop/yarn/server/timelineservice/storage/TestFileSystemTimelineReaderImpl.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/test/java/org/apache/hadoop/yarn/server/timelineservice/storage/TestFileSystemTimelineReaderImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/test/java/org/apache/hadoop/yarn/server/timelineservice/storage/TestFileSystemTimelineReaderImpl.java
index 35af169..1bc66db 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/test/java/org/apache/hadoop/yarn/server/timelineservice/storage/TestFileSystemTimelineReaderImpl.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/test/java/org/apache/hadoop/yarn/server/timelineservice/storage/TestFileSystemTimelineReaderImpl.java
@@ -398,7 +398,7 @@ public class TestFileSystemTimelineReaderImpl {
   public void testGetAllEntities() throws Exception {
     Set<TimelineEntity> result = reader.getEntities(
         new TimelineReaderContext("cluster1", "user1", "flow1", 1L, "app1",
-        "app", null), new TimelineEntityFilters(),
+        "app", null), new TimelineEntityFilters.Builder().build(),
         new TimelineDataToRetrieve(null, null, EnumSet.of(Field.ALL), null));
     // All 4 entities will be returned
     Assert.assertEquals(4, result.size());
@@ -409,8 +409,8 @@ public class TestFileSystemTimelineReaderImpl {
     Set<TimelineEntity> result = reader.getEntities(
         new TimelineReaderContext("cluster1", "user1", "flow1", 1L, "app1",
         "app", null),
-        new TimelineEntityFilters(2L, null, null, null, null, null, null,
-        null, null), new TimelineDataToRetrieve());
+        new TimelineEntityFilters.Builder().entityLimit(2L).build(),
+        new TimelineDataToRetrieve());
     Assert.assertEquals(2, result.size());
     // Needs to be rewritten once hashcode and equals for
     // TimelineEntity is implemented
@@ -424,8 +424,8 @@ public class TestFileSystemTimelineReaderImpl {
     result = reader.getEntities(
         new TimelineReaderContext("cluster1", "user1", "flow1", 1L, "app1",
         "app", null),
-        new TimelineEntityFilters(3L, null, null, null, null, null, null,
-        null, null), new TimelineDataToRetrieve());
+        new TimelineEntityFilters.Builder().entityLimit(3L).build(),
+        new TimelineDataToRetrieve());
     // Even though 2 entities out of 4 have same created time, one entity
     // is left out due to limit
     Assert.assertEquals(3, result.size());
@@ -437,8 +437,8 @@ public class TestFileSystemTimelineReaderImpl {
     Set<TimelineEntity> result = reader.getEntities(
         new TimelineReaderContext("cluster1", "user1", "flow1", 1L, "app1",
         "app", null),
-        new TimelineEntityFilters(null, 1425016502030L, 1425016502060L, null,
-        null, null, null, null, null),
+        new TimelineEntityFilters.Builder().createdTimeBegin(1425016502030L)
+            .createTimeEnd(1425016502060L).build(),
         new TimelineDataToRetrieve());
     Assert.assertEquals(1, result.size());
     // Only one entity with ID id_4 should be returned.
@@ -452,9 +452,9 @@ public class TestFileSystemTimelineReaderImpl {
     result = reader.getEntities(
         new TimelineReaderContext("cluster1", "user1", "flow1", 1L, "app1",
             "app", null),
-            new TimelineEntityFilters(null, null, 1425016502010L, null, null,
-            null, null, null, null),
-            new TimelineDataToRetrieve());
+        new TimelineEntityFilters.Builder().createTimeEnd(1425016502010L)
+            .build(),
+        new TimelineDataToRetrieve());
     Assert.assertEquals(3, result.size());
     for (TimelineEntity entity : result) {
       if (entity.getId().equals("id_4")) {
@@ -466,8 +466,8 @@ public class TestFileSystemTimelineReaderImpl {
     result = reader.getEntities(
         new TimelineReaderContext("cluster1", "user1", "flow1", 1L, "app1",
         "app", null),
-        new TimelineEntityFilters(null, 1425016502010L, null, null, null,
-        null, null, null, null),
+        new TimelineEntityFilters.Builder().createdTimeBegin(1425016502010L)
+            .build(),
         new TimelineDataToRetrieve());
     Assert.assertEquals(1, result.size());
     for (TimelineEntity entity : result) {
@@ -486,8 +486,7 @@ public class TestFileSystemTimelineReaderImpl {
     Set<TimelineEntity> result = reader.getEntities(
         new TimelineReaderContext("cluster1", "user1", "flow1", 1L, "app1",
         "app", null),
-        new TimelineEntityFilters(null, null, null, null, null, infoFilterList,
-        null, null, null),
+        new TimelineEntityFilters.Builder().infoFilters(infoFilterList).build(),
         new TimelineDataToRetrieve());
     Assert.assertEquals(1, result.size());
     // Only one entity with ID id_3 should be returned.
@@ -506,8 +505,8 @@ public class TestFileSystemTimelineReaderImpl {
     result = reader.getEntities(
         new TimelineReaderContext("cluster1", "user1", "flow1", 1L, "app1",
         "app", null),
-        new TimelineEntityFilters(null, null, null, null, null, null,
-        confFilterList, null, null),
+        new TimelineEntityFilters.Builder().configFilters(confFilterList)
+            .build(),
         new TimelineDataToRetrieve());
     Assert.assertEquals(1, result.size());
     for (TimelineEntity entity : result) {
@@ -525,8 +524,7 @@ public class TestFileSystemTimelineReaderImpl {
     result = reader.getEntities(
         new TimelineReaderContext("cluster1", "user1", "flow1", 1L, "app1",
         "app", null),
-        new TimelineEntityFilters(null, null, null, null, null, null, null,
-        null, eventFilters),
+        new TimelineEntityFilters.Builder().eventFilters(eventFilters).build(),
         new TimelineDataToRetrieve());
     Assert.assertEquals(1, result.size());
     for (TimelineEntity entity : result) {
@@ -542,8 +540,8 @@ public class TestFileSystemTimelineReaderImpl {
     result = reader.getEntities(
         new TimelineReaderContext("cluster1", "user1", "flow1", 1L, "app1",
         "app", null),
-        new TimelineEntityFilters(null, null, null, null, null, null, null,
-        metricFilterList, null),
+        new TimelineEntityFilters.Builder().metricFilters(metricFilterList)
+            .build(),
         new TimelineDataToRetrieve());
     Assert.assertEquals(2, result.size());
     // Two entities with IDs' id_1 and id_2 should be returned.
@@ -569,8 +567,8 @@ public class TestFileSystemTimelineReaderImpl {
     result = reader.getEntities(
         new TimelineReaderContext("cluster1", "user1", "flow1", 1L, "app1",
         "app", null),
-        new TimelineEntityFilters(null, null, null, null, null, null,
-        confFilterList1, null, null),
+        new TimelineEntityFilters.Builder().configFilters(confFilterList1)
+            .build(),
         new TimelineDataToRetrieve());
     Assert.assertEquals(2, result.size());
     for (TimelineEntity entity : result) {
@@ -592,8 +590,8 @@ public class TestFileSystemTimelineReaderImpl {
     result = reader.getEntities(
         new TimelineReaderContext("cluster1", "user1", "flow1", 1L, "app1",
         "app", null),
-        new TimelineEntityFilters(null, null, null, null, null, null,
-        confFilterList2, null, null),
+        new TimelineEntityFilters.Builder().configFilters(confFilterList2)
+            .build(),
         new TimelineDataToRetrieve());
     Assert.assertEquals(2, result.size());
     for (TimelineEntity entity : result) {
@@ -610,8 +608,8 @@ public class TestFileSystemTimelineReaderImpl {
     result = reader.getEntities(
         new TimelineReaderContext("cluster1", "user1", "flow1", 1L, "app1",
         "app", null),
-        new TimelineEntityFilters(null, null, null, null, null, null,
-        confFilterList3, null, null),
+        new TimelineEntityFilters.Builder().configFilters(confFilterList3)
+            .build(),
         new TimelineDataToRetrieve());
     Assert.assertEquals(1, result.size());
     for(TimelineEntity entity : result) {
@@ -628,8 +626,8 @@ public class TestFileSystemTimelineReaderImpl {
     result = reader.getEntities(
         new TimelineReaderContext("cluster1", "user1", "flow1", 1L, "app1",
         "app", null),
-        new TimelineEntityFilters(null, null, null, null, null, null,
-        confFilterList4, null, null),
+        new TimelineEntityFilters.Builder().configFilters(confFilterList4)
+            .build(),
         new TimelineDataToRetrieve());
     Assert.assertEquals(0, result.size());
 
@@ -641,8 +639,8 @@ public class TestFileSystemTimelineReaderImpl {
     result = reader.getEntities(
         new TimelineReaderContext("cluster1", "user1", "flow1", 1L, "app1",
         "app", null),
-        new TimelineEntityFilters(null, null, null, null, null, null,
-        confFilterList5, null, null),
+        new TimelineEntityFilters.Builder().configFilters(confFilterList5)
+            .build(),
         new TimelineDataToRetrieve());
     Assert.assertEquals(1, result.size());
     for (TimelineEntity entity : result) {
@@ -665,8 +663,8 @@ public class TestFileSystemTimelineReaderImpl {
     result = reader.getEntities(
         new TimelineReaderContext("cluster1", "user1", "flow1", 1L, "app1",
         "app", null),
-        new TimelineEntityFilters(null, null, null, null, null, null, null,
-        metricFilterList1, null),
+        new TimelineEntityFilters.Builder().metricFilters(metricFilterList1)
+            .build(),
         new TimelineDataToRetrieve());
     Assert.assertEquals(2, result.size());
     // Two entities with IDs' id_2 and id_3 should be returned.
@@ -684,8 +682,8 @@ public class TestFileSystemTimelineReaderImpl {
     result = reader.getEntities(
         new TimelineReaderContext("cluster1", "user1", "flow1", 1L, "app1",
         "app", null),
-        new TimelineEntityFilters(null, null, null, null, null, null, null,
-        metricFilterList2, null),
+        new TimelineEntityFilters.Builder().metricFilters(metricFilterList2)
+            .build(),
         new TimelineDataToRetrieve());
     Assert.assertEquals(1, result.size());
     for (TimelineEntity entity : result) {
@@ -702,8 +700,8 @@ public class TestFileSystemTimelineReaderImpl {
     result = reader.getEntities(
         new TimelineReaderContext("cluster1", "user1", "flow1", 1L, "app1",
         "app", null),
-        new TimelineEntityFilters(null, null, null, null, null, null, null,
-        metricFilterList3, null),
+        new TimelineEntityFilters.Builder().metricFilters(metricFilterList3)
+            .build(),
         new TimelineDataToRetrieve());
     Assert.assertEquals(0, result.size());
 
@@ -715,8 +713,8 @@ public class TestFileSystemTimelineReaderImpl {
     result = reader.getEntities(
         new TimelineReaderContext("cluster1", "user1", "flow1", 1L, "app1",
         "app", null),
-        new TimelineEntityFilters(null, null, null, null, null, null, null,
-        metricFilterList4, null),
+        new TimelineEntityFilters.Builder().metricFilters(metricFilterList4)
+            .build(),
         new TimelineDataToRetrieve());
     Assert.assertEquals(2, result.size());
     for (TimelineEntity entity : result) {
@@ -731,8 +729,8 @@ public class TestFileSystemTimelineReaderImpl {
     result = reader.getEntities(
         new TimelineReaderContext("cluster1", "user1", "flow1", 1L, "app1",
         "app", null),
-        new TimelineEntityFilters(null, null, null, null, null, null, null,
-        metricFilterList5, null),
+        new TimelineEntityFilters.Builder().metricFilters(metricFilterList5)
+            .build(),
         new TimelineDataToRetrieve());
     Assert.assertEquals(2, result.size());
     for (TimelineEntity entity : result) {
@@ -749,8 +747,8 @@ public class TestFileSystemTimelineReaderImpl {
     result = reader.getEntities(
         new TimelineReaderContext("cluster1", "user1", "flow1", 1L, "app1",
         "app", null),
-        new TimelineEntityFilters(null, null, null, null, null, infoFilterList1,
-        null, null, null),
+        new TimelineEntityFilters.Builder().infoFilters(infoFilterList1)
+            .build(),
         new TimelineDataToRetrieve());
     Assert.assertEquals(0, result.size());
 
@@ -762,8 +760,8 @@ public class TestFileSystemTimelineReaderImpl {
     result = reader.getEntities(
         new TimelineReaderContext("cluster1", "user1", "flow1", 1L, "app1",
         "app", null),
-        new TimelineEntityFilters(null, null, null, null, null, infoFilterList2,
-        null, null, null),
+        new TimelineEntityFilters.Builder().infoFilters(infoFilterList2)
+            .build(),
         new TimelineDataToRetrieve());
     Assert.assertEquals(2, result.size());
     for (TimelineEntity entity : result) {
@@ -780,8 +778,8 @@ public class TestFileSystemTimelineReaderImpl {
     result = reader.getEntities(
         new TimelineReaderContext("cluster1", "user1", "flow1", 1L, "app1",
         "app", null),
-        new TimelineEntityFilters(null, null, null, null, null, infoFilterList3,
-        null, null, null),
+        new TimelineEntityFilters.Builder().infoFilters(infoFilterList3)
+            .build(),
         new TimelineDataToRetrieve());
     Assert.assertEquals(0, result.size());
 
@@ -793,8 +791,8 @@ public class TestFileSystemTimelineReaderImpl {
     result = reader.getEntities(
         new TimelineReaderContext("cluster1", "user1", "flow1", 1L, "app1",
         "app", null),
-        new TimelineEntityFilters(null, null, null, null, null, infoFilterList4,
-        null, null, null),
+        new TimelineEntityFilters.Builder().infoFilters(infoFilterList4)
+            .build(),
         new TimelineDataToRetrieve());
     Assert.assertEquals(1, result.size());
     for (TimelineEntity entity : result) {
@@ -815,8 +813,7 @@ public class TestFileSystemTimelineReaderImpl {
     Set<TimelineEntity> result = reader.getEntities(
         new TimelineReaderContext("cluster1", "user1", "flow1", 1L, "app1",
         "app", null),
-        new TimelineEntityFilters(null, null, null, relatesTo, null, null,
-        null, null, null),
+        new TimelineEntityFilters.Builder().relatesTo(relatesTo).build(),
         new TimelineDataToRetrieve());
     Assert.assertEquals(1, result.size());
     // Only one entity with ID id_1 should be returned.
@@ -835,8 +832,7 @@ public class TestFileSystemTimelineReaderImpl {
     result = reader.getEntities(
         new TimelineReaderContext("cluster1", "user1", "flow1", 1L, "app1",
         "app", null),
-        new TimelineEntityFilters(null, null, null, null, isRelatedTo, null,
-        null, null, null),
+        new TimelineEntityFilters.Builder().isRelatedTo(isRelatedTo).build(),
         new TimelineDataToRetrieve());
     Assert.assertEquals(2, result.size());
     // Two entities with IDs' id_1 and id_3 should be returned.
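
The hunks above all make the same mechanical change: the old nine-argument
TimelineEntityFilters constructor, invoked with long runs of nulls, gives way
to a builder on which each test names only the filters it actually uses. A
minimal, self-contained sketch of the idiom (Filters and its String fields
are hypothetical stand-ins, not the real YARN types):

    // Sketch of the builder idiom adopted above; stand-in types only.
    final class Filters {
      private final String configFilters;  // stands in for a filter list
      private final String eventFilters;   // stands in for a filter list
      private Filters(Builder b) {
        this.configFilters = b.configFilters;
        this.eventFilters = b.eventFilters;
      }
      static final class Builder {
        private String configFilters;
        private String eventFilters;
        Builder configFilters(String f) { this.configFilters = f; return this; }
        Builder eventFilters(String f) { this.eventFilters = f; return this; }
        Filters build() { return new Filters(this); }
      }
      // Usage mirrors the test changes: unset filters simply remain null.
      public static void main(String[] args) {
        Filters f = new Filters.Builder().configFilters("cfg").build();
        System.out.println(f.configFilters + " / " + f.eventFilters);
      }
    }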




[19/50] [abbrv] hadoop git commit: YARN-7006. [ATSv2 Security] Changes for authentication for CollectorNodemanagerProtocol. Contributed by Varun Saxena

Posted by st...@apache.org.
YARN-7006. [ATSv2 Security] Changes for authentication for CollectorNodemanagerProtocol. Contributed by Varun Saxena


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/b6645695
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/b6645695
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/b6645695

Branch: refs/heads/HADOOP-13345
Commit: b664569586db39647f15340ce82ccc0f0869897e
Parents: d5ff965
Author: Jian He <ji...@apache.org>
Authored: Wed Aug 16 11:01:06 2017 -0700
Committer: Varun Saxena <va...@apache.org>
Committed: Wed Aug 30 11:29:53 2017 +0530

----------------------------------------------------------------------
 .../collectormanager/NMCollectorService.java    |  7 +-
 .../containermanager/AuxServices.java           |  3 +-
 .../timelineservice/NMTimelinePublisher.java    | 29 ++++++--
 .../CollectorNodemanagerSecurityInfo.java       | 69 ++++++++++++++++++++
 .../org.apache.hadoop.security.SecurityInfo     | 14 ++++
 5 files changed, 112 insertions(+), 10 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/b6645695/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/collectormanager/NMCollectorService.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/collectormanager/NMCollectorService.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/collectormanager/NMCollectorService.java
index a5ffc74..7db6d70 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/collectormanager/NMCollectorService.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/collectormanager/NMCollectorService.java
@@ -73,13 +73,13 @@ public class NMCollectorService extends CompositeService implements
 
     Configuration serverConf = new Configuration(conf);
 
-    // TODO Security settings.
     YarnRPC rpc = YarnRPC.create(conf);
 
+    // Kerberos based authentication to be used for CollectorNodemanager
+    // protocol if security is enabled.
     server =
         rpc.getServer(CollectorNodemanagerProtocol.class, this,
-            collectorServerAddress, serverConf,
-            this.context.getNMTokenSecretManager(),
+            collectorServerAddress, serverConf, null,
             conf.getInt(YarnConfiguration.NM_COLLECTOR_SERVICE_THREAD_COUNT,
                 YarnConfiguration.DEFAULT_NM_COLLECTOR_SERVICE_THREAD_COUNT));
 
@@ -94,7 +94,6 @@ public class NMCollectorService extends CompositeService implements
     LOG.info("NMCollectorService started at " + collectorServerAddress);
   }
 
-
   @Override
   public void serviceStop() throws Exception {
     if (server != null) {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/b6645695/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/AuxServices.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/AuxServices.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/AuxServices.java
index 2efc932..5e0f293 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/AuxServices.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/AuxServices.java
@@ -244,7 +244,8 @@ public class AuxServices extends AbstractService
         for (AuxiliaryService serv : serviceMap.values()) {
           try {
             serv.initializeContainer(new ContainerInitializationContext(
-                event.getUser(), event.getContainer().getContainerId(),
+                event.getContainer().getUser(),
+                event.getContainer().getContainerId(),
                 event.getContainer().getResource(), event.getContainer()
                 .getContainerTokenIdentifier().getContainerType()));
           } catch (Throwable th) {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/b6645695/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/timelineservice/NMTimelinePublisher.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/timelineservice/NMTimelinePublisher.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/timelineservice/NMTimelinePublisher.java
index c2ac5dc..34eddf7 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/timelineservice/NMTimelinePublisher.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/timelineservice/NMTimelinePublisher.java
@@ -19,6 +19,7 @@
 package org.apache.hadoop.yarn.server.nodemanager.timelineservice;
 
 import java.io.IOException;
+import java.security.PrivilegedExceptionAction;
 import java.util.HashMap;
 import java.util.Map;
 import java.util.concurrent.ConcurrentHashMap;
@@ -26,6 +27,7 @@ import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.service.CompositeService;
 import org.apache.hadoop.yarn.api.records.ApplicationId;
 import org.apache.hadoop.yarn.api.records.ContainerId;
@@ -78,6 +80,8 @@ public class NMTimelinePublisher extends CompositeService {
 
   private String httpAddress;
 
+  private UserGroupInformation nmLoginUGI;
+
   private final Map<ApplicationId, TimelineV2Client> appToClientMap;
 
   public NMTimelinePublisher(Context context) {
@@ -92,6 +96,9 @@ public class NMTimelinePublisher extends CompositeService {
     dispatcher.register(NMTimelineEventType.class,
         new ForwardingEventHandler());
     addIfService(dispatcher);
+    this.nmLoginUGI =  UserGroupInformation.isSecurityEnabled() ?
+        UserGroupInformation.getLoginUser() :
+        UserGroupInformation.getCurrentUser();
     super.serviceInit(conf);
   }
 
@@ -399,11 +406,23 @@ public class NMTimelinePublisher extends CompositeService {
 
   public void createTimelineClient(ApplicationId appId) {
     if (!appToClientMap.containsKey(appId)) {
-      TimelineV2Client timelineClient =
-          TimelineV2Client.createTimelineClient(appId);
-      timelineClient.init(getConfig());
-      timelineClient.start();
-      appToClientMap.put(appId, timelineClient);
+      try {
+        TimelineV2Client timelineClient =
+            nmLoginUGI.doAs(new PrivilegedExceptionAction<TimelineV2Client>() {
+              @Override
+              public TimelineV2Client run() throws Exception {
+                TimelineV2Client timelineClient =
+                    TimelineV2Client.createTimelineClient(appId);
+                timelineClient.init(getConfig());
+                timelineClient.start();
+                return timelineClient;
+              }
+            });
+        appToClientMap.put(appId, timelineClient);
+      } catch (IOException | InterruptedException | RuntimeException |
+          Error e) {
+        LOG.warn("Unable to create timeline client for app " + appId, e);
+      }
     }
   }
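
The createTimelineClient() change above wraps client creation in a doAs so
that, on a secure cluster, the TimelineV2Client is started under the NM's
Kerberos login identity rather than whatever user happens to be current. A
self-contained sketch of that UserGroupInformation pattern (class name and
the printed value are illustrative only):

    import java.security.PrivilegedExceptionAction;
    import org.apache.hadoop.security.UserGroupInformation;

    // Sketch: run work as the login user when security is enabled,
    // mirroring the doAs pattern in the hunk above.
    public final class DoAsSketch {
      public static void main(String[] args) throws Exception {
        UserGroupInformation ugi = UserGroupInformation.isSecurityEnabled()
            ? UserGroupInformation.getLoginUser()    // Kerberos login identity
            : UserGroupInformation.getCurrentUser(); // simple-auth identity
        String user = ugi.doAs(new PrivilegedExceptionAction<String>() {
          @Override
          public String run() throws Exception {
            // Anything done here (client init/start, RPC) uses ugi's creds.
            return UserGroupInformation.getCurrentUser().getUserName();
          }
        });
        System.out.println(user);
      }
    }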
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/b6645695/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/security/CollectorNodemanagerSecurityInfo.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/security/CollectorNodemanagerSecurityInfo.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/security/CollectorNodemanagerSecurityInfo.java
new file mode 100644
index 0000000..0eb5ee5
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/security/CollectorNodemanagerSecurityInfo.java
@@ -0,0 +1,69 @@
+/**
+* Licensed to the Apache Software Foundation (ASF) under one
+* or more contributor license agreements.  See the NOTICE file
+* distributed with this work for additional information
+* regarding copyright ownership.  The ASF licenses this file
+* to you under the Apache License, Version 2.0 (the
+* "License"); you may not use this file except in compliance
+* with the License.  You may obtain a copy of the License at
+*
+*     http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+*/
+
+package org.apache.hadoop.yarn.server.timelineservice.security;
+
+import java.lang.annotation.Annotation;
+
+import org.apache.hadoop.classification.InterfaceAudience.Public;
+import org.apache.hadoop.classification.InterfaceStability.Evolving;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.security.KerberosInfo;
+import org.apache.hadoop.security.SecurityInfo;
+import org.apache.hadoop.security.token.TokenInfo;
+import org.apache.hadoop.yarn.conf.YarnConfiguration;
+import org.apache.hadoop.yarn.server.api.CollectorNodemanagerProtocolPB;
+
+/**
+ * SecurityInfo implementation for CollectorNodemanager protocol.
+ */
+@Public
+@Evolving
+public class CollectorNodemanagerSecurityInfo extends SecurityInfo {
+
+  @Override
+  public KerberosInfo getKerberosInfo(Class<?> protocol, Configuration conf) {
+    if (!protocol
+        .equals(CollectorNodemanagerProtocolPB.class)) {
+      return null;
+    }
+    return new KerberosInfo() {
+
+      @Override
+      public Class<? extends Annotation> annotationType() {
+        return null;
+      }
+
+      @Override
+      public String serverPrincipal() {
+        return YarnConfiguration.NM_PRINCIPAL;
+      }
+
+      @Override
+      public String clientPrincipal() {
+        return null;
+      }
+    };
+  }
+
+  @Override
+  public TokenInfo getTokenInfo(Class<?> protocol, Configuration conf) {
+    return null;
+  }
+}
+

http://git-wip-us.apache.org/repos/asf/hadoop/blob/b6645695/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/resources/META-INF/services/org.apache.hadoop.security.SecurityInfo
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/resources/META-INF/services/org.apache.hadoop.security.SecurityInfo b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/resources/META-INF/services/org.apache.hadoop.security.SecurityInfo
new file mode 100644
index 0000000..4389219
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/resources/META-INF/services/org.apache.hadoop.security.SecurityInfo
@@ -0,0 +1,14 @@
+#
+#   Licensed under the Apache License, Version 2.0 (the "License");
+#   you may not use this file except in compliance with the License.
+#   You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an "AS IS" BASIS,
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#   See the License for the specific language governing permissions and
+#   limitations under the License.
+#
+org.apache.hadoop.yarn.server.timelineservice.security.CollectorNodemanagerSecurityInfo
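
The one-line registration above is what makes the new SecurityInfo
discoverable: Hadoop's security layer locates providers through the standard
java.util.ServiceLoader mechanism (the real lookup lives in SecurityUtil).
A self-contained sketch of that discovery, with an illustrative method name:

    import java.util.ServiceLoader;

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.security.KerberosInfo;
    import org.apache.hadoop.security.SecurityInfo;

    // Sketch: iterate every SecurityInfo registered under
    // META-INF/services/org.apache.hadoop.security.SecurityInfo.
    public final class SecurityInfoLookupSketch {
      public static KerberosInfo find(Class<?> protocol, Configuration conf) {
        for (SecurityInfo info : ServiceLoader.load(SecurityInfo.class)) {
          KerberosInfo ki = info.getKerberosInfo(protocol, conf);
          if (ki != null) {
            return ki; // first provider that recognizes the protocol wins
          }
        }
        return null;   // no provider claims this protocol
      }
    }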




[16/50] [abbrv] hadoop git commit: YARN-6130. [ATSv2 Security] Generate a delegation token for AM when app collector is created and pass it to AM via NM and RM. Contributed by Varun Saxena.

Posted by st...@apache.org.
YARN-6130. [ATSv2 Security] Generate a delegation token for AM when app collector is created and pass it to AM via NM and RM. Contributed by Varun Saxena.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/7594d1de
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/7594d1de
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/7594d1de

Branch: refs/heads/HADOOP-13345
Commit: 7594d1de7bbc34cd2e64202095a5e1757154d7d0
Parents: 9f65405
Author: Rohith Sharma K S <ro...@apache.org>
Authored: Mon Jul 31 17:26:34 2017 +0530
Committer: Varun Saxena <va...@apache.org>
Committed: Wed Aug 30 11:29:53 2017 +0530

----------------------------------------------------------------------
 .../v2/app/rm/RMContainerAllocator.java         |   9 +-
 .../api/protocolrecords/AllocateResponse.java   |  32 ++--
 .../hadoop/yarn/api/records/CollectorInfo.java  |  55 +++++++
 .../src/main/proto/yarn_protos.proto            |   5 +
 .../src/main/proto/yarn_service_protos.proto    |   2 +-
 .../api/async/impl/AMRMClientAsyncImpl.java     |   6 +-
 .../ApplicationMasterServiceProtoTestBase.java  |  72 +++++++++
 .../hadoop/yarn/client/ProtocolHATestBase.java  |  20 ++-
 ...ationMasterServiceProtocolForTimelineV2.java |  71 +++++++++
 ...estApplicationMasterServiceProtocolOnHA.java |  46 +-----
 .../api/async/impl/TestAMRMClientAsync.java     |   2 +-
 .../impl/pb/AllocateResponsePBImpl.java         |  37 ++++-
 .../records/impl/pb/CollectorInfoPBImpl.java    | 148 +++++++++++++++++++
 .../hadoop/yarn/api/TestPBImplRecords.java      |   2 +
 .../ReportNewCollectorInfoRequest.java          |   5 +-
 .../impl/pb/NodeHeartbeatRequestPBImpl.java     |  25 +++-
 .../impl/pb/NodeHeartbeatResponsePBImpl.java    |  21 ++-
 .../pb/ReportNewCollectorInfoRequestPBImpl.java |   4 +-
 .../server/api/records/AppCollectorData.java    |  27 +++-
 .../records/impl/pb/AppCollectorDataPBImpl.java |  29 +++-
 .../yarn_server_common_service_protos.proto     |   2 +
 .../java/org/apache/hadoop/yarn/TestRPC.java    |  30 +++-
 .../hadoop/yarn/TestYarnServerApiClasses.java   |   4 +-
 .../nodemanager/NodeStatusUpdaterImpl.java      |   1 -
 .../application/ApplicationImpl.java            |   1 -
 .../ApplicationMasterService.java               |   2 -
 .../resourcemanager/DefaultAMSProcessor.java    |   8 +-
 .../server/resourcemanager/rmapp/RMApp.java     |  15 +-
 .../server/resourcemanager/rmapp/RMAppImpl.java |  10 +-
 .../applicationsmanager/MockAsm.java            |   6 +
 .../server/resourcemanager/rmapp/MockRMApp.java |   6 +
 .../TestTimelineServiceClientIntegration.java   |   2 +-
 .../security/TestTimelineAuthFilterForV2.java   | 121 +++++++++++----
 .../collector/AppLevelTimelineCollector.java    |  24 +++
 .../AppLevelTimelineCollectorWithAgg.java       |   4 +-
 .../collector/NodeTimelineCollectorManager.java |  83 +++++++++--
 .../PerNodeTimelineCollectorsAuxService.java    |   7 +-
 ...neV2DelegationTokenSecretManagerService.java |  31 ++++
 .../TestNMTimelineCollectorManager.java         |   4 +-
 39 files changed, 829 insertions(+), 150 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/7594d1de/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/rm/RMContainerAllocator.java
----------------------------------------------------------------------
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/rm/RMContainerAllocator.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/rm/RMContainerAllocator.java
index 0952797..969ec4c 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/rm/RMContainerAllocator.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/rm/RMContainerAllocator.java
@@ -878,13 +878,16 @@ public class RMContainerAllocator extends RMContainerRequestor
     handleUpdatedNodes(response);
     handleJobPriorityChange(response);
     // handle receiving the timeline collector address for this app
-    String collectorAddr = response.getCollectorAddr();
+    String collectorAddr = null;
+    if (response.getCollectorInfo() != null) {
+      collectorAddr = response.getCollectorInfo().getCollectorAddr();
+    }
+
     MRAppMaster.RunningAppContext appContext =
         (MRAppMaster.RunningAppContext)this.getContext();
     if (collectorAddr != null && !collectorAddr.isEmpty()
         && appContext.getTimelineV2Client() != null) {
-      appContext.getTimelineV2Client().setTimelineServiceAddress(
-          response.getCollectorAddr());
+      appContext.getTimelineV2Client().setTimelineServiceAddress(collectorAddr);
     }
 
     for (ContainerStatus cont : finishedContainers) {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/7594d1de/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/AllocateResponse.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/AllocateResponse.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/AllocateResponse.java
index d3ca765..9b254ae 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/AllocateResponse.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/AllocateResponse.java
@@ -27,6 +27,7 @@ import org.apache.hadoop.classification.InterfaceStability.Evolving;
 import org.apache.hadoop.classification.InterfaceStability.Stable;
 import org.apache.hadoop.classification.InterfaceStability.Unstable;
 import org.apache.hadoop.yarn.api.ApplicationMasterProtocol;
+import org.apache.hadoop.yarn.api.records.CollectorInfo;
 import org.apache.hadoop.yarn.api.records.AMCommand;
 import org.apache.hadoop.yarn.api.records.Container;
 import org.apache.hadoop.yarn.api.records.ContainerStatus;
@@ -92,21 +93,21 @@ public abstract class AllocateResponse {
         .preemptionMessage(preempt).nmTokens(nmTokens).build();
   }
 
-  @Public
+  @Private
   @Unstable
   public static AllocateResponse newInstance(int responseId,
       List<ContainerStatus> completedContainers,
       List<Container> allocatedContainers, List<NodeReport> updatedNodes,
       Resource availResources, AMCommand command, int numClusterNodes,
       PreemptionMessage preempt, List<NMToken> nmTokens,
-      List<UpdatedContainer> updatedContainers) {
+      CollectorInfo collectorInfo) {
     return AllocateResponse.newBuilder().numClusterNodes(numClusterNodes)
         .responseId(responseId)
         .completedContainersStatuses(completedContainers)
         .allocatedContainers(allocatedContainers).updatedNodes(updatedNodes)
         .availableResources(availResources).amCommand(command)
         .preemptionMessage(preempt).nmTokens(nmTokens)
-        .updatedContainers(updatedContainers).build();
+        .collectorInfo(collectorInfo).build();
   }
 
   @Private
@@ -133,7 +134,7 @@ public abstract class AllocateResponse {
       List<Container> allocatedContainers, List<NodeReport> updatedNodes,
       Resource availResources, AMCommand command, int numClusterNodes,
       PreemptionMessage preempt, List<NMToken> nmTokens, Token amRMToken,
-      List<UpdatedContainer> updatedContainers, String collectorAddr) {
+      List<UpdatedContainer> updatedContainers, CollectorInfo collectorInfo) {
     return AllocateResponse.newBuilder().numClusterNodes(numClusterNodes)
         .responseId(responseId)
         .completedContainersStatuses(completedContainers)
@@ -141,7 +142,7 @@ public abstract class AllocateResponse {
         .availableResources(availResources).amCommand(command)
         .preemptionMessage(preempt).nmTokens(nmTokens)
         .updatedContainers(updatedContainers).amRmToken(amRMToken)
-        .collectorAddr(collectorAddr).build();
+        .collectorInfo(collectorInfo).build();
   }
 
   /**
@@ -333,17 +334,18 @@ public abstract class AllocateResponse {
   public abstract void setApplicationPriority(Priority priority);
 
   /**
-   * The address of collector that belong to this app
+   * The data associated with the collector that belongs to this app. Contains
+   * address and token along with identification information.
    *
-   * @return The address of collector that belong to this attempt
+   * @return The data of the collector that belongs to this attempt
    */
   @Public
   @Unstable
-  public abstract String getCollectorAddr();
+  public abstract CollectorInfo getCollectorInfo();
 
   @Private
   @Unstable
-  public abstract void setCollectorAddr(String collectorAddr);
+  public abstract void setCollectorInfo(CollectorInfo info);
 
   /**
    * Get the list of container update errors to inform the
@@ -559,15 +561,17 @@ public abstract class AllocateResponse {
     }
 
     /**
-     * Set the <code>collectorAddr</code> of the response.
-     * @see AllocateResponse#setCollectorAddr(String)
-     * @param collectorAddr <code>collectorAddr</code> of the response
+     * Set the <code>collectorInfo</code> of the response.
+     * @see AllocateResponse#setCollectorInfo(CollectorInfo)
+     * @param collectorInfo <code>collectorInfo</code> of the response which
+     *    contains collector address, RM id, version and collector token.
      * @return {@link AllocateResponseBuilder}
      */
     @Private
     @Unstable
-    public AllocateResponseBuilder collectorAddr(String collectorAddr) {
-      allocateResponse.setCollectorAddr(collectorAddr);
+    public AllocateResponseBuilder collectorInfo(
+        CollectorInfo collectorInfo) {
+      allocateResponse.setCollectorInfo(collectorInfo);
       return this;
     }
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/7594d1de/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/CollectorInfo.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/CollectorInfo.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/CollectorInfo.java
new file mode 100644
index 0000000..960c992
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/CollectorInfo.java
@@ -0,0 +1,55 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.api.records;
+
+import org.apache.hadoop.classification.InterfaceStability;
+import org.apache.hadoop.classification.InterfaceAudience.Private;
+import org.apache.hadoop.yarn.util.Records;
+
+/**
+ * Collector info containing collector address and collector token passed from
+ * RM to AM in Allocate Response.
+ */
+@Private
+@InterfaceStability.Unstable
+public abstract class CollectorInfo {
+
+  protected static final long DEFAULT_TIMESTAMP_VALUE = -1;
+
+  public static CollectorInfo newInstance(String collectorAddr, Token token) {
+    CollectorInfo amCollectorInfo =
+        Records.newRecord(CollectorInfo.class);
+    amCollectorInfo.setCollectorAddr(collectorAddr);
+    amCollectorInfo.setCollectorToken(token);
+    return amCollectorInfo;
+  }
+
+  public abstract String getCollectorAddr();
+
+  public abstract void setCollectorAddr(String addr);
+
+  /**
+   * Get delegation token for app collector which AM will use to publish
+   * entities.
+   * @return the delegation token for app collector.
+   */
+  public abstract Token getCollectorToken();
+
+  public abstract void setCollectorToken(Token token);
+}
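
A short usage sketch of the new record's factory, assuming the YARN jars
from this commit are on the classpath (address and token bytes are
placeholders; Token.newInstance is the existing YARN records API):

    import org.apache.hadoop.yarn.api.records.CollectorInfo;
    import org.apache.hadoop.yarn.api.records.Token;

    // Sketch: build the record the RM hands to the AM in AllocateResponse.
    public final class CollectorInfoSketch {
      public static void main(String[] args) {
        Token token = Token.newInstance(
            new byte[] {0}, "TIMELINE", new byte[] {0}, "collector-service");
        CollectorInfo info = CollectorInfo.newInstance("host:port", token);
        System.out.println(info.getCollectorAddr()); // host:port
      }
    }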

http://git-wip-us.apache.org/repos/asf/hadoop/blob/7594d1de/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/proto/yarn_protos.proto
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/proto/yarn_protos.proto b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/proto/yarn_protos.proto
index 81ebd79..c5f485f 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/proto/yarn_protos.proto
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/proto/yarn_protos.proto
@@ -613,3 +613,8 @@ message StringBytesMapProto {
   optional string key = 1;
   optional bytes value = 2;
 }
+
+message CollectorInfoProto {
+  optional string collector_addr = 1;
+  optional hadoop.common.TokenProto collector_token = 2;
+}
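
For reference, a sketch of the Java API protoc generates for the new message
(standard generated-builder naming assumed; the token field is left unset):

    import org.apache.hadoop.yarn.proto.YarnProtos.CollectorInfoProto;

    // Sketch: optional fields get has*() accessors in the generated code.
    public final class CollectorInfoProtoSketch {
      public static void main(String[] args) {
        CollectorInfoProto p = CollectorInfoProto.newBuilder()
            .setCollectorAddr("host:port")
            .build();
        System.out.println(p.hasCollectorAddr()); // true
      }
    }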

http://git-wip-us.apache.org/repos/asf/hadoop/blob/7594d1de/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/proto/yarn_service_protos.proto
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/proto/yarn_service_protos.proto b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/proto/yarn_service_protos.proto
index b92c46e..7a7f035 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/proto/yarn_service_protos.proto
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/proto/yarn_service_protos.proto
@@ -112,7 +112,7 @@ message AllocateResponseProto {
   repeated NMTokenProto nm_tokens = 9;
   optional hadoop.common.TokenProto am_rm_token = 12;
   optional PriorityProto application_priority = 13;
-  optional string collector_addr = 14;
+  optional CollectorInfoProto collector_info = 14;
   repeated UpdateContainerErrorProto update_errors = 15;
   repeated UpdatedContainerProto updated_containers = 16;
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/7594d1de/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/api/async/impl/AMRMClientAsyncImpl.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/api/async/impl/AMRMClientAsyncImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/api/async/impl/AMRMClientAsyncImpl.java
index 6711da2..265badb 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/api/async/impl/AMRMClientAsyncImpl.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/api/async/impl/AMRMClientAsyncImpl.java
@@ -325,7 +325,11 @@ extends AMRMClientAsync<T> {
           }
 
           AllocateResponse response = (AllocateResponse) object;
-          String collectorAddress = response.getCollectorAddr();
+          String collectorAddress = null;
+          if (response.getCollectorInfo() != null) {
+            collectorAddress = response.getCollectorInfo().getCollectorAddr();
+          }
+
           TimelineV2Client timelineClient =
               client.getRegisteredTimelineV2Client();
           if (timelineClient != null && collectorAddress != null

http://git-wip-us.apache.org/repos/asf/hadoop/blob/7594d1de/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/ApplicationMasterServiceProtoTestBase.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/ApplicationMasterServiceProtoTestBase.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/ApplicationMasterServiceProtoTestBase.java
new file mode 100644
index 0000000..4521018
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/ApplicationMasterServiceProtoTestBase.java
@@ -0,0 +1,72 @@
+/**
+* Licensed to the Apache Software Foundation (ASF) under one
+* or more contributor license agreements.  See the NOTICE file
+* distributed with this work for additional information
+* regarding copyright ownership.  The ASF licenses this file
+* to you under the Apache License, Version 2.0 (the
+* "License"); you may not use this file except in compliance
+* with the License.  You may obtain a copy of the License at
+*
+*     http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+*/
+
+package org.apache.hadoop.yarn.client;
+
+import java.io.IOException;
+
+import org.apache.hadoop.ipc.RPC;
+import org.apache.hadoop.security.UserGroupInformation;
+import org.apache.hadoop.security.token.Token;
+import org.apache.hadoop.yarn.api.ApplicationMasterProtocol;
+import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
+import org.apache.hadoop.yarn.security.AMRMTokenIdentifier;
+import org.junit.After;
+
+/**
+ * Test Base for Application Master Service Protocol.
+ */
+public abstract class ApplicationMasterServiceProtoTestBase
+    extends ProtocolHATestBase {
+
+  private ApplicationMasterProtocol amClient;
+  private ApplicationAttemptId attemptId;
+
+  protected void startupHAAndSetupClient() throws Exception {
+    attemptId = this.cluster.createFakeApplicationAttemptId();
+
+    Token<AMRMTokenIdentifier> appToken =
+        this.cluster.getResourceManager().getRMContext()
+          .getAMRMTokenSecretManager().createAndGetAMRMToken(attemptId);
+    appToken.setService(ClientRMProxy.getAMRMTokenService(this.conf));
+    UserGroupInformation.setLoginUser(UserGroupInformation
+        .createRemoteUser(UserGroupInformation.getCurrentUser().getUserName()));
+    UserGroupInformation.getCurrentUser().addToken(appToken);
+    syncToken(appToken);
+    amClient = ClientRMProxy
+        .createRMProxy(this.conf, ApplicationMasterProtocol.class);
+  }
+
+  @After
+  public void shutDown() {
+    if(this.amClient != null) {
+      RPC.stopProxy(this.amClient);
+    }
+  }
+
+  protected ApplicationMasterProtocol getAMClient() {
+    return amClient;
+  }
+
+  private void syncToken(Token<AMRMTokenIdentifier> token) throws IOException {
+    for (int i = 0; i < this.cluster.getNumOfResourceManager(); i++) {
+      this.cluster.getResourceManager(i).getRMContext()
+          .getAMRMTokenSecretManager().addPersistedPassword(token);
+    }
+  }
+}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/7594d1de/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/ProtocolHATestBase.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/ProtocolHATestBase.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/ProtocolHATestBase.java
index a8e9132..f4005e9 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/ProtocolHATestBase.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/ProtocolHATestBase.java
@@ -25,6 +25,7 @@ import org.apache.hadoop.yarn.api.protocolrecords.FinishApplicationMasterRequest
 import org.apache.hadoop.yarn.api.protocolrecords.FinishApplicationMasterResponse;
 import org.apache.hadoop.yarn.api.protocolrecords.RegisterApplicationMasterRequest;
 import org.apache.hadoop.yarn.api.protocolrecords.RegisterApplicationMasterResponse;
+import org.apache.hadoop.yarn.api.records.CollectorInfo;
 import org.apache.hadoop.yarn.api.records.ApplicationAccessType;
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertFalse;
@@ -804,11 +805,20 @@ public abstract class ProtocolHATestBase extends ClientBaseWithFixes {
     }
 
     public AllocateResponse createFakeAllocateResponse() {
-      return AllocateResponse.newInstance(-1,
-          new ArrayList<ContainerStatus>(),
-          new ArrayList<Container>(), new ArrayList<NodeReport>(),
-          Resource.newInstance(1024, 2), null, 1,
-          null, new ArrayList<NMToken>());
+      if (YarnConfiguration.timelineServiceV2Enabled(getConfig())) {
+        return AllocateResponse.newInstance(-1,
+            new ArrayList<ContainerStatus>(), new ArrayList<Container>(),
+            new ArrayList<NodeReport>(), Resource.newInstance(1024, 2), null, 1,
+            null, new ArrayList<NMToken>(), CollectorInfo.newInstance(
+            "host:port", Token.newInstance(new byte[] {0}, "TIMELINE",
+            new byte[] {0}, "rm")));
+      } else {
+        return AllocateResponse.newInstance(-1,
+            new ArrayList<ContainerStatus>(),
+            new ArrayList<Container>(), new ArrayList<NodeReport>(),
+            Resource.newInstance(1024, 2), null, 1,
+            null, new ArrayList<NMToken>());
+      }
     }
   }
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/7594d1de/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/TestApplicationMasterServiceProtocolForTimelineV2.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/TestApplicationMasterServiceProtocolForTimelineV2.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/TestApplicationMasterServiceProtocolForTimelineV2.java
new file mode 100644
index 0000000..be8c302
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/TestApplicationMasterServiceProtocolForTimelineV2.java
@@ -0,0 +1,71 @@
+/**
+* Licensed to the Apache Software Foundation (ASF) under one
+* or more contributor license agreements.  See the NOTICE file
+* distributed with this work for additional information
+* regarding copyright ownership.  The ASF licenses this file
+* to you under the Apache License, Version 2.0 (the
+* "License"); you may not use this file except in compliance
+* with the License.  You may obtain a copy of the License at
+*
+*     http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+*/
+
+package org.apache.hadoop.yarn.client;
+
+import java.io.IOException;
+import java.util.ArrayList;
+
+import org.apache.hadoop.yarn.api.protocolrecords.AllocateRequest;
+import org.apache.hadoop.yarn.api.protocolrecords.AllocateResponse;
+import org.apache.hadoop.yarn.api.records.ContainerId;
+import org.apache.hadoop.yarn.api.records.ResourceBlacklistRequest;
+import org.apache.hadoop.yarn.api.records.ResourceRequest;
+import org.apache.hadoop.yarn.conf.YarnConfiguration;
+import org.apache.hadoop.yarn.exceptions.YarnException;
+import org.apache.hadoop.yarn.server.resourcemanager.HATestUtil;
+import org.apache.hadoop.yarn.server.timelineservice.storage.FileSystemTimelineWriterImpl;
+import org.apache.hadoop.yarn.server.timelineservice.storage.TimelineWriter;
+import org.junit.Assert;
+import org.junit.Before;
+import org.junit.Test;
+
+/**
+ * Tests Application Master Protocol with timeline service v2 enabled.
+ */
+public class TestApplicationMasterServiceProtocolForTimelineV2
+    extends ApplicationMasterServiceProtoTestBase {
+
+  @Before
+  public void initialize() throws Exception {
+    HATestUtil.setRpcAddressForRM(RM1_NODE_ID, RM1_PORT_BASE + 200, conf);
+    HATestUtil.setRpcAddressForRM(RM2_NODE_ID, RM2_PORT_BASE + 200, conf);
+    conf.setBoolean(YarnConfiguration.TIMELINE_SERVICE_ENABLED, true);
+    conf.setFloat(YarnConfiguration.TIMELINE_SERVICE_VERSION, 2.0f);
+    conf.setClass(YarnConfiguration.TIMELINE_SERVICE_WRITER_CLASS,
+        FileSystemTimelineWriterImpl.class, TimelineWriter.class);
+    startHACluster(0, false, false, true);
+    super.startupHAAndSetupClient();
+  }
+
+  @Test(timeout = 15000)
+  public void testAllocateForTimelineV2OnHA()
+      throws YarnException, IOException {
+    AllocateRequest request = AllocateRequest.newInstance(0, 50f,
+        new ArrayList<ResourceRequest>(),
+        new ArrayList<ContainerId>(),
+        ResourceBlacklistRequest.newInstance(new ArrayList<String>(),
+            new ArrayList<String>()));
+    AllocateResponse response = getAMClient().allocate(request);
+    Assert.assertEquals(response, this.cluster.createFakeAllocateResponse());
+    Assert.assertNotNull(response.getCollectorInfo());
+    Assert.assertEquals("host:port",
+        response.getCollectorInfo().getCollectorAddr());
+    Assert.assertNotNull(response.getCollectorInfo().getCollectorToken());
+  }
+}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/7594d1de/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/TestApplicationMasterServiceProtocolOnHA.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/TestApplicationMasterServiceProtocolOnHA.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/TestApplicationMasterServiceProtocolOnHA.java
index ad86fb3..c2f39a1 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/TestApplicationMasterServiceProtocolOnHA.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/TestApplicationMasterServiceProtocolOnHA.java
@@ -23,10 +23,6 @@ import java.util.ArrayList;
 
 import org.junit.Assert;
 
-import org.apache.hadoop.ipc.RPC;
-import org.apache.hadoop.security.UserGroupInformation;
-import org.apache.hadoop.security.token.Token;
-import org.apache.hadoop.yarn.api.ApplicationMasterProtocol;
 import org.apache.hadoop.yarn.api.protocolrecords.AllocateRequest;
 import org.apache.hadoop.yarn.api.protocolrecords.AllocateResponse;
 import org.apache.hadoop.yarn.api.protocolrecords.FinishApplicationMasterRequest;
@@ -34,45 +30,20 @@ import org.apache.hadoop.yarn.api.protocolrecords.FinishApplicationMasterRespons
 import org.apache.hadoop.yarn.api.protocolrecords.RegisterApplicationMasterRequest;
 import org.apache.hadoop.yarn.api.protocolrecords.RegisterApplicationMasterResponse;
 import org.apache.hadoop.yarn.api.records.FinalApplicationStatus;
-import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
 import org.apache.hadoop.yarn.api.records.ContainerId;
 import org.apache.hadoop.yarn.api.records.ResourceBlacklistRequest;
 import org.apache.hadoop.yarn.api.records.ResourceRequest;
 import org.apache.hadoop.yarn.exceptions.YarnException;
-import org.apache.hadoop.yarn.security.AMRMTokenIdentifier;
-import org.junit.After;
 import org.junit.Before;
 import org.junit.Test;
 
 
 public class TestApplicationMasterServiceProtocolOnHA
-    extends ProtocolHATestBase {
-  private ApplicationMasterProtocol amClient;
-  private ApplicationAttemptId attemptId ;
-
+    extends ApplicationMasterServiceProtoTestBase {
   @Before
   public void initialize() throws Exception {
     startHACluster(0, false, false, true);
-    attemptId = this.cluster.createFakeApplicationAttemptId();
-
-    Token<AMRMTokenIdentifier> appToken =
-        this.cluster.getResourceManager().getRMContext()
-          .getAMRMTokenSecretManager().createAndGetAMRMToken(attemptId);
-    appToken.setService(ClientRMProxy.getAMRMTokenService(this.conf));
-    UserGroupInformation.setLoginUser(UserGroupInformation
-        .createRemoteUser(UserGroupInformation.getCurrentUser().getUserName()));
-    UserGroupInformation.getCurrentUser().addToken(appToken);
-    syncToken(appToken);
-
-    amClient = ClientRMProxy
-        .createRMProxy(this.conf, ApplicationMasterProtocol.class);
-  }
-
-  @After
-  public void shutDown() {
-    if(this.amClient != null) {
-      RPC.stopProxy(this.amClient);
-    }
+    super.startupHAAndSetupClient();
   }
 
   @Test(timeout = 15000)
@@ -81,7 +52,7 @@ public class TestApplicationMasterServiceProtocolOnHA
     RegisterApplicationMasterRequest request =
         RegisterApplicationMasterRequest.newInstance("localhost", 0, "");
     RegisterApplicationMasterResponse response =
-        amClient.registerApplicationMaster(request);
+        getAMClient().registerApplicationMaster(request);
     Assert.assertEquals(response,
         this.cluster.createFakeRegisterApplicationMasterResponse());
   }
@@ -93,7 +64,7 @@ public class TestApplicationMasterServiceProtocolOnHA
         FinishApplicationMasterRequest.newInstance(
             FinalApplicationStatus.SUCCEEDED, "", "");
     FinishApplicationMasterResponse response =
-        amClient.finishApplicationMaster(request);
+        getAMClient().finishApplicationMaster(request);
     Assert.assertEquals(response,
         this.cluster.createFakeFinishApplicationMasterResponse());
   }
@@ -105,14 +76,7 @@ public class TestApplicationMasterServiceProtocolOnHA
         new ArrayList<ContainerId>(),
         ResourceBlacklistRequest.newInstance(new ArrayList<String>(),
             new ArrayList<String>()));
-    AllocateResponse response = amClient.allocate(request);
+    AllocateResponse response = getAMClient().allocate(request);
     Assert.assertEquals(response, this.cluster.createFakeAllocateResponse());
   }
-
-  private void syncToken(Token<AMRMTokenIdentifier> token) throws IOException {
-    for (int i = 0; i < this.cluster.getNumOfResourceManager(); i++) {
-      this.cluster.getResourceManager(i).getRMContext()
-          .getAMRMTokenSecretManager().addPersistedPassword(token);
-    }
-  }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/7594d1de/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/api/async/impl/TestAMRMClientAsync.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/api/async/impl/TestAMRMClientAsync.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/api/async/impl/TestAMRMClientAsync.java
index ba38340..56826c4 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/api/async/impl/TestAMRMClientAsync.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/api/async/impl/TestAMRMClientAsync.java
@@ -426,7 +426,7 @@ public class TestAMRMClientAsync {
     }
     AllocateResponse response =
         AllocateResponse.newInstance(0, completed, allocated,
-            new ArrayList<NodeReport>(), null, null, 1, null, nmTokens,
+            new ArrayList<NodeReport>(), null, null, 1, null, nmTokens, null,
             updatedContainers);
     return response;
   }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/7594d1de/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/AllocateResponsePBImpl.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/AllocateResponsePBImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/AllocateResponsePBImpl.java
index 1455d6a..ff35da8 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/AllocateResponsePBImpl.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/AllocateResponsePBImpl.java
@@ -27,6 +27,7 @@ import org.apache.hadoop.classification.InterfaceAudience.Private;
 import org.apache.hadoop.classification.InterfaceStability.Unstable;
 import org.apache.hadoop.security.proto.SecurityProtos.TokenProto;
 import org.apache.hadoop.yarn.api.protocolrecords.AllocateResponse;
+import org.apache.hadoop.yarn.api.records.CollectorInfo;
 import org.apache.hadoop.yarn.api.records.AMCommand;
 import org.apache.hadoop.yarn.api.records.Container;
 import org.apache.hadoop.yarn.api.records.ContainerStatus;
@@ -38,6 +39,7 @@ import org.apache.hadoop.yarn.api.records.Resource;
 import org.apache.hadoop.yarn.api.records.Token;
 import org.apache.hadoop.yarn.api.records.UpdateContainerError;
 import org.apache.hadoop.yarn.api.records.UpdatedContainer;
+import org.apache.hadoop.yarn.api.records.impl.pb.CollectorInfoPBImpl;
 import org.apache.hadoop.yarn.api.records.impl.pb.ContainerPBImpl;
 import org.apache.hadoop.yarn.api.records.impl.pb.ContainerStatusPBImpl;
 import org.apache.hadoop.yarn.api.records.impl.pb.NMTokenPBImpl;
@@ -48,6 +50,7 @@ import org.apache.hadoop.yarn.api.records.impl.pb.ProtoUtils;
 import org.apache.hadoop.yarn.api.records.impl.pb.ResourcePBImpl;
 import org.apache.hadoop.yarn.api.records.impl.pb.TokenPBImpl;
 import org.apache.hadoop.yarn.api.records.impl.pb.UpdatedContainerPBImpl;
+import org.apache.hadoop.yarn.proto.YarnProtos.CollectorInfoProto;
 import org.apache.hadoop.yarn.proto.YarnProtos.ContainerProto;
 import org.apache.hadoop.yarn.proto.YarnProtos.ContainerStatusProto;
 import org.apache.hadoop.yarn.proto.YarnProtos.NodeReportProto;
@@ -80,6 +83,7 @@ public class AllocateResponsePBImpl extends AllocateResponse {
   private PreemptionMessage preempt;
   private Token amrmToken = null;
   private Priority appPriority = null;
+  private CollectorInfo collectorInfo = null;
 
   public AllocateResponsePBImpl() {
     builder = AllocateResponseProto.newBuilder();
@@ -162,6 +166,9 @@ public class AllocateResponsePBImpl extends AllocateResponse {
     if (this.amrmToken != null) {
       builder.setAmRmToken(convertToProtoFormat(this.amrmToken));
     }
+    if (this.collectorInfo != null) {
+      builder.setCollectorInfo(convertToProtoFormat(this.collectorInfo));
+    }
     if (this.appPriority != null) {
       builder.setApplicationPriority(convertToProtoFormat(this.appPriority));
     }
@@ -398,19 +405,25 @@ public class AllocateResponsePBImpl extends AllocateResponse {
 
 
   @Override
-  public synchronized String getCollectorAddr() {
+  public synchronized CollectorInfo getCollectorInfo() {
     AllocateResponseProtoOrBuilder p = viaProto ? proto : builder;
-    return p.getCollectorAddr();
+    if (this.collectorInfo != null) {
+      return this.collectorInfo;
+    }
+    if (!p.hasCollectorInfo()) {
+      return null;
+    }
+    this.collectorInfo = convertFromProtoFormat(p.getCollectorInfo());
+    return this.collectorInfo;
   }
 
   @Override
-  public synchronized void setCollectorAddr(String collectorAddr) {
+  public synchronized void setCollectorInfo(CollectorInfo info) {
     maybeInitBuilder();
-    if (collectorAddr == null) {
-      builder.clearCollectorAddr();
-      return;
+    if (info == null) {
+      builder.clearCollectorInfo();
     }
-    builder.setCollectorAddr(collectorAddr);
+    this.collectorInfo = info;
   }
 
   @Override
@@ -718,6 +731,16 @@ public class AllocateResponsePBImpl extends AllocateResponse {
     return ((NodeReportPBImpl)t).getProto();
   }
 
+  private synchronized CollectorInfoPBImpl convertFromProtoFormat(
+      CollectorInfoProto p) {
+    return new CollectorInfoPBImpl(p);
+  }
+
+  private synchronized CollectorInfoProto convertToProtoFormat(
+      CollectorInfo t) {
+    return ((CollectorInfoPBImpl)t).getProto();
+  }
+
   private synchronized ContainerPBImpl convertFromProtoFormat(
       ContainerProto p) {
     return new ContainerPBImpl(p);

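For orientation, a minimal sketch (not part of this commit) of how AM-side code might consume the new record in place of the removed getCollectorAddr(). The amRMClient and timelineClient variables are assumed, and the setTimelineCollectorInfo(...) hook is hypothetical here:

    // Sketch only: read the per-app collector info from an allocate response.
    AllocateResponse response = amRMClient.allocate(0.5f);
    CollectorInfo info = response.getCollectorInfo();
    if (info != null) {
      // Carries both the collector address and the delegation token the AM
      // should use when publishing timeline entities.
      timelineClient.setTimelineCollectorInfo(info);  // hypothetical hook
    }
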
http://git-wip-us.apache.org/repos/asf/hadoop/blob/7594d1de/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/CollectorInfoPBImpl.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/CollectorInfoPBImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/CollectorInfoPBImpl.java
new file mode 100644
index 0000000..bb54133
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/CollectorInfoPBImpl.java
@@ -0,0 +1,148 @@
+/**
+* Licensed to the Apache Software Foundation (ASF) under one
+* or more contributor license agreements.  See the NOTICE file
+* distributed with this work for additional information
+* regarding copyright ownership.  The ASF licenses this file
+* to you under the Apache License, Version 2.0 (the
+* "License"); you may not use this file except in compliance
+* with the License.  You may obtain a copy of the License at
+*
+*     http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+*/
+
+package org.apache.hadoop.yarn.api.records.impl.pb;
+
+import org.apache.hadoop.security.proto.SecurityProtos.TokenProto;
+import org.apache.hadoop.yarn.api.records.CollectorInfo;
+import org.apache.hadoop.yarn.api.records.Token;
+import org.apache.hadoop.yarn.proto.YarnProtos.CollectorInfoProto;
+import org.apache.hadoop.yarn.proto.YarnProtos.CollectorInfoProtoOrBuilder;
+
+import com.google.protobuf.TextFormat;
+
+/**
+ * Protocol record implementation of {@link CollectorInfo}.
+ */
+public class CollectorInfoPBImpl extends CollectorInfo {
+
+  private CollectorInfoProto proto = CollectorInfoProto.getDefaultInstance();
+
+  private CollectorInfoProto.Builder builder = null;
+  private boolean viaProto = false;
+
+  private String collectorAddr = null;
+  private Token collectorToken = null;
+
+
+  public CollectorInfoPBImpl() {
+    builder = CollectorInfoProto.newBuilder();
+  }
+
+  public CollectorInfoPBImpl(CollectorInfoProto proto) {
+    this.proto = proto;
+    viaProto = true;
+  }
+
+  public CollectorInfoProto getProto() {
+    mergeLocalToProto();
+    proto = viaProto ? proto : builder.build();
+    viaProto = true;
+    return proto;
+  }
+
+  @Override
+  public int hashCode() {
+    return getProto().hashCode();
+  }
+
+  private void maybeInitBuilder() {
+    if (viaProto || builder == null) {
+      builder = CollectorInfoProto.newBuilder(proto);
+    }
+    viaProto = false;
+  }
+
+  private void mergeLocalToProto() {
+    if (viaProto) {
+      maybeInitBuilder();
+    }
+    mergeLocalToBuilder();
+    proto = builder.build();
+    viaProto = true;
+  }
+
+  @Override
+  public boolean equals(Object other) {
+    if (other == null) {
+      return false;
+    }
+    if (other.getClass().isAssignableFrom(this.getClass())) {
+      return this.getProto().equals(this.getClass().cast(other).getProto());
+    }
+    return false;
+  }
+
+  @Override
+  public String toString() {
+    return TextFormat.shortDebugString(getProto());
+  }
+
+  @Override
+  public String getCollectorAddr() {
+    CollectorInfoProtoOrBuilder p = viaProto ? proto : builder;
+    if (this.collectorAddr == null && p.hasCollectorAddr()) {
+      this.collectorAddr = p.getCollectorAddr();
+    }
+    return this.collectorAddr;
+  }
+
+  @Override
+  public void setCollectorAddr(String addr) {
+    maybeInitBuilder();
+    if (addr == null) {
+      builder.clearCollectorAddr();
+    }
+    this.collectorAddr = addr;
+  }
+
+  @Override
+  public Token getCollectorToken() {
+    CollectorInfoProtoOrBuilder p = viaProto ? proto : builder;
+    if (this.collectorToken == null && p.hasCollectorToken()) {
+      this.collectorToken = convertFromProtoFormat(p.getCollectorToken());
+    }
+    return this.collectorToken;
+  }
+
+  @Override
+  public void setCollectorToken(Token token) {
+    maybeInitBuilder();
+    if (token == null) {
+      builder.clearCollectorToken();
+    }
+    this.collectorToken = token;
+  }
+
+  private TokenPBImpl convertFromProtoFormat(TokenProto p) {
+    return new TokenPBImpl(p);
+  }
+
+  private TokenProto convertToProtoFormat(Token t) {
+    return ((TokenPBImpl) t).getProto();
+  }
+
+  private void mergeLocalToBuilder() {
+    if (this.collectorAddr != null) {
+      builder.setCollectorAddr(this.collectorAddr);
+    }
+    if (this.collectorToken != null) {
+      builder.setCollectorToken(convertToProtoFormat(this.collectorToken));
+    }
+  }
+}
\ No newline at end of file

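The new class follows the standard PBImpl idiom: reads are served from a local cache backed by either the proto or the builder, and getProto() flushes pending local fields via mergeLocalToProto(). A condensed sketch of that lifecycle (the address value is a placeholder):

    CollectorInfo info = new CollectorInfoPBImpl();      // builder-backed, viaProto = false
    info.setCollectorAddr("host.example.com:8188");      // cached locally until a merge
    CollectorInfoProto proto =
        ((CollectorInfoPBImpl) info).getProto();         // mergeLocalToProto() runs here
    CollectorInfo copy = new CollectorInfoPBImpl(proto); // read path, viaProto = true
    assert "host.example.com:8188".equals(copy.getCollectorAddr());
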
http://git-wip-us.apache.org/repos/asf/hadoop/blob/7594d1de/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/api/TestPBImplRecords.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/api/TestPBImplRecords.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/api/TestPBImplRecords.java
index bb688c9..a3f5491 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/api/TestPBImplRecords.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/api/TestPBImplRecords.java
@@ -101,6 +101,7 @@ import org.apache.hadoop.yarn.api.protocolrecords.impl.pb.StopContainersRequestP
 import org.apache.hadoop.yarn.api.protocolrecords.impl.pb.StopContainersResponsePBImpl;
 import org.apache.hadoop.yarn.api.protocolrecords.impl.pb.SubmitApplicationRequestPBImpl;
 import org.apache.hadoop.yarn.api.protocolrecords.impl.pb.SubmitApplicationResponsePBImpl;
+import org.apache.hadoop.yarn.api.records.CollectorInfo;
 import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
 import org.apache.hadoop.yarn.api.records.ApplicationAttemptReport;
 import org.apache.hadoop.yarn.api.records.ApplicationId;
@@ -406,6 +407,7 @@ public class TestPBImplRecords extends BasePBImplRecordsTest {
     generateByNewInstance(CommitResponse.class);
     generateByNewInstance(ApplicationTimeout.class);
     generateByNewInstance(QueueConfigurations.class);
+    generateByNewInstance(CollectorInfo.class);
   }
 
   @Test

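Registering CollectorInfo via generateByNewInstance(...) pulls the new record into the reflective round-trip sweep in BasePBImplRecordsTest. Conceptually, the harness performs the equivalent of the following for each registered type (hand-written sketch; the token value is a placeholder):

    CollectorInfo original = CollectorInfo.newInstance("host:1234", token);
    CollectorInfoProto p = ((CollectorInfoPBImpl) original).getProto();
    CollectorInfo restored = new CollectorInfoPBImpl(p);
    Assert.assertEquals(original.getCollectorAddr(), restored.getCollectorAddr());
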
http://git-wip-us.apache.org/repos/asf/hadoop/blob/7594d1de/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/api/protocolrecords/ReportNewCollectorInfoRequest.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/api/protocolrecords/ReportNewCollectorInfoRequest.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/api/protocolrecords/ReportNewCollectorInfoRequest.java
index 1503eca..a4c1a38 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/api/protocolrecords/ReportNewCollectorInfoRequest.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/api/protocolrecords/ReportNewCollectorInfoRequest.java
@@ -22,6 +22,7 @@ import java.util.Arrays;
 
 import org.apache.hadoop.classification.InterfaceAudience.Private;
 import org.apache.hadoop.yarn.api.records.ApplicationId;
+import org.apache.hadoop.yarn.api.records.Token;
 import org.apache.hadoop.yarn.server.api.records.AppCollectorData;
 import org.apache.hadoop.yarn.util.Records;
 
@@ -37,11 +38,11 @@ public abstract class ReportNewCollectorInfoRequest {
   }
 
   public static ReportNewCollectorInfoRequest newInstance(
-      ApplicationId id, String collectorAddr) {
+      ApplicationId id, String collectorAddr, Token token) {
     ReportNewCollectorInfoRequest request =
         Records.newRecord(ReportNewCollectorInfoRequest.class);
     request.setAppCollectorsList(
-        Arrays.asList(AppCollectorData.newInstance(id, collectorAddr)));
+        Arrays.asList(AppCollectorData.newInstance(id, collectorAddr, token)));
     return request;
   }
 

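A registering collector now travels with its delegation token end to end. A minimal call-site sketch (the proxy setup, appId and token are assumptions; the protocol method itself is exercised in TestRPC further down):

    ReportNewCollectorInfoRequest req =
        ReportNewCollectorInfoRequest.newInstance(appId, "nm-host:0", token);
    collectorNodemanagerProxy.reportNewCollectorInfo(req);  // CollectorNodemanagerProtocol
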
http://git-wip-us.apache.org/repos/asf/hadoop/blob/7594d1de/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/api/protocolrecords/impl/pb/NodeHeartbeatRequestPBImpl.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/api/protocolrecords/impl/pb/NodeHeartbeatRequestPBImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/api/protocolrecords/impl/pb/NodeHeartbeatRequestPBImpl.java
index 73a8abe..c07a6eb 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/api/protocolrecords/impl/pb/NodeHeartbeatRequestPBImpl.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/api/protocolrecords/impl/pb/NodeHeartbeatRequestPBImpl.java
@@ -26,23 +26,26 @@ import java.util.List;
 import java.util.Map;
 import java.util.Set;
 
+import org.apache.hadoop.security.proto.SecurityProtos.TokenProto;
+import org.apache.hadoop.yarn.server.api.records.AppCollectorData;
 import org.apache.hadoop.yarn.api.records.ApplicationId;
 import org.apache.hadoop.yarn.api.records.NodeLabel;
+import org.apache.hadoop.yarn.api.records.Token;
 import org.apache.hadoop.yarn.api.records.impl.pb.ApplicationIdPBImpl;
 import org.apache.hadoop.yarn.api.records.impl.pb.NodeLabelPBImpl;
+import org.apache.hadoop.yarn.api.records.impl.pb.TokenPBImpl;
 import org.apache.hadoop.yarn.proto.YarnProtos.ApplicationIdProto;
 import org.apache.hadoop.yarn.proto.YarnProtos.NodeLabelProto;
 import org.apache.hadoop.yarn.proto.YarnServerCommonProtos.MasterKeyProto;
 import org.apache.hadoop.yarn.proto.YarnServerCommonProtos.NodeStatusProto;
-import org.apache.hadoop.yarn.proto.YarnServerCommonServiceProtos.LogAggregationReportProto;
 import org.apache.hadoop.yarn.proto.YarnServerCommonServiceProtos.AppCollectorDataProto;
+import org.apache.hadoop.yarn.proto.YarnServerCommonServiceProtos.LogAggregationReportProto;
 import org.apache.hadoop.yarn.proto.YarnServerCommonServiceProtos.NodeHeartbeatRequestProto;
 import org.apache.hadoop.yarn.proto.YarnServerCommonServiceProtos.NodeHeartbeatRequestProtoOrBuilder;
 import org.apache.hadoop.yarn.proto.YarnServerCommonServiceProtos.NodeLabelsProto;
 import org.apache.hadoop.yarn.proto.YarnServerCommonServiceProtos.NodeLabelsProto.Builder;
 import org.apache.hadoop.yarn.server.api.protocolrecords.LogAggregationReport;
 import org.apache.hadoop.yarn.server.api.protocolrecords.NodeHeartbeatRequest;
-import org.apache.hadoop.yarn.server.api.records.AppCollectorData;
 import org.apache.hadoop.yarn.server.api.records.MasterKey;
 import org.apache.hadoop.yarn.server.api.records.NodeStatus;
 import org.apache.hadoop.yarn.server.api.records.impl.pb.MasterKeyPBImpl;
@@ -164,9 +167,13 @@ public class NodeHeartbeatRequestPBImpl extends NodeHeartbeatRequest {
     builder.clearRegisteringCollectors();
     for (Map.Entry<ApplicationId, AppCollectorData> entry :
         registeringCollectors.entrySet()) {
+      AppCollectorData data = entry.getValue();
       builder.addRegisteringCollectors(AppCollectorDataProto.newBuilder()
           .setAppId(convertToProtoFormat(entry.getKey()))
-          .setAppCollectorAddr(entry.getValue().getCollectorAddr()));
+          .setAppCollectorAddr(data.getCollectorAddr())
+          .setAppCollectorToken(convertToProtoFormat(data.getCollectorToken()))
+          .setRmIdentifier(data.getRMIdentifier())
+          .setVersion(data.getVersion()));
     }
   }
 
@@ -267,8 +274,10 @@ public class NodeHeartbeatRequestPBImpl extends NodeHeartbeatRequest {
       this.registeringCollectors = new HashMap<>();
       for (AppCollectorDataProto c : list) {
         ApplicationId appId = convertFromProtoFormat(c.getAppId());
+        Token collectorToken = convertFromProtoFormat(c.getAppCollectorToken());
         AppCollectorData data = AppCollectorData.newInstance(appId,
-            c.getAppCollectorAddr(), c.getRmIdentifier(), c.getVersion());
+            c.getAppCollectorAddr(), c.getRmIdentifier(), c.getVersion(),
+            collectorToken);
         this.registeringCollectors.put(appId, data);
       }
     }
@@ -309,6 +318,14 @@ public class NodeHeartbeatRequestPBImpl extends NodeHeartbeatRequest {
     return ((MasterKeyPBImpl)t).getProto();
   }
 
+  private TokenPBImpl convertFromProtoFormat(TokenProto p) {
+    return new TokenPBImpl(p);
+  }
+
+  private TokenProto convertToProtoFormat(Token t) {
+    return ((TokenPBImpl) t).getProto();
+  }
+
   @Override
   public Set<NodeLabel> getNodeLabels() {
     initNodeLabels();

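Note that the registering-collectors loop above calls convertToProtoFormat(data.getCollectorToken()) unconditionally, so it relies on every registering collector carrying a token. Since app_collector_token is declared optional in the proto (see the .proto change below), a null-safe variant would guard the field, for example (illustrative, not what the patch does):

    AppCollectorDataProto.Builder b = AppCollectorDataProto.newBuilder()
        .setAppId(convertToProtoFormat(entry.getKey()))
        .setAppCollectorAddr(data.getCollectorAddr())
        .setRmIdentifier(data.getRMIdentifier())
        .setVersion(data.getVersion());
    if (data.getCollectorToken() != null) {   // optional field: set only if present
      b.setAppCollectorToken(convertToProtoFormat(data.getCollectorToken()));
    }
    builder.addRegisteringCollectors(b);
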
http://git-wip-us.apache.org/repos/asf/hadoop/blob/7594d1de/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/api/protocolrecords/impl/pb/NodeHeartbeatResponsePBImpl.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/api/protocolrecords/impl/pb/NodeHeartbeatResponsePBImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/api/protocolrecords/impl/pb/NodeHeartbeatResponsePBImpl.java
index ad81a1f..a1e6a21 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/api/protocolrecords/impl/pb/NodeHeartbeatResponsePBImpl.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/api/protocolrecords/impl/pb/NodeHeartbeatResponsePBImpl.java
@@ -26,31 +26,34 @@ import java.util.Iterator;
 import java.util.List;
 import java.util.Map;
 
+import org.apache.hadoop.security.proto.SecurityProtos.TokenProto;
 import org.apache.hadoop.yarn.api.protocolrecords.SignalContainerRequest;
 import org.apache.hadoop.yarn.api.protocolrecords.impl.pb.SignalContainerRequestPBImpl;
+import org.apache.hadoop.yarn.server.api.records.AppCollectorData;
 import org.apache.hadoop.yarn.api.records.ApplicationId;
 import org.apache.hadoop.yarn.api.records.Container;
 import org.apache.hadoop.yarn.api.records.ContainerId;
 import org.apache.hadoop.yarn.api.records.Resource;
+import org.apache.hadoop.yarn.api.records.Token;
 import org.apache.hadoop.yarn.api.records.impl.pb.ApplicationIdPBImpl;
 import org.apache.hadoop.yarn.api.records.impl.pb.ContainerIdPBImpl;
 import org.apache.hadoop.yarn.api.records.impl.pb.ContainerPBImpl;
 import org.apache.hadoop.yarn.api.records.impl.pb.ProtoUtils;
 import org.apache.hadoop.yarn.api.records.impl.pb.ResourcePBImpl;
+import org.apache.hadoop.yarn.api.records.impl.pb.TokenPBImpl;
 import org.apache.hadoop.yarn.proto.YarnProtos.ApplicationIdProto;
 import org.apache.hadoop.yarn.proto.YarnProtos.ContainerIdProto;
 import org.apache.hadoop.yarn.proto.YarnProtos.ContainerProto;
 import org.apache.hadoop.yarn.proto.YarnProtos.ResourceProto;
+import org.apache.hadoop.yarn.proto.YarnServerCommonServiceProtos.AppCollectorDataProto;
 import org.apache.hadoop.yarn.proto.YarnServerCommonServiceProtos.ContainerQueuingLimitProto;
 import org.apache.hadoop.yarn.proto.YarnServiceProtos.SignalContainerRequestProto;
 import org.apache.hadoop.yarn.proto.YarnServerCommonProtos.MasterKeyProto;
 import org.apache.hadoop.yarn.proto.YarnServerCommonProtos.NodeActionProto;
-import org.apache.hadoop.yarn.proto.YarnServerCommonServiceProtos.AppCollectorDataProto;
 import org.apache.hadoop.yarn.proto.YarnServerCommonServiceProtos.NodeHeartbeatResponseProto;
 import org.apache.hadoop.yarn.proto.YarnServerCommonServiceProtos.NodeHeartbeatResponseProtoOrBuilder;
 import org.apache.hadoop.yarn.proto.YarnServerCommonServiceProtos.SystemCredentialsForAppsProto;
 import org.apache.hadoop.yarn.server.api.protocolrecords.NodeHeartbeatResponse;
-import org.apache.hadoop.yarn.server.api.records.AppCollectorData;
 import org.apache.hadoop.yarn.server.api.records.ContainerQueuingLimit;
 import org.apache.hadoop.yarn.server.api.records.MasterKey;
 import org.apache.hadoop.yarn.server.api.records.NodeAction;
@@ -154,6 +157,8 @@ public class NodeHeartbeatResponsePBImpl extends NodeHeartbeatResponse {
       builder.addAppCollectors(AppCollectorDataProto.newBuilder()
           .setAppId(convertToProtoFormat(entry.getKey()))
           .setAppCollectorAddr(data.getCollectorAddr())
+          .setAppCollectorToken(
+              convertToProtoFormat(entry.getValue().getCollectorToken()))
           .setRmIdentifier(data.getRMIdentifier())
           .setVersion(data.getVersion()));
     }
@@ -599,8 +604,10 @@ public class NodeHeartbeatResponsePBImpl extends NodeHeartbeatResponse {
       this.appCollectorsMap = new HashMap<>();
       for (AppCollectorDataProto c : list) {
         ApplicationId appId = convertFromProtoFormat(c.getAppId());
+        Token collectorToken = convertFromProtoFormat(c.getAppCollectorToken());
         AppCollectorData data = AppCollectorData.newInstance(appId,
-            c.getAppCollectorAddr(), c.getRmIdentifier(), c.getVersion());
+            c.getAppCollectorAddr(), c.getRmIdentifier(), c.getVersion(),
+            collectorToken);
         this.appCollectorsMap.put(appId, data);
       }
     }
@@ -780,5 +787,13 @@ public class NodeHeartbeatResponsePBImpl extends NodeHeartbeatResponse {
       SignalContainerRequest t) {
     return ((SignalContainerRequestPBImpl)t).getProto();
   }
+
+  private TokenProto convertToProtoFormat(Token t) {
+    return ((TokenPBImpl) t).getProto();
+  }
+
+  private TokenPBImpl convertFromProtoFormat(TokenProto p) {
+    return new TokenPBImpl(p);
+  }
 }
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/7594d1de/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/api/protocolrecords/impl/pb/ReportNewCollectorInfoRequestPBImpl.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/api/protocolrecords/impl/pb/ReportNewCollectorInfoRequestPBImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/api/protocolrecords/impl/pb/ReportNewCollectorInfoRequestPBImpl.java
index 3f3dcf5..5ffc3a2 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/api/protocolrecords/impl/pb/ReportNewCollectorInfoRequestPBImpl.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/api/protocolrecords/impl/pb/ReportNewCollectorInfoRequestPBImpl.java
@@ -20,12 +20,12 @@ package org.apache.hadoop.yarn.server.api.protocolrecords.impl.pb;
 import java.util.ArrayList;
 import java.util.List;
 
+import org.apache.hadoop.yarn.server.api.records.AppCollectorData;
+import org.apache.hadoop.yarn.server.api.records.impl.pb.AppCollectorDataPBImpl;
 import org.apache.hadoop.yarn.proto.YarnServerCommonServiceProtos.AppCollectorDataProto;
 import org.apache.hadoop.yarn.proto.YarnServerCommonServiceProtos.ReportNewCollectorInfoRequestProto;
 import org.apache.hadoop.yarn.proto.YarnServerCommonServiceProtos.ReportNewCollectorInfoRequestProtoOrBuilder;
 import org.apache.hadoop.yarn.server.api.protocolrecords.ReportNewCollectorInfoRequest;
-import org.apache.hadoop.yarn.server.api.records.AppCollectorData;
-import org.apache.hadoop.yarn.server.api.records.impl.pb.AppCollectorDataPBImpl;
 
 public class ReportNewCollectorInfoRequestPBImpl extends
     ReportNewCollectorInfoRequest {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/7594d1de/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/api/records/AppCollectorData.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/api/records/AppCollectorData.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/api/records/AppCollectorData.java
index da2e5de..5266dca 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/api/records/AppCollectorData.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/api/records/AppCollectorData.java
@@ -21,6 +21,7 @@ package org.apache.hadoop.yarn.server.api.records;
 import org.apache.hadoop.classification.InterfaceAudience.Private;
 import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.yarn.api.records.ApplicationId;
+import org.apache.hadoop.yarn.api.records.Token;
 import org.apache.hadoop.yarn.util.Records;
 
 
@@ -31,20 +32,32 @@ public abstract class AppCollectorData {
   protected static final long DEFAULT_TIMESTAMP_VALUE = -1;
 
   public static AppCollectorData newInstance(
-      ApplicationId id, String collectorAddr, long rmIdentifier, long version) {
+      ApplicationId id, String collectorAddr, long rmIdentifier, long version,
+      Token token) {
     AppCollectorData appCollectorData =
         Records.newRecord(AppCollectorData.class);
     appCollectorData.setApplicationId(id);
     appCollectorData.setCollectorAddr(collectorAddr);
     appCollectorData.setRMIdentifier(rmIdentifier);
     appCollectorData.setVersion(version);
+    appCollectorData.setCollectorToken(token);
     return appCollectorData;
   }
 
+  public static AppCollectorData newInstance(
+      ApplicationId id, String collectorAddr, long rmIdentifier, long version) {
+    return newInstance(id, collectorAddr, rmIdentifier, version, null);
+  }
+
   public static AppCollectorData newInstance(ApplicationId id,
-      String collectorAddr) {
+      String collectorAddr, Token token) {
     return newInstance(id, collectorAddr, DEFAULT_TIMESTAMP_VALUE,
-        DEFAULT_TIMESTAMP_VALUE);
+        DEFAULT_TIMESTAMP_VALUE, token);
+  }
+
+  public static AppCollectorData newInstance(ApplicationId id,
+      String collectorAddr) {
+    return newInstance(id, collectorAddr, null);
   }
 
   /**
@@ -101,4 +114,12 @@ public abstract class AppCollectorData {
 
   public abstract void setVersion(long version);
 
+  /**
+   * Get delegation token for app collector which AM will use to publish
+   * entities.
+   * @return the delegation token for app collector.
+   */
+  public abstract Token getCollectorToken();
+
+  public abstract void setCollectorToken(Token token);
 }

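The three factory overloads now chain into one another, so callers supply as much as they know; the shorter forms default the RM identifier and version to -1 and the token to null. All of the following are valid (appId, addr, token, rmId and version are placeholders):

    AppCollectorData noToken   = AppCollectorData.newInstance(appId, addr);
    AppCollectorData withToken = AppCollectorData.newInstance(appId, addr, token);
    AppCollectorData full      = AppCollectorData.newInstance(
        appId, addr, rmId, version, token);
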
http://git-wip-us.apache.org/repos/asf/hadoop/blob/7594d1de/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/api/records/impl/pb/AppCollectorDataPBImpl.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/api/records/impl/pb/AppCollectorDataPBImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/api/records/impl/pb/AppCollectorDataPBImpl.java
index 7d3a805..7144f51 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/api/records/impl/pb/AppCollectorDataPBImpl.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/api/records/impl/pb/AppCollectorDataPBImpl.java
@@ -19,10 +19,11 @@ package org.apache.hadoop.yarn.server.api.records.impl.pb;
 
 import org.apache.hadoop.classification.InterfaceAudience.Private;
 import org.apache.hadoop.classification.InterfaceStability.Unstable;
+import org.apache.hadoop.yarn.server.api.records.AppCollectorData;
 import org.apache.hadoop.yarn.api.records.ApplicationId;
+import org.apache.hadoop.yarn.api.records.Token;
 import org.apache.hadoop.yarn.api.records.impl.pb.ApplicationIdPBImpl;
-import org.apache.hadoop.yarn.server.api.records.AppCollectorData;
-
+import org.apache.hadoop.yarn.api.records.impl.pb.TokenPBImpl;
 import org.apache.hadoop.yarn.proto.YarnProtos.ApplicationIdProto;
 import org.apache.hadoop.yarn.proto.YarnServerCommonServiceProtos.AppCollectorDataProto;
 import org.apache.hadoop.yarn.proto.YarnServerCommonServiceProtos.AppCollectorDataProtoOrBuilder;
@@ -43,6 +44,7 @@ public class AppCollectorDataPBImpl extends AppCollectorData {
   private String collectorAddr = null;
   private Long rmIdentifier = null;
   private Long version = null;
+  private Token collectorToken = null;
 
   public AppCollectorDataPBImpl() {
     builder = AppCollectorDataProto.newBuilder();
@@ -158,6 +160,24 @@ public class AppCollectorDataPBImpl extends AppCollectorData {
     builder.setVersion(version);
   }
 
+  @Override
+  public Token getCollectorToken() {
+    AppCollectorDataProtoOrBuilder p = viaProto ? proto : builder;
+    if (this.collectorToken == null && p.hasAppCollectorToken()) {
+      this.collectorToken = new TokenPBImpl(p.getAppCollectorToken());
+    }
+    return this.collectorToken;
+  }
+
+  @Override
+  public void setCollectorToken(Token token) {
+    maybeInitBuilder();
+    if (token == null) {
+      builder.clearAppCollectorToken();
+    }
+    this.collectorToken = token;
+  }
+
   private ApplicationIdPBImpl convertFromProtoFormat(ApplicationIdProto p) {
     return new ApplicationIdPBImpl(p);
   }
@@ -195,6 +215,9 @@ public class AppCollectorDataPBImpl extends AppCollectorData {
     if (this.version != null) {
       builder.setVersion(this.version);
     }
+    if (this.collectorToken != null) {
+      builder.setAppCollectorToken(
+          ((TokenPBImpl)this.collectorToken).getProto());
+    }
   }
-
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/7594d1de/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/proto/yarn_server_common_service_protos.proto
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/proto/yarn_server_common_service_protos.proto b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/proto/yarn_server_common_service_protos.proto
index c33b913..c2ba677 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/proto/yarn_server_common_service_protos.proto
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/proto/yarn_server_common_service_protos.proto
@@ -22,6 +22,7 @@ option java_generic_services = true;
 option java_generate_equals_and_hash = true;
 package hadoop.yarn;
 
+import "Security.proto";
 import "yarn_protos.proto";
 import "yarn_server_common_protos.proto";
 import "yarn_service_protos.proto";
@@ -139,6 +140,7 @@ message AppCollectorDataProto {
   optional string app_collector_addr = 2;
   optional int64 rm_identifier = 3 [default = -1];
   optional int64 version = 4 [default = -1];
+  optional hadoop.common.TokenProto app_collector_token = 5;
 }
 
 //////////////////////////////////////////////////////

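Because app_collector_token is optional, wire-compatible readers probe for the field before converting it, mirroring the PBImpl getters above. A minimal sketch:

    if (dataProto.hasAppCollectorToken()) {
      TokenProto tokenProto = dataProto.getAppCollectorToken();
      // convert to a yarn Token only when the collector actually sent one
    }
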
http://git-wip-us.apache.org/repos/asf/hadoop/blob/7594d1de/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/test/java/org/apache/hadoop/yarn/TestRPC.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/test/java/org/apache/hadoop/yarn/TestRPC.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/test/java/org/apache/hadoop/yarn/TestRPC.java
index 8291197..82dfaea 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/test/java/org/apache/hadoop/yarn/TestRPC.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/test/java/org/apache/hadoop/yarn/TestRPC.java
@@ -30,6 +30,7 @@ import org.apache.hadoop.ipc.RPC;
 import org.apache.hadoop.ipc.Server;
 import org.apache.hadoop.net.NetUtils;
 import org.apache.hadoop.security.SecurityUtil;
+import org.apache.hadoop.util.Time;
 import org.apache.hadoop.yarn.api.ApplicationClientProtocol;
 import org.apache.hadoop.yarn.api.ContainerManagementProtocol;
 import org.apache.hadoop.yarn.api.ContainerManagementProtocolPB;
@@ -72,6 +73,7 @@ import org.apache.hadoop.yarn.ipc.HadoopYarnProtoRPC;
 import org.apache.hadoop.yarn.ipc.RPCUtil;
 import org.apache.hadoop.yarn.ipc.YarnRPC;
 import org.apache.hadoop.yarn.security.ContainerTokenIdentifier;
+import org.apache.hadoop.yarn.security.client.TimelineDelegationTokenIdentifier;
 import org.apache.hadoop.yarn.server.api.CollectorNodemanagerProtocol;
 import org.apache.hadoop.yarn.server.api.protocolrecords.GetTimelineCollectorContextRequest;
 import org.apache.hadoop.yarn.server.api.protocolrecords.GetTimelineCollectorContextResponse;
@@ -93,6 +95,21 @@ public class TestRPC {
       "collectors' number in ReportNewCollectorInfoRequest is not ONE.";
 
   public static final String DEFAULT_COLLECTOR_ADDR = "localhost:0";
+  private static final Token DEFAULT_COLLECTOR_TOKEN;
+  static {
+    TimelineDelegationTokenIdentifier identifier =
+        new TimelineDelegationTokenIdentifier();
+    identifier.setOwner(new Text("user"));
+    identifier.setRenewer(new Text("user"));
+    identifier.setRealUser(new Text("user"));
+    long now = Time.now();
+    identifier.setIssueDate(now);
+    identifier.setMaxDate(now + 1000L);
+    identifier.setMasterKeyId(500);
+    identifier.setSequenceNumber(5);
+    DEFAULT_COLLECTOR_TOKEN = Token.newInstance(identifier.getBytes(),
+        identifier.getKind().toString(), identifier.getBytes(), "localhost:0");
+  }
 
   public static final ApplicationId DEFAULT_APP_ID =
       ApplicationId.newInstance(0, 0);
@@ -173,7 +190,16 @@ public class TestRPC {
     try {
       ReportNewCollectorInfoRequest request =
           ReportNewCollectorInfoRequest.newInstance(
-              DEFAULT_APP_ID, DEFAULT_COLLECTOR_ADDR);
+              DEFAULT_APP_ID, DEFAULT_COLLECTOR_ADDR, null);
+      proxy.reportNewCollectorInfo(request);
+    } catch (YarnException e) {
+      Assert.fail("RPC call failure is not expected here.");
+    }
+
+    try {
+      ReportNewCollectorInfoRequest request =
+          ReportNewCollectorInfoRequest.newInstance(
+              DEFAULT_APP_ID, DEFAULT_COLLECTOR_ADDR, DEFAULT_COLLECTOR_TOKEN);
       proxy.reportNewCollectorInfo(request);
     } catch (YarnException e) {
       Assert.fail("RPC call failure is not expected here.");
@@ -437,6 +463,8 @@ public class TestRPC {
             DEFAULT_APP_ID);
         Assert.assertEquals(appCollector.getCollectorAddr(),
             DEFAULT_COLLECTOR_ADDR);
+        Assert.assertTrue(appCollector.getCollectorToken() == null ||
+            appCollector.getCollectorToken().equals(DEFAULT_COLLECTOR_TOKEN));
       } else {
         throw new YarnException(ILLEGAL_NUMBER_MESSAGE);
       }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/7594d1de/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/test/java/org/apache/hadoop/yarn/TestYarnServerApiClasses.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/test/java/org/apache/hadoop/yarn/TestYarnServerApiClasses.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/test/java/org/apache/hadoop/yarn/TestYarnServerApiClasses.java
index 15071f3..a1890dd 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/test/java/org/apache/hadoop/yarn/TestYarnServerApiClasses.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/test/java/org/apache/hadoop/yarn/TestYarnServerApiClasses.java
@@ -37,6 +37,7 @@ import org.apache.hadoop.yarn.api.records.ContainerStatus;
 import org.apache.hadoop.yarn.api.records.NodeId;
 import org.apache.hadoop.yarn.api.records.NodeLabel;
 import org.apache.hadoop.yarn.api.records.Resource;
+import org.apache.hadoop.yarn.api.records.Token;
 import org.apache.hadoop.yarn.api.records.impl.pb.ApplicationAttemptIdPBImpl;
 import org.apache.hadoop.yarn.api.records.impl.pb.ApplicationIdPBImpl;
 import org.apache.hadoop.yarn.api.records.impl.pb.ContainerIdPBImpl;
@@ -351,7 +352,8 @@ public class TestYarnServerApiClasses {
   private Map<ApplicationId, AppCollectorData> getCollectors() {
     ApplicationId appID = ApplicationId.newInstance(1L, 1);
     String collectorAddr = "localhost:0";
-    AppCollectorData data = AppCollectorData.newInstance(appID, collectorAddr);
+    AppCollectorData data = AppCollectorData.newInstance(appID, collectorAddr,
+        Token.newInstance(new byte[0], "kind", new byte[0], "s"));
     Map<ApplicationId, AppCollectorData> collectorMap =
         new HashMap<>();
     collectorMap.put(appID, data);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/7594d1de/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/NodeStatusUpdaterImpl.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/NodeStatusUpdaterImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/NodeStatusUpdaterImpl.java
index 43cd135..35b7cb0 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/NodeStatusUpdaterImpl.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/NodeStatusUpdaterImpl.java
@@ -71,7 +71,6 @@ import org.apache.hadoop.yarn.server.api.protocolrecords.NodeHeartbeatResponse;
 import org.apache.hadoop.yarn.server.api.protocolrecords.RegisterNodeManagerRequest;
 import org.apache.hadoop.yarn.server.api.protocolrecords.RegisterNodeManagerResponse;
 import org.apache.hadoop.yarn.server.api.protocolrecords.UnRegisterNodeManagerRequest;
-
 import org.apache.hadoop.yarn.server.api.records.AppCollectorData;
 import org.apache.hadoop.yarn.server.api.records.ContainerQueuingLimit;
 import org.apache.hadoop.yarn.server.api.records.OpportunisticContainersStatus;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/7594d1de/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/application/ApplicationImpl.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/application/ApplicationImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/application/ApplicationImpl.java
index 0097cd2..39be7a7 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/application/ApplicationImpl.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/application/ApplicationImpl.java
@@ -46,7 +46,6 @@ import org.apache.hadoop.yarn.proto.YarnProtos;
 import org.apache.hadoop.yarn.proto.YarnServerNodemanagerRecoveryProtos.ContainerManagerApplicationProto;
 import org.apache.hadoop.yarn.proto.YarnServerNodemanagerRecoveryProtos.FlowContextProto;
 import org.apache.hadoop.yarn.server.api.records.AppCollectorData;
-import org.apache.hadoop.yarn.proto.YarnServerNodemanagerRecoveryProtos.FlowContextProto;
 import org.apache.hadoop.yarn.server.nodemanager.Context;
 import org.apache.hadoop.yarn.server.nodemanager.containermanager.AuxServicesEvent;
 import org.apache.hadoop.yarn.server.nodemanager.containermanager.AuxServicesEventType;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/7594d1de/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ApplicationMasterService.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ApplicationMasterService.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ApplicationMasterService.java
index a245a97..479aa43 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ApplicationMasterService.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ApplicationMasterService.java
@@ -45,8 +45,6 @@ import org.apache.hadoop.yarn.api.protocolrecords.FinishApplicationMasterRequest
 import org.apache.hadoop.yarn.api.protocolrecords.FinishApplicationMasterResponse;
 import org.apache.hadoop.yarn.api.protocolrecords.RegisterApplicationMasterRequest;
 import org.apache.hadoop.yarn.api.protocolrecords.RegisterApplicationMasterResponse;
-
-
 import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
 import org.apache.hadoop.yarn.api.records.ApplicationId;
 import org.apache.hadoop.yarn.api.records.Container;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/7594d1de/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/DefaultAMSProcessor.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/DefaultAMSProcessor.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/DefaultAMSProcessor.java
index 2d677a7..a993d69 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/DefaultAMSProcessor.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/DefaultAMSProcessor.java
@@ -32,6 +32,7 @@ import org.apache.hadoop.yarn.api.protocolrecords.RegisterApplicationMasterReque
 import org.apache.hadoop.yarn.api.protocolrecords.RegisterApplicationMasterResponse;
 import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
 import org.apache.hadoop.yarn.api.records.ApplicationSubmissionContext;
+import org.apache.hadoop.yarn.api.records.CollectorInfo;
 import org.apache.hadoop.yarn.api.records.Container;
 import org.apache.hadoop.yarn.api.records.ContainerId;
 import org.apache.hadoop.yarn.api.records.ContainerUpdateType;
@@ -54,7 +55,6 @@ import org.apache.hadoop.yarn.exceptions.InvalidResourceRequestException;
 import org.apache.hadoop.yarn.exceptions.YarnException;
 import org.apache.hadoop.yarn.factories.RecordFactory;
 import org.apache.hadoop.yarn.factory.providers.RecordFactoryProvider;
-import org.apache.hadoop.yarn.server.api.records.AppCollectorData;
 import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMApp;
 import org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.RMAppAttempt;
 import org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt
@@ -294,9 +294,9 @@ final class DefaultAMSProcessor implements ApplicationMasterServiceProcessor {
     // add collector address for this application
     if (YarnConfiguration.timelineServiceV2Enabled(
         getRmContext().getYarnConfiguration())) {
-      AppCollectorData data = app.getCollectorData();
-      if (data != null) {
-        response.setCollectorAddr(data.getCollectorAddr());
+      CollectorInfo collectorInfo = app.getCollectorInfo();
+      if (collectorInfo != null) {
+        response.setCollectorInfo(collectorInfo);
       }
     }
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/7594d1de/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/RMApp.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/RMApp.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/RMApp.java
index 1a0b920..93c41b6 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/RMApp.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/RMApp.java
@@ -32,6 +32,7 @@ import org.apache.hadoop.yarn.api.records.ApplicationId;
 import org.apache.hadoop.yarn.api.records.ApplicationReport;
 import org.apache.hadoop.yarn.api.records.ApplicationSubmissionContext;
 import org.apache.hadoop.yarn.api.records.ApplicationTimeoutType;
+import org.apache.hadoop.yarn.api.records.CollectorInfo;
 import org.apache.hadoop.yarn.api.records.FinalApplicationStatus;
 import org.apache.hadoop.yarn.api.records.LogAggregationStatus;
 import org.apache.hadoop.yarn.api.records.NodeId;
@@ -187,14 +188,24 @@ public interface RMApp extends EventHandler<RMAppEvent> {
    * only if the timeline service v.2 is enabled.
    *
    * @return the data for the application's collector, including collector
-   * address, collector ID. Return null if the timeline service v.2 is not
-   * enabled.
+   * address, RM ID, version and collector token. Return null if the timeline
+   * service v.2 is not enabled.
    */
   @InterfaceAudience.Private
   @InterfaceStability.Unstable
   AppCollectorData getCollectorData();
 
   /**
+   * The timeline collector information to be sent to AM. It should be used
+   * only if the timeline service v.2 is enabled.
+   *
+   * @return collector info, including collector address and collector token.
+   * Return null if the timeline service v.2 is not enabled.
+   */
+  @InterfaceAudience.Private
+  @InterfaceStability.Unstable
+  CollectorInfo getCollectorInfo();
+  /**
    * The original tracking url for the application master.
    * @return the original tracking url for the application master.
    */

http://git-wip-us.apache.org/repos/asf/hadoop/blob/7594d1de/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/RMAppImpl.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/RMAppImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/RMAppImpl.java
index dbddc5f..13b0792 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/RMAppImpl.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/RMAppImpl.java
@@ -55,6 +55,7 @@ import org.apache.hadoop.yarn.api.records.ApplicationResourceUsageReport;
 import org.apache.hadoop.yarn.api.records.ApplicationSubmissionContext;
 import org.apache.hadoop.yarn.api.records.ApplicationTimeout;
 import org.apache.hadoop.yarn.api.records.ApplicationTimeoutType;
+import org.apache.hadoop.yarn.api.records.CollectorInfo;
 import org.apache.hadoop.yarn.api.records.FinalApplicationStatus;
 import org.apache.hadoop.yarn.api.records.LogAggregationStatus;
 import org.apache.hadoop.yarn.api.records.NodeId;
@@ -167,6 +168,7 @@ public class RMAppImpl implements RMApp, Recoverable {
   private int firstAttemptIdInStateStore = 1;
   private int nextAttemptId = 1;
   private AppCollectorData collectorData;
+  private CollectorInfo collectorInfo;
   // This field isn't protected by readlock now.
   private volatile RMAppAttempt currentAttempt;
   private String queue;
@@ -530,7 +532,7 @@ public class RMAppImpl implements RMApp, Recoverable {
    */
   public void startTimelineCollector() {
     AppLevelTimelineCollector collector =
-        new AppLevelTimelineCollector(applicationId);
+        new AppLevelTimelineCollector(applicationId, user);
     rmContext.getRMTimelineCollectorManager().putIfAbsent(
         applicationId, collector);
   }
@@ -618,6 +620,12 @@ public class RMAppImpl implements RMApp, Recoverable {
 
   public void setCollectorData(AppCollectorData incomingData) {
     this.collectorData = incomingData;
+    this.collectorInfo = CollectorInfo.newInstance(
+        incomingData.getCollectorAddr(), incomingData.getCollectorToken());
+  }
+
+  public CollectorInfo getCollectorInfo() {
+    return this.collectorInfo;
   }
 
   public void removeCollectorData() {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/7594d1de/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/applicationsmanager/MockAsm.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/applicationsmanager/MockAsm.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/applicationsmanager/MockAsm.java
index 3234d6f..f826631 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/applicationsmanager/MockAsm.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/applicationsmanager/MockAsm.java
@@ -31,6 +31,7 @@ import org.apache.hadoop.yarn.api.records.ApplicationReport;
 import org.apache.hadoop.yarn.api.records.ApplicationResourceUsageReport;
 import org.apache.hadoop.yarn.api.records.ApplicationSubmissionContext;
 import org.apache.hadoop.yarn.api.records.ApplicationTimeoutType;
+import org.apache.hadoop.yarn.api.records.CollectorInfo;
 import org.apache.hadoop.yarn.api.records.Container;
 import org.apache.hadoop.yarn.api.records.ContainerId;
 import org.apache.hadoop.yarn.api.records.FinalApplicationStatus;
@@ -239,6 +240,11 @@ public abstract class MockAsm extends MockApps {
     public boolean isAppInCompletedStates() {
       throw new UnsupportedOperationException("Not supported yet.");
     }
+
+    @Override
+    public CollectorInfo getCollectorInfo() {
+      throw new UnsupportedOperationException("Not supported yet.");
+    }
   }
 
   public static RMApp newApplication(int i) {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/7594d1de/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/MockRMApp.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/MockRMApp.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/MockRMApp.java
index b0d47a1..39a7f99 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/MockRMApp.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/MockRMApp.java
@@ -32,6 +32,7 @@ import org.apache.hadoop.yarn.api.records.ApplicationId;
 import org.apache.hadoop.yarn.api.records.ApplicationReport;
 import org.apache.hadoop.yarn.api.records.ApplicationSubmissionContext;
 import org.apache.hadoop.yarn.api.records.ApplicationTimeoutType;
+import org.apache.hadoop.yarn.api.records.CollectorInfo;
 import org.apache.hadoop.yarn.api.records.FinalApplicationStatus;
 import org.apache.hadoop.yarn.api.records.LogAggregationStatus;
 import org.apache.hadoop.yarn.api.records.NodeId;
@@ -325,4 +326,9 @@ public class MockRMApp implements RMApp {
   public boolean isAppInCompletedStates() {
     return false;
   }
+
+  @Override
+  public CollectorInfo getCollectorInfo() {
+    throw new UnsupportedOperationException("Not supported yet.");
+  }
 }




[22/50] [abbrv] hadoop git commit: YARN-5648. [ATSv2 Security] Client side changes for authentication. Contributed by Varun Saxena

Posted by st...@apache.org.
YARN-5648. [ATSv2 Security] Client side changes for authentication. Contributed by Varun Saxena


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/ac7f52df
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/ac7f52df
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/ac7f52df

Branch: refs/heads/HADOOP-13345
Commit: ac7f52df83d2b4758e7debe9416be7db0ec69d2b
Parents: d3f11e3
Author: Jian He <ji...@apache.org>
Authored: Fri Jun 23 10:44:12 2017 -0700
Committer: Varun Saxena <va...@apache.org>
Committed: Wed Aug 30 11:29:53 2017 +0530

----------------------------------------------------------------------
 .../client/api/impl/TimelineV2ClientImpl.java   |  38 ++-
 .../hadoop-yarn-server-tests/pom.xml            |  11 +
 .../hadoop/yarn/server/TestRMNMSecretKeys.java  |  34 +-
 .../security/TestTimelineAuthFilterForV2.java   | 309 +++++++++++++++++++
 .../src/test/resources/krb5.conf                |  28 --
 5 files changed, 381 insertions(+), 39 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/ac7f52df/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/client/api/impl/TimelineV2ClientImpl.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/client/api/impl/TimelineV2ClientImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/client/api/impl/TimelineV2ClientImpl.java
index 5d88f70..128ae7a 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/client/api/impl/TimelineV2ClientImpl.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/client/api/impl/TimelineV2ClientImpl.java
@@ -19,7 +19,10 @@
 package org.apache.hadoop.yarn.client.api.impl;
 
 import java.io.IOException;
+import java.io.InterruptedIOException;
+import java.lang.reflect.UndeclaredThrowableException;
 import java.net.URI;
+import java.security.PrivilegedExceptionAction;
 import java.util.concurrent.BlockingQueue;
 import java.util.concurrent.Callable;
 import java.util.concurrent.ExecutionException;
@@ -69,6 +72,8 @@ public class TimelineV2ClientImpl extends TimelineV2Client {
 
   private ApplicationId contextAppId;
 
+  private UserGroupInformation authUgi;
+
   public TimelineV2ClientImpl(ApplicationId appId) {
     super(TimelineV2ClientImpl.class.getName());
     this.contextAppId = appId;
@@ -88,7 +93,6 @@ public class TimelineV2ClientImpl extends TimelineV2Client {
     UserGroupInformation ugi = UserGroupInformation.getCurrentUser();
     UserGroupInformation realUgi = ugi.getRealUser();
     String doAsUser = null;
-    UserGroupInformation authUgi = null;
     if (realUgi != null) {
       authUgi = realUgi;
       doAsUser = ugi.getShortUserName();
@@ -192,19 +196,33 @@ public class TimelineV2ClientImpl extends TimelineV2Client {
     }
   }
 
+  private ClientResponse doPutObjects(URI base, String path,
+      MultivaluedMap<String, String> params, Object obj) {
+    return connector.getClient().resource(base).path(path).queryParams(params)
+        .accept(MediaType.APPLICATION_JSON).type(MediaType.APPLICATION_JSON)
+        .put(ClientResponse.class, obj);
+  }
+
   protected void putObjects(URI base, String path,
       MultivaluedMap<String, String> params, Object obj)
       throws IOException, YarnException {
-    ClientResponse resp;
+    ClientResponse resp = null;
     try {
-      resp = connector.getClient().resource(base).path(path).queryParams(params)
-          .accept(MediaType.APPLICATION_JSON).type(MediaType.APPLICATION_JSON)
-          .put(ClientResponse.class, obj);
-    } catch (RuntimeException re) {
-      // runtime exception is expected if the client cannot connect the server
-      String msg = "Failed to get the response from the timeline server.";
-      LOG.error(msg, re);
-      throw new IOException(re);
+      resp = authUgi.doAs(new PrivilegedExceptionAction<ClientResponse>() {
+        @Override
+        public ClientResponse run() throws Exception {
+          return doPutObjects(base, path, params, obj);
+        }
+      });
+    } catch (UndeclaredThrowableException ue) {
+      Throwable cause = ue.getCause();
+      if (cause instanceof IOException) {
+        throw (IOException)cause;
+      } else {
+        throw new IOException(cause);
+      }
+    } catch (InterruptedException ie) {
+      throw (IOException) new InterruptedIOException().initCause(ie);
     }
     if (resp == null || resp.getStatusInfo()
         .getStatusCode() != ClientResponse.Status.OK.getStatusCode()) {

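For reference, the doAs pattern introduced above, reduced to a minimal self-contained sketch: the REST call runs under the login UGI, and UndeclaredThrowableException (which doAs uses for checked exceptions the action did not declare) is unwrapped back into IOException. Only UserGroupInformation is the actual Hadoop API here; the action body is a stand-in for the real HTTP PUT.

    import java.io.IOException;
    import java.lang.reflect.UndeclaredThrowableException;
    import java.security.PrivilegedExceptionAction;

    import org.apache.hadoop.security.UserGroupInformation;

    public final class DoAsSketch {
      // Runs an action as the given UGI, rethrowing failures as IOException.
      static String callAs(UserGroupInformation ugi) throws IOException {
        try {
          return ugi.doAs(new PrivilegedExceptionAction<String>() {
            @Override
            public String run() throws Exception {
              return "response"; // stand-in for the real REST PUT
            }
          });
        } catch (UndeclaredThrowableException ute) {
          // doAs wraps undeclared checked exceptions; surface the cause.
          Throwable cause = ute.getCause();
          throw (cause instanceof IOException)
              ? (IOException) cause : new IOException(cause);
        } catch (InterruptedException ie) {
          Thread.currentThread().interrupt();
          throw new IOException("interrupted during doAs", ie);
        }
      }
    }
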
http://git-wip-us.apache.org/repos/asf/hadoop/blob/ac7f52df/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-tests/pom.xml
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-tests/pom.xml b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-tests/pom.xml
index f182236..babe9fe 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-tests/pom.xml
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-tests/pom.xml
@@ -125,6 +125,17 @@
       <groupId>commons-logging</groupId>
       <artifactId>commons-logging</artifactId>
     </dependency>
+    <dependency>
+      <groupId>org.bouncycastle</groupId>
+      <artifactId>bcprov-jdk16</artifactId>
+      <scope>test</scope>
+    </dependency>
+    <dependency>
+      <groupId>org.apache.hadoop</groupId>
+      <artifactId>hadoop-auth</artifactId>
+      <scope>test</scope>
+      <type>test-jar</type>
+    </dependency>
   </dependencies>
 
   <build>

http://git-wip-us.apache.org/repos/asf/hadoop/blob/ac7f52df/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-tests/src/test/java/org/apache/hadoop/yarn/server/TestRMNMSecretKeys.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-tests/src/test/java/org/apache/hadoop/yarn/server/TestRMNMSecretKeys.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-tests/src/test/java/org/apache/hadoop/yarn/server/TestRMNMSecretKeys.java
index 8cc2dee..ba14491 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-tests/src/test/java/org/apache/hadoop/yarn/server/TestRMNMSecretKeys.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-tests/src/test/java/org/apache/hadoop/yarn/server/TestRMNMSecretKeys.java
@@ -18,10 +18,13 @@
 
 package org.apache.hadoop.yarn.server;
 
+import java.io.File;
 import java.io.IOException;
+import java.util.UUID;
 
+import org.junit.AfterClass;
 import org.junit.Assert;
-
+import org.junit.BeforeClass;
 import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.yarn.conf.YarnConfiguration;
@@ -32,9 +35,38 @@ import org.apache.hadoop.yarn.server.api.protocolrecords.RegisterNodeManagerResp
 import org.apache.hadoop.yarn.server.api.records.MasterKey;
 import org.apache.hadoop.yarn.server.resourcemanager.MockNM;
 import org.apache.hadoop.yarn.server.resourcemanager.ResourceManager;
+import org.apache.kerby.util.IOUtil;
 import org.junit.Test;
 
 public class TestRMNMSecretKeys {
+  private static final String KRB5_CONF = "java.security.krb5.conf";
+  private static final File KRB5_CONF_ROOT_DIR = new File(
+      System.getProperty("test.build.dir", "target/test-dir"),
+          UUID.randomUUID().toString());
+
+  @BeforeClass
+  public static void setup() throws IOException {
+    KRB5_CONF_ROOT_DIR.mkdir();
+    File krb5ConfFile = new File(KRB5_CONF_ROOT_DIR, "krb5.conf");
+    krb5ConfFile.createNewFile();
+    String content = "[libdefaults]\n" +
+        "    default_realm = APACHE.ORG\n" +
+        "    udp_preference_limit = 1\n"+
+        "    extra_addresses = 127.0.0.1\n" +
+        "[realms]\n" +
+        "    APACHE.ORG = {\n" +
+        "        admin_server = localhost:88\n" +
+        "        kdc = localhost:88\n}\n" +
+        "[domain_realm]\n" +
+        "    localhost = APACHE.ORG";
+    IOUtil.writeFile(content, krb5ConfFile);
+    System.setProperty(KRB5_CONF, krb5ConfFile.getAbsolutePath());
+  }
+
+  @AfterClass
+  public static void tearDown() throws IOException {
+    KRB5_CONF_ROOT_DIR.delete();
+  }
 
   @Test(timeout = 1000000)
   public void testNMUpdation() throws Exception {

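One caveat worth noting on the tearDown above: java.io.File#delete() only removes empty directories, so with krb5.conf still inside it the root directory is silently left behind. A recursive delete, e.g. Hadoop's FileUtil.fullyDelete (already used by other tests in this patch), avoids that. A minimal sketch:

    import java.io.File;

    import org.apache.hadoop.fs.FileUtil;

    public final class CleanupSketch {
      // Recursively removes the directory and its contents.
      static boolean removeTestDir(File dir) {
        return FileUtil.fullyDelete(dir); // true if fully deleted
      }
    }
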
http://git-wip-us.apache.org/repos/asf/hadoop/blob/ac7f52df/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-tests/src/test/java/org/apache/hadoop/yarn/server/timelineservice/security/TestTimelineAuthFilterForV2.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-tests/src/test/java/org/apache/hadoop/yarn/server/timelineservice/security/TestTimelineAuthFilterForV2.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-tests/src/test/java/org/apache/hadoop/yarn/server/timelineservice/security/TestTimelineAuthFilterForV2.java
new file mode 100644
index 0000000..608ef67
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-tests/src/test/java/org/apache/hadoop/yarn/server/timelineservice/security/TestTimelineAuthFilterForV2.java
@@ -0,0 +1,309 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.server.timelineservice.security;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertNotNull;
+import static org.junit.Assert.assertTrue;
+import static org.junit.Assert.fail;
+import static org.mockito.Matchers.any;
+import static org.mockito.Mockito.mock;
+import static org.mockito.Mockito.when;
+
+import java.io.BufferedReader;
+import java.io.File;
+import java.io.FileReader;
+import java.io.IOException;
+import java.util.Arrays;
+import java.util.Collection;
+import java.util.UUID;
+import java.util.concurrent.Callable;
+
+import org.apache.commons.io.FileUtils;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
+import org.apache.hadoop.fs.FileUtil;
+import org.apache.hadoop.http.HttpConfig;
+import org.apache.hadoop.minikdc.MiniKdc;
+import org.apache.hadoop.security.SecurityUtil;
+import org.apache.hadoop.security.UserGroupInformation;
+import org.apache.hadoop.security.authentication.KerberosTestUtils;
+import org.apache.hadoop.security.authentication.server.KerberosAuthenticationHandler;
+import org.apache.hadoop.security.ssl.KeyStoreTestUtil;
+import org.apache.hadoop.yarn.api.records.ApplicationId;
+import org.apache.hadoop.yarn.api.records.timelineservice.TimelineEntity;
+import org.apache.hadoop.yarn.client.api.TimelineV2Client;
+import org.apache.hadoop.yarn.conf.YarnConfiguration;
+import org.apache.hadoop.yarn.exceptions.YarnException;
+import org.apache.hadoop.yarn.server.api.CollectorNodemanagerProtocol;
+import org.apache.hadoop.yarn.server.api.protocolrecords.GetTimelineCollectorContextRequest;
+import org.apache.hadoop.yarn.server.api.protocolrecords.GetTimelineCollectorContextResponse;
+import org.apache.hadoop.yarn.server.timeline.security.TimelineAuthenticationFilterInitializer;
+import org.apache.hadoop.yarn.server.timelineservice.collector.NodeTimelineCollectorManager;
+import org.apache.hadoop.yarn.server.timelineservice.collector.PerNodeTimelineCollectorsAuxService;
+import org.apache.hadoop.yarn.server.timelineservice.storage.FileSystemTimelineReaderImpl;
+import org.apache.hadoop.yarn.server.timelineservice.storage.FileSystemTimelineWriterImpl;
+import org.apache.hadoop.yarn.server.timelineservice.storage.TimelineWriter;
+import org.junit.After;
+import org.junit.AfterClass;
+import org.junit.Before;
+import org.junit.BeforeClass;
+import org.junit.Test;
+import org.junit.runner.RunWith;
+import org.junit.runners.Parameterized;
+
+/**
+ * Tests timeline authentication filter based security for timeline service v2.
+ */
+@RunWith(Parameterized.class)
+public class TestTimelineAuthFilterForV2 {
+
+  private static final String FOO_USER = "foo";
+  private static final String HTTP_USER = "HTTP";
+
+  private static final File TEST_ROOT_DIR = new File(
+      System.getProperty("test.build.dir", "target" + File.separator +
+          "test-dir"), UUID.randomUUID().toString());
+  private static final String BASEDIR =
+      System.getProperty("test.build.dir", "target/test-dir") + "/"
+          + TestTimelineAuthFilterForV2.class.getSimpleName();
+  private static File httpSpnegoKeytabFile = new File(KerberosTestUtils.
+      getKeytabFile());
+  private static String httpSpnegoPrincipal = KerberosTestUtils.
+      getServerPrincipal();
+
+  @Parameterized.Parameters
+  public static Collection<Object[]> withSsl() {
+    return Arrays.asList(new Object[][] {{false}, {true}});
+  }
+
+  private static MiniKdc testMiniKDC;
+  private static String keystoresDir;
+  private static String sslConfDir;
+  private static Configuration conf;
+  private boolean withSsl;
+  private NodeTimelineCollectorManager collectorManager;
+  private PerNodeTimelineCollectorsAuxService auxService;
+
+  public TestTimelineAuthFilterForV2(boolean withSsl) {
+    this.withSsl = withSsl;
+  }
+
+  @BeforeClass
+  public static void setup() {
+    try {
+      testMiniKDC = new MiniKdc(MiniKdc.createConf(), TEST_ROOT_DIR);
+      testMiniKDC.start();
+      testMiniKDC.createPrincipal(
+          httpSpnegoKeytabFile, HTTP_USER + "/localhost");
+    } catch (Exception e) {
+      fail("Couldn't setup MiniKDC.");
+    }
+
+    // Setup timeline service v2.
+    try {
+      conf = new Configuration(false);
+      conf.setStrings(TimelineAuthenticationFilterInitializer.PREFIX + "type",
+          "kerberos");
+      conf.set(TimelineAuthenticationFilterInitializer.PREFIX +
+          KerberosAuthenticationHandler.PRINCIPAL, httpSpnegoPrincipal);
+      conf.set(TimelineAuthenticationFilterInitializer.PREFIX +
+          KerberosAuthenticationHandler.KEYTAB,
+          httpSpnegoKeytabFile.getAbsolutePath());
+      conf.set(CommonConfigurationKeysPublic.HADOOP_SECURITY_AUTHENTICATION,
+          "kerberos");
+      conf.set(YarnConfiguration.TIMELINE_SERVICE_PRINCIPAL,
+          httpSpnegoPrincipal);
+      conf.set(YarnConfiguration.TIMELINE_SERVICE_KEYTAB,
+          httpSpnegoKeytabFile.getAbsolutePath());
+      // Enable timeline service v2
+      conf.setBoolean(YarnConfiguration.TIMELINE_SERVICE_ENABLED, true);
+      conf.setFloat(YarnConfiguration.TIMELINE_SERVICE_VERSION, 2.0f);
+      conf.setClass(YarnConfiguration.TIMELINE_SERVICE_WRITER_CLASS,
+          FileSystemTimelineWriterImpl.class, TimelineWriter.class);
+      conf.set(YarnConfiguration.TIMELINE_SERVICE_BIND_HOST, "localhost");
+      conf.set(FileSystemTimelineWriterImpl.TIMELINE_SERVICE_STORAGE_DIR_ROOT,
+          TEST_ROOT_DIR.getAbsolutePath());
+      conf.set("hadoop.proxyuser.HTTP.hosts", "*");
+      conf.set("hadoop.proxyuser.HTTP.users", FOO_USER);
+      UserGroupInformation.setConfiguration(conf);
+      SecurityUtil.login(conf, YarnConfiguration.TIMELINE_SERVICE_KEYTAB,
+          YarnConfiguration.TIMELINE_SERVICE_PRINCIPAL, "localhost");
+    } catch (Exception e) {
+      fail("Couldn't setup TimelineServer V2.");
+    }
+  }
+
+  @Before
+  public void initialize() throws Exception {
+    if (withSsl) {
+      conf.set(YarnConfiguration.YARN_HTTP_POLICY_KEY,
+          HttpConfig.Policy.HTTPS_ONLY.name());
+      File base = new File(BASEDIR);
+      FileUtil.fullyDelete(base);
+      base.mkdirs();
+      keystoresDir = new File(BASEDIR).getAbsolutePath();
+      sslConfDir =
+          KeyStoreTestUtil.getClasspathDir(TestTimelineAuthFilterForV2.class);
+      KeyStoreTestUtil.setupSSLConfig(keystoresDir, sslConfDir, conf, false);
+    } else  {
+      conf.set(YarnConfiguration.YARN_HTTP_POLICY_KEY,
+          HttpConfig.Policy.HTTP_ONLY.name());
+    }
+    collectorManager = new DummyNodeTimelineCollectorManager();
+    auxService = PerNodeTimelineCollectorsAuxService.launchServer(new String[0],
+        collectorManager, conf);
+  }
+
+  private TimelineV2Client createTimelineClientForUGI(ApplicationId appId) {
+    TimelineV2Client client =
+        TimelineV2Client.createTimelineClient(ApplicationId.newInstance(0, 1));
+    // set the timeline service address.
+    String restBindAddr = collectorManager.getRestServerBindAddress();
+    String addr =
+        "localhost" + restBindAddr.substring(restBindAddr.indexOf(":"));
+    client.setTimelineServiceAddress(addr);
+    client.init(conf);
+    client.start();
+    return client;
+  }
+
+  @AfterClass
+  public static void tearDown() throws Exception {
+    if (testMiniKDC != null) {
+      testMiniKDC.stop();
+    }
+    FileUtil.fullyDelete(TEST_ROOT_DIR);
+  }
+
+  @After
+  public void destroy() throws Exception {
+    if (auxService != null) {
+      auxService.stop();
+    }
+    if (withSsl) {
+      KeyStoreTestUtil.cleanupSSLConfig(keystoresDir, sslConfDir);
+      File base = new File(BASEDIR);
+      FileUtil.fullyDelete(base);
+    }
+  }
+
+  private static TimelineEntity createEntity(String id, String type) {
+    TimelineEntity entityToStore = new TimelineEntity();
+    entityToStore.setId(id);
+    entityToStore.setType(type);
+    entityToStore.setCreatedTime(0L);
+    return entityToStore;
+  }
+
+  private static void verifyEntity(File entityTypeDir, String id, String type)
+      throws IOException {
+    File entityFile = new File(entityTypeDir, id +
+        FileSystemTimelineWriterImpl.TIMELINE_SERVICE_STORAGE_EXTENSION);
+    assertTrue(entityFile.exists());
+    TimelineEntity entity = readEntityFile(entityFile);
+    assertNotNull(entity);
+    assertEquals(id, entity.getId());
+    assertEquals(type, entity.getType());
+  }
+
+  private static TimelineEntity readEntityFile(File entityFile)
+      throws IOException {
+    BufferedReader reader = null;
+    String strLine;
+    try {
+      reader = new BufferedReader(new FileReader(entityFile));
+      while ((strLine = reader.readLine()) != null) {
+        if (strLine.trim().length() > 0) {
+          return FileSystemTimelineReaderImpl.
+              getTimelineRecordFromJSON(strLine.trim(), TimelineEntity.class);
+        }
+      }
+      return null;
+    } finally {
+      reader.close();
+    }
+  }
+
+  @Test
+  public void testPutTimelineEntities() throws Exception {
+    ApplicationId appId = ApplicationId.newInstance(0, 1);
+    auxService.addApplication(appId);
+    final String entityType = "dummy_type";
+    File entityTypeDir = new File(TEST_ROOT_DIR.getAbsolutePath() +
+        File.separator + "entities" + File.separator +
+        YarnConfiguration.DEFAULT_RM_CLUSTER_ID + File.separator + "test_user" +
+        File.separator + "test_flow_name" + File.separator +
+        "test_flow_version" + File.separator + "1" + File.separator +
+        appId.toString() + File.separator + entityType);
+    try {
+      KerberosTestUtils.doAs(HTTP_USER + "/localhost", new Callable<Void>() {
+        @Override
+        public Void call() throws Exception {
+          TimelineV2Client client = createTimelineClientForUGI(appId);
+          try {
+            // Sync call. Results available immediately.
+            client.putEntities(createEntity("entity1", entityType));
+            assertEquals(1, entityTypeDir.listFiles().length);
+            verifyEntity(entityTypeDir, "entity1", entityType);
+            // Async call.
+            client.putEntitiesAsync(createEntity("entity2", entityType));
+            return null;
+          } finally {
+            client.stop();
+          }
+        }
+      });
+      // Wait for async entity to be published.
+      for (int i = 0; i < 50; i++) {
+        if (entityTypeDir.listFiles().length == 2) {
+          break;
+        }
+        Thread.sleep(50);
+      }
+      assertEquals(2, entityTypeDir.listFiles().length);
+      verifyEntity(entityTypeDir, "entity2", entityType);
+    } finally {
+      FileUtils.deleteQuietly(entityTypeDir);
+    }
+  }
+
+  private static class DummyNodeTimelineCollectorManager extends
+      NodeTimelineCollectorManager {
+    DummyNodeTimelineCollectorManager() {
+      super();
+    }
+
+    @Override
+    protected CollectorNodemanagerProtocol getNMCollectorService() {
+      CollectorNodemanagerProtocol protocol =
+          mock(CollectorNodemanagerProtocol.class);
+      try {
+        GetTimelineCollectorContextResponse response =
+            GetTimelineCollectorContextResponse.newInstance("test_user",
+                "test_flow_name", "test_flow_version", 1L);
+        when(protocol.getTimelineCollectorContext(any(
+            GetTimelineCollectorContextRequest.class))).thenReturn(response);
+      } catch (YarnException | IOException e) {
+        fail();
+      }
+      return protocol;
+    }
+  }
+}
\ No newline at end of file

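The sleep loop that waits for the async entity in testPutTimelineEntities is a common polling idiom; a reusable, plain-JDK variant of it (names and timeout values here are illustrative, not part of the patch) could look like:

    import java.io.File;
    import java.util.concurrent.TimeoutException;

    public final class WaitSketch {
      // Polls until dir holds at least 'expected' files or the timeout hits.
      static void waitForFileCount(File dir, int expected, long timeoutMillis)
          throws InterruptedException, TimeoutException {
        long deadline = System.currentTimeMillis() + timeoutMillis;
        while (System.currentTimeMillis() < deadline) {
          File[] files = dir.listFiles();
          if (files != null && files.length >= expected) {
            return;
          }
          Thread.sleep(50); // same poll interval as the test above
        }
        throw new TimeoutException("expected " + expected + " files in " + dir);
      }
    }
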
http://git-wip-us.apache.org/repos/asf/hadoop/blob/ac7f52df/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-tests/src/test/resources/krb5.conf
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-tests/src/test/resources/krb5.conf b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-tests/src/test/resources/krb5.conf
deleted file mode 100644
index 121ac6d..0000000
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-tests/src/test/resources/krb5.conf
+++ /dev/null
@@ -1,28 +0,0 @@
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-# 
-[libdefaults]
-	default_realm = APACHE.ORG
-	udp_preference_limit = 1
-	extra_addresses = 127.0.0.1
-[realms]
-	APACHE.ORG = {
-		admin_server = localhost:88
-		kdc = localhost:88
-	}
-[domain_realm]
-	localhost = APACHE.ORG




[40/50] [abbrv] hadoop git commit: YARN-6868. Add test scope to certain entries in hadoop-yarn-server-resourcemanager pom.xml. (Ray Chiang via Haibo Chen)

Posted by st...@apache.org.
YARN-6868. Add test scope to certain entries in hadoop-yarn-server-resourcemanager pom.xml. (Ray Chiang via Haibo Chen)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/a20e7105
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/a20e7105
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/a20e7105

Branch: refs/heads/HADOOP-13345
Commit: a20e7105ea9d4e38d7f8f9fd48035e342bb22f1c
Parents: 9992675
Author: Haibo Chen <ha...@apache.org>
Authored: Wed Aug 30 09:13:49 2017 -0700
Committer: Haibo Chen <ha...@apache.org>
Committed: Wed Aug 30 09:14:59 2017 -0700

----------------------------------------------------------------------
 .../hadoop-yarn-server/hadoop-yarn-server-resourcemanager/pom.xml | 3 +++
 1 file changed, 3 insertions(+)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/a20e7105/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/pom.xml
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/pom.xml b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/pom.xml
index 9b8f8af..6c016fe 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/pom.xml
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/pom.xml
@@ -203,6 +203,7 @@
     <dependency>
       <groupId>org.apache.curator</groupId>
       <artifactId>curator-test</artifactId>
+      <scope>test</scope>
     </dependency>
     <dependency>
       <groupId>org.apache.zookeeper</groupId>
@@ -212,9 +213,11 @@
       <groupId>org.fusesource.leveldbjni</groupId>
       <artifactId>leveldbjni-all</artifactId>
     </dependency>
+    <!-- 'mvn dependency:analyze' fails to detect use of this dependency -->
     <dependency>
       <groupId>org.apache.zookeeper</groupId>
       <artifactId>zookeeper</artifactId>
+      <scope>test</scope>
       <type>test-jar</type>
     </dependency>
     <!-- 'mvn dependency:analyze' fails to detect use of this dependency -->




[43/50] [abbrv] hadoop git commit: HADOOP-14814. Fix incompatible API change on FsServerDefaults to HADOOP-14104. Contributed by Junping Du.

Posted by st...@apache.org.
HADOOP-14814. Fix incompatible API change on FsServerDefaults to HADOOP-14104. Contributed by Junping Du.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/41480233
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/41480233
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/41480233

Branch: refs/heads/HADOOP-13345
Commit: 41480233a9cfb0bcfb69cc0f1594120e7656f031
Parents: fd66a24
Author: Junping Du <ju...@apache.org>
Authored: Wed Aug 30 13:30:14 2017 -0700
Committer: Junping Du <ju...@apache.org>
Committed: Wed Aug 30 13:30:14 2017 -0700

----------------------------------------------------------------------
 .../main/java/org/apache/hadoop/fs/FsServerDefaults.java    | 9 +++++++++
 1 file changed, 9 insertions(+)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/41480233/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FsServerDefaults.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FsServerDefaults.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FsServerDefaults.java
index 84a40d2..f639648 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FsServerDefaults.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FsServerDefaults.java
@@ -63,6 +63,15 @@ public class FsServerDefaults implements Writable {
   public FsServerDefaults(long blockSize, int bytesPerChecksum,
       int writePacketSize, short replication, int fileBufferSize,
       boolean encryptDataTransfer, long trashInterval,
+      DataChecksum.Type checksumType) {
+    this(blockSize, bytesPerChecksum, writePacketSize, replication,
+        fileBufferSize, encryptDataTransfer, trashInterval, checksumType,
+        null, (byte) 0);
+  }
+
+  public FsServerDefaults(long blockSize, int bytesPerChecksum,
+      int writePacketSize, short replication, int fileBufferSize,
+      boolean encryptDataTransfer, long trashInterval,
       DataChecksum.Type checksumType, String keyProviderUri) {
     this(blockSize, bytesPerChecksum, writePacketSize, replication,
         fileBufferSize, encryptDataTransfer, trashInterval, checksumType,


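The restored 8-argument constructor keeps pre-HADOOP-14104 callers source- and binary-compatible by delegating with a null key-provider URI and a zero default storage policy. A caller-side sketch, with all argument values illustrative:

    import org.apache.hadoop.fs.FsServerDefaults;
    import org.apache.hadoop.util.DataChecksum;

    public final class CompatCallerSketch {
      static FsServerDefaults legacyDefaults() {
        // Compiles against the restored signature; keyProviderUri and
        // storage policy fall back to null and (byte) 0 respectively.
        return new FsServerDefaults(
            128L * 1024 * 1024,  // blockSize
            512,                 // bytesPerChecksum
            64 * 1024,           // writePacketSize
            (short) 3,           // replication
            4096,                // fileBufferSize
            false,               // encryptDataTransfer
            0L,                  // trashInterval
            DataChecksum.Type.CRC32C);
      }
    }
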


[21/50] [abbrv] hadoop git commit: YARN-6850. Ensure that supplemented timestamp is stored only for flow run metrics (Contributed by Varun Saxena via Vrushali C)

Posted by st...@apache.org.
YARN-6850. Ensure that supplemented timestamp is stored only for flow run metrics (Contributed by Varun Saxena via Vrushali C)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/61136d03
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/61136d03
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/61136d03

Branch: refs/heads/HADOOP-13345
Commit: 61136d03f25377c62aefde859c82df18e37b975e
Parents: 70078e9
Author: Vrushali C <vr...@apache.org>
Authored: Mon Jul 24 15:54:52 2017 -0700
Committer: Varun Saxena <va...@apache.org>
Committed: Wed Aug 30 11:29:53 2017 +0530

----------------------------------------------------------------------
 .../storage/common/ColumnHelper.java            | 44 +++++++++++++++-----
 .../common/HBaseTimelineStorageUtils.java       | 10 +----
 .../storage/flow/FlowRunColumnPrefix.java       |  2 +-
 .../storage/reader/ApplicationEntityReader.java |  8 ++--
 .../storage/reader/GenericEntityReader.java     |  8 ++--
 5 files changed, 44 insertions(+), 28 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/61136d03/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/common/ColumnHelper.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/common/ColumnHelper.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/common/ColumnHelper.java
index 46e427e..9f95d44 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/common/ColumnHelper.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/common/ColumnHelper.java
@@ -52,11 +52,28 @@ public class ColumnHelper<T> {
 
   private final ValueConverter converter;
 
+  private final boolean supplementTs;
+
   public ColumnHelper(ColumnFamily<T> columnFamily) {
     this(columnFamily, GenericConverter.getInstance());
   }
 
   public ColumnHelper(ColumnFamily<T> columnFamily, ValueConverter converter) {
+    this(columnFamily, converter, false);
+  }
+
+  /**
+   * @param columnFamily column family implementation.
+   * @param converter converter used to encode/decode values stored in the column
+   *     or column prefix.
+   * @param needSupplementTs flag to indicate if cell timestamp needs to be
+   *     modified for this column by calling
+   *     {@link TimestampGenerator#getSupplementedTimestamp(long, String)}. This
+   *     would be required for columns (such as metrics in flow run table)
+   *     where potential collisions can occur due to the same timestamp.
+   */
+  public ColumnHelper(ColumnFamily<T> columnFamily, ValueConverter converter,
+      boolean needSupplementTs) {
     this.columnFamily = columnFamily;
     columnFamilyBytes = columnFamily.getBytes();
     if (converter == null) {
@@ -64,6 +81,7 @@ public class ColumnHelper<T> {
     } else {
       this.converter = converter;
     }
+    this.supplementTs = needSupplementTs;
   }
 
   /**
@@ -106,18 +124,24 @@ public class ColumnHelper<T> {
   }
 
   /*
-   * Figures out the cell timestamp used in the Put For storing into flow run
-   * table. We would like to left shift the timestamp and supplement it with the
-   * AppId id so that there are no collisions in the flow run table's cells
+   * Figures out the cell timestamp used in the Put for storing.
+   * Will supplement the timestamp if required; typically done for the flow
+   * run table. If we supplement the timestamp, we left shift it and append
+   * the AppId so that there are no collisions in the flow run table's
+   * cells.
    */
   private long getPutTimestamp(Long timestamp, Attribute[] attributes) {
     if (timestamp == null) {
       timestamp = System.currentTimeMillis();
     }
-    String appId = getAppIdFromAttributes(attributes);
-    long supplementedTS = TimestampGenerator.getSupplementedTimestamp(
-        timestamp, appId);
-    return supplementedTS;
+    if (!this.supplementTs) {
+      return timestamp;
+    } else {
+      String appId = getAppIdFromAttributes(attributes);
+      long supplementedTS = TimestampGenerator.getSupplementedTimestamp(
+          timestamp, appId);
+      return supplementedTS;
+    }
   }
 
   private String getAppIdFromAttributes(Attribute[] attributes) {
@@ -234,9 +258,9 @@ public class ColumnHelper<T> {
               for (Entry<Long, byte[]> cell : cells.entrySet()) {
                 V value =
                     (V) converter.decodeValue(cell.getValue());
-                cellResults.put(
-                    TimestampGenerator.getTruncatedTimestamp(cell.getKey()),
-                    value);
+                Long ts = supplementTs ? TimestampGenerator.
+                    getTruncatedTimestamp(cell.getKey()) : cell.getKey();
+                cellResults.put(ts, value);
               }
             }
             results.put(converterColumnKey, cellResults);

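The supplemented timestamp exists because two writers can land in the same millisecond: the wall-clock value is left-shifted (multiplied up) and an app-id-derived suffix is packed into the low digits, so flow run metric cells never collide. The sketch below only illustrates the idea; the real encoding lives in TimestampGenerator, and its multiplier and suffix derivation may differ:

    public final class SupplementedTsSketch {
      // Illustrative multiplier leaving room for a per-app suffix.
      private static final long TS_MULTIPLIER = 1_000_000L;

      // Write path: pack an app-id-derived suffix into the low digits.
      static long supplement(long timestampMillis, String appId) {
        long suffix = (appId == null)
            ? 0L : (appId.hashCode() & 0x7fffffffL) % TS_MULTIPLIER;
        return timestampMillis * TS_MULTIPLIER + suffix;
      }

      // Read path: drop the suffix again, as getTruncatedTimestamp() does.
      static long truncate(long supplementedTs) {
        return supplementedTs / TS_MULTIPLIER;
      }
    }
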
http://git-wip-us.apache.org/repos/asf/hadoop/blob/61136d03/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/common/HBaseTimelineStorageUtils.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/common/HBaseTimelineStorageUtils.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/common/HBaseTimelineStorageUtils.java
index 97e70b8..b0d8527 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/common/HBaseTimelineStorageUtils.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/common/HBaseTimelineStorageUtils.java
@@ -347,16 +347,8 @@ public final class HBaseTimelineStorageUtils {
   public static void setMetricsTimeRange(Query query, byte[] metricsCf,
       long tsBegin, long tsEnd) {
     if (tsBegin != 0 || tsEnd != Long.MAX_VALUE) {
-      long supplementedTsBegin = tsBegin == 0 ? 0 :
-          TimestampGenerator.getSupplementedTimestamp(tsBegin, null);
-      long supplementedTsEnd =
-          (tsEnd == Long.MAX_VALUE) ? Long.MAX_VALUE :
-          TimestampGenerator.getSupplementedTimestamp(tsEnd + 1, null);
-      // Handle overflow by resetting time begin to 0 and time end to
-      // Long#MAX_VALUE, if required.
       query.setColumnFamilyTimeRange(metricsCf,
-          ((supplementedTsBegin < 0) ? 0 : supplementedTsBegin),
-          ((supplementedTsEnd < 0) ? Long.MAX_VALUE : supplementedTsEnd));
+          tsBegin, ((tsEnd == Long.MAX_VALUE) ? Long.MAX_VALUE : (tsEnd + 1)));
     }
   }
 }

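With the supplementing removed here, the begin/end values reach HBase unmodified; the max bound stays exclusive, hence the tsEnd + 1. For reference, a read-side sketch against the HBase client API (the "m" column family name is illustrative):

    import org.apache.hadoop.hbase.client.Scan;
    import org.apache.hadoop.hbase.util.Bytes;

    public final class MetricsScanSketch {
      static Scan metricsScan(long tsBegin, long tsEnd) {
        Scan scan = new Scan();
        // Bound only the metrics family; other families keep the
        // default (unbounded) time range.
        scan.setColumnFamilyTimeRange(Bytes.toBytes("m"), tsBegin,
            tsEnd == Long.MAX_VALUE ? Long.MAX_VALUE : tsEnd + 1);
        return scan;
      }
    }
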
http://git-wip-us.apache.org/repos/asf/hadoop/blob/61136d03/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/flow/FlowRunColumnPrefix.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/flow/FlowRunColumnPrefix.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/flow/FlowRunColumnPrefix.java
index 103674e..f521cd7 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/flow/FlowRunColumnPrefix.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/flow/FlowRunColumnPrefix.java
@@ -69,7 +69,7 @@ public enum FlowRunColumnPrefix implements ColumnPrefix<FlowRunTable> {
   private FlowRunColumnPrefix(ColumnFamily<FlowRunTable> columnFamily,
       String columnPrefix, AggregationOperation fra, ValueConverter converter,
       boolean compoundColQual) {
-    column = new ColumnHelper<FlowRunTable>(columnFamily, converter);
+    column = new ColumnHelper<FlowRunTable>(columnFamily, converter, true);
     this.columnFamily = columnFamily;
     this.columnPrefix = columnPrefix;
     if (columnPrefix == null) {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/61136d03/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/reader/ApplicationEntityReader.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/reader/ApplicationEntityReader.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/reader/ApplicationEntityReader.java
index cda4510..0edd6a5 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/reader/ApplicationEntityReader.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/reader/ApplicationEntityReader.java
@@ -362,10 +362,10 @@ class ApplicationEntityReader extends GenericEntityReader {
 
   private void setMetricsTimeRange(Query query) {
     // Set time range for metric values.
-    HBaseTimelineStorageUtils.
-        setMetricsTimeRange(query, ApplicationColumnFamily.METRICS.getBytes(),
-            getDataToRetrieve().getMetricsTimeBegin(),
-            getDataToRetrieve().getMetricsTimeEnd());
+    HBaseTimelineStorageUtils.setMetricsTimeRange(
+        query, ApplicationColumnFamily.METRICS.getBytes(),
+        getDataToRetrieve().getMetricsTimeBegin(),
+        getDataToRetrieve().getMetricsTimeEnd());
   }
 
   @Override

http://git-wip-us.apache.org/repos/asf/hadoop/blob/61136d03/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/reader/GenericEntityReader.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/reader/GenericEntityReader.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/reader/GenericEntityReader.java
index 6b740e2..d7aca74 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/reader/GenericEntityReader.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/reader/GenericEntityReader.java
@@ -471,10 +471,10 @@ class GenericEntityReader extends TimelineEntityReader {
 
   private void setMetricsTimeRange(Query query) {
     // Set time range for metric values.
-    HBaseTimelineStorageUtils.
-        setMetricsTimeRange(query, EntityColumnFamily.METRICS.getBytes(),
-            getDataToRetrieve().getMetricsTimeBegin(),
-            getDataToRetrieve().getMetricsTimeEnd());
+    HBaseTimelineStorageUtils.setMetricsTimeRange(
+        query, EntityColumnFamily.METRICS.getBytes(),
+        getDataToRetrieve().getMetricsTimeBegin(),
+        getDataToRetrieve().getMetricsTimeEnd());
   }
 
   @Override




[05/50] [abbrv] hadoop git commit: YARN-6256. Add FROM_ID info key for timeline entities in reader response (Rohith Sharma K S via Varun Saxena)

Posted by st...@apache.org.
http://git-wip-us.apache.org/repos/asf/hadoop/blob/c3bd8d6a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/reader/TimelineReaderWebServicesUtils.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/reader/TimelineReaderWebServicesUtils.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/reader/TimelineReaderWebServicesUtils.java
index 1a518d0..4a9e53e 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/reader/TimelineReaderWebServicesUtils.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/reader/TimelineReaderWebServicesUtils.java
@@ -74,14 +74,14 @@ final class TimelineReaderWebServicesUtils {
   static TimelineEntityFilters createTimelineEntityFilters(String limit,
       String createdTimeStart, String createdTimeEnd, String relatesTo,
       String isRelatedTo, String infofilters, String conffilters,
-      String metricfilters, String eventfilters, String fromidprefix,
+      String metricfilters, String eventfilters,
       String fromid) throws TimelineParseException {
     return new TimelineEntityFilters(parseLongStr(limit),
         parseLongStr(createdTimeStart), parseLongStr(createdTimeEnd),
         parseRelationFilters(relatesTo), parseRelationFilters(isRelatedTo),
         parseKVFilters(infofilters, false), parseKVFilters(conffilters, true),
         parseMetricFilters(metricfilters), parseEventFilters(eventfilters),
-        parseLongStr(fromidprefix), parseStr(fromid));
+        parseStr(fromid));
   }
 
   /**

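With YARN-6256 the reader pages on a single opaque fromid token instead of the fromidprefix/fromid pair: a client echoes back the FROM_ID value returned in the previous page's entity info. A hypothetical client-side sketch of building the next-page query (the fromid parameter comes from the patch; everything else here is illustrative, not the actual reader API):

    import java.io.UnsupportedEncodingException;
    import java.net.URLEncoder;
    import java.nio.charset.StandardCharsets;

    public final class FromIdPagingSketch {
      // Appends the opaque token from the last page to the next request.
      static String nextPageQuery(String baseQuery, String lastFromId)
          throws UnsupportedEncodingException {
        return baseQuery + "&fromid="
            + URLEncoder.encode(lastFromId, StandardCharsets.UTF_8.name());
      }
    }
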



[50/50] [abbrv] hadoop git commit: Merge branch 'trunk' into HADOOP-13345

Posted by st...@apache.org.
Merge branch 'trunk' into HADOOP-13345


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/11e5f54f
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/11e5f54f
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/11e5f54f

Branch: refs/heads/HADOOP-13345
Commit: 11e5f54fee519980dcd82211125b15cb9bfeb0e4
Parents: 845cd36 f9e0cc8
Author: Steve Loughran <st...@apache.org>
Authored: Thu Aug 31 16:56:02 2017 +0100
Committer: Steve Loughran <st...@apache.org>
Committed: Thu Aug 31 16:56:02 2017 +0100

----------------------------------------------------------------------
 BUILDING.txt                                    |    4 +-
 dev-support/docker/Dockerfile                   |   17 +-
 .../authentication/client/AuthenticatedURL.java |  184 +-
 .../client/KerberosAuthenticator.java           |   30 +-
 .../client/PseudoAuthenticator.java             |    5 +-
 .../hadoop-common/HadoopCommon.cmake            |   26 +-
 .../hadoop-common/src/CMakeLists.txt            |    2 +-
 .../src/main/conf/hadoop-policy.xml             |   11 +
 .../org/apache/hadoop/conf/Configuration.java   |   71 +-
 .../crypto/key/KeyProviderCryptoExtension.java  |  147 +-
 .../crypto/key/kms/KMSClientProvider.java       |  158 +-
 .../hadoop/crypto/key/kms/KMSRESTConstants.java |    1 +
 .../key/kms/LoadBalancingKMSClientProvider.java |   20 +
 .../hadoop/fs/FSDataOutputStreamBuilder.java    |   74 +-
 .../org/apache/hadoop/fs/FsServerDefaults.java  |    9 +
 .../org/apache/hadoop/fs/LocatedFileStatus.java |    6 +-
 .../hadoop/fs/permission/FsPermission.java      |    2 +-
 .../apache/hadoop/fs/sftp/SFTPFileSystem.java   |    6 +
 .../org/apache/hadoop/fs/shell/AclCommands.java |    6 +-
 .../hadoop/fs/shell/CommandWithDestination.java |    4 +-
 .../java/org/apache/hadoop/fs/shell/Ls.java     |    4 +-
 .../org/apache/hadoop/http/HttpServer2.java     |    9 +-
 .../hadoop/io/erasurecode/CodecRegistry.java    |    2 +-
 .../io/erasurecode/ErasureCodeConstants.java    |    8 +
 .../apache/hadoop/ipc/ProtobufRpcEngine.java    |   14 +-
 .../java/org/apache/hadoop/util/KMSUtil.java    |  134 ++
 .../hadoop/util/curator/ZKCuratorManager.java   |  142 +-
 .../bzip2/org_apache_hadoop_io_compress_bzip2.h |    2 +
 .../src/main/resources/core-default.xml         |   10 +-
 .../hadoop-common/src/site/markdown/Metrics.md  |   24 +-
 .../src/site/markdown/filesystem/filesystem.md  |   33 +-
 .../filesystem/fsdataoutputstreambuilder.md     |  182 ++
 .../src/site/markdown/filesystem/index.md       |    1 +
 .../conf/TestCommonConfigurationFields.java     |    1 +
 .../apache/hadoop/conf/TestConfiguration.java   |  390 ++--
 .../hadoop/conf/TestConfigurationSubclass.java  |    8 +-
 .../apache/hadoop/conf/TestDeprecatedKeys.java  |    8 +-
 .../apache/hadoop/conf/TestGetInstances.java    |    8 +-
 .../key/TestKeyProviderCryptoExtension.java     |  113 +-
 .../org/apache/hadoop/fs/TestAvroFSInput.java   |    6 +-
 .../test/java/org/apache/hadoop/fs/TestDU.java  |   19 +-
 .../java/org/apache/hadoop/fs/TestFilterFs.java |    8 +-
 .../hadoop/fs/TestGetFileBlockLocations.java    |   25 +-
 .../org/apache/hadoop/fs/TestGlobExpander.java  |    7 +-
 .../apache/hadoop/fs/TestLocatedFileStatus.java |   52 +
 .../java/org/apache/hadoop/fs/TestTrash.java    |   24 +-
 .../apache/hadoop/fs/TestTruncatedInputBug.java |    6 +-
 .../hadoop/fs/permission/TestFsPermission.java  |   14 +-
 .../org/apache/hadoop/http/TestHttpServer.java  |   13 +
 .../hadoop/http/TestHttpServerWithSpengo.java   |  242 ++-
 .../apache/hadoop/ipc/TestFairCallQueue.java    |   33 +-
 .../org/apache/hadoop/log/TestLog4Json.java     |    6 +-
 .../hadoop/net/TestScriptBasedMapping.java      |    9 +-
 .../TestScriptBasedMappingWithDependency.java   |    7 +-
 .../security/TestAuthenticationFilter.java      |    6 +-
 .../TestAuthenticationWithProxyUserFilter.java  |    7 +-
 .../security/TestWhitelistBasedResolver.java    |    9 +-
 .../apache/hadoop/security/token/TestToken.java |   12 +-
 .../hadoop/util/TestAsyncDiskService.java       |    6 +-
 .../apache/hadoop/util/TestCacheableIPList.java |   13 +-
 .../apache/hadoop/util/TestFileBasedIPList.java |   12 +-
 .../org/apache/hadoop/util/TestFindClass.java   |    2 +-
 .../apache/hadoop/util/TestGenericsUtil.java    |   12 +-
 .../org/apache/hadoop/util/TestIndexedSort.java |    7 +-
 .../hadoop/util/TestNativeLibraryChecker.java   |    8 +-
 .../util/curator/TestZKCuratorManager.java      |   41 +-
 ...rKeyGeneratorKeyProviderCryptoExtension.java |    6 +
 .../hadoop/crypto/key/kms/server/KMS.java       |  113 +-
 .../crypto/key/kms/server/KMSJSONReader.java    |    8 +-
 .../key/kms/server/KMSServerJSONUtils.java      |   34 +-
 .../hadoop/crypto/key/kms/server/KMSWebApp.java |   18 +
 .../kms/server/KeyAuthorizationKeyProvider.java |   19 +
 .../hadoop-kms/src/site/markdown/index.md.vm    |   60 +-
 .../hadoop/crypto/key/kms/server/TestKMS.java   |   46 +-
 .../crypto/key/kms/server/TestKMSAudit.java     |    7 +-
 .../java/org/apache/hadoop/hdfs/DFSClient.java  |   25 +-
 .../hadoop/hdfs/DFSClientFaultInjector.java     |    2 +
 .../org/apache/hadoop/hdfs/DFSInputStream.java  |  153 +-
 .../hadoop/hdfs/DFSStripedOutputStream.java     |   58 +-
 .../org/apache/hadoop/hdfs/DataStreamer.java    |    3 +-
 .../hadoop/hdfs/DistributedFileSystem.java      |   88 +-
 .../apache/hadoop/hdfs/client/HdfsAdmin.java    |   46 +-
 .../hdfs/client/HdfsClientConfigKeys.java       |    2 +-
 .../hadoop/hdfs/protocol/ClientProtocol.java    |   35 +-
 .../hdfs/protocol/ErasureCodingPolicy.java      |   55 +-
 .../hdfs/protocol/ErasureCodingPolicyState.java |   73 +
 .../hadoop/hdfs/protocol/HdfsConstants.java     |    7 +
 .../hdfs/protocol/ReencryptionStatus.java       |  216 ++
 .../protocol/ReencryptionStatusIterator.java    |   58 +
 .../protocol/SystemErasureCodingPolicies.java   |   18 +-
 .../hdfs/protocol/ZoneReencryptionStatus.java   |  257 +++
 .../ClientNamenodeProtocolTranslatorPB.java     |   44 +-
 .../hadoop/hdfs/protocolPB/PBHelperClient.java  |  154 +-
 .../src/main/proto/ClientNamenodeProtocol.proto |    4 +
 .../src/main/proto/encryption.proto             |   41 +
 .../src/main/proto/hdfs.proto                   |   24 +
 .../fs/http/client/BaseTestHttpFSWith.java      |    1 +
 .../src/CMakeLists.txt                          |    2 +-
 .../src/main/native/fuse-dfs/CMakeLists.txt     |    2 +
 .../main/native/libhdfs-tests/CMakeLists.txt    |    2 +
 .../src/main/native/libhdfs/CMakeLists.txt      |    3 +
 .../org/apache/hadoop/hdfs/DFSConfigKeys.java   |   17 +-
 ...tNamenodeProtocolServerSideTranslatorPB.java |   39 +-
 .../hdfs/qjournal/server/JournalNodeSyncer.java |   23 +-
 .../blockmanagement/DatanodeDescriptor.java     |    6 +-
 .../server/blockmanagement/DatanodeManager.java |   45 +-
 .../hdfs/server/datanode/BlockReceiver.java     |    3 +-
 .../hadoop/hdfs/server/datanode/DataNode.java   |    7 +-
 .../hdfs/server/datanode/DirectoryScanner.java  |    5 +-
 .../namenode/CheckpointFaultInjector.java       |   18 +-
 .../namenode/EncryptionFaultInjector.java       |    9 +
 .../server/namenode/EncryptionZoneManager.java  |  351 +++-
 .../namenode/ErasureCodingPolicyManager.java    |  202 +-
 .../hdfs/server/namenode/FSDirAttrOp.java       |    4 +-
 .../server/namenode/FSDirEncryptionZoneOp.java  |  238 ++-
 .../server/namenode/FSDirErasureCodingOp.java   |   44 +-
 .../hdfs/server/namenode/FSDirWriteFileOp.java  |    6 +-
 .../hdfs/server/namenode/FSDirXAttrOp.java      |    7 +
 .../hdfs/server/namenode/FSDirectory.java       |   11 +-
 .../hdfs/server/namenode/FSEditLogLoader.java   |    4 +-
 .../hdfs/server/namenode/FSNamesystem.java      |   95 +-
 .../hadoop/hdfs/server/namenode/INode.java      |   18 +-
 .../hdfs/server/namenode/NameNodeRpcServer.java |   31 +-
 .../server/namenode/ReencryptionHandler.java    |  940 +++++++++
 .../server/namenode/ReencryptionUpdater.java    |  523 +++++
 .../server/namenode/ha/StandbyCheckpointer.java |   16 +-
 .../namenode/snapshot/SnapshotManager.java      |   28 +
 .../apache/hadoop/hdfs/tools/CryptoAdmin.java   |  134 +-
 .../org/apache/hadoop/hdfs/tools/DFSAdmin.java  |   36 +-
 .../org/apache/hadoop/hdfs/tools/ECAdmin.java   |   40 +-
 .../src/main/resources/hdfs-default.xml         |   98 +-
 .../src/main/webapps/hdfs/explorer.js           |    6 +
 .../src/site/markdown/HDFSErasureCoding.md      |   24 +-
 .../src/site/markdown/TransparentEncryption.md  |   45 +-
 .../apache/hadoop/cli/TestErasureCodingCLI.java |    2 +-
 .../TestClientProtocolForPipelineRecovery.java  |   47 +
 .../apache/hadoop/hdfs/TestDFSInputStream.java  |   41 +
 .../org/apache/hadoop/hdfs/TestDFSShell.java    |   24 +-
 .../hadoop/hdfs/TestDistributedFileSystem.java  |    2 -
 .../apache/hadoop/hdfs/TestEncryptionZones.java |   47 +
 .../hadoop/hdfs/TestErasureCodingPolicies.java  |  114 +-
 .../java/org/apache/hadoop/hdfs/TestPread.java  |   26 +-
 .../apache/hadoop/hdfs/TestRollingUpgrade.java  |   48 +
 .../org/apache/hadoop/hdfs/TestSafeMode.java    |   25 +
 .../qjournal/server/TestJournalNodeSync.java    |  176 +-
 .../blockmanagement/TestBlockManager.java       |   89 +-
 .../blockmanagement/TestDatanodeManager.java    |   96 +-
 .../server/datanode/TestDirectoryScanner.java   |   60 +-
 .../hdfs/server/namenode/FSAclBaseTest.java     |    6 +
 .../namenode/TestEncryptionZoneManager.java     |   26 +
 .../hdfs/server/namenode/TestFSDirAttrOp.java   |    7 +
 .../hdfs/server/namenode/TestFSImage.java       |   87 +
 .../hdfs/server/namenode/TestReencryption.java  | 1847 ++++++++++++++++++
 .../namenode/TestReencryptionHandler.java       |  197 ++
 .../server/namenode/TestStripedINodeFile.java   |   27 +-
 .../snapshot/TestOpenFilesWithSnapshot.java     |  221 +++
 .../snapshot/TestRandomOpsWithSnapshots.java    |  691 +++++++
 .../snapshot/TestSnapshotDiffReport.java        |  163 +-
 .../apache/hadoop/hdfs/tools/TestDFSAdmin.java  |  182 +-
 .../src/test/resources/testCryptoConf.xml       |  112 ++
 .../test/resources/testErasureCodingConf.xml    |  179 +-
 .../src/test/resources/test_ec_policies.xml     |    2 +-
 .../jobhistory/JobHistoryEventHandler.java      |  105 +-
 .../hadoop/mapreduce/jobhistory/JobSummary.java |   49 +-
 .../apache/hadoop/mapreduce/v2/app/job/Job.java |    4 +
 .../mapreduce/v2/app/job/impl/JobImpl.java      |   43 +-
 .../v2/app/job/impl/TaskAttemptImpl.java        |   58 +-
 .../mapreduce/v2/app/job/impl/TaskImpl.java     |   19 +-
 .../v2/app/rm/RMContainerAllocator.java         |   14 +-
 .../hadoop/mapreduce/jobhistory/TestEvents.java |    4 +-
 .../jobhistory/TestJobHistoryEventHandler.java  |   49 +-
 .../mapreduce/jobhistory/TestJobSummary.java    |    6 +-
 .../hadoop/mapreduce/v2/app/MockJobs.java       |   19 +
 .../mapreduce/v2/app/TestRuntimeEstimators.java |   20 +
 .../v2/app/rm/TestRMContainerAllocator.java     |  136 ++
 .../hadoop-mapreduce-client-common/pom.xml      |    4 -
 .../mapred/TestMRWithDistributedCache.java      |   40 +-
 .../src/main/avro/Events.avpr                   |   10 +-
 .../ClientDistributedCacheManager.java          |    6 +-
 .../mapreduce/jobhistory/HistoryViewer.java     |   18 +-
 .../HumanReadableHistoryViewerPrinter.java      |    4 +-
 .../jobhistory/JSONHistoryViewerPrinter.java    |    4 +-
 .../mapreduce/jobhistory/JobFinishedEvent.java  |   55 +-
 .../mapreduce/jobhistory/JobHistoryParser.java  |   34 +-
 .../JobUnsuccessfulCompletionEvent.java         |   78 +-
 .../jobhistory/MapAttemptFinishedEvent.java     |   87 +-
 .../jobhistory/ReduceAttemptFinishedEvent.java  |   83 +-
 .../jobhistory/TaskAttemptFinishedEvent.java    |   47 +-
 .../TaskAttemptUnsuccessfulCompletionEvent.java |   76 +-
 .../mapreduce/jobhistory/TaskFailedEvent.java   |   51 +-
 .../mapreduce/jobhistory/TaskFinishedEvent.java |   42 +-
 .../hadoop/mapred/TestFileOutputCommitter.java  |   23 +-
 .../apache/hadoop/mapred/TestIndexCache.java    |   15 +-
 .../hadoop/mapred/TestJobEndNotifier.java       |   12 +-
 .../hadoop/mapred/TestMapFileOutputFormat.java  |    6 +-
 .../mapreduce/TestJobMonitorAndPrint.java       |    8 +-
 .../jobhistory/TestHistoryViewerPrinter.java    |    4 +-
 .../lib/output/TestFileOutputCommitter.java     |   39 +-
 .../lib/output/TestFileOutputFormat.java        |    7 +-
 .../lib/output/TestMapFileOutputFormat.java     |    2 +
 .../hadoop/mapreduce/v2/hs/CompletedJob.java    |   55 +-
 .../hadoop/mapreduce/v2/hs/PartialJob.java      |   19 +
 .../hadoop/mapreduce/v2/hs/UnparsedJob.java     |   20 +
 .../mapreduce/v2/hs/TestJobHistoryParsing.java  |  122 +-
 .../v2/hs/webapp/TestHsWebServicesAcls.java     |   20 +
 .../java/org/apache/hadoop/fs/TestDFSIO.java    |   11 +-
 .../mapred/TestMRTimelineEventHandling.java     |   30 +-
 .../mapred/jobcontrol/TestJobControl.java       |    6 +-
 .../mapreduce/JobHistoryFileReplayMapperV1.java |    3 +-
 .../hadoop/mapreduce/SimpleEntityWriterV1.java  |    3 +-
 .../hadoop/mapreduce/TestMapCollection.java     |    6 -
 .../lib/input/TestDelegatingInputFormat.java    |    7 +-
 .../lib/jobcontrol/TestMapReduceJobControl.java |    2 +-
 .../output/TestMRCJCFileOutputCommitter.java    |   17 +-
 .../src/CMakeLists.txt                          |    2 +-
 .../mapred/nativetask/TestTaskContext.java      |   23 +-
 .../nativetask/buffer/TestInputBuffer.java      |    7 +-
 .../nativetask/buffer/TestOutputBuffer.java     |    7 +-
 .../nativetask/serde/TestKVSerializer.java      |   13 +-
 .../nativetask/utils/TestReadWriteBuffer.java   |    7 +-
 .../nativetask/utils/TestSizedWritable.java     |    5 +-
 .../examples/TestBaileyBorweinPlouffe.java      |   14 +-
 .../hadoop/examples/pi/math/TestLongLong.java   |   33 +-
 .../hadoop/examples/pi/math/TestModular.java    |  105 +-
 .../hadoop/examples/pi/math/TestSummation.java  |   30 +-
 .../maven/plugin/cmakebuilder/CompileMojo.java  |    2 -
 hadoop-project/pom.xml                          |    2 +-
 hadoop-project/src/site/markdown/index.md.vm    |    8 +-
 .../fs/aliyun/oss/AliyunOSSFileSystem.java      |   27 +
 .../fs/aliyun/oss/AliyunOSSFileSystemStore.java |    4 +
 .../apache/hadoop/fs/adl/TestGetFileStatus.java |    1 +
 .../fs/azure/AzureNativeFileSystemStore.java    |   57 +-
 .../hadoop/fs/azure/NativeAzureFileSystem.java  |   47 +-
 .../fs/azure/SecureStorageInterfaceImpl.java    |   29 +-
 .../hadoop-azure/src/site/markdown/index.md     |    9 +
 .../TestFileSystemOperationsWithThreads.java    |   61 +-
 ...stNativeAzureFSAuthWithBlobSpecificKeys.java |   44 +
 ...estNativeAzureFileSystemConcurrencyLive.java |  165 +-
 .../hadoop/contrib/utils/join/TestDataJoin.java |   41 +-
 .../hadoop/tools/CopyListingFileStatus.java     |    4 +-
 .../apache/hadoop/tools/util/DistCpUtils.java   |    4 +-
 .../hadoop/tools/util/TestDistCpUtils.java      |   26 +-
 .../org/apache/hadoop/tools/TestDistCh.java     |   23 +-
 hadoop-tools/hadoop-pipes/src/CMakeLists.txt    |    2 +-
 .../rumen/Job20LineHistoryEventEmitter.java     |    6 +-
 .../org/apache/hadoop/yarn/sls/SLSRunner.java   |    4 +
 .../hadoop/yarn/sls/nodemanager/NodeInfo.java   |    2 +-
 .../yarn/sls/scheduler/RMNodeWrapper.java       |    2 +-
 .../hadoop-sls/src/test/resources/yarn-site.xml |   10 -
 .../typedbytes/TestTypedBytesWritable.java      |    7 +-
 .../api/protocolrecords/AllocateResponse.java   |   32 +-
 .../hadoop/yarn/api/records/CollectorInfo.java  |   59 +
 .../timelineservice/ApplicationEntity.java      |   28 +
 .../records/timelineservice/TimelineEntity.java |   52 +-
 .../hadoop/yarn/conf/YarnConfiguration.java     |  108 +-
 .../hadoop/yarn/util/TimelineServiceHelper.java |    8 +
 .../src/main/proto/yarn_protos.proto            |    5 +
 .../src/main/proto/yarn_service_protos.proto    |    2 +-
 .../timelineservice/TestApplicationEntity.java  |   71 +
 .../yarn/conf/TestYarnConfigurationFields.java  |   10 +
 .../distributedshell/ApplicationMaster.java     |   42 +-
 .../distributedshell/TestDistributedShell.java  |  173 +-
 .../hadoop/yarn/client/api/AMRMClient.java      |    5 +-
 .../api/async/impl/AMRMClientAsyncImpl.java     |   19 +-
 .../hadoop/yarn/client/cli/RMAdminCLI.java      |    3 +-
 .../ApplicationMasterServiceProtoTestBase.java  |   72 +
 .../hadoop/yarn/client/ProtocolHATestBase.java  |   20 +-
 ...ationMasterServiceProtocolForTimelineV2.java |   71 +
 ...estApplicationMasterServiceProtocolOnHA.java |   46 +-
 .../api/async/impl/TestAMRMClientAsync.java     |    2 +-
 .../client/api/impl/BaseAMRMProxyE2ETest.java   |    2 +-
 .../impl/TestAMRMClientContainerRequest.java    |    6 +-
 .../yarn/client/api/impl/TestAMRMProxy.java     |   10 +-
 .../hadoop/yarn/client/cli/TestLogsCLI.java     |   65 +-
 .../hadoop-yarn/hadoop-yarn-common/pom.xml      |   14 +-
 .../impl/pb/AllocateResponsePBImpl.java         |   37 +-
 .../records/impl/pb/CollectorInfoPBImpl.java    |  152 ++
 .../yarn/client/api/TimelineV2Client.java       |   10 +-
 .../client/api/impl/TimelineV2ClientImpl.java   |  117 +-
 .../logaggregation/AggregatedLogFormat.java     |   22 +-
 .../logaggregation/LogAggregationUtils.java     |   41 +
 .../yarn/logaggregation/LogToolUtils.java       |   52 +
 .../LogAggregationFileController.java           |  404 ++++
 .../LogAggregationFileControllerContext.java    |  130 ++
 .../LogAggregationFileControllerFactory.java    |  195 ++
 .../LogAggregationTFileController.java          |  127 ++
 .../filecontroller/package-info.java            |   21 +
 .../hadoop/yarn/util/BoundedAppender.java       |  142 ++
 .../src/main/resources/yarn-default.xml         |   71 +
 .../hadoop/yarn/api/TestPBImplRecords.java      |    2 +
 .../api/impl/TestTimelineClientV2Impl.java      |   56 +-
 .../logaggregation/TestAggregatedLogsBlock.java |   28 +-
 .../logaggregation/TestContainerLogsUtils.java  |   29 +-
 ...TestLogAggregationFileControllerFactory.java |  171 ++
 .../hadoop/yarn/util/TestBoundedAppender.java   |  115 ++
 .../ApplicationHistoryServer.java               |   79 +-
 .../security/TimelineAuthenticationFilter.java  |   49 -
 ...TimelineAuthenticationFilterInitializer.java |  129 --
 ...lineDelegationTokenSecretManagerService.java |  240 ---
 ...neV1DelegationTokenSecretManagerService.java |  225 +++
 .../TestTimelineAuthenticationFilter.java       |  323 ---
 .../TestTimelineAuthenticationFilterForV1.java  |  332 ++++
 ...TimelineAuthenticationFilterInitializer.java |   76 -
 .../hadoop-yarn-server-common/pom.xml           |    5 +
 .../protocolrecords/NodeHeartbeatRequest.java   |   13 +-
 .../protocolrecords/NodeHeartbeatResponse.java  |   14 +-
 .../ReportNewCollectorInfoRequest.java          |   13 +-
 .../impl/pb/NodeHeartbeatRequestPBImpl.java     |   74 +-
 .../impl/pb/NodeHeartbeatResponsePBImpl.java    |  103 +-
 .../pb/ReportNewCollectorInfoRequestPBImpl.java |   36 +-
 .../server/api/records/AppCollectorData.java    |  125 ++
 .../server/api/records/AppCollectorsMap.java    |   46 -
 .../records/impl/pb/AppCollectorDataPBImpl.java |  227 +++
 .../records/impl/pb/AppCollectorsMapPBImpl.java |  152 --
 .../api/records/impl/pb/package-info.java       |   19 +
 .../store/impl/SQLFederationStateStore.java     |   79 +
 .../impl/ZookeeperFederationStateStore.java     |  634 ++++++
 .../FederationStateStoreClientMetrics.java      |  184 ++
 .../federation/store/metrics/package-info.java  |   17 +
 .../security/TimelineAuthenticationFilter.java  |   55 +
 ...TimelineAuthenticationFilterInitializer.java |  137 ++
 ...elineDelgationTokenSecretManagerService.java |   83 +
 .../server/timeline/security/package-info.java  |   26 +
 .../server/uam/UnmanagedApplicationManager.java |    2 +-
 .../util/timeline/TimelineServerUtils.java      |   92 +
 .../yarn/server/util/timeline/package-info.java |   25 +
 .../yarn_server_common_service_protos.proto     |   19 +-
 .../java/org/apache/hadoop/yarn/TestRPC.java    |   36 +-
 .../hadoop/yarn/TestYarnServerApiClasses.java   |   54 +-
 .../impl/TestZookeeperFederationStateStore.java |   89 +
 .../TestFederationStateStoreClientMetrics.java  |  146 ++
 .../TestFederationStateStoreFacadeRetry.java    |   20 +-
 ...TimelineAuthenticationFilterInitializer.java |   78 +
 .../src/CMakeLists.txt                          |   34 +-
 .../CMgrDecreaseContainersResourceEvent.java    |   37 -
 .../nodemanager/CMgrUpdateContainersEvent.java  |   48 +
 .../server/nodemanager/ContainerExecutor.java   |   10 +-
 .../nodemanager/ContainerManagerEventType.java  |    2 +-
 .../hadoop/yarn/server/nodemanager/Context.java |   14 +-
 .../nodemanager/DefaultContainerExecutor.java   |   12 +-
 .../server/nodemanager/DeletionService.java     |    7 +-
 .../server/nodemanager/DirectoryCollection.java |   68 +-
 .../nodemanager/LinuxContainerExecutor.java     |    8 +-
 .../nodemanager/LocalDirsHandlerService.java    |   66 +-
 .../yarn/server/nodemanager/NMAuditLogger.java  |    7 +-
 .../yarn/server/nodemanager/NodeManager.java    |   77 +-
 .../nodemanager/NodeResourceMonitorImpl.java    |    8 +-
 .../nodemanager/NodeStatusUpdaterImpl.java      |   74 +-
 .../amrmproxy/AMRMProxyTokenSecretManager.java  |    8 +-
 .../nodemanager/api/impl/pb/NMProtoUtils.java   |    7 +-
 .../collectormanager/NMCollectorService.java    |   50 +-
 .../containermanager/AuxServices.java           |   14 +-
 .../containermanager/ContainerManagerImpl.java  |   68 +-
 .../ApplicationContainerFinishedEvent.java      |    9 +-
 .../application/ApplicationImpl.java            |   40 +-
 .../containermanager/container/Container.java   |    2 +
 .../container/ContainerImpl.java                |   31 +-
 .../deletion/task/DeletionTask.java             |    7 +-
 .../launcher/ContainerLaunch.java               |   50 +-
 .../launcher/ContainerRelaunch.java             |    7 +-
 .../launcher/ContainersLauncher.java            |    7 +-
 .../launcher/RecoveredContainerLaunch.java      |    8 +-
 .../privileged/PrivilegedOperationExecutor.java |    7 +-
 .../CGroupsBlkioResourceHandlerImpl.java        |    8 +-
 .../CGroupsCpuResourceHandlerImpl.java          |    7 +-
 .../linux/resources/CGroupsHandlerImpl.java     |    9 +-
 .../CGroupsMemoryResourceHandlerImpl.java       |    8 +-
 .../linux/resources/ResourceHandlerModule.java  |   13 +-
 .../TrafficControlBandwidthHandlerImpl.java     |   10 +-
 .../linux/resources/TrafficController.java      |    7 +-
 .../runtime/DefaultLinuxContainerRuntime.java   |    8 +-
 .../DelegatingLinuxContainerRuntime.java        |    8 +-
 .../runtime/DockerLinuxContainerRuntime.java    |    8 +-
 .../JavaSandboxLinuxContainerRuntime.java       |   13 +-
 .../linux/runtime/docker/DockerClient.java      |    7 +-
 .../runtime/docker/DockerCommandExecutor.java   |    7 +-
 .../localizer/ContainerLocalizer.java           |    7 +-
 .../localizer/LocalResourcesTrackerImpl.java    |    7 +-
 .../localizer/LocalizedResource.java            |    7 +-
 .../localizer/ResourceLocalizationService.java  |    9 +-
 .../containermanager/localizer/ResourceSet.java |    7 +-
 .../security/LocalizerSecurityInfo.java         |    7 +-
 .../security/LocalizerTokenSelector.java        |    8 +-
 .../sharedcache/SharedCacheUploadService.java   |    8 +-
 .../sharedcache/SharedCacheUploader.java        |    7 +-
 .../logaggregation/AppLogAggregatorImpl.java    |  240 +--
 .../logaggregation/LogAggregationService.java   |  230 +--
 .../SampleContainerLogAggregationPolicy.java    |    8 +-
 .../loghandler/NonAggregatingLogHandler.java    |    8 +-
 .../event/LogHandlerContainerFinishedEvent.java |    9 +-
 .../monitor/ContainersMonitorImpl.java          |    8 +-
 .../scheduler/ContainerScheduler.java           |    1 +
 .../ConfigurationNodeLabelsProvider.java        |    8 +-
 .../ScriptBasedNodeLabelsProvider.java          |    7 +-
 .../recovery/NMLeveldbStateStoreService.java    |   36 +-
 .../recovery/NMNullStateStoreService.java       |    2 +-
 .../recovery/NMStateStoreService.java           |   13 +-
 .../security/NMContainerTokenSecretManager.java |    8 +-
 .../security/NMTokenSecretManagerInNM.java      |    8 +-
 .../security/authorize/NMPolicyProvider.java    |   22 +-
 .../timelineservice/NMTimelinePublisher.java    |   59 +-
 .../util/CgroupsLCEResourcesHandler.java        |   10 +-
 .../util/DefaultLCEResourcesHandler.java        |    8 +-
 .../util/NodeManagerHardwareUtils.java          |    8 +-
 .../nodemanager/util/ProcessIdFileReader.java   |    7 +-
 .../nodemanager/webapp/NMWebServices.java       |   17 +-
 .../server/nodemanager/webapp/NavBlock.java     |    8 +-
 .../server/nodemanager/webapp/WebServer.java    |    7 +-
 .../impl/container-executor.c                   |   48 +-
 .../impl/container-executor.h                   |    2 +
 .../main/native/container-executor/impl/main.c  |   13 +-
 .../impl/modules/cgroups/cgroups-operations.c   |  161 ++
 .../impl/modules/cgroups/cgroups-operations.h   |   55 +
 .../impl/modules/common/constants.h             |   29 +
 .../impl/modules/common/module-configs.c        |   41 +
 .../impl/modules/common/module-configs.h        |   33 +
 .../impl/modules/gpu/gpu-module.c               |  229 +++
 .../impl/modules/gpu/gpu-module.h               |   45 +
 .../main/native/container-executor/impl/util.h  |    7 +
 .../container-executor/impl/utils/path-utils.c  |   52 +
 .../container-executor/impl/utils/path-utils.h  |   35 +
 .../impl/utils/string-utils.c                   |  109 +-
 .../impl/utils/string-utils.h                   |    7 +-
 .../test/modules/cgroups/test-cgroups-module.cc |  121 ++
 .../test/modules/gpu/test-gpu-module.cc         |  203 ++
 .../test/test-container-executor-common.h       |   36 +
 .../test/test-container-executor.c              |   23 +-
 .../native/container-executor/test/test_main.cc |   11 +-
 .../test/utils/test-path-utils.cc               |   67 +
 .../test/utils/test-string-utils.cc             |   93 +
 .../nodemanager/DummyContainerManager.java      |    8 +-
 .../nodemanager/MockNodeStatusUpdater.java      |    7 +-
 .../TestContainerManagerWithLCE.java            |    8 +-
 .../nodemanager/TestDirectoryCollection.java    |   23 +
 .../nodemanager/TestLinuxContainerExecutor.java |    8 +-
 .../TestLinuxContainerExecutorWithMocks.java    |    8 +-
 .../nodemanager/TestNodeHealthService.java      |    8 +-
 .../nodemanager/TestNodeManagerReboot.java      |    7 +-
 .../nodemanager/TestNodeManagerResync.java      |    8 +-
 .../nodemanager/TestNodeStatusUpdater.java      |    7 +-
 .../amrmproxy/BaseAMRMProxyTest.java            |   17 +-
 .../amrmproxy/TestAMRMProxyService.java         |    8 +-
 .../BaseContainerManagerTest.java               |    8 +-
 .../containermanager/TestAuxServices.java       |    7 +-
 .../containermanager/TestContainerManager.java  |    4 +-
 .../application/TestApplication.java            |    2 +-
 .../launcher/TestContainerLaunch.java           |  105 +
 .../TestPrivilegedOperationExecutor.java        |    8 +-
 .../linux/resources/TestCGroupsHandlerImpl.java |    8 +-
 .../resources/TestResourceHandlerModule.java    |    8 +-
 .../TestTrafficControlBandwidthHandlerImpl.java |    8 +-
 .../linux/resources/TestTrafficController.java  |    7 +-
 .../runtime/TestDockerContainerRuntime.java     |    8 +-
 .../localizer/TestContainerLocalizer.java       |   11 +-
 .../TestAppLogAggregatorImpl.java               |   25 +-
 .../TestLogAggregationService.java              |  244 ++-
 .../TestNonAggregatingLogHandler.java           |   10 +-
 .../monitor/TestContainersMonitor.java          |    4 +-
 .../TestContainerSchedulerQueuing.java          |    4 +-
 .../recovery/NMMemoryStateStoreService.java     |    4 +-
 .../TestNMLeveldbStateStoreService.java         |    6 +-
 .../nodemanager/webapp/MockContainer.java       |    4 +
 .../nodemanager/webapp/TestNMWebServer.java     |    4 +-
 .../hadoop-yarn-server-resourcemanager/pom.xml  |    3 +
 .../ApplicationMasterService.java               |   38 +-
 .../server/resourcemanager/ClientRMService.java |   30 +-
 .../resourcemanager/DefaultAMSProcessor.java    |    8 +-
 ...pportunisticContainerAllocatorAMService.java |    2 +
 .../resourcemanager/ResourceTrackerService.java |   90 +-
 .../metrics/TimelineServiceV2Publisher.java     |   12 +-
 .../monitor/SchedulingMonitor.java              |    8 +-
 .../FifoIntraQueuePreemptionPlugin.java         |   26 +-
 .../recovery/ZKRMStateStore.java                |  158 +-
 .../server/resourcemanager/rmapp/RMApp.java     |   35 +-
 .../resourcemanager/rmapp/RMAppEventType.java   |    3 -
 .../server/resourcemanager/rmapp/RMAppImpl.java |   31 +-
 .../rmapp/attempt/RMAppAttemptImpl.java         |  139 +-
 .../rmapp/attempt/RMAppAttemptMetrics.java      |   19 +-
 .../rmcontainer/RMContainerImpl.java            |    8 +-
 .../server/resourcemanager/rmnode/RMNode.java   |    6 +-
 .../rmnode/RMNodeDecreaseContainerEvent.java    |   39 -
 .../resourcemanager/rmnode/RMNodeEventType.java |    2 +-
 .../resourcemanager/rmnode/RMNodeImpl.java      |   29 +-
 .../rmnode/RMNodeUpdateContainerEvent.java      |   44 +
 .../scheduler/AbstractYarnScheduler.java        |   23 +-
 .../scheduler/SchedulerApplicationAttempt.java  |   51 +-
 .../scheduler/capacity/CapacityScheduler.java   |   12 +
 .../distributed/NodeQueueLoadMonitor.java       |    4 +
 .../scheduler/event/ReleaseContainerEvent.java  |   46 +
 .../scheduler/event/SchedulerEventType.java     |    3 +
 .../scheduler/fair/FSAppAttempt.java            |   22 +-
 .../scheduler/fair/FairScheduler.java           |   13 +
 .../scheduler/fifo/FifoScheduler.java           |   15 +-
 .../webapp/CapacitySchedulerPage.java           |    7 +-
 .../resourcemanager/webapp/RMWebServices.java   |    2 +-
 .../resourcemanager/webapp/dao/AppInfo.java     |  184 +-
 .../resourcemanager/webapp/dao/AppsInfo.java    |    4 +
 .../webapp/dao/FairSchedulerQueueInfo.java      |   18 -
 .../yarn/server/resourcemanager/MockAM.java     |   31 +-
 .../yarn/server/resourcemanager/MockNM.java     |   16 +
 .../yarn/server/resourcemanager/MockNodes.java  |    4 +-
 .../yarn/server/resourcemanager/MockRM.java     |   11 +
 .../TestApplicationMasterService.java           |   37 +-
 .../resourcemanager/TestClientRMService.java    |   14 +-
 ...pportunisticContainerAllocatorAMService.java |  200 +-
 .../TestRMHATimelineCollectors.java             |  126 ++
 .../server/resourcemanager/TestRMRestart.java   |  131 ++
 .../TestResourceTrackerService.java             |   32 +-
 .../TestTokenClientRMService.java               |    3 +
 .../applicationsmanager/MockAsm.java            |   17 +-
 .../TestSystemMetricsPublisherForV2.java        |   13 +-
 .../server/resourcemanager/rmapp/MockRMApp.java |   18 +-
 .../rmapp/attempt/TestBoundedAppender.java      |  116 --
 .../TestRMAppAttemptImplDiagnostics.java        |    3 +-
 .../attempt/TestRMAppAttemptTransitions.java    |   32 +-
 .../capacity/TestContainerResizing.java         |   38 +-
 .../capacity/TestIncreaseAllocationExpirer.java |   31 +-
 .../security/TestDelegationTokenRenewer.java    |   18 +
 .../security/TestRMDelegationTokens.java        |    4 +
 .../webapp/TestRMWebServices.java               |    5 +-
 .../yarn/server/router/RouterMetrics.java       |  236 +++
 .../clientrm/FederationClientInterceptor.java   |   37 +-
 .../webapp/FederationInterceptorREST.java       |  234 ++-
 .../router/webapp/RouterWebServiceUtil.java     |  109 +-
 .../yarn/server/router/TestRouterMetrics.java   |  298 +++
 .../MockDefaultRequestInterceptorREST.java      |   49 +-
 .../webapp/TestFederationInterceptorREST.java   |   29 +-
 .../TestFederationInterceptorRESTRetry.java     |   45 +
 .../router/webapp/TestRouterWebServiceUtil.java |  311 +++
 .../hadoop-yarn-server-tests/pom.xml            |   11 +
 .../hadoop/yarn/server/TestRMNMSecretKeys.java  |   34 +-
 .../TestTimelineServiceClientIntegration.java   |   15 +-
 .../security/TestTimelineAuthFilterForV2.java   |  478 +++++
 .../src/test/resources/krb5.conf                |   28 -
 .../AbstractTimelineReaderHBaseTestBase.java    |  177 ++
 ...stTimelineReaderWebServicesHBaseStorage.java | 1007 +++++++---
 .../storage/DataGeneratorForTest.java           |  423 ++--
 .../storage/TestHBaseTimelineStorageApps.java   |  442 +++--
 .../TestHBaseTimelineStorageEntities.java       |  544 ++++--
 .../storage/TestHBaseTimelineStorageSchema.java |  135 ++
 .../storage/flow/TestFlowDataGenerator.java     |   28 +-
 .../flow/TestHBaseStorageFlowActivity.java      |   72 +-
 .../storage/flow/TestHBaseStorageFlowRun.java   |  237 ++-
 .../flow/TestHBaseStorageFlowRunCompaction.java |   67 +-
 .../reader/filter/TimelineFilterUtils.java      |   17 +
 .../storage/HBaseTimelineReaderImpl.java        |   12 +-
 .../storage/HBaseTimelineWriterImpl.java        |  294 +--
 .../storage/TimelineSchemaCreator.java          |   52 +-
 .../storage/application/ApplicationColumn.java  |   48 -
 .../application/ApplicationColumnPrefix.java    |   52 -
 .../storage/application/ApplicationRowKey.java  |   49 +-
 .../storage/application/ApplicationTable.java   |    2 +-
 .../storage/apptoflow/AppToFlowColumn.java      |   47 -
 .../apptoflow/AppToFlowColumnPrefix.java        |  206 ++
 .../storage/apptoflow/AppToFlowRowKey.java      |  101 +-
 .../storage/apptoflow/AppToFlowTable.java       |   21 +-
 .../storage/common/BaseTable.java               |   37 +-
 .../storage/common/ColumnHelper.java            |   50 +-
 .../common/HBaseTimelineStorageUtils.java       |  124 +-
 .../storage/common/KeyConverterToString.java    |   38 +
 .../storage/common/LongConverter.java           |    2 +-
 .../storage/entity/EntityColumn.java            |   48 -
 .../storage/entity/EntityColumnPrefix.java      |   51 -
 .../storage/entity/EntityRowKey.java            |  100 +-
 .../storage/entity/EntityRowKeyPrefix.java      |   11 +-
 .../storage/entity/EntityTable.java             |    4 +-
 .../storage/flow/FlowActivityColumnPrefix.java  |   58 +-
 .../storage/flow/FlowActivityRowKey.java        |   59 +-
 .../storage/flow/FlowRunColumn.java             |   53 +-
 .../storage/flow/FlowRunColumnPrefix.java       |   53 +-
 .../storage/flow/FlowRunCoprocessor.java        |   36 +-
 .../storage/flow/FlowRunRowKey.java             |   47 +-
 .../storage/flow/FlowRunTable.java              |   13 +-
 .../timelineservice/storage/package-info.java   |    6 +-
 .../reader/AbstractTimelineStorageReader.java   |  158 ++
 .../storage/reader/ApplicationEntityReader.java |   77 +-
 .../storage/reader/EntityTypeReader.java        |  179 ++
 .../reader/FlowActivityEntityReader.java        |   30 +-
 .../storage/reader/FlowRunEntityReader.java     |   53 +-
 .../storage/reader/GenericEntityReader.java     |  201 +-
 .../reader/SubApplicationEntityReader.java      |  488 +++++
 .../storage/reader/TimelineEntityReader.java    |   60 +-
 .../reader/TimelineEntityReaderFactory.java     |   18 +-
 .../subapplication/SubApplicationColumn.java    |  108 +
 .../SubApplicationColumnFamily.java             |   68 +
 .../SubApplicationColumnPrefix.java             |  250 +++
 .../subapplication/SubApplicationRowKey.java    |  290 +++
 .../SubApplicationRowKeyPrefix.java             |   69 +
 .../subapplication/SubApplicationTable.java     |  174 ++
 .../storage/subapplication/package-info.java    |   28 +
 .../common/TestHBaseTimelineStorageUtils.java   |   33 +
 .../storage/common/TestKeyConverters.java       |    4 +
 .../storage/common/TestRowKeys.java             |   54 +-
 .../storage/common/TestRowKeysAsString.java     |  144 ++
 .../collector/AppLevelTimelineCollector.java    |  143 +-
 .../AppLevelTimelineCollectorWithAgg.java       |  150 ++
 .../collector/NodeTimelineCollectorManager.java |  275 ++-
 .../PerNodeTimelineCollectorsAuxService.java    |   12 +-
 .../collector/TimelineCollector.java            |   18 +-
 .../collector/TimelineCollectorManager.java     |   14 +-
 .../reader/TimelineDataToRetrieve.java          |   35 +-
 .../reader/TimelineEntityFilters.java           |  160 +-
 .../reader/TimelineReaderContext.java           |   37 +-
 .../reader/TimelineReaderManager.java           |   40 +-
 .../reader/TimelineReaderServer.java            |   77 +-
 .../reader/TimelineReaderUtils.java             |   31 +-
 .../reader/TimelineReaderWebServices.java       |  693 ++++++-
 .../reader/TimelineReaderWebServicesUtils.java  |   98 +-
 .../reader/TimelineUIDConverter.java            |   84 +-
 ...neReaderAuthenticationFilterInitializer.java |   53 +
 ...elineReaderWhitelistAuthorizationFilter.java |  123 ++
 ...WhitelistAuthorizationFilterInitializer.java |   66 +
 .../reader/security/package-info.java           |   25 +
 .../CollectorNodemanagerSecurityInfo.java       |   69 +
 ...neV2DelegationTokenSecretManagerService.java |  126 ++
 .../timelineservice/security/package-info.java  |   25 +
 .../storage/FileSystemTimelineReaderImpl.java   |   21 +
 .../storage/FileSystemTimelineWriterImpl.java   |   15 +-
 .../timelineservice/storage/TimelineReader.java |   23 +-
 .../timelineservice/storage/TimelineWriter.java |   28 +-
 .../storage/common/TimelineStorageUtils.java    |    1 -
 .../org.apache.hadoop.security.SecurityInfo     |   14 +
 .../TestNMTimelineCollectorManager.java         |    4 +-
 .../collector/TestTimelineCollector.java        |   12 +-
 .../reader/TestTimelineReaderWebServices.java   |    4 +-
 ...elineReaderWhitelistAuthorizationFilter.java |  380 ++++
 .../reader/TestTimelineUIDConverter.java        |   17 +-
 .../TestFileSystemTimelineReaderImpl.java       |  115 +-
 .../TestFileSystemTimelineWriterImpl.java       |    8 +-
 .../amfilter/TestAmFilterInitializer.java       |   15 +-
 .../src/site/markdown/Federation.md             |   56 +-
 .../src/site/markdown/TimelineServiceV2.md      |  433 +++-
 .../src/site/markdown/YarnCommands.md           |    4 +-
 .../src/main/webapp/app/adapters/yarn-queue.js  |   30 -
 .../app/adapters/yarn-queue/capacity-queue.js   |   23 +
 .../app/adapters/yarn-queue/fair-queue.js       |   23 +
 .../app/adapters/yarn-queue/fifo-queue.js       |   23 +
 .../app/adapters/yarn-queue/yarn-queue.js       |   30 +
 .../main/webapp/app/components/tree-selector.js |   19 +-
 .../main/webapp/app/models/cluster-metric.js    |    9 +-
 .../src/main/webapp/app/models/yarn-queue.js    |   94 -
 .../app/models/yarn-queue/capacity-queue.js     |   95 +
 .../webapp/app/models/yarn-queue/fair-queue.js  |   79 +
 .../webapp/app/models/yarn-queue/fifo-queue.js  |   52 +
 .../webapp/app/models/yarn-queue/yarn-queue.js  |   23 +
 .../src/main/webapp/app/models/yarn-rm-node.js  |    2 +-
 .../main/webapp/app/routes/cluster-overview.js  |    4 +-
 .../src/main/webapp/app/routes/yarn-queue.js    |   26 +-
 .../src/main/webapp/app/routes/yarn-queues.js   |   12 +-
 .../main/webapp/app/routes/yarn-queues/index.js |   25 -
 .../app/routes/yarn-queues/queues-selector.js   |   25 -
 .../main/webapp/app/serializers/yarn-queue.js   |  129 --
 .../serializers/yarn-queue/capacity-queue.js    |  128 ++
 .../app/serializers/yarn-queue/fair-queue.js    |   92 +
 .../app/serializers/yarn-queue/fifo-queue.js    |   59 +
 .../app/serializers/yarn-queue/yarn-queue.js    |   47 +
 .../webapp/app/templates/cluster-overview.hbs   |   33 +-
 .../components/queue-configuration-table.hbs    |   54 -
 .../templates/components/queue-navigator.hbs    |    7 +-
 .../yarn-queue/capacity-queue-conf-table.hbs    |   54 +
 .../yarn-queue/capacity-queue-info.hbs          |   84 +
 .../components/yarn-queue/capacity-queue.hbs    |   63 +
 .../yarn-queue/fair-queue-conf-table.hbs        |   52 +
 .../components/yarn-queue/fair-queue-info.hbs   |   66 +
 .../components/yarn-queue/fair-queue.hbs        |   63 +
 .../yarn-queue/fifo-queue-conf-table.hbs        |   56 +
 .../components/yarn-queue/fifo-queue-info.hbs   |   47 +
 .../components/yarn-queue/fifo-queue.hbs        |   48 +
 .../main/webapp/app/templates/yarn-nodes.hbs    |    2 +-
 .../webapp/app/templates/yarn-queue/info.hbs    |   73 +-
 .../main/webapp/app/templates/yarn-queues.hbs   |   54 +-
 .../src/main/webapp/app/utils/color-utils.js    |    1 -
 start-build-env.sh                              |    2 +
 673 files changed, 33269 insertions(+), 8146 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/11e5f54f/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/hadoop/blob/11e5f54f/hadoop-project/pom.xml
----------------------------------------------------------------------



[38/50] [abbrv] hadoop git commit: HDFS-12258. ec -listPolicies should list all policies in system, no matter it's enabled or disabled. Contributed by Wei Zhou.

Posted by st...@apache.org.
HDFS-12258. ec -listPolicies should list all policies in system, no matter it's enabled or disabled. Contributed by Wei Zhou.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/200b1136
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/200b1136
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/200b1136

Branch: refs/heads/HADOOP-13345
Commit: 200b11368d3954138a9bce128c8fa763b4a503a1
Parents: 32cba6c
Author: Rakesh Radhakrishnan <ra...@apache.org>
Authored: Wed Aug 30 12:58:56 2017 +0530
Committer: Rakesh Radhakrishnan <ra...@apache.org>
Committed: Wed Aug 30 12:58:56 2017 +0530

----------------------------------------------------------------------
 .../hadoop/hdfs/DFSStripedOutputStream.java     |  58 +++---
 .../hadoop/hdfs/DistributedFileSystem.java      |  47 ++---
 .../apache/hadoop/hdfs/client/HdfsAdmin.java    |  19 +-
 .../hdfs/protocol/ErasureCodingPolicy.java      |  50 ++++-
 .../hdfs/protocol/ErasureCodingPolicyState.java |  73 +++++++
 .../hadoop/hdfs/protocolPB/PBHelperClient.java  |  18 +-
 .../src/main/proto/hdfs.proto                   |  10 +
 .../namenode/ErasureCodingPolicyManager.java    | 193 ++++++++++---------
 .../server/namenode/FSDirErasureCodingOp.java   |  29 ++-
 .../org/apache/hadoop/hdfs/tools/ECAdmin.java   |  13 +-
 .../src/site/markdown/HDFSErasureCoding.md      |   2 +-
 .../hadoop/hdfs/TestDistributedFileSystem.java  |   2 -
 .../hadoop/hdfs/TestErasureCodingPolicies.java  |  15 +-
 .../server/namenode/TestStripedINodeFile.java   |  27 ++-
 .../test/resources/testErasureCodingConf.xml    |  21 +-
 15 files changed, 376 insertions(+), 201 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/200b1136/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSStripedOutputStream.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSStripedOutputStream.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSStripedOutputStream.java
index 2aa9e98..842f23b 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSStripedOutputStream.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSStripedOutputStream.java
@@ -17,32 +17,8 @@
  */
 package org.apache.hadoop.hdfs;
 
-import java.io.IOException;
-import java.io.InterruptedIOException;
-import java.nio.ByteBuffer;
-import java.nio.channels.ClosedChannelException;
-import java.util.ArrayList;
-import java.util.Arrays;
-import java.util.Collections;
-import java.util.EnumSet;
-import java.util.HashMap;
-import java.util.HashSet;
-import java.util.LinkedHashMap;
-import java.util.List;
-import java.util.Map;
-import java.util.Set;
-import java.util.concurrent.BlockingQueue;
-import java.util.concurrent.LinkedBlockingQueue;
-import java.util.concurrent.TimeUnit;
-import java.util.concurrent.ExecutorService;
-import java.util.concurrent.Executors;
-import java.util.concurrent.Callable;
-import java.util.concurrent.CompletionService;
-import java.util.concurrent.ExecutorCompletionService;
-import java.util.concurrent.ExecutionException;
-import java.util.concurrent.Future;
-
 import com.google.common.annotations.VisibleForTesting;
+import com.google.common.base.Preconditions;
 import org.apache.hadoop.HadoopIllegalArgumentException;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.fs.CreateFlag;
@@ -52,6 +28,7 @@ import org.apache.hadoop.hdfs.protocol.ClientProtocol;
 import org.apache.hadoop.hdfs.protocol.DatanodeID;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo.DatanodeInfoBuilder;
+import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;
 import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
 import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
 import org.apache.hadoop.hdfs.protocol.LocatedBlock;
@@ -62,16 +39,38 @@ import org.apache.hadoop.io.ByteBufferPool;
 import org.apache.hadoop.io.ElasticByteBufferPool;
 import org.apache.hadoop.io.MultipleIOException;
 import org.apache.hadoop.io.erasurecode.CodecUtil;
-import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;
 import org.apache.hadoop.io.erasurecode.ErasureCoderOptions;
 import org.apache.hadoop.io.erasurecode.rawcoder.RawErasureEncoder;
 import org.apache.hadoop.util.DataChecksum;
 import org.apache.hadoop.util.Progressable;
 import org.apache.hadoop.util.Time;
-
-import com.google.common.base.Preconditions;
 import org.apache.htrace.core.TraceScope;
 
+import java.io.IOException;
+import java.io.InterruptedIOException;
+import java.nio.ByteBuffer;
+import java.nio.channels.ClosedChannelException;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.Collections;
+import java.util.EnumSet;
+import java.util.HashMap;
+import java.util.HashSet;
+import java.util.LinkedHashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.Set;
+import java.util.concurrent.BlockingQueue;
+import java.util.concurrent.Callable;
+import java.util.concurrent.CompletionService;
+import java.util.concurrent.ExecutionException;
+import java.util.concurrent.ExecutorCompletionService;
+import java.util.concurrent.ExecutorService;
+import java.util.concurrent.Executors;
+import java.util.concurrent.Future;
+import java.util.concurrent.LinkedBlockingQueue;
+import java.util.concurrent.TimeUnit;
+
 
 /**
  * This class supports writing files in striped layout and erasure coded format.
@@ -777,7 +776,8 @@ public class DFSStripedOutputStream extends DFSOutputStream
     // should update the block group length based on the acked length
     final long sentBytes = currentBlockGroup.getNumBytes();
     final long ackedBytes = getNumAckedStripes() * cellSize * numDataBlocks;
-    Preconditions.checkState(ackedBytes <= sentBytes);
+    Preconditions.checkState(ackedBytes <= sentBytes,
+        "Acked:" + ackedBytes + ", Sent:" + sentBytes);
     currentBlockGroup.setNumBytes(ackedBytes);
     newBG.setNumBytes(ackedBytes);
     dfsClient.namenode.updatePipeline(dfsClient.clientName, currentBlockGroup,

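One note on the Guava idiom in the hunk above: the concatenated message string is built on every call, even when the check passes. A tiny sketch (not from this commit) of the templated checkState overload, which defers formatting until the check actually fails:

import com.google.common.base.Preconditions;

public class CheckStateDemo {
  public static void main(String[] args) {
    long sentBytes = 1024;
    long ackedBytes = 512;
    // Same check as above; the "%s" message is only formatted on failure.
    Preconditions.checkState(ackedBytes <= sentBytes,
        "Acked:%s, Sent:%s", ackedBytes, sentBytes);
  }
}
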
http://git-wip-us.apache.org/repos/asf/hadoop/blob/200b1136/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java
index f3605fa..44caed6 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java
@@ -18,21 +18,14 @@
 
 package org.apache.hadoop.hdfs;
 
-import java.io.FileNotFoundException;
-import java.io.IOException;
-import java.net.InetSocketAddress;
-import java.net.URI;
-import java.util.ArrayList;
-import java.util.Arrays;
-import java.util.Collection;
-import java.util.EnumSet;
-import java.util.List;
-import java.util.Map;
 
+import com.google.common.annotations.VisibleForTesting;
+import com.google.common.base.Preconditions;
 import org.apache.hadoop.HadoopIllegalArgumentException;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.crypto.key.KeyProviderDelegationTokenExtension;
 import org.apache.hadoop.fs.BlockLocation;
 import org.apache.hadoop.fs.BlockStoragePolicySpi;
 import org.apache.hadoop.fs.CacheFlag;
@@ -53,24 +46,24 @@ import org.apache.hadoop.fs.GlobalStorageStatistics;
 import org.apache.hadoop.fs.GlobalStorageStatistics.StorageStatisticsProvider;
 import org.apache.hadoop.fs.LocatedFileStatus;
 import org.apache.hadoop.fs.Options;
-import org.apache.hadoop.fs.StorageStatistics;
-import org.apache.hadoop.fs.XAttrSetFlag;
 import org.apache.hadoop.fs.Options.ChecksumOpt;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.PathFilter;
 import org.apache.hadoop.fs.QuotaUsage;
 import org.apache.hadoop.fs.RemoteIterator;
+import org.apache.hadoop.fs.StorageStatistics;
+import org.apache.hadoop.fs.StorageType;
 import org.apache.hadoop.fs.UnresolvedLinkException;
 import org.apache.hadoop.fs.UnsupportedFileSystemException;
+import org.apache.hadoop.fs.XAttrSetFlag;
 import org.apache.hadoop.fs.permission.AclEntry;
 import org.apache.hadoop.fs.permission.AclStatus;
-import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.fs.permission.FsAction;
-import org.apache.hadoop.fs.StorageType;
+import org.apache.hadoop.fs.permission.FsPermission;
+import org.apache.hadoop.hdfs.DFSOpsCountStatistics.OpType;
 import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys;
 import org.apache.hadoop.hdfs.client.HdfsDataOutputStream;
 import org.apache.hadoop.hdfs.client.impl.CorruptFileBlockIterator;
-import org.apache.hadoop.hdfs.DFSOpsCountStatistics.OpType;
 import org.apache.hadoop.hdfs.protocol.AddECPolicyResponse;
 import org.apache.hadoop.hdfs.protocol.BlockStoragePolicy;
 import org.apache.hadoop.hdfs.protocol.CacheDirectiveEntry;
@@ -81,6 +74,7 @@ import org.apache.hadoop.hdfs.protocol.ClientProtocol;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
 import org.apache.hadoop.hdfs.protocol.DirectoryListing;
 import org.apache.hadoop.hdfs.protocol.EncryptionZone;
+import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants.DatanodeReportType;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants.ReencryptAction;
@@ -95,17 +89,22 @@ import org.apache.hadoop.hdfs.protocol.SnapshotDiffReport;
 import org.apache.hadoop.hdfs.protocol.SnapshottableDirectoryStatus;
 import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier;
 import org.apache.hadoop.io.Text;
-import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;
 import org.apache.hadoop.net.NetUtils;
 import org.apache.hadoop.security.Credentials;
 import org.apache.hadoop.security.token.Token;
 import org.apache.hadoop.util.Progressable;
-import org.apache.hadoop.crypto.key.KeyProviderDelegationTokenExtension;
-
-import com.google.common.annotations.VisibleForTesting;
-import com.google.common.base.Preconditions;
 
 import javax.annotation.Nonnull;
+import java.io.FileNotFoundException;
+import java.io.IOException;
+import java.net.InetSocketAddress;
+import java.net.URI;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.Collection;
+import java.util.EnumSet;
+import java.util.List;
+import java.util.Map;
 
 /****************************************************************
  * Implementation of the abstract FileSystem for the DFS system.
@@ -2603,7 +2602,8 @@ public class DistributedFileSystem extends FileSystem {
 
   /**
    * Retrieve all the erasure coding policies supported by this file system,
-   * excluding REPLICATION policy.
+   * including enabled, disabled and removed policies, but excluding
+   * REPLICATION policy.
    *
    * @return all erasure coding policies supported by this file system.
    * @throws IOException
@@ -2628,8 +2628,9 @@ public class DistributedFileSystem extends FileSystem {
   /**
    * Add Erasure coding policies to HDFS. For each policy input, schema and
    * cellSize are musts, name and id are ignored. They will be automatically
-   * created and assigned by Namenode once the policy is successfully added, and
-   * will be returned in the response.
+   * created and assigned by Namenode once the policy is successfully added,
+   * and will be returned in the response; policy states will be set to
+   * DISABLED automatically.
    *
    * @param policies The user defined ec policy list to add.
    * @return Return the response list of adding operations.

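For illustration, here is a minimal client-side sketch (not part of this commit) of the behaviour described in the javadoc above: getAllErasureCodingPolicies() now returns enabled, disabled and removed policies, and each policy carries its state. It assumes fs.defaultFS points at an HDFS cluster.

import java.util.Collection;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;

public class ListAllEcPolicies {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    try (FileSystem fs = FileSystem.get(conf)) {
      DistributedFileSystem dfs = (DistributedFileSystem) fs;
      Collection<ErasureCodingPolicy> policies =
          dfs.getAllErasureCodingPolicies();
      for (ErasureCodingPolicy policy : policies) {
        // getState() is the new accessor introduced by this change.
        System.out.println(policy.getName() + " -> " + policy.getState());
      }
    }
  }
}
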
http://git-wip-us.apache.org/repos/asf/hadoop/blob/200b1136/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/client/HdfsAdmin.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/client/HdfsAdmin.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/client/HdfsAdmin.java
index 85a7efe..bfc6010 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/client/HdfsAdmin.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/client/HdfsAdmin.java
@@ -17,12 +17,6 @@
  */
 package org.apache.hadoop.hdfs.client;
 
-import java.io.FileNotFoundException;
-import java.io.IOException;
-import java.net.URI;
-import java.util.Collection;
-import java.util.EnumSet;
-
 import org.apache.hadoop.HadoopIllegalArgumentException;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
@@ -50,10 +44,16 @@ import org.apache.hadoop.hdfs.protocol.EncryptionZone;
 import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants.ReencryptAction;
-import org.apache.hadoop.hdfs.protocol.ZoneReencryptionStatus;
 import org.apache.hadoop.hdfs.protocol.OpenFileEntry;
+import org.apache.hadoop.hdfs.protocol.ZoneReencryptionStatus;
 import org.apache.hadoop.security.AccessControlException;
 
+import java.io.FileNotFoundException;
+import java.io.IOException;
+import java.net.URI;
+import java.util.Collection;
+import java.util.EnumSet;
+
 /**
  * The public API for performing administrative functions on HDFS. Those writing
  * applications against HDFS should prefer this interface to directly accessing
@@ -554,8 +554,9 @@ public class HdfsAdmin {
   /**
    * Add Erasure coding policies to HDFS. For each policy input, schema and
    * cellSize are musts, name and id are ignored. They will be automatically
-   * created and assigned by Namenode once the policy is successfully added, and
-   * will be returned in the response.
+   * created and assigned by Namenode once the policy is successfully added,
+   * and will be returned in the response; policy states will be set to
+   * DISABLED automatically.
    *
    * @param policies The user defined ec policy list to add.
    * @return Return the response list of adding operations.

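Similarly, a hedged sketch (not part of this commit) of adding a user-defined policy through HdfsAdmin: per the javadoc above, the NameNode assigns the name and id, and the new policy starts out DISABLED. The NameNode URI below is a placeholder.

import java.net.URI;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.client.HdfsAdmin;
import org.apache.hadoop.hdfs.protocol.AddECPolicyResponse;
import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;
import org.apache.hadoop.io.erasurecode.ECSchema;

public class AddEcPolicyExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    HdfsAdmin admin = new HdfsAdmin(URI.create("hdfs://namenode:8020"), conf);
    // Schema and cellSize are required; name and id are ignored on input.
    ECSchema schema = new ECSchema("rs", 6, 3);
    ErasureCodingPolicy policy = new ErasureCodingPolicy(schema, 1024 * 1024);
    for (AddECPolicyResponse response :
        admin.addErasureCodingPolicies(new ErasureCodingPolicy[] {policy})) {
      System.out.println(response);
    }
  }
}
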
http://git-wip-us.apache.org/repos/asf/hadoop/blob/200b1136/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/ErasureCodingPolicy.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/ErasureCodingPolicy.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/ErasureCodingPolicy.java
index 501b67c..6cbe0e6 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/ErasureCodingPolicy.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/ErasureCodingPolicy.java
@@ -17,8 +17,6 @@
  */
 package org.apache.hadoop.hdfs.protocol;
 
-import java.io.Serializable;
-
 import com.google.common.base.Preconditions;
 import org.apache.commons.lang.builder.EqualsBuilder;
 import org.apache.commons.lang.builder.HashCodeBuilder;
@@ -27,6 +25,8 @@ import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.io.erasurecode.ECSchema;
 import org.apache.hadoop.io.erasurecode.ErasureCodeConstants;
 
+import java.io.Serializable;
+
 /**
  * A policy about how to write/read/code an erasure coding file.
  */
@@ -40,9 +40,11 @@ public final class ErasureCodingPolicy implements Serializable {
   private final ECSchema schema;
   private final int cellSize;
   private byte id;
+  private ErasureCodingPolicyState state;
+
 
   public ErasureCodingPolicy(String name, ECSchema schema,
-      int cellSize, byte id) {
+      int cellSize, byte id, ErasureCodingPolicyState state) {
     Preconditions.checkNotNull(name);
     Preconditions.checkNotNull(schema);
     Preconditions.checkArgument(cellSize > 0, "cellSize must be positive");
@@ -52,14 +54,22 @@ public final class ErasureCodingPolicy implements Serializable {
     this.schema = schema;
     this.cellSize = cellSize;
     this.id = id;
+    this.state = state;
+  }
+
+  public ErasureCodingPolicy(String name, ECSchema schema, int cellSize,
+      byte id) {
+    this(name, schema, cellSize, id, ErasureCodingPolicyState.DISABLED);
   }
 
   public ErasureCodingPolicy(ECSchema schema, int cellSize, byte id) {
-    this(composePolicyName(schema, cellSize), schema, cellSize, id);
+    this(composePolicyName(schema, cellSize), schema, cellSize, id,
+        ErasureCodingPolicyState.DISABLED);
   }
 
   public ErasureCodingPolicy(ECSchema schema, int cellSize) {
-    this(composePolicyName(schema, cellSize), schema, cellSize, (byte) -1);
+    this(composePolicyName(schema, cellSize), schema, cellSize, (byte) -1,
+        ErasureCodingPolicyState.DISABLED);
   }
 
   public static String composePolicyName(ECSchema schema, int cellSize) {
@@ -108,10 +118,35 @@ public final class ErasureCodingPolicy implements Serializable {
     this.id = id;
   }
 
+
   public boolean isReplicationPolicy() {
     return (id == ErasureCodeConstants.REPLICATION_POLICY_ID);
   }
 
+  public ErasureCodingPolicyState getState() {
+    return state;
+  }
+
+  public void setState(ErasureCodingPolicyState state) {
+    this.state = state;
+  }
+
+  public boolean isSystemPolicy() {
+    return (this.id < ErasureCodeConstants.USER_DEFINED_POLICY_START_ID);
+  }
+
+  public boolean isEnabled() {
+    return (this.state == ErasureCodingPolicyState.ENABLED);
+  }
+
+  public boolean isDisabled() {
+    return (this.state == ErasureCodingPolicyState.DISABLED);
+  }
+
+  public boolean isRemoved() {
+    return (this.state == ErasureCodingPolicyState.REMOVED);
+  }
+
   @Override
   public boolean equals(Object o) {
     if (o == null) {
@@ -129,6 +164,7 @@ public final class ErasureCodingPolicy implements Serializable {
         .append(schema, rhs.schema)
         .append(cellSize, rhs.cellSize)
         .append(id, rhs.id)
+        .append(state, rhs.state)
         .isEquals();
   }
 
@@ -139,6 +175,7 @@ public final class ErasureCodingPolicy implements Serializable {
         .append(schema)
         .append(cellSize)
         .append(id)
+        .append(state)
         .toHashCode();
   }
 
@@ -147,7 +184,8 @@ public final class ErasureCodingPolicy implements Serializable {
     return "ErasureCodingPolicy=[" + "Name=" + name + ", "
         + "Schema=[" + schema.toString() + "], "
         + "CellSize=" + cellSize + ", "
-        + "Id=" + id
+        + "Id=" + id + ", "
+        + "State=" + state.toString()
         + "]";
   }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/200b1136/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/ErasureCodingPolicyState.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/ErasureCodingPolicyState.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/ErasureCodingPolicyState.java
new file mode 100644
index 0000000..a8586dc
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/ErasureCodingPolicyState.java
@@ -0,0 +1,73 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs.protocol;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
+
+import java.io.DataInput;
+import java.io.DataOutput;
+import java.io.IOException;
+
+/**
+ * The possible states of an ErasureCodingPolicy.
+ */
+@InterfaceAudience.Public
+@InterfaceStability.Evolving
+public enum ErasureCodingPolicyState {
+
+  /** Policy is disabled. This is a policy's default state. */
+  DISABLED(1),
+  /** Policy is enabled. It can be applied to directories and files. */
+  ENABLED(2),
+  /**
+   * Policy is removed from the system. Because files may still be using
+   * this policy, it cannot be deleted from the system immediately; a
+   * removed policy can be re-enabled later. */
+  REMOVED(3);
+
+  private static final ErasureCodingPolicyState[] CACHED_VALUES =
+      ErasureCodingPolicyState.values();
+
+  private final int value;
+
+  ErasureCodingPolicyState(int v) {
+    value = v;
+  }
+
+  public int getValue() {
+    return value;
+  }
+
+  public static ErasureCodingPolicyState fromValue(int v) {
+    if (v > 0 && v <= CACHED_VALUES.length) {
+      return CACHED_VALUES[v - 1];
+    }
+    return null;
+  }
+
+  /** Read from in. */
+  public static ErasureCodingPolicyState read(DataInput in) throws IOException {
+    return fromValue(in.readByte());
+  }
+
+  /** Write to out. */
+  public void write(DataOutput out) throws IOException {
+    out.writeByte(getValue()); // fromValue() expects the 1-based value
+  }
+}

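A small self-contained round-trip check (not part of this commit) of the 1-based encoding; it holds with write() emitting getValue(), which is what fromValue() expects on the read side.

import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.DataInputStream;
import java.io.DataOutputStream;

import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicyState;

public class EcStateRoundTrip {
  public static void main(String[] args) throws Exception {
    for (ErasureCodingPolicyState state : ErasureCodingPolicyState.values()) {
      ByteArrayOutputStream bytes = new ByteArrayOutputStream();
      state.write(new DataOutputStream(bytes));
      ErasureCodingPolicyState decoded = ErasureCodingPolicyState.read(
          new DataInputStream(new ByteArrayInputStream(bytes.toByteArray())));
      if (state != decoded) {
        throw new AssertionError("Round trip failed for " + state);
      }
    }
  }
}
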
http://git-wip-us.apache.org/repos/asf/hadoop/blob/200b1136/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelperClient.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelperClient.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelperClient.java
index 30a3108..04f9686 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelperClient.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelperClient.java
@@ -79,6 +79,7 @@ import org.apache.hadoop.hdfs.protocol.DirectoryListing;
 import org.apache.hadoop.hdfs.protocol.ECBlockGroupsStats;
 import org.apache.hadoop.hdfs.protocol.EncryptionZone;
 import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;
+import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicyState;
 import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
 import org.apache.hadoop.hdfs.protocol.FsPermissionExtension;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants;
@@ -2925,6 +2926,16 @@ public class PBHelperClient {
     return builder.build();
   }
 
+  public static ErasureCodingPolicyState convertECState(
+      HdfsProtos.ErasureCodingPolicyState state) {
+    return ErasureCodingPolicyState.fromValue(state.getNumber());
+  }
+
+  public static HdfsProtos.ErasureCodingPolicyState convertECState(
+      ErasureCodingPolicyState state) {
+    return HdfsProtos.ErasureCodingPolicyState.valueOf(state.getValue());
+  }
+
   public static ErasureCodingPolicy convertErasureCodingPolicy(
       ErasureCodingPolicyProto proto) {
     final byte id = (byte) (proto.getId() & 0xFF);
@@ -2938,10 +2949,12 @@ public class PBHelperClient {
           "Missing schema field in ErasureCodingPolicy proto");
       Preconditions.checkArgument(proto.hasCellSize(),
           "Missing cellsize field in ErasureCodingPolicy proto");
+      Preconditions.checkArgument(proto.hasState(),
+          "Missing state field in ErasureCodingPolicy proto");
 
       return new ErasureCodingPolicy(proto.getName(),
           convertECSchema(proto.getSchema()),
-          proto.getCellSize(), id);
+          proto.getCellSize(), id, convertECState(proto.getState()));
     }
     return policy;
   }
@@ -2955,7 +2968,8 @@ public class PBHelperClient {
     if (SystemErasureCodingPolicies.getByID(policy.getId()) == null) {
       builder.setName(policy.getName())
           .setSchema(convertECSchema(policy.getSchema()))
-          .setCellSize(policy.getCellSize());
+          .setCellSize(policy.getCellSize())
+          .setState(convertECState(policy.getState()));
     }
     return builder.build();
   }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/200b1136/hadoop-hdfs-project/hadoop-hdfs-client/src/main/proto/hdfs.proto
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/proto/hdfs.proto b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/proto/hdfs.proto
index 59381bc..ddb5566 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/proto/hdfs.proto
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/proto/hdfs.proto
@@ -373,11 +373,21 @@ message ECSchemaProto {
   repeated ECSchemaOptionEntryProto options = 4;
 }
 
+/**
+ * EC policy state.
+ */
+enum ErasureCodingPolicyState {
+  DISABLED = 1;
+  ENABLED = 2;
+  REMOVED = 3;
+}
+
 message ErasureCodingPolicyProto {
   optional string name = 1;
   optional ECSchemaProto schema = 2;
   optional uint32 cellSize = 3;
   required uint32 id = 4; // Actually a byte - only 8 bits used
+  optional ErasureCodingPolicyState state = 5 [default = ENABLED];
 }
 
 message AddECPolicyResponseProto {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/200b1136/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ErasureCodingPolicyManager.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ErasureCodingPolicyManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ErasureCodingPolicyManager.java
index 404a0aa..74b5ebf 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ErasureCodingPolicyManager.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ErasureCodingPolicyManager.java
@@ -17,10 +17,12 @@
  */
 package org.apache.hadoop.hdfs.server.namenode;
 
+import com.google.common.annotations.VisibleForTesting;
 import org.apache.commons.lang.ArrayUtils;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
+import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicyState;
 import org.apache.hadoop.hdfs.protocol.IllegalECPolicyException;
 import org.apache.hadoop.hdfs.protocol.SystemErasureCodingPolicies;
 import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;
@@ -31,11 +33,11 @@ import org.apache.hadoop.io.erasurecode.ErasureCodeConstants;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
+import java.util.ArrayList;
 import java.util.List;
 import java.util.Map;
 import java.util.TreeMap;
 import java.util.stream.Collectors;
-import java.util.stream.Stream;
 
 /**
  * This manages erasure coding policies predefined and activated in the system.
@@ -60,25 +62,32 @@ public final class ErasureCodingPolicyManager {
           HdfsConstants.ALLSSD_STORAGE_POLICY_ID};
 
   /**
-   * All user defined policies sorted by name for fast querying.
+   * All policies sorted by name for fast querying, including built-in,
+   * user-defined and removed policies.
    */
-  private Map<String, ErasureCodingPolicy> userPoliciesByName;
+  private Map<String, ErasureCodingPolicy> policiesByName;
 
   /**
-   * All user defined policies sorted by ID for fast querying.
+   * All policies sorted by ID for fast querying, including built-in,
+   * user-defined and removed policies.
    */
-  private Map<Byte, ErasureCodingPolicy> userPoliciesByID;
+  private Map<Byte, ErasureCodingPolicy> policiesByID;
 
   /**
-   * All removed policies sorted by name.
+   * Cached array of all policies, for faster querying of the full list.
    */
-  private Map<String, ErasureCodingPolicy> removedPoliciesByName;
+  private ErasureCodingPolicy[] allPolicies;
 
   /**
-   * All enabled policies maintained in NN memory for fast querying,
-   * identified and sorted by its name.
+   * All enabled policies sorted by name for fast querying, including
+   * built-in and user-defined policies.
    */
   private Map<String, ErasureCodingPolicy> enabledPoliciesByName;
+  /**
+   * Cached array of all enabled policies, for faster querying.
+   */
+  private ErasureCodingPolicy[] enabledPolicies;
+
 
   private volatile static ErasureCodingPolicyManager instance = null;
 
@@ -101,46 +110,51 @@ public final class ErasureCodingPolicyManager {
             DFSConfigKeys.DFS_NAMENODE_EC_SYSTEM_DEFAULT_POLICY_DEFAULT);
     final String[] policyNames =
             (String[]) ArrayUtils.add(enablePolicyNames, defaultPolicyName);
-    this.userPoliciesByID = new TreeMap<>();
-    this.userPoliciesByName = new TreeMap<>();
-    this.removedPoliciesByName = new TreeMap<>();
+    this.policiesByName = new TreeMap<>();
+    this.policiesByID = new TreeMap<>();
     this.enabledPoliciesByName = new TreeMap<>();
+
+    /**
+     * TODO: HDFS-7859 - load user-defined EC policies persisted in the
+     * fsImage and editlog. This is done only once, during NameNode startup,
+     * and can happen here or in a separate method.
+     */
+
+    /*
+     * Add all system built-in policies to the policy maps.
+     */
+    for (ErasureCodingPolicy policy :
+        SystemErasureCodingPolicies.getPolicies()) {
+      policiesByName.put(policy.getName(), policy);
+      policiesByID.put(policy.getId(), policy);
+    }
+
     for (String policyName : policyNames) {
       if (policyName.trim().isEmpty()) {
         continue;
       }
-      ErasureCodingPolicy ecPolicy =
-          SystemErasureCodingPolicies.getByName(policyName);
+      ErasureCodingPolicy ecPolicy = policiesByName.get(policyName);
       if (ecPolicy == null) {
-        ecPolicy = userPoliciesByName.get(policyName);
-        if (ecPolicy == null) {
-          String allPolicies = SystemErasureCodingPolicies.getPolicies()
-              .stream().map(ErasureCodingPolicy::getName)
-              .collect(Collectors.joining(", ")) + ", " +
-              userPoliciesByName.values().stream()
-              .map(ErasureCodingPolicy::getName)
-              .collect(Collectors.joining(", "));
-          String msg = String.format("EC policy '%s' specified at %s is not a "
-              + "valid policy. Please choose from list of available "
-              + "policies: [%s]",
-              policyName,
-              DFSConfigKeys.DFS_NAMENODE_EC_POLICIES_ENABLED_KEY,
-              allPolicies);
-          throw new IllegalArgumentException(msg);
-        }
+        String names = policiesByName.values()
+            .stream().map(ErasureCodingPolicy::getName)
+            .collect(Collectors.joining(", "));
+        String msg = String.format("EC policy '%s' specified at %s is not a "
+                + "valid policy. Please choose from list of available "
+                + "policies: [%s]",
+            policyName,
+            DFSConfigKeys.DFS_NAMENODE_EC_POLICIES_ENABLED_KEY,
+            names);
+        throw new IllegalArgumentException(msg);
       }
       enabledPoliciesByName.put(ecPolicy.getName(), ecPolicy);
     }
+    enabledPolicies =
+        enabledPoliciesByName.values().toArray(new ErasureCodingPolicy[0]);
+    allPolicies = policiesByName.values().toArray(new ErasureCodingPolicy[0]);
 
     maxCellSize = conf.getInt(
         DFSConfigKeys.DFS_NAMENODE_EC_POLICIES_MAX_CELLSIZE_KEY,
         DFSConfigKeys.DFS_NAMENODE_EC_POLICIES_MAX_CELLSIZE_DEFAULT);
-
-    /**
-     * TODO: HDFS-7859 persist into NameNode
-     * load persistent policies from image and editlog, which is done only once
-     * during NameNode startup. This can be done here or in a separate method.
-     */
   }
 
   /**
@@ -148,9 +162,7 @@ public final class ErasureCodingPolicyManager {
    * @return all policies
    */
   public ErasureCodingPolicy[] getEnabledPolicies() {
-    ErasureCodingPolicy[] results =
-        new ErasureCodingPolicy[enabledPoliciesByName.size()];
-    return enabledPoliciesByName.values().toArray(results);
+    return enabledPolicies;
   }
 
   /**
@@ -187,9 +199,7 @@ public final class ErasureCodingPolicyManager {
    * @return all policies
    */
   public ErasureCodingPolicy[] getPolicies() {
-    return Stream.concat(SystemErasureCodingPolicies.getPolicies().stream(),
-        userPoliciesByName.values().stream())
-        .toArray(ErasureCodingPolicy[]::new);
+    return allPolicies;
   }
 
   /**
@@ -197,11 +207,7 @@ public final class ErasureCodingPolicyManager {
    * @return ecPolicy, or null if not found
    */
   public ErasureCodingPolicy getByID(byte id) {
-    ErasureCodingPolicy policy = SystemErasureCodingPolicies.getByID(id);
-    if (policy == null) {
-      return this.userPoliciesByID.get(id);
-    }
-    return policy;
+    return this.policiesByID.get(id);
   }
 
   /**
@@ -209,11 +215,7 @@ public final class ErasureCodingPolicyManager {
    * @return ecPolicy, or null if not found
    */
   public ErasureCodingPolicy getByName(String name) {
-    ErasureCodingPolicy policy = SystemErasureCodingPolicies.getByName(name);
-    if (policy == null) {
-      return this.userPoliciesByName.get(name);
-    }
-    return policy;
+    return this.policiesByName.get(name);
   }
 
   /**
@@ -230,6 +232,9 @@ public final class ErasureCodingPolicyManager {
    */
   public synchronized ErasureCodingPolicy addPolicy(ErasureCodingPolicy policy)
       throws IllegalECPolicyException {
+    // Set the policy state to DISABLED when it is added to the system.
+    policy.setState(ErasureCodingPolicyState.DISABLED);
+
     if (!CodecUtil.hasCodec(policy.getCodecName())) {
       throw new IllegalECPolicyException("Codec name "
           + policy.getCodecName() + " is not supported");
@@ -256,15 +261,17 @@ public final class ErasureCodingPolicyManager {
     }
     policy.setName(assignedNewName);
     policy.setId(getNextAvailablePolicyID());
-    this.userPoliciesByName.put(policy.getName(), policy);
-    this.userPoliciesByID.put(policy.getId(), policy);
+    this.policiesByName.put(policy.getName(), policy);
+    this.policiesByID.put(policy.getId(), policy);
+    allPolicies = policiesByName.values().toArray(new ErasureCodingPolicy[0]);
     return policy;
   }
 
   private byte getNextAvailablePolicyID() {
-    byte currentId = this.userPoliciesByID.keySet().stream()
-        .max(Byte::compareTo).orElse(
-            ErasureCodeConstants.USER_DEFINED_POLICY_START_ID);
+    byte currentId = this.policiesByID.keySet().stream()
+        .max(Byte::compareTo)
+        .filter(id -> id >= ErasureCodeConstants.USER_DEFINED_POLICY_START_ID)
+        .orElse(ErasureCodeConstants.USER_DEFINED_POLICY_START_ID);
     return (byte) (currentId + 1);
   }
 
@@ -272,69 +279,71 @@ public final class ErasureCodingPolicyManager {
   * Remove a user erasure coding policy by policyName.
    */
   public synchronized void removePolicy(String name) {
-    if (SystemErasureCodingPolicies.getByName(name) != null) {
+    ErasureCodingPolicy ecPolicy = policiesByName.get(name);
+    if (ecPolicy == null) {
+      throw new IllegalArgumentException("The policy name " +
+          name + " does not exists");
+    }
+
+    if (ecPolicy.isSystemPolicy()) {
       throw new IllegalArgumentException("System erasure coding policy " +
           name + " cannot be removed");
     }
-    ErasureCodingPolicy policy = userPoliciesByName.get(name);
-    if (policy == null) {
-      throw new IllegalArgumentException("The policy name " +
-          name + " does not exists");
+
+    if (enabledPoliciesByName.containsKey(name)) {
+      enabledPoliciesByName.remove(name);
+      enabledPolicies =
+          enabledPoliciesByName.values().toArray(new ErasureCodingPolicy[0]);
     }
-    enabledPoliciesByName.remove(name);
-    removedPoliciesByName.put(name, policy);
+    ecPolicy.setState(ErasureCodingPolicyState.REMOVED);
+    LOG.info("Remove erasure coding policy " + name);
   }
 
+  @VisibleForTesting
   public List<ErasureCodingPolicy> getRemovedPolicies() {
-    return removedPoliciesByName.values().stream().collect(Collectors.toList());
+    ArrayList<ErasureCodingPolicy> removedPolicies =
+        new ArrayList<ErasureCodingPolicy>();
+    for (ErasureCodingPolicy ecPolicy : policiesByName.values()) {
+      if (ecPolicy.isRemoved()) {
+        removedPolicies.add(ecPolicy);
+      }
+    }
+    return removedPolicies;
   }
 
   /**
    * Disable an erasure coding policy by policyName.
    */
   public synchronized void disablePolicy(String name) {
-    ErasureCodingPolicy sysEcPolicy = SystemErasureCodingPolicies
-        .getByName(name);
-    ErasureCodingPolicy userEcPolicy = userPoliciesByName.get(name);
-    LOG.info("Disable the erasure coding policy " + name);
-    if (sysEcPolicy == null &&
-        userEcPolicy == null) {
+    ErasureCodingPolicy ecPolicy = policiesByName.get(name);
+    if (ecPolicy == null) {
       throw new IllegalArgumentException("The policy name " +
           name + " does not exists");
     }
 
-    if(sysEcPolicy != null){
-      enabledPoliciesByName.remove(name);
-      removedPoliciesByName.put(name, sysEcPolicy);
-    }
-    if(userEcPolicy != null){
+    if (enabledPoliciesByName.containsKey(name)) {
       enabledPoliciesByName.remove(name);
-      removedPoliciesByName.put(name, userEcPolicy);
+      enabledPolicies =
+          enabledPoliciesByName.values().toArray(new ErasureCodingPolicy[0]);
     }
+    ecPolicy.setState(ErasureCodingPolicyState.DISABLED);
+    LOG.info("Disable the erasure coding policy " + name);
   }
 
   /**
    * Enable an erasure coding policy by policyName.
    */
   public synchronized void enablePolicy(String name) {
-    ErasureCodingPolicy sysEcPolicy = SystemErasureCodingPolicies
-        .getByName(name);
-    ErasureCodingPolicy userEcPolicy = userPoliciesByName.get(name);
-    LOG.info("Enable the erasure coding policy " + name);
-    if (sysEcPolicy == null &&
-        userEcPolicy == null) {
+    ErasureCodingPolicy ecPolicy = policiesByName.get(name);
+    if (ecPolicy == null) {
       throw new IllegalArgumentException("The policy name " +
           name + " does not exists");
     }
 
-    if(sysEcPolicy != null){
-      enabledPoliciesByName.put(name, sysEcPolicy);
-      removedPoliciesByName.remove(name);
-    }
-    if(userEcPolicy != null) {
-      enabledPoliciesByName.put(name, userEcPolicy);
-      removedPoliciesByName.remove(name);
-    }
+    enabledPoliciesByName.put(name, ecPolicy);
+    ecPolicy.setState(ErasureCodingPolicyState.ENABLED);
+    enabledPolicies =
+        enabledPoliciesByName.values().toArray(new ErasureCodingPolicy[0]);
+    LOG.info("Enable the erasure coding policy " + name);
   }
-
 }
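
The net effect of the refactor above: a policy's lifecycle is now tracked by a state field (DISABLED on add, toggled by enablePolicy/disablePolicy, REMOVED instead of deletion) while the policy stays in the shared maps, and the hot read paths return cached arrays instead of copying on every call. A minimal sketch of that pattern, with hypothetical names (PolicyState stands in for ErasureCodingPolicyState):

    import java.util.Map;
    import java.util.TreeMap;

    // Illustrative only; not the actual Hadoop classes.
    enum PolicyState { DISABLED, ENABLED, REMOVED }

    class Policy {
      final String name;
      PolicyState state = PolicyState.DISABLED;
      Policy(String name) { this.name = name; }
    }

    class PolicyRegistry {
      private final Map<String, Policy> byName = new TreeMap<>();
      private Policy[] all = new Policy[0];   // cached read view

      synchronized void add(Policy p) {
        p.state = PolicyState.DISABLED;       // new policies start disabled
        byName.put(p.name, p);
        all = byName.values().toArray(new Policy[0]);  // refresh cache once
      }

      synchronized void remove(String name) {
        byName.get(name).state = PolicyState.REMOVED;  // kept in the map
      }

      Policy[] getAll() {                     // allocation-free read
        return all;
      }
    }

Note that removePolicy no longer deletes the entry: a removed policy stays in policiesByName/policiesByID with state REMOVED, which is why getRemovedPolicies above filters on isRemoved().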

http://git-wip-us.apache.org/repos/asf/hadoop/blob/200b1136/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirErasureCodingOp.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirErasureCodingOp.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirErasureCodingOp.java
index 426b42b..4f4befe 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirErasureCodingOp.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirErasureCodingOp.java
@@ -17,21 +17,8 @@
  */
 package org.apache.hadoop.hdfs.server.namenode;
 
-import java.io.ByteArrayInputStream;
-import java.io.ByteArrayOutputStream;
-import java.io.DataInputStream;
-import java.io.DataOutputStream;
-import java.io.FileNotFoundException;
-import java.io.IOException;
-import java.util.Arrays;
-import java.util.EnumSet;
-import java.util.List;
-import java.util.Map;
-import java.util.stream.Collectors;
-
 import com.google.common.base.Preconditions;
 import com.google.common.collect.Lists;
-
 import org.apache.hadoop.HadoopIllegalArgumentException;
 import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.XAttr;
@@ -39,14 +26,26 @@ import org.apache.hadoop.fs.XAttrSetFlag;
 import org.apache.hadoop.fs.permission.FsAction;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.XAttrHelper;
-import org.apache.hadoop.hdfs.protocol.IllegalECPolicyException;
 import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;
+import org.apache.hadoop.hdfs.protocol.IllegalECPolicyException;
 import org.apache.hadoop.hdfs.server.namenode.FSDirectory.DirOp;
 import org.apache.hadoop.io.IOUtils;
 import org.apache.hadoop.io.WritableUtils;
 import org.apache.hadoop.io.erasurecode.CodecRegistry;
 import org.apache.hadoop.security.AccessControlException;
 
+import java.io.ByteArrayInputStream;
+import java.io.ByteArrayOutputStream;
+import java.io.DataInputStream;
+import java.io.DataOutputStream;
+import java.io.FileNotFoundException;
+import java.io.IOException;
+import java.util.Arrays;
+import java.util.EnumSet;
+import java.util.List;
+import java.util.Map;
+import java.util.stream.Collectors;
+
 import static org.apache.hadoop.hdfs.server.common.HdfsServerConstants.XATTR_ERASURECODING_POLICY;
 
 /**
@@ -341,7 +340,7 @@ final class FSDirErasureCodingOp {
   static ErasureCodingPolicy[] getErasureCodingPolicies(final FSNamesystem fsn)
       throws IOException {
     assert fsn.hasReadLock();
-    return fsn.getErasureCodingPolicyManager().getEnabledPolicies();
+    return fsn.getErasureCodingPolicyManager().getPolicies();
   }
 
   /**
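
This is a behavioral change worth noting: the call that backs the client's getErasureCodingPolicies now returns every registered policy, not only the enabled ones. A caller that wants the old behavior has to filter by state; a minimal sketch, assuming ErasureCodingPolicy exposes a getState() accessor matching the setState() calls in the manager above:

    import java.util.ArrayList;
    import java.util.Collection;
    import java.util.List;
    import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;
    import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicyState;

    final class PolicyFilter {
      // Keep only the enabled policies out of the full list.
      static List<ErasureCodingPolicy> enabledOnly(
          Collection<ErasureCodingPolicy> all) {
        List<ErasureCodingPolicy> result = new ArrayList<>();
        for (ErasureCodingPolicy p : all) {
          if (p.getState() == ErasureCodingPolicyState.ENABLED) {
            result.add(p);
          }
        }
        return result;
      }
    }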

http://git-wip-us.apache.org/repos/asf/hadoop/blob/200b1136/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/ECAdmin.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/ECAdmin.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/ECAdmin.java
index 55d85ff..0b4e0c2 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/ECAdmin.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/ECAdmin.java
@@ -20,7 +20,6 @@ import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.conf.Configured;
 import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.DistributedFileSystem;
 import org.apache.hadoop.hdfs.protocol.AddECPolicyResponse;
 import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;
@@ -100,9 +99,7 @@ public class ECAdmin extends Configured implements Tool {
     @Override
     public String getLongUsage() {
       return getShortUsage() + "\n" +
-          "Get the list of enabled erasure coding policies.\n" +
-          "Policies can be enabled on the NameNode via `" +
-          DFSConfigKeys.DFS_NAMENODE_EC_POLICIES_ENABLED_KEY + "`.\n";
+          "Get the list of all erasure coding policies.\n";
     }
 
     @Override
@@ -117,17 +114,13 @@ public class ECAdmin extends Configured implements Tool {
         Collection<ErasureCodingPolicy> policies =
             dfs.getAllErasureCodingPolicies();
         if (policies.isEmpty()) {
-          System.out.println("No erasure coding policies are enabled on the " +
+          System.out.println("There is no erasure coding policies in the " +
               "cluster.");
-          System.out.println("The set of enabled policies can be " +
-              "configured at '" +
-              DFSConfigKeys.DFS_NAMENODE_EC_POLICIES_ENABLED_KEY + "' on the " +
-              "NameNode.");
         } else {
           System.out.println("Erasure Coding Policies:");
           for (ErasureCodingPolicy policy : policies) {
             if (policy != null) {
-              System.out.println("\t" + policy.getName());
+              System.out.println(policy.toString());
             }
           }
         }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/200b1136/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HDFSErasureCoding.md
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HDFSErasureCoding.md b/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HDFSErasureCoding.md
index a9c0f3d..5bd7c6d 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HDFSErasureCoding.md
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HDFSErasureCoding.md
@@ -192,7 +192,7 @@ Below are the details about each command.
 
  *  `[-listPolicies]`
 
-     Lists the set of enabled erasure coding policies. These names are suitable for use with the `setPolicy` command.
+     Lists all (enabled, disabled and removed) erasure coding policies registered in HDFS. Only the enabled policies are suitable for use with the `setPolicy` command.
 
  *  `[-addPolicies -policyFile <file>]`
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/200b1136/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDistributedFileSystem.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDistributedFileSystem.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDistributedFileSystem.java
index 9525609..bf2b002 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDistributedFileSystem.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDistributedFileSystem.java
@@ -1608,8 +1608,6 @@ public class TestDistributedFileSystem {
       assertEquals(policyName, ErasureCodingPolicyManager.getInstance().
           getByName(policyName).getName());
       fs.disableErasureCodingPolicy(policyName);
-      assertEquals(policyName, ErasureCodingPolicyManager.getInstance().
-          getRemovedPolicies().get(0).getName());
       fs.enableErasureCodingPolicy(policyName);
       assertEquals(policyName, ErasureCodingPolicyManager.getInstance().
           getByName(policyName).getName());

http://git-wip-us.apache.org/repos/asf/hadoop/blob/200b1136/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestErasureCodingPolicies.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestErasureCodingPolicies.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestErasureCodingPolicies.java
index c0946f7..0c545be 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestErasureCodingPolicies.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestErasureCodingPolicies.java
@@ -211,7 +211,8 @@ public class TestErasureCodingPolicies {
 
     // Only default policy should be enabled after restart
     Assert.assertEquals("Only default policy should be enabled after restart",
-        1, fs.getAllErasureCodingPolicies().size());
+        1,
+        ErasureCodingPolicyManager.getInstance().getEnabledPolicies().length);
 
     // Already set directory-level policies should still be in effect
     Path disabledPolicy = new Path(dir1, "afterDisabled");
@@ -383,6 +384,18 @@ public class TestErasureCodingPolicies {
         .getAllErasureCodingPolicies();
     assertTrue("All system policies should be enabled",
         allECPolicies.containsAll(SystemErasureCodingPolicies.getPolicies()));
+
+    // Query again after adding a new policy.
+    ECSchema toAddSchema = new ECSchema("rs", 5, 2);
+    ErasureCodingPolicy newPolicy =
+        new ErasureCodingPolicy(toAddSchema, 128 * 1024);
+    ErasureCodingPolicy[] policyArray = new ErasureCodingPolicy[]{newPolicy};
+    fs.addErasureCodingPolicies(policyArray);
+    allECPolicies = fs.getAllErasureCodingPolicies();
+    assertEquals("Should return new added policy",
+        SystemErasureCodingPolicies.getPolicies().size() + 1,
+        allECPolicies.size());
   }
 
   @Test
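
Outside of a test harness, the add-then-query pattern the new test exercises looks roughly like the sketch below. The isSucceed() and getErrorMsg() accessors on AddECPolicyResponse are assumptions here, inferred from the imports visible in ECAdmin above rather than shown in this diff:

    import org.apache.hadoop.hdfs.DistributedFileSystem;
    import org.apache.hadoop.hdfs.protocol.AddECPolicyResponse;
    import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;
    import org.apache.hadoop.io.erasurecode.ECSchema;

    final class AddPolicyExample {
      // Add a user-defined RS(5,2) policy with a 128 KiB cell size and
      // report the per-policy outcome. Assumes an initialized client `fs`.
      static void addCustomPolicy(DistributedFileSystem fs) throws Exception {
        ECSchema schema = new ECSchema("rs", 5, 2);  // codec, data, parity
        ErasureCodingPolicy policy =
            new ErasureCodingPolicy(schema, 128 * 1024);
        for (AddECPolicyResponse response :
            fs.addErasureCodingPolicies(new ErasureCodingPolicy[]{policy})) {
          System.out.println(response.getPolicy().getName() + ": "
              + (response.isSucceed() ? "added" : response.getErrorMsg()));
        }
      }
    }

Remember that, per the manager change above, the new policy starts in the DISABLED state and must be enabled before it can be set on a directory.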

http://git-wip-us.apache.org/repos/asf/hadoop/blob/200b1136/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStripedINodeFile.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStripedINodeFile.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStripedINodeFile.java
index d2f41be..c71d049 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStripedINodeFile.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStripedINodeFile.java
@@ -17,15 +17,6 @@
  */
 package org.apache.hadoop.hdfs.server.namenode;
 
-import static org.apache.hadoop.hdfs.protocol.BlockType.CONTIGUOUS;
-import static org.apache.hadoop.hdfs.protocol.BlockType.STRIPED;
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertTrue;
-import static org.junit.Assert.assertFalse;
-import static org.junit.Assert.fail;
-
-import java.io.IOException;
-
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
@@ -42,6 +33,7 @@ import org.apache.hadoop.hdfs.NameNodeProxies;
 import org.apache.hadoop.hdfs.StripedFileTestUtil;
 import org.apache.hadoop.hdfs.protocol.Block;
 import org.apache.hadoop.hdfs.protocol.BlockStoragePolicy;
+import org.apache.hadoop.hdfs.protocol.BlockType;
 import org.apache.hadoop.hdfs.protocol.ClientProtocol;
 import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants;
@@ -50,14 +42,23 @@ import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoStriped;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockStoragePolicySuite;
-import org.apache.hadoop.hdfs.protocol.BlockType;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants;
 import org.junit.Assert;
+import org.junit.Before;
 import org.junit.Rule;
 import org.junit.Test;
 import org.junit.rules.ExpectedException;
 import org.junit.rules.Timeout;
 
+import java.io.IOException;
+
+import static org.apache.hadoop.hdfs.protocol.BlockType.CONTIGUOUS;
+import static org.apache.hadoop.hdfs.protocol.BlockType.STRIPED;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertTrue;
+import static org.junit.Assert.fail;
+
 /**
  * This class tests INodeFile with striped feature.
  */
@@ -88,6 +89,12 @@ public class TestStripedINodeFile {
   @Rule
   public ExpectedException thrown = ExpectedException.none();
 
+  @Before
+  public void init() {
+    Configuration conf = new HdfsConfiguration();
+    ErasureCodingPolicyManager.getInstance().init(conf);
+  }
+
   @Test
   public void testInvalidECPolicy() throws IllegalArgumentException {
     thrown.expect(IllegalArgumentException.class);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/200b1136/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/testErasureCodingConf.xml
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/testErasureCodingConf.xml b/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/testErasureCodingConf.xml
index 1613323..9988ff3 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/testErasureCodingConf.xml
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/testErasureCodingConf.xml
@@ -135,7 +135,7 @@
       <comparators>
         <comparator>
           <type>SubstringComparator</type>
-          <expected-output>Get the list of enabled erasure coding policies</expected-output>
+          <expected-output>Get the list of all erasure coding policies</expected-output>
         </comparator>
         <comparator>
           <type>SubstringComparator</type>
@@ -411,6 +411,25 @@
     </test>
 
     <test>
+      <description>listPolicies : get the list of supported erasure coding policies</description>
+      <test-commands>
+        <ec-admin-command>-fs NAMENODE -listPolicies</ec-admin-command>
+      </test-commands>
+      <cleanup-commands>
+      </cleanup-commands>
+      <comparators>
+        <comparator>
+          <type>SubstringComparator</type>
+          <expected-output>XOR-2-1-128k</expected-output>
+        </comparator>
+        <comparator>
+          <type>SubstringComparator</type>
+          <expected-output>State=DISABLED</expected-output>
+        </comparator>
+      </comparators>
+    </test>
+
+    <test>
       <description>enablePolicy : enable the erasure coding policy</description>
       <test-commands>
         <ec-admin-command>-fs NAMENODE -enablePolicy -policy RS-6-3-1024k</ec-admin-command>




[20/50] [abbrv] hadoop git commit: YARN-6733. Add table for storing sub-application entities. Contributed by Vrushali C.

Posted by st...@apache.org.
YARN-6733. Add table for storing sub-application entities. Contributed by Vrushali C.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/a990ff70
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/a990ff70
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/a990ff70

Branch: refs/heads/HADOOP-13345
Commit: a990ff70c25e2ab746578500720c531f23e0851e
Parents: 61136d0
Author: Rohith Sharma K S <ro...@apache.org>
Authored: Tue Jul 25 15:25:21 2017 +0530
Committer: Varun Saxena <va...@apache.org>
Committed: Wed Aug 30 11:29:53 2017 +0530

----------------------------------------------------------------------
 .../storage/TimelineSchemaCreator.java          |  44 +++
 .../storage/application/ApplicationTable.java   |   2 +-
 .../storage/entity/EntityRowKey.java            |   6 +-
 .../subapplication/SubApplicationColumn.java    | 108 +++++++
 .../SubApplicationColumnFamily.java             |  68 +++++
 .../SubApplicationColumnPrefix.java             | 250 ++++++++++++++++
 .../subapplication/SubApplicationRowKey.java    | 290 +++++++++++++++++++
 .../SubApplicationRowKeyPrefix.java             |  89 ++++++
 .../subapplication/SubApplicationTable.java     | 174 +++++++++++
 .../storage/subapplication/package-info.java    |  28 ++
 .../storage/common/TestKeyConverters.java       |   4 +
 .../storage/common/TestRowKeys.java             |  26 ++
 .../storage/common/TestRowKeysAsString.java     |  29 ++
 13 files changed, 1114 insertions(+), 4 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/a990ff70/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/TimelineSchemaCreator.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/TimelineSchemaCreator.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/TimelineSchemaCreator.java
index 15885ce..210fd85 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/TimelineSchemaCreator.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/TimelineSchemaCreator.java
@@ -43,6 +43,7 @@ import org.apache.hadoop.yarn.server.timelineservice.storage.common.HBaseTimelin
 import org.apache.hadoop.yarn.server.timelineservice.storage.entity.EntityTable;
 import org.apache.hadoop.yarn.server.timelineservice.storage.flow.FlowActivityTable;
 import org.apache.hadoop.yarn.server.timelineservice.storage.flow.FlowRunTable;
+import org.apache.hadoop.yarn.server.timelineservice.storage.subapplication.SubApplicationTable;
 
 import com.google.common.annotations.VisibleForTesting;
 import org.slf4j.Logger;
@@ -63,7 +64,9 @@ public final class TimelineSchemaCreator {
       LoggerFactory.getLogger(TimelineSchemaCreator.class);
   private static final String SKIP_EXISTING_TABLE_OPTION_SHORT = "s";
   private static final String APP_METRICS_TTL_OPTION_SHORT = "ma";
+  private static final String SUB_APP_METRICS_TTL_OPTION_SHORT = "msa";
   private static final String APP_TABLE_NAME_SHORT = "a";
+  private static final String SUB_APP_TABLE_NAME_SHORT = "sa";
   private static final String APP_TO_FLOW_TABLE_NAME_SHORT = "a2f";
   private static final String ENTITY_METRICS_TTL_OPTION_SHORT = "me";
   private static final String ENTITY_TABLE_NAME_SHORT = "e";
@@ -121,6 +124,21 @@ public final class TimelineSchemaCreator {
         new ApplicationTable().setMetricsTTL(appMetricsTTL, hbaseConf);
       }
 
+      // Grab the subApplicationTableName argument
+      String subApplicationTableName = commandLine.getOptionValue(
+          SUB_APP_TABLE_NAME_SHORT);
+      if (StringUtils.isNotBlank(subApplicationTableName)) {
+        hbaseConf.set(SubApplicationTable.TABLE_NAME_CONF_NAME,
+            subApplicationTableName);
+      }
+      // Grab the subApplication metrics TTL
+      String subApplicationTableMetricsTTL = commandLine
+          .getOptionValue(SUB_APP_METRICS_TTL_OPTION_SHORT);
+      if (StringUtils.isNotBlank(subApplicationTableMetricsTTL)) {
+        int subAppMetricsTTL = Integer.parseInt(subApplicationTableMetricsTTL);
+        new SubApplicationTable().setMetricsTTL(subAppMetricsTTL, hbaseConf);
+      }
+
       // create all table schemas in hbase
       final boolean skipExisting = commandLine.hasOption(
           SKIP_EXISTING_TABLE_OPTION_SHORT);
@@ -182,6 +200,18 @@ public final class TimelineSchemaCreator {
     o.setRequired(false);
     options.addOption(o);
 
+    o = new Option(SUB_APP_TABLE_NAME_SHORT, "subApplicationTableName", true,
+        "subApplication table name");
+    o.setArgName("subApplicationTableName");
+    o.setRequired(false);
+    options.addOption(o);
+
+    o = new Option(SUB_APP_METRICS_TTL_OPTION_SHORT, "subApplicationMetricsTTL",
+        true, "TTL for the metrics column family of the SubApplication table");
+    o.setArgName("subApplicationMetricsTTL");
+    o.setRequired(false);
+    options.addOption(o);
+
     // Options without an argument
     // No need to set arg name since we do not need an argument here
     o = new Option(SKIP_EXISTING_TABLE_OPTION_SHORT, "skipExistingTable",
@@ -220,6 +250,11 @@ public final class TimelineSchemaCreator {
         " The name of the Application table\n");
     usage.append("[-applicationMetricsTTL <Application Table Metrics TTL>]" +
         " TTL for metrics in the Application table\n");
+    usage.append("[-subApplicationTableName <SubApplication Table Name>]" +
+        " The name of the SubApplication table\n");
+    usage.append("[-subApplicationMetricsTTL " +
+        " <SubApplication Table Metrics TTL>]" +
+        " TTL for metrics in the SubApplication table\n");
     usage.append("[-skipExistingTable] Whether to skip existing" +
         " hbase tables\n");
     System.out.println(usage.toString());
@@ -312,6 +347,15 @@ public final class TimelineSchemaCreator {
           throw e;
         }
       }
+      try {
+        new SubApplicationTable().createTable(admin, hbaseConf);
+      } catch (IOException e) {
+        if (skipExisting) {
+          LOG.warn("Skip and continue on: " + e.getMessage());
+        } else {
+          throw e;
+        }
+      }
     } finally {
       if (conn != null) {
         conn.close();
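
The two new flags mirror the existing application-table options: the table name is pushed into the HBase Configuration under SubApplicationTable.TABLE_NAME_CONF_NAME, and the TTL is applied through setMetricsTTL before the tables are created. A minimal programmatic sketch of the same wiring (the table name and TTL values below are made up for illustration):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.yarn.server.timelineservice.storage.subapplication.SubApplicationTable;

    public final class SubAppSchemaWiring {
      public static void main(String[] args) throws Exception {
        Configuration hbaseConf = HBaseConfiguration.create();
        // Equivalent of -subApplicationTableName on the command line.
        hbaseConf.set(SubApplicationTable.TABLE_NAME_CONF_NAME,
            "prod.timelineservice.subapplication");  // hypothetical name
        // Equivalent of -subApplicationMetricsTTL (value in seconds).
        new SubApplicationTable().setMetricsTTL(30 * 24 * 60 * 60, hbaseConf);
      }
    }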

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a990ff70/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/application/ApplicationTable.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/application/ApplicationTable.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/application/ApplicationTable.java
index d3bdd39..4da720e 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/application/ApplicationTable.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/application/ApplicationTable.java
@@ -70,7 +70,7 @@ import org.slf4j.LoggerFactory;
 public class ApplicationTable extends BaseTable<ApplicationTable> {
   /** application prefix. */
   private static final String PREFIX =
-      YarnConfiguration.TIMELINE_SERVICE_PREFIX + ".application";
+      YarnConfiguration.TIMELINE_SERVICE_PREFIX + "application";
 
   /** config param name that specifies the application table name. */
   public static final String TABLE_NAME_CONF_NAME = PREFIX + ".table.name";
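
The dot dropped from the prefix here fixes a doubled separator: YarnConfiguration.TIMELINE_SERVICE_PREFIX already ends with a trailing period, so the old concatenation produced a config key with two consecutive dots:

    yarn.timeline-service..application.table.name   (before)
    yarn.timeline-service.application.table.name    (after)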

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a990ff70/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/entity/EntityRowKey.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/entity/EntityRowKey.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/entity/EntityRowKey.java
index 7bf02f2..b85a9b0 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/entity/EntityRowKey.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/entity/EntityRowKey.java
@@ -209,18 +209,18 @@ public class EntityRowKey {
             Separator.EMPTY_BYTES);
       }
 
-      byte[] enitityIdPrefix = Bytes.toBytes(rowKey.getEntityIdPrefix());
+      byte[] entityIdPrefix = Bytes.toBytes(rowKey.getEntityIdPrefix());
 
       if (rowKey.getEntityId() == null) {
         return Separator.QUALIFIERS.join(first, second, third, entityType,
-            enitityIdPrefix, Separator.EMPTY_BYTES);
+            entityIdPrefix, Separator.EMPTY_BYTES);
       }
 
       byte[] entityId = Separator.encode(rowKey.getEntityId(), Separator.SPACE,
           Separator.TAB, Separator.QUALIFIERS);
 
       byte[] fourth =
-          Separator.QUALIFIERS.join(entityType, enitityIdPrefix, entityId);
+          Separator.QUALIFIERS.join(entityType, entityIdPrefix, entityId);
 
       return Separator.QUALIFIERS.join(first, second, third, fourth);
     }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a990ff70/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/subapplication/SubApplicationColumn.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/subapplication/SubApplicationColumn.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/subapplication/SubApplicationColumn.java
new file mode 100644
index 0000000..46b0cc9
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/subapplication/SubApplicationColumn.java
@@ -0,0 +1,108 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.yarn.server.timelineservice.storage.subapplication;
+
+import java.io.IOException;
+
+import org.apache.hadoop.hbase.client.Result;
+import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.yarn.server.timelineservice.storage.common.Column;
+import org.apache.hadoop.yarn.server.timelineservice.storage.common.ColumnFamily;
+import org.apache.hadoop.yarn.server.timelineservice.storage.common.ColumnHelper;
+import org.apache.hadoop.yarn.server.timelineservice.storage.common.GenericConverter;
+import org.apache.hadoop.yarn.server.timelineservice.storage.common.LongConverter;
+import org.apache.hadoop.yarn.server.timelineservice.storage.common.Separator;
+import org.apache.hadoop.yarn.server.timelineservice.storage.common.TypedBufferedMutator;
+import org.apache.hadoop.yarn.server.timelineservice.storage.common.ValueConverter;
+import org.apache.hadoop.yarn.server.timelineservice.storage.flow.Attribute;
+
+/**
+ * Identifies fully qualified columns for the {@link SubApplicationTable}.
+ */
+public enum SubApplicationColumn implements Column<SubApplicationTable> {
+
+  /**
+   * Identifier for the sub application.
+   */
+  ID(SubApplicationColumnFamily.INFO, "id"),
+
+  /**
+   * The type of sub application.
+   */
+  TYPE(SubApplicationColumnFamily.INFO, "type"),
+
+  /**
+   * When the sub application was created.
+   */
+  CREATED_TIME(SubApplicationColumnFamily.INFO, "created_time",
+      new LongConverter()),
+
+  /**
+   * The version of the flow that this sub application belongs to.
+   */
+  FLOW_VERSION(SubApplicationColumnFamily.INFO, "flow_version");
+
+  private final ColumnHelper<SubApplicationTable> column;
+  private final ColumnFamily<SubApplicationTable> columnFamily;
+  private final String columnQualifier;
+  private final byte[] columnQualifierBytes;
+
+  SubApplicationColumn(ColumnFamily<SubApplicationTable> columnFamily,
+      String columnQualifier) {
+    this(columnFamily, columnQualifier, GenericConverter.getInstance());
+  }
+
+  SubApplicationColumn(ColumnFamily<SubApplicationTable> columnFamily,
+      String columnQualifier, ValueConverter converter) {
+    this.columnFamily = columnFamily;
+    this.columnQualifier = columnQualifier;
+    // Future-proof by ensuring the right column prefix hygiene.
+    this.columnQualifierBytes =
+        Bytes.toBytes(Separator.SPACE.encode(columnQualifier));
+    this.column = new ColumnHelper<SubApplicationTable>(columnFamily,
+        converter);
+  }
+
+  public void store(byte[] rowKey,
+      TypedBufferedMutator<SubApplicationTable> tableMutator, Long timestamp,
+      Object inputValue, Attribute... attributes) throws IOException {
+    column.store(rowKey, tableMutator, columnQualifierBytes, timestamp,
+        inputValue, attributes);
+  }
+
+  public Object readResult(Result result) throws IOException {
+    return column.readResult(result, columnQualifierBytes);
+  }
+
+  @Override
+  public byte[] getColumnQualifierBytes() {
+    return columnQualifierBytes.clone();
+  }
+
+  @Override
+  public byte[] getColumnFamilyBytes() {
+    return columnFamily.getBytes();
+  }
+
+  @Override
+  public ValueConverter getValueConverter() {
+    return column.getValueConverter();
+  }
+
+}
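
As a quick read-path illustration, the same enum is used to pull the fixed columns back out of a fetched row. A hedged sketch; CREATED_TIME decodes to a Long because that constant is declared with a LongConverter:

    import java.io.IOException;
    import org.apache.hadoop.hbase.client.Result;
    import org.apache.hadoop.yarn.server.timelineservice.storage.subapplication.SubApplicationColumn;

    final class SubAppRowPrinter {
      // Print the fixed columns of one sub-application row.
      static void print(Result row) throws IOException {
        Object id = SubApplicationColumn.ID.readResult(row);
        Object created = SubApplicationColumn.CREATED_TIME.readResult(row);
        System.out.println("id=" + id + ", created_time=" + created);
      }
    }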

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a990ff70/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/subapplication/SubApplicationColumnFamily.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/subapplication/SubApplicationColumnFamily.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/subapplication/SubApplicationColumnFamily.java
new file mode 100644
index 0000000..1d7f8fd
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/subapplication/SubApplicationColumnFamily.java
@@ -0,0 +1,68 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.yarn.server.timelineservice.storage.subapplication;
+
+import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.yarn.server.timelineservice.storage.common.ColumnFamily;
+import org.apache.hadoop.yarn.server.timelineservice.storage.common.Separator;
+
+/**
+ * Represents the sub application table column families.
+ */
+public enum SubApplicationColumnFamily
+    implements ColumnFamily<SubApplicationTable> {
+
+  /**
+   * The info column family houses known columns, specifically the ones
+   * included in column family filters.
+   */
+  INFO("i"),
+
+  /**
+   * Configurations are in a separate column family for two reasons:
+   * a) the size of the config values can be very large, and
+   * b) we expect config values to be accessed separately from the
+   * metrics and info columns.
+   */
+  CONFIGS("c"),
+
+  /**
+   * Metrics have a separate column family, because they have a separate TTL.
+   */
+  METRICS("m");
+
+  /**
+   * Byte representation of this column family.
+   */
+  private final byte[] bytes;
+
+  /**
+   * @param value
+   *          create a column family with this name. Must be lower case and
+   *          without spaces.
+   */
+  SubApplicationColumnFamily(String value) {
+    // column families should be lower case and not contain any spaces.
+    this.bytes = Bytes.toBytes(Separator.SPACE.encode(value));
+  }
+
+  public byte[] getBytes() {
+    return Bytes.copy(bytes);
+  }
+
+}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a990ff70/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/subapplication/SubApplicationColumnPrefix.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/subapplication/SubApplicationColumnPrefix.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/subapplication/SubApplicationColumnPrefix.java
new file mode 100644
index 0000000..06ecced
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/subapplication/SubApplicationColumnPrefix.java
@@ -0,0 +1,250 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.yarn.server.timelineservice.storage.subapplication;
+
+import java.io.IOException;
+import java.util.Map;
+import java.util.NavigableMap;
+
+import org.apache.hadoop.hbase.client.Result;
+import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.yarn.server.timelineservice.storage.common.ColumnFamily;
+import org.apache.hadoop.yarn.server.timelineservice.storage.common.ColumnHelper;
+import org.apache.hadoop.yarn.server.timelineservice.storage.common.ColumnPrefix;
+import org.apache.hadoop.yarn.server.timelineservice.storage.common.GenericConverter;
+import org.apache.hadoop.yarn.server.timelineservice.storage.common.KeyConverter;
+import org.apache.hadoop.yarn.server.timelineservice.storage.common.LongConverter;
+import org.apache.hadoop.yarn.server.timelineservice.storage.common.Separator;
+import org.apache.hadoop.yarn.server.timelineservice.storage.common.TypedBufferedMutator;
+import org.apache.hadoop.yarn.server.timelineservice.storage.common.ValueConverter;
+import org.apache.hadoop.yarn.server.timelineservice.storage.flow.Attribute;
+
+/**
+ * Identifies partially qualified columns for the sub app table.
+ */
+public enum SubApplicationColumnPrefix
+    implements ColumnPrefix<SubApplicationTable> {
+
+  /**
+   * To store TimelineEntity getIsRelatedToEntities values.
+   */
+  IS_RELATED_TO(SubApplicationColumnFamily.INFO, "s"),
+
+  /**
+   * To store TimelineEntity getRelatesToEntities values.
+   */
+  RELATES_TO(SubApplicationColumnFamily.INFO, "r"),
+
+  /**
+   * To store TimelineEntity info values.
+   */
+  INFO(SubApplicationColumnFamily.INFO, "i"),
+
+  /**
+   * Lifecycle events for an entity.
+   */
+  EVENT(SubApplicationColumnFamily.INFO, "e", true),
+
+  /**
+   * Config column stores configuration with config key as the column name.
+   */
+  CONFIG(SubApplicationColumnFamily.CONFIGS, null),
+
+  /**
+   * Metrics are stored with the metric name as the column name.
+   */
+  METRIC(SubApplicationColumnFamily.METRICS, null, new LongConverter());
+
+  private final ColumnHelper<SubApplicationTable> column;
+  private final ColumnFamily<SubApplicationTable> columnFamily;
+
+  /**
+   * Can be null for those cases where the provided column qualifier is the
+   * entire column name.
+   */
+  private final String columnPrefix;
+  private final byte[] columnPrefixBytes;
+
+  /**
+   * Private constructor, meant to be used by the enum definition.
+   *
+   * @param columnFamily that this column is stored in.
+   * @param columnPrefix for this column.
+   */
+  SubApplicationColumnPrefix(ColumnFamily<SubApplicationTable> columnFamily,
+      String columnPrefix) {
+    this(columnFamily, columnPrefix, false, GenericConverter.getInstance());
+  }
+
+  SubApplicationColumnPrefix(ColumnFamily<SubApplicationTable> columnFamily,
+      String columnPrefix, boolean compoundColQual) {
+    this(columnFamily, columnPrefix, compoundColQual,
+        GenericConverter.getInstance());
+  }
+
+  SubApplicationColumnPrefix(ColumnFamily<SubApplicationTable> columnFamily,
+      String columnPrefix, ValueConverter converter) {
+    this(columnFamily, columnPrefix, false, converter);
+  }
+
+  /**
+   * Private constructor, meant to be used by the enum definition.
+   *
+   * @param columnFamily that this column is stored in.
+   * @param columnPrefix for this column.
+   * @param compoundColQual whether the column qualifier is compound.
+   * @param converter used to encode/decode values to be stored in HBase for
+   * this column prefix.
+   */
+  SubApplicationColumnPrefix(ColumnFamily<SubApplicationTable> columnFamily,
+      String columnPrefix, boolean compoundColQual, ValueConverter converter) {
+    column = new ColumnHelper<SubApplicationTable>(columnFamily, converter);
+    this.columnFamily = columnFamily;
+    this.columnPrefix = columnPrefix;
+    if (columnPrefix == null) {
+      this.columnPrefixBytes = null;
+    } else {
+      // Future-proof by ensuring the right column prefix hygiene.
+      this.columnPrefixBytes =
+          Bytes.toBytes(Separator.SPACE.encode(columnPrefix));
+    }
+  }
+
+  /**
+   * @return the column prefix value
+   */
+  public String getColumnPrefix() {
+    return columnPrefix;
+  }
+
+  @Override
+  public byte[] getColumnPrefixBytes(byte[] qualifierPrefix) {
+    return ColumnHelper.getColumnQualifier(
+        this.columnPrefixBytes, qualifierPrefix);
+  }
+
+  @Override
+  public byte[] getColumnPrefixBytes(String qualifierPrefix) {
+    return ColumnHelper.getColumnQualifier(
+        this.columnPrefixBytes, qualifierPrefix);
+  }
+
+  @Override
+  public byte[] getColumnFamilyBytes() {
+    return columnFamily.getBytes();
+  }
+
+  @Override
+  public ValueConverter getValueConverter() {
+    return column.getValueConverter();
+  }
+
+  /*
+   * (non-Javadoc)
+   *
+   * @see
+   * org.apache.hadoop.yarn.server.timelineservice.storage.common.ColumnPrefix
+   * #store(byte[],
+   * org.apache.hadoop.yarn.server.timelineservice.storage.common.
+   * TypedBufferedMutator, java.lang.String, java.lang.Long, java.lang.Object,
+   * org.apache.hadoop.yarn.server.timelineservice.storage.flow.Attribute[])
+   */
+  public void store(byte[] rowKey,
+      TypedBufferedMutator<SubApplicationTable> tableMutator, String qualifier,
+      Long timestamp, Object inputValue, Attribute... attributes)
+      throws IOException {
+
+    // Null check
+    if (qualifier == null) {
+      throw new IOException("Cannot store column with null qualifier in "
+          + tableMutator.getName().getNameAsString());
+    }
+
+    byte[] columnQualifier = getColumnPrefixBytes(qualifier);
+
+    column.store(rowKey, tableMutator, columnQualifier, timestamp, inputValue,
+        attributes);
+  }
+
+  /*
+   * (non-Javadoc)
+   *
+   * @see
+   * org.apache.hadoop.yarn.server.timelineservice.storage.common.ColumnPrefix
+   * #store(byte[],
+   * org.apache.hadoop.yarn.server.timelineservice.storage.common.
+   * TypedBufferedMutator, java.lang.String, java.lang.Long, java.lang.Object)
+   */
+  public void store(byte[] rowKey,
+      TypedBufferedMutator<SubApplicationTable> tableMutator, byte[] qualifier,
+      Long timestamp, Object inputValue, Attribute... attributes)
+      throws IOException {
+
+    // Null check
+    if (qualifier == null) {
+      throw new IOException("Cannot store column with null qualifier in "
+          + tableMutator.getName().getNameAsString());
+    }
+
+    byte[] columnQualifier = getColumnPrefixBytes(qualifier);
+
+    column.store(rowKey, tableMutator, columnQualifier, timestamp, inputValue,
+        attributes);
+  }
+
+  /*
+   * (non-Javadoc)
+   *
+   * @see
+   * org.apache.hadoop.yarn.server.timelineservice.storage.common.ColumnPrefix
+   * #readResult(org.apache.hadoop.hbase.client.Result, java.lang.String)
+   */
+  public Object readResult(Result result, String qualifier) throws IOException {
+    byte[] columnQualifier =
+        ColumnHelper.getColumnQualifier(this.columnPrefixBytes, qualifier);
+    return column.readResult(result, columnQualifier);
+  }
+
+  /*
+   * (non-Javadoc)
+   *
+   * @see
+   * org.apache.hadoop.yarn.server.timelineservice.storage.common.ColumnPrefix
+   * #readResults(org.apache.hadoop.hbase.client.Result,
+   * org.apache.hadoop.yarn.server.timelineservice.storage.common.KeyConverter)
+   */
+  public <K> Map<K, Object> readResults(Result result,
+      KeyConverter<K> keyConverter) throws IOException {
+    return column.readResults(result, columnPrefixBytes, keyConverter);
+  }
+
+  /*
+   * (non-Javadoc)
+   *
+   * @see
+   * org.apache.hadoop.yarn.server.timelineservice.storage.common.ColumnPrefix
+   * #readResultsWithTimestamps(org.apache.hadoop.hbase.client.Result,
+   * org.apache.hadoop.yarn.server.timelineservice.storage.common.KeyConverter)
+   */
+  public <K, V> NavigableMap<K, NavigableMap<Long, V>>
+      readResultsWithTimestamps(Result result, KeyConverter<K> keyConverter)
+      throws IOException {
+    return column.readResultsWithTimestamps(result, columnPrefixBytes,
+        keyConverter);
+  }
+
+}
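
Prefixed columns come back as maps keyed by the dynamic part of the qualifier; for metrics, the timestamped variant yields one time series per metric name. A sketch, assuming the StringKeyConverter used elsewhere in this storage layer fits the metric-name qualifiers:

    import java.io.IOException;
    import java.util.NavigableMap;
    import org.apache.hadoop.hbase.client.Result;
    import org.apache.hadoop.yarn.server.timelineservice.storage.common.StringKeyConverter;
    import org.apache.hadoop.yarn.server.timelineservice.storage.subapplication.SubApplicationColumnPrefix;

    final class SubAppMetricsReader {
      // Read every metric time series in one row:
      // metric name -> (timestamp -> value).
      static NavigableMap<String, NavigableMap<Long, Number>> read(Result row)
          throws IOException {
        return SubApplicationColumnPrefix.METRIC.readResultsWithTimestamps(
            row, new StringKeyConverter());
      }
    }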

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a990ff70/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/subapplication/SubApplicationRowKey.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/subapplication/SubApplicationRowKey.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/subapplication/SubApplicationRowKey.java
new file mode 100644
index 0000000..fb1f774
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/subapplication/SubApplicationRowKey.java
@@ -0,0 +1,290 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.yarn.server.timelineservice.storage.subapplication;
+
+import java.util.List;
+
+import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.yarn.server.timelineservice.reader.TimelineReaderUtils;
+import org.apache.hadoop.yarn.server.timelineservice.storage.common.KeyConverter;
+import org.apache.hadoop.yarn.server.timelineservice.storage.common.KeyConverterToString;
+import org.apache.hadoop.yarn.server.timelineservice.storage.common.Separator;
+
+/**
+ * Represents a rowkey for the sub app table.
+ */
+public class SubApplicationRowKey {
+  private final String subAppUserId;
+  private final String clusterId;
+  private final String entityType;
+  private final Long entityIdPrefix;
+  private final String entityId;
+  private final String userId;
+  private final SubApplicationRowKeyConverter subAppRowKeyConverter =
+      new SubApplicationRowKeyConverter();
+
+  public SubApplicationRowKey(String subAppUserId, String clusterId,
+      String entityType, Long entityIdPrefix, String entityId, String userId) {
+    this.subAppUserId = subAppUserId;
+    this.clusterId = clusterId;
+    this.entityType = entityType;
+    this.entityIdPrefix = entityIdPrefix;
+    this.entityId = entityId;
+    this.userId = userId;
+  }
+
+  public String getClusterId() {
+    return clusterId;
+  }
+
+  public String getSubAppUserId() {
+    return subAppUserId;
+  }
+
+  public String getEntityType() {
+    return entityType;
+  }
+
+  public String getEntityId() {
+    return entityId;
+  }
+
+  public Long getEntityIdPrefix() {
+    return entityIdPrefix;
+  }
+
+  public String getUserId() {
+    return userId;
+  }
+
+  /**
+   * Constructs a row key for the sub app table as follows:
+   * {@code subAppUserId!clusterId!entityType!entityIdPrefix!entityId!userId}.
+   * Typically used while querying a specific sub app.
+   *
+   * subAppUserId is usually the doAsUser.
+   * userId is the yarn user that the AM runs as.
+   *
+   * @return byte array with the row key.
+   */
+  public byte[] getRowKey() {
+    return subAppRowKeyConverter.encode(this);
+  }
+
+  /**
+   * Given the raw row key as bytes, returns the row key as an object.
+   *
+   * @param rowKey byte representation of row key.
+   * @return A <cite>SubApplicationRowKey</cite> object.
+   */
+  public static SubApplicationRowKey parseRowKey(byte[] rowKey) {
+    return new SubApplicationRowKeyConverter().decode(rowKey);
+  }
+
+  /**
+   * Constructs a row key for the sub app table as follows:
+   * <p>
+   * {@code subAppUserId!clusterId!entityType!entityIdPrefix!entityId!userId}.
+   *
+   * subAppUserId is usually the doAsUser.
+   * userId is the yarn user that the AM runs as.
+   *
+   * </p>
+   *
+   * @return String representation of row key.
+   */
+  public String getRowKeyAsString() {
+    return subAppRowKeyConverter.encodeAsString(this);
+  }
+
+  /**
+   * Given the encoded row key as string, returns the row key as an object.
+   *
+   * @param encodedRowKey String representation of row key.
+   * @return A <cite>SubApplicationRowKey</cite> object.
+   */
+  public static SubApplicationRowKey parseRowKeyFromString(
+      String encodedRowKey) {
+    return new SubApplicationRowKeyConverter().decodeFromString(encodedRowKey);
+  }
+
+  /**
+   * Encodes and decodes row key for sub app table.
+   * The row key is of the form:
+   * subAppUserId!clusterId!entityType!entityPrefix!entityId!userId
+   *
+   * subAppUserId is usually the doAsUser.
+   * userId is the yarn user that the AM runs as.
+   *
+   */
+  private static final class SubApplicationRowKeyConverter
+      implements KeyConverter<SubApplicationRowKey>,
+      KeyConverterToString<SubApplicationRowKey> {
+
+    private SubApplicationRowKeyConverter() {
+    }
+
+    /**
+     * The sub app row key is of the form
+     * subAppUserId!clusterId!entityType!entityPrefix!entityId!userId,
+     * with each segment separated by !.
+     *
+     * subAppUserId is usually the doAsUser.
+     * userId is the yarn user that the AM runs as.
+     *
+     * The sizes below indicate sizes of each one of these
+     * segments in sequence. clusterId, subAppUserId, entityType,
+     * entityId and userId are strings.
+     * The entity prefix is a long and hence 8 bytes in size. Strings are
+     * variable in size (i.e. end whenever separator is encountered).
+     * This is used while decoding and helps in determining where to split.
+     */
+    private static final int[] SEGMENT_SIZES = {Separator.VARIABLE_SIZE,
+        Separator.VARIABLE_SIZE, Separator.VARIABLE_SIZE, Bytes.SIZEOF_LONG,
+        Separator.VARIABLE_SIZE, Separator.VARIABLE_SIZE};
+
+    /*
+     * (non-Javadoc)
+     *
+     * Encodes SubApplicationRowKey object into a byte array with each
+     * component/field in SubApplicationRowKey separated by
+     * Separator#QUALIFIERS.
+     * This leads to a sub app table row key of the form
+     * subAppUserId!clusterId!entityType!entityPrefix!entityId!userId
+     *
+     * subAppUserId is usually the doAsUser.
+     * userId is the yarn user that the AM runs as.
+     *
+     * If entityType in the passed SubApplicationRowKey object is null (and
+     * the fields preceding it, i.e. subAppUserId and clusterId, are not
+     * null), this returns a row key prefix of the form subAppUserId!clusterId!
+     * If entityId in SubApplicationRowKey is null
+     * (other components are not null), this returns a row key prefix
+     * of the form subAppUserId!clusterId!entityType!entityPrefix!
+     *
+     * @see org.apache.hadoop.yarn.server.timelineservice.storage.common
+     * .KeyConverter#encode(java.lang.Object)
+     */
+    @Override
+    public byte[] encode(SubApplicationRowKey rowKey) {
+      byte[] subAppUser = Separator.encode(rowKey.getSubAppUserId(),
+          Separator.SPACE, Separator.TAB, Separator.QUALIFIERS);
+      byte[] cluster = Separator.encode(rowKey.getClusterId(), Separator.SPACE,
+          Separator.TAB, Separator.QUALIFIERS);
+      byte[] first = Separator.QUALIFIERS.join(subAppUser, cluster);
+      if (rowKey.getEntityType() == null) {
+        return first;
+      }
+      byte[] entityType = Separator.encode(rowKey.getEntityType(),
+          Separator.SPACE, Separator.TAB, Separator.QUALIFIERS);
+
+      if (rowKey.getEntityIdPrefix() == null) {
+        return Separator.QUALIFIERS.join(first, entityType,
+            Separator.EMPTY_BYTES);
+      }
+
+      byte[] entityIdPrefix = Bytes.toBytes(rowKey.getEntityIdPrefix());
+
+      if (rowKey.getEntityId() == null) {
+        return Separator.QUALIFIERS.join(first, entityType, entityIdPrefix,
+            Separator.EMPTY_BYTES);
+      }
+
+      byte[] entityId = Separator.encode(rowKey.getEntityId(), Separator.SPACE,
+          Separator.TAB, Separator.QUALIFIERS);
+
+      byte[] userId = Separator.encode(rowKey.getUserId(),
+          Separator.SPACE, Separator.TAB, Separator.QUALIFIERS);
+
+      byte[] second = Separator.QUALIFIERS.join(entityType, entityIdPrefix,
+          entityId, userId);
+
+      return Separator.QUALIFIERS.join(first, second);
+    }
+
+    /*
+     * (non-Javadoc)
+     *
+     * Decodes a sub application row key of the form
+     * subAppUserId!clusterId!entityType!entityPrefix!entityId!userId
+     *
+     * subAppUserId is usually the doAsUser.
+     * userId is the yarn user that the AM runs as.
+     *
+     * represented in byte format
+     * and converts it into a SubApplicationRowKey object.
+     *
+     * @see org.apache.hadoop.yarn.server.timelineservice.storage.common
+     * .KeyConverter#decode(byte[])
+     */
+    @Override
+    public SubApplicationRowKey decode(byte[] rowKey) {
+      byte[][] rowKeyComponents =
+          Separator.QUALIFIERS.split(rowKey, SEGMENT_SIZES);
+      if (rowKeyComponents.length != 6) {
+        throw new IllegalArgumentException(
+            "the row key is not valid for a sub app");
+      }
+      String subAppUserId =
+          Separator.decode(Bytes.toString(rowKeyComponents[0]),
+              Separator.QUALIFIERS, Separator.TAB, Separator.SPACE);
+      String clusterId = Separator.decode(Bytes.toString(rowKeyComponents[1]),
+          Separator.QUALIFIERS, Separator.TAB, Separator.SPACE);
+      String entityType = Separator.decode(Bytes.toString(rowKeyComponents[2]),
+          Separator.QUALIFIERS, Separator.TAB, Separator.SPACE);
+
+      Long entityPrefixId = Bytes.toLong(rowKeyComponents[3]);
+
+      String entityId = Separator.decode(Bytes.toString(rowKeyComponents[4]),
+          Separator.QUALIFIERS, Separator.TAB, Separator.SPACE);
+      String userId =
+          Separator.decode(Bytes.toString(rowKeyComponents[5]),
+              Separator.QUALIFIERS, Separator.TAB, Separator.SPACE);
+
+      return new SubApplicationRowKey(subAppUserId, clusterId, entityType,
+          entityPrefixId, entityId, userId);
+    }
+
+    @Override
+    public String encodeAsString(SubApplicationRowKey key) {
+      if (key.subAppUserId == null || key.clusterId == null
+          || key.entityType == null || key.entityIdPrefix == null
+          || key.entityId == null || key.userId == null) {
+        throw new IllegalArgumentException();
+      }
+      return TimelineReaderUtils.joinAndEscapeStrings(
+          new String[] {key.subAppUserId, key.clusterId, key.entityType,
+              key.entityIdPrefix.toString(), key.entityId, key.userId});
+    }
+
+    @Override
+    public SubApplicationRowKey decodeFromString(String encodedRowKey) {
+      List<String> split = TimelineReaderUtils.split(encodedRowKey);
+      if (split == null || split.size() != 6) {
+        throw new IllegalArgumentException(
+            "Invalid row key for sub app table.");
+      }
+      Long entityIdPrefix = Long.valueOf(split.get(3));
+      return new SubApplicationRowKey(split.get(0), split.get(1),
+          split.get(2), entityIdPrefix, split.get(4), split.get(5));
+    }
+  }
+}

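For illustration, a minimal sketch of how the new row key round-trips through its byte encoding; the values are made up and only the classes added above are assumed:

    SubApplicationRowKey key = new SubApplicationRowKey(
        "doAsUser1", "cluster1", "DAG", 54321L, "dag_id_1", "amUser1");

    // Encode into the HBase row key bytes of the form
    // subAppUserId!clusterId!entityType!entityPrefix!entityId!userId.
    byte[] rowKeyBytes = key.getRowKey();

    // Decode the bytes back; all six components are recovered.
    SubApplicationRowKey parsed = SubApplicationRowKey.parseRowKey(rowKeyBytes);
    assert "cluster1".equals(parsed.getClusterId());
    assert Long.valueOf(54321L).equals(parsed.getEntityIdPrefix());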
http://git-wip-us.apache.org/repos/asf/hadoop/blob/a990ff70/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/subapplication/SubApplicationRowKeyPrefix.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/subapplication/SubApplicationRowKeyPrefix.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/subapplication/SubApplicationRowKeyPrefix.java
new file mode 100644
index 0000000..e42c6cd
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/subapplication/SubApplicationRowKeyPrefix.java
@@ -0,0 +1,89 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.yarn.server.timelineservice.storage.subapplication;
+
+import org.apache.hadoop.yarn.server.timelineservice.storage.common.RowKeyPrefix;
+
+/**
+ * Represents a partial rowkey, without the entityId or without both the
+ * entityType and entityId, for the sub application table.
+ */
+public class SubApplicationRowKeyPrefix extends SubApplicationRowKey
+    implements RowKeyPrefix<SubApplicationRowKey> {
+
+  /**
+   * Creates a prefix which generates the following rowKeyPrefixes for the sub
+   * application table:
+   * {@code subAppUserId!clusterId!entityType!entityPrefix!entityId!userId}.
+   *
+   * @param subAppUserId
+   *          identifying the subApp User
+   * @param clusterId
+   *          identifying the cluster
+   * @param entityType
+   *          which entity type
+   * @param entityIdPrefix
+   *          for entityId
+   * @param entityId
+   *          for an entity
+   * @param userId
+   *          for the user who runs the AM
+   *
+   * subAppUserId is usually the doAsUser.
+   * userId is the yarn user that the AM runs as.
+   *
+   */
+  public SubApplicationRowKeyPrefix(String subAppUserId, String clusterId,
+      String entityType, Long entityIdPrefix, String entityId,
+      String userId) {
+    super(subAppUserId, clusterId, entityType, entityIdPrefix, entityId,
+        userId);
+  }
+
+  /**
+   * Creates a prefix which generates the following rowKeyPrefixes for the sub
+   * application table:
+   * {@code subAppUserId!clusterId!}.
+   *
+   * subAppUserId is usually the doAsUser.
+   * userId is the yarn user that the AM runs as.
+   *
+   * @param clusterId
+   *          identifying the cluster
+   * @param subAppUserId
+   *          identifying the sub app user
+   * @param userId
+   *          identifying the user who runs the AM
+   */
+  public SubApplicationRowKeyPrefix(String clusterId, String subAppUserId,
+      String userId) {
+    this(subAppUserId, clusterId, null, null, null, userId);
+  }
+
+  /*
+   * (non-Javadoc)
+   *
+   * @see org.apache.hadoop.yarn.server.timelineservice.storage.subapplication.
+   * RowKeyPrefix#getRowKeyPrefix()
+   */
+  public byte[] getRowKeyPrefix() {
+    return super.getRowKey();
+  }
+
+}

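A hedged sketch of how a caller might use this prefix to bound an HBase scan; the Scan wiring below is an assumption about the reader side, not code from this patch:

    import org.apache.hadoop.hbase.client.Scan;

    // Prefix covering every sub app entity of one (doAsUser, cluster, AM user).
    SubApplicationRowKeyPrefix prefix =
        new SubApplicationRowKeyPrefix("cluster1", "doAsUser1", "amUser1");

    // Restrict the scan to rows starting with subAppUserId!clusterId!.
    Scan scan = new Scan();
    scan.setRowPrefixFilter(prefix.getRowKeyPrefix());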
http://git-wip-us.apache.org/repos/asf/hadoop/blob/a990ff70/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/subapplication/SubApplicationTable.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/subapplication/SubApplicationTable.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/subapplication/SubApplicationTable.java
new file mode 100644
index 0000000..334bab6
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/subapplication/SubApplicationTable.java
@@ -0,0 +1,174 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.yarn.server.timelineservice.storage.subapplication;
+
+import java.io.IOException;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hbase.HColumnDescriptor;
+import org.apache.hadoop.hbase.HTableDescriptor;
+import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.client.Admin;
+import org.apache.hadoop.hbase.regionserver.BloomType;
+import org.apache.hadoop.yarn.conf.YarnConfiguration;
+import org.apache.hadoop.yarn.server.timelineservice.storage.common.BaseTable;
+import org.apache.hadoop.yarn.server.timelineservice.storage.common.TimelineHBaseSchemaConstants;
+
+/**
+ * The sub application table has column families:
+ * info, config and metrics.
+ * The info family stores information about a timeline entity object, the
+ * config family stores the configuration data of a timeline entity object,
+ * and the metrics family stores the metrics of a timeline entity object.
+ *
+ * Example sub application table record:
+ *
+ * <pre>
+ * |-------------------------------------------------------------------------|
+ * |  Row          | Column Family             | Column Family| Column Family|
+ * |  key          | info                      | metrics      | config       |
+ * |-------------------------------------------------------------------------|
+ * | subAppUserId! | id:entityId               | metricId1:   | configKey1:  |
+ * | clusterId!    | type:entityType           | metricValue1 | configValue1 |
+ * | entityType!   |                           | @timestamp1  |              |
+ * | idPrefix!     |                           |              | configKey2:  |
+ * | entityId!     | created_time:             | metricId1:   | configValue2 |
+ * | userId        | 1392993084018             | metricValue2 |              |
+ * |               |                           | @timestamp2  |              |
+ * |               | i!infoKey:                |              |              |
+ * |               | infoValue                 | metricId1:   |              |
+ * |               |                           | metricValue1 |              |
+ * |               |                           | @timestamp2  |              |
+ * |               | e!eventId=timestamp=      |              |              |
+ * |               | infoKey:                  |              |              |
+ * |               | eventInfoValue            |              |              |
+ * |               |                           |              |              |
+ * |               | r!relatesToKey:           |              |              |
+ * |               | id3=id4=id5               |              |              |
+ * |               |                           |              |              |
+ * |               | s!isRelatedToKey          |              |              |
+ * |               | id7=id9=id6               |              |              |
+ * |               |                           |              |              |
+ * |               | flowVersion:              |              |              |
+ * |               | versionValue              |              |              |
+ * |-------------------------------------------------------------------------|
+ * </pre>
+ */
+public class SubApplicationTable extends BaseTable<SubApplicationTable> {
+  /** sub app prefix. */
+  private static final String PREFIX =
+      YarnConfiguration.TIMELINE_SERVICE_PREFIX + "subapplication";
+
+  /** config param name that specifies the subapplication table name. */
+  public static final String TABLE_NAME_CONF_NAME = PREFIX + ".table.name";
+
+  /**
+   * config param name that specifies the TTL for metrics column family in
+   * subapplication table.
+   */
+  private static final String METRICS_TTL_CONF_NAME = PREFIX
+      + ".table.metrics.ttl";
+
+  /**
+   * config param name that specifies max-versions for
+   * metrics column family in subapplication table.
+   */
+  private static final String METRICS_MAX_VERSIONS =
+      PREFIX + ".table.metrics.max-versions";
+
+  /** default value for subapplication table name. */
+  public static final String DEFAULT_TABLE_NAME =
+      "timelineservice.subapplication";
+
+  /** default TTL is 30 days for metrics timeseries. */
+  private static final int DEFAULT_METRICS_TTL = 2592000;
+
+  /** default max number of versions. */
+  private static final int DEFAULT_METRICS_MAX_VERSIONS = 10000;
+
+  private static final Log LOG = LogFactory.getLog(
+      SubApplicationTable.class);
+
+  public SubApplicationTable() {
+    super(TABLE_NAME_CONF_NAME, DEFAULT_TABLE_NAME);
+  }
+
+  /*
+   * (non-Javadoc)
+   *
+   * @see
+   * org.apache.hadoop.yarn.server.timelineservice.storage.BaseTable#createTable
+   * (org.apache.hadoop.hbase.client.Admin,
+   * org.apache.hadoop.conf.Configuration)
+   */
+  public void createTable(Admin admin, Configuration hbaseConf)
+      throws IOException {
+
+    TableName table = getTableName(hbaseConf);
+    if (admin.tableExists(table)) {
+      // do not disable / delete existing table
+      // similar to the approach taken by map-reduce jobs when
+      // output directory exists
+      throw new IOException("Table " + table.getNameAsString()
+          + " already exists.");
+    }
+
+    HTableDescriptor subAppTableDescp = new HTableDescriptor(table);
+    HColumnDescriptor infoCF =
+        new HColumnDescriptor(SubApplicationColumnFamily.INFO.getBytes());
+    infoCF.setBloomFilterType(BloomType.ROWCOL);
+    subAppTableDescp.addFamily(infoCF);
+
+    HColumnDescriptor configCF =
+        new HColumnDescriptor(SubApplicationColumnFamily.CONFIGS.getBytes());
+    configCF.setBloomFilterType(BloomType.ROWCOL);
+    configCF.setBlockCacheEnabled(true);
+    subAppTableDescp.addFamily(configCF);
+
+    HColumnDescriptor metricsCF =
+        new HColumnDescriptor(SubApplicationColumnFamily.METRICS.getBytes());
+    subAppTableDescp.addFamily(metricsCF);
+    metricsCF.setBlockCacheEnabled(true);
+    // always keep 1 version (the latest)
+    metricsCF.setMinVersions(1);
+    metricsCF.setMaxVersions(
+        hbaseConf.getInt(METRICS_MAX_VERSIONS, DEFAULT_METRICS_MAX_VERSIONS));
+    metricsCF.setTimeToLive(hbaseConf.getInt(METRICS_TTL_CONF_NAME,
+        DEFAULT_METRICS_TTL));
+    subAppTableDescp.setRegionSplitPolicyClassName(
+        "org.apache.hadoop.hbase.regionserver.KeyPrefixRegionSplitPolicy");
+    subAppTableDescp.setValue("KeyPrefixRegionSplitPolicy.prefix_length",
+        TimelineHBaseSchemaConstants.USERNAME_SPLIT_KEY_PREFIX_LENGTH);
+    admin.createTable(subAppTableDescp,
+        TimelineHBaseSchemaConstants.getUsernameSplits());
+    LOG.info("Status of table creation for " + table.getNameAsString() + "="
+        + admin.tableExists(table));
+  }
+
+  /**
+   * @param metricsTTL time to live parameter for the metrics in this table.
+   * @param hbaseConf configuration in which to set the metrics TTL config
+   *          variable.
+   */
+  public void setMetricsTTL(int metricsTTL, Configuration hbaseConf) {
+    hbaseConf.setInt(METRICS_TTL_CONF_NAME, metricsTTL);
+  }
+
+}

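A rough sketch of creating this table from an admin client, assuming an HBase cluster reachable through the configuration and an enclosing method that throws IOException; the names are illustrative:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    Configuration hbaseConf = HBaseConfiguration.create();
    SubApplicationTable table = new SubApplicationTable();
    // Optionally shrink the metrics TTL (in seconds) before creating the table.
    table.setMetricsTTL(7 * 24 * 60 * 60, hbaseConf);

    try (Connection conn = ConnectionFactory.createConnection(hbaseConf);
         Admin admin = conn.getAdmin()) {
      // Throws IOException if timelineservice.subapplication already exists.
      table.createTable(admin, hbaseConf);
    }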
http://git-wip-us.apache.org/repos/asf/hadoop/blob/a990ff70/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/subapplication/package-info.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/subapplication/package-info.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/subapplication/package-info.java
new file mode 100644
index 0000000..52cc399
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/subapplication/package-info.java
@@ -0,0 +1,28 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/**
+ * Package org.apache.hadoop.yarn.server.timelineservice.storage.subapplication
+ * contains classes related to implementation for subapplication table.
+ */
+@InterfaceAudience.Private
+@InterfaceStability.Unstable
+package org.apache.hadoop.yarn.server.timelineservice.storage.subapplication;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a990ff70/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/test/java/org/apache/hadoop/yarn/server/timelineservice/storage/common/TestKeyConverters.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/test/java/org/apache/hadoop/yarn/server/timelineservice/storage/common/TestKeyConverters.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/test/java/org/apache/hadoop/yarn/server/timelineservice/storage/common/TestKeyConverters.java
index 58df970..1bd363f 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/test/java/org/apache/hadoop/yarn/server/timelineservice/storage/common/TestKeyConverters.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/test/java/org/apache/hadoop/yarn/server/timelineservice/storage/common/TestKeyConverters.java
@@ -26,6 +26,10 @@ import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.yarn.api.records.ApplicationId;
 import org.junit.Test;
 
+/**
+ * Unit tests for key converters for various tables' row keys.
+ *
+ */
 public class TestKeyConverters {
 
   @Test

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a990ff70/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/test/java/org/apache/hadoop/yarn/server/timelineservice/storage/common/TestRowKeys.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/test/java/org/apache/hadoop/yarn/server/timelineservice/storage/common/TestRowKeys.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/test/java/org/apache/hadoop/yarn/server/timelineservice/storage/common/TestRowKeys.java
index cbd2273..4770238 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/test/java/org/apache/hadoop/yarn/server/timelineservice/storage/common/TestRowKeys.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/test/java/org/apache/hadoop/yarn/server/timelineservice/storage/common/TestRowKeys.java
@@ -31,9 +31,14 @@ import org.apache.hadoop.yarn.server.timelineservice.storage.entity.EntityRowKey
 import org.apache.hadoop.yarn.server.timelineservice.storage.flow.FlowActivityRowKey;
 import org.apache.hadoop.yarn.server.timelineservice.storage.flow.FlowActivityRowKeyPrefix;
 import org.apache.hadoop.yarn.server.timelineservice.storage.flow.FlowRunRowKey;
+import org.apache.hadoop.yarn.server.timelineservice.storage.subapplication.SubApplicationRowKey;
 import org.junit.Test;
 
 
+/**
+ * Class to test the row key structures for various tables.
+ *
+ */
 public class TestRowKeys {
 
   private final static String QUALIFIER_SEP = Separator.QUALIFIERS.getValue();
@@ -41,6 +46,7 @@ public class TestRowKeys {
       .toBytes(QUALIFIER_SEP);
   private final static String CLUSTER = "cl" + QUALIFIER_SEP + "uster";
   private final static String USER = QUALIFIER_SEP + "user";
+  private final static String SUB_APP_USER = QUALIFIER_SEP + "subAppUser";
   private final static String FLOW_NAME = "dummy_" + QUALIFIER_SEP + "flow"
       + QUALIFIER_SEP;
   private final static Long FLOW_RUN_ID;
@@ -247,4 +253,24 @@ public class TestRowKeys {
     verifyRowPrefixBytes(byteRowKeyPrefix);
   }
 
+  @Test
+  public void testSubAppRowKey() {
+    TimelineEntity entity = new TimelineEntity();
+    entity.setId("entity1");
+    entity.setType("DAG");
+    entity.setIdPrefix(54321);
+
+    byte[] byteRowKey =
+        new SubApplicationRowKey(SUB_APP_USER, CLUSTER,
+            entity.getType(), entity.getIdPrefix(),
+            entity.getId(), USER).getRowKey();
+    SubApplicationRowKey rowKey = SubApplicationRowKey.parseRowKey(byteRowKey);
+    assertEquals(CLUSTER, rowKey.getClusterId());
+    assertEquals(SUB_APP_USER, rowKey.getSubAppUserId());
+    assertEquals(entity.getType(), rowKey.getEntityType());
+    assertEquals(entity.getIdPrefix(), rowKey.getEntityIdPrefix().longValue());
+    assertEquals(entity.getId(), rowKey.getEntityId());
+    assertEquals(USER, rowKey.getUserId());
+  }
+
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a990ff70/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/test/java/org/apache/hadoop/yarn/server/timelineservice/storage/common/TestRowKeysAsString.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/test/java/org/apache/hadoop/yarn/server/timelineservice/storage/common/TestRowKeysAsString.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/test/java/org/apache/hadoop/yarn/server/timelineservice/storage/common/TestRowKeysAsString.java
index f0ef720..148cf56 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/test/java/org/apache/hadoop/yarn/server/timelineservice/storage/common/TestRowKeysAsString.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/test/java/org/apache/hadoop/yarn/server/timelineservice/storage/common/TestRowKeysAsString.java
@@ -26,6 +26,7 @@ import org.apache.hadoop.yarn.server.timelineservice.storage.application.Applica
 import org.apache.hadoop.yarn.server.timelineservice.storage.entity.EntityRowKey;
 import org.apache.hadoop.yarn.server.timelineservice.storage.flow.FlowActivityRowKey;
 import org.apache.hadoop.yarn.server.timelineservice.storage.flow.FlowRunRowKey;
+import org.apache.hadoop.yarn.server.timelineservice.storage.subapplication.SubApplicationRowKey;
 import org.junit.Test;
 
 /**
@@ -38,6 +39,9 @@ public class TestRowKeysAsString {
           + TimelineReaderUtils.DEFAULT_ESCAPE_CHAR;
   private final static String USER =
       TimelineReaderUtils.DEFAULT_ESCAPE_CHAR + "user";
+  private final static String SUB_APP_USER =
+      TimelineReaderUtils.DEFAULT_ESCAPE_CHAR + "subAppUser";
+
   private final static String FLOW_NAME =
       "dummy_" + TimelineReaderUtils.DEFAULT_DELIMITER_CHAR
           + TimelineReaderUtils.DEFAULT_ESCAPE_CHAR + "flow"
@@ -112,4 +116,29 @@ public class TestRowKeysAsString {
     assertEquals(FLOW_NAME, rowKey.getFlowName());
     assertEquals(FLOW_RUN_ID, rowKey.getFlowRunId());
   }
+
+  @Test(timeout = 10000)
+  public void testSubApplicationRowKey() {
+    char del = TimelineReaderUtils.DEFAULT_DELIMITER_CHAR;
+    char esc = TimelineReaderUtils.DEFAULT_ESCAPE_CHAR;
+    String id = del + esc + "ent" + esc + del + "ity" + esc + del + esc + "id"
+        + esc + del + esc;
+    String type = "entity" + esc + del + esc + "Type";
+    TimelineEntity entity = new TimelineEntity();
+    entity.setId(id);
+    entity.setType(type);
+    entity.setIdPrefix(54321);
+
+    String rowKeyAsString = new SubApplicationRowKey(SUB_APP_USER, CLUSTER,
+        entity.getType(), entity.getIdPrefix(), entity.getId(), USER)
+            .getRowKeyAsString();
+    SubApplicationRowKey rowKey = SubApplicationRowKey
+        .parseRowKeyFromString(rowKeyAsString);
+    assertEquals(SUB_APP_USER, rowKey.getSubAppUserId());
+    assertEquals(CLUSTER, rowKey.getClusterId());
+    assertEquals(entity.getType(), rowKey.getEntityType());
+    assertEquals(entity.getIdPrefix(), rowKey.getEntityIdPrefix().longValue());
+    assertEquals(entity.getId(), rowKey.getEntityId());
+    assertEquals(USER, rowKey.getUserId());
+  }
 }

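To make the escaping these tests exercise concrete, a small sketch that round-trips a component containing the delimiter; the exact escaped form is an implementation detail, only the round-trip property matters:

    import java.util.List;

    // Components may themselves contain the delimiter and escape characters.
    String[] parts = {"doAs!User", "cluster1", "DAG", "54321", "dag!id", "user1"};
    String encoded = TimelineReaderUtils.joinAndEscapeStrings(parts);

    // split() undoes the escaping and yields the original six components.
    List<String> decoded = TimelineReaderUtils.split(encoded);
    assert decoded.size() == 6 && "doAs!User".equals(decoded.get(0));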



[39/50] [abbrv] hadoop git commit: Revert "HADOOP-14671. Upgrade to Apache Yetus 0.5.0."

Posted by st...@apache.org.
Revert "HADOOP-14671. Upgrade to Apache Yetus 0.5.0."

This reverts commit 33afa94af1af7b00506284da6493c7175dbad3c0.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/99926756
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/99926756
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/99926756

Branch: refs/heads/HADOOP-13345
Commit: 99926756fc036d6949c2602356ca0732b88e2653
Parents: 200b113
Author: Akira Ajisaka <aa...@apache.org>
Authored: Wed Aug 30 16:54:30 2017 +0200
Committer: Akira Ajisaka <aa...@apache.org>
Committed: Wed Aug 30 16:54:30 2017 +0200

----------------------------------------------------------------------
 dev-support/bin/yetus-wrapper | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/99926756/dev-support/bin/yetus-wrapper
----------------------------------------------------------------------
diff --git a/dev-support/bin/yetus-wrapper b/dev-support/bin/yetus-wrapper
index a5bebe6..9f6bb33 100755
--- a/dev-support/bin/yetus-wrapper
+++ b/dev-support/bin/yetus-wrapper
@@ -73,7 +73,7 @@ WANTED="$1"
 shift
 ARGV=("$@")
 
-HADOOP_YETUS_VERSION=${HADOOP_YETUS_VERSION:-0.5.0}
+HADOOP_YETUS_VERSION=${HADOOP_YETUS_VERSION:-0.4.0}
 BIN=$(yetus_abs "${BASH_SOURCE-$0}")
 BINDIR=$(dirname "${BIN}")
 




[30/50] [abbrv] hadoop git commit: YARN-7038. [Atsv2 Security] CollectorNodemanagerProtocol RPC interface doesn't work when service authorization is enabled. Contributed by Varun Saxena.

Posted by st...@apache.org.
YARN-7038. [Atsv2 Security] CollectorNodemanagerProtocol RPC interface doesn't work when service authorization is enabled. Contributed by Varun Saxena.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/32188d32
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/32188d32
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/32188d32

Branch: refs/heads/HADOOP-13345
Commit: 32188d32954d94ec2efeec2b2fcc5b2abff4c1ea
Parents: b664569
Author: Rohith Sharma K S <ro...@apache.org>
Authored: Fri Aug 18 13:32:36 2017 +0530
Committer: Varun Saxena <va...@apache.org>
Committed: Wed Aug 30 11:29:54 2017 +0530

----------------------------------------------------------------------
 .../hadoop-common/src/main/conf/hadoop-policy.xml        | 11 +++++++++++
 .../org/apache/hadoop/yarn/conf/YarnConfiguration.java   |  4 ++++
 .../hadoop/yarn/conf/TestYarnConfigurationFields.java    |  2 ++
 .../nodemanager/collectormanager/NMCollectorService.java |  7 +++++++
 .../nodemanager/security/authorize/NMPolicyProvider.java |  6 +++++-
 5 files changed, 29 insertions(+), 1 deletion(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/32188d32/hadoop-common-project/hadoop-common/src/main/conf/hadoop-policy.xml
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/conf/hadoop-policy.xml b/hadoop-common-project/hadoop-common/src/main/conf/hadoop-policy.xml
index 2bf5c02..d282c58 100644
--- a/hadoop-common-project/hadoop-common/src/main/conf/hadoop-policy.xml
+++ b/hadoop-common-project/hadoop-common/src/main/conf/hadoop-policy.xml
@@ -223,4 +223,15 @@
     group list is separated by a blank. For e.g. "alice,bob users,wheel".
     A special value of "*" means all users are allowed.</description>
   </property>
+
+  <property>
+    <name>security.collector-nodemanager.protocol.acl</name>
+    <value>*</value>
+    <description>ACL for CollectorNodemanagerProtocol, used by nodemanager
+    if timeline service v2 is enabled, for the timeline collector and nodemanager
+    to communicate with each other.
+    The ACL is a comma-separated list of user and group names. The user and
+    group list is separated by a blank, e.g. "alice,bob users,wheel".
+    A special value of "*" means all users are allowed.</description>
+  </property>
 </configuration>

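For reference, a sketch of how hadoop-common evaluates an ACL string of this shape; illustrative only, not code from the patch:

    import org.apache.hadoop.security.UserGroupInformation;
    import org.apache.hadoop.security.authorize.AccessControlList;

    // Users come before the blank, groups after it; "*" allows everyone.
    AccessControlList acl = new AccessControlList("alice,bob users,wheel");

    UserGroupInformation alice = UserGroupInformation.createRemoteUser("alice");
    boolean allowed = acl.isUserAllowed(alice);  // true: alice is listed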
http://git-wip-us.apache.org/repos/asf/hadoop/blob/32188d32/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
index ff9632a..8a513ac 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
@@ -1782,6 +1782,10 @@ public class YarnConfiguration extends Configuration {
   YARN_SECURITY_SERVICE_AUTHORIZATION_APPLICATIONHISTORY_PROTOCOL =
       "security.applicationhistory.protocol.acl";
 
+  public static final String
+      YARN_SECURITY_SERVICE_AUTHORIZATION_COLLECTOR_NODEMANAGER_PROTOCOL =
+      "security.collector-nodemanager.protocol.acl";
+
   /** No. of milliseconds to wait between sending a SIGTERM and SIGKILL
    * to a running container */
   public static final String NM_SLEEP_DELAY_BEFORE_SIGKILL_MS =

http://git-wip-us.apache.org/repos/asf/hadoop/blob/32188d32/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/test/java/org/apache/hadoop/yarn/conf/TestYarnConfigurationFields.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/test/java/org/apache/hadoop/yarn/conf/TestYarnConfigurationFields.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/test/java/org/apache/hadoop/yarn/conf/TestYarnConfigurationFields.java
index d97c6eb..bd7bf93 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/test/java/org/apache/hadoop/yarn/conf/TestYarnConfigurationFields.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/test/java/org/apache/hadoop/yarn/conf/TestYarnConfigurationFields.java
@@ -66,6 +66,8 @@ public class TestYarnConfigurationFields extends TestConfigurationFieldsBase {
     configurationPropsToSkipCompare
         .add(YarnConfiguration
             .YARN_SECURITY_SERVICE_AUTHORIZATION_RESOURCETRACKER_PROTOCOL);
+    configurationPropsToSkipCompare.add(YarnConfiguration
+        .YARN_SECURITY_SERVICE_AUTHORIZATION_COLLECTOR_NODEMANAGER_PROTOCOL);
     configurationPropsToSkipCompare.add(YarnConfiguration.CURATOR_LEADER_ELECTOR);
 
     // Federation default configs to be ignored

http://git-wip-us.apache.org/repos/asf/hadoop/blob/32188d32/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/collectormanager/NMCollectorService.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/collectormanager/NMCollectorService.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/collectormanager/NMCollectorService.java
index 7db6d70..4648a65 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/collectormanager/NMCollectorService.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/collectormanager/NMCollectorService.java
@@ -26,6 +26,7 @@ import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
 import org.apache.hadoop.ipc.Server;
 import org.apache.hadoop.service.CompositeService;
 import org.apache.hadoop.yarn.api.records.ApplicationId;
@@ -40,6 +41,7 @@ import org.apache.hadoop.yarn.server.api.protocolrecords.ReportNewCollectorInfoR
 import org.apache.hadoop.yarn.server.api.records.AppCollectorData;
 import org.apache.hadoop.yarn.server.nodemanager.Context;
 import org.apache.hadoop.yarn.server.nodemanager.containermanager.application.Application;
+import org.apache.hadoop.yarn.server.nodemanager.security.authorize.NMPolicyProvider;
 import org.apache.hadoop.yarn.server.nodemanager.timelineservice.NMTimelinePublisher;
 
 /**
@@ -83,6 +85,11 @@ public class NMCollectorService extends CompositeService implements
             conf.getInt(YarnConfiguration.NM_COLLECTOR_SERVICE_THREAD_COUNT,
                 YarnConfiguration.DEFAULT_NM_COLLECTOR_SERVICE_THREAD_COUNT));
 
+    if (conf.getBoolean(
+        CommonConfigurationKeysPublic.HADOOP_SECURITY_AUTHORIZATION, false)) {
+      server.refreshServiceAcl(conf, new NMPolicyProvider());
+    }
+
     server.start();
     collectorServerAddress = conf.updateConnectAddr(
         YarnConfiguration.NM_BIND_HOST,

http://git-wip-us.apache.org/repos/asf/hadoop/blob/32188d32/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/security/authorize/NMPolicyProvider.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/security/authorize/NMPolicyProvider.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/security/authorize/NMPolicyProvider.java
index 89e3d78..cc668f7 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/security/authorize/NMPolicyProvider.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/security/authorize/NMPolicyProvider.java
@@ -23,6 +23,7 @@ import org.apache.hadoop.security.authorize.PolicyProvider;
 import org.apache.hadoop.security.authorize.Service;
 import org.apache.hadoop.yarn.api.ContainerManagementProtocolPB;
 import org.apache.hadoop.yarn.conf.YarnConfiguration;
+import org.apache.hadoop.yarn.server.api.CollectorNodemanagerProtocolPB;
 import org.apache.hadoop.yarn.server.nodemanager.api.LocalizationProtocolPB;
 
 /**
@@ -38,7 +39,10 @@ public class NMPolicyProvider extends PolicyProvider {
         YarnConfiguration.YARN_SECURITY_SERVICE_AUTHORIZATION_CONTAINER_MANAGEMENT_PROTOCOL, 
         ContainerManagementProtocolPB.class),
     new Service(YarnConfiguration.YARN_SECURITY_SERVICE_AUTHORIZATION_RESOURCE_LOCALIZER, 
-        LocalizationProtocolPB.class)
+        LocalizationProtocolPB.class),
+    new Service(YarnConfiguration.
+        YARN_SECURITY_SERVICE_AUTHORIZATION_COLLECTOR_NODEMANAGER_PROTOCOL,
+            CollectorNodemanagerProtocolPB.class)
   };
 
   @Override




[06/50] [abbrv] hadoop git commit: YARN-6256. Add FROM_ID info key for timeline entities in reader response (Rohith Sharma K S via Varun Saxena)

Posted by st...@apache.org.
YARN-6256. Add FROM_ID info key for timeline entities in reader response (Rohith Sharma K S via Varun Saxena)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/c3bd8d6a
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/c3bd8d6a
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/c3bd8d6a

Branch: refs/heads/HADOOP-13345
Commit: c3bd8d6ad3e30c08865cc1a5f374d1d2a485f844
Parents: 8bb2646
Author: Varun Saxena <va...@apache.org>
Authored: Tue Mar 7 23:54:38 2017 +0530
Committer: Varun Saxena <va...@apache.org>
Committed: Wed Aug 30 11:29:52 2017 +0530

----------------------------------------------------------------------
 ...stTimelineReaderWebServicesHBaseStorage.java |  33 +--
 .../storage/TestHBaseTimelineStorageApps.java   |  19 +-
 .../TestHBaseTimelineStorageEntities.java       |  21 +-
 .../storage/application/ApplicationRowKey.java  |  49 +++-
 .../storage/entity/EntityRowKey.java            |  56 ++++-
 .../storage/flow/FlowRunRowKey.java             |  47 +++-
 .../storage/reader/ApplicationEntityReader.java |  28 ++-
 .../storage/reader/FlowRunEntityReader.java     |  32 +--
 .../storage/reader/GenericEntityReader.java     |  25 +-
 .../storage/common/TestRowKeys.java             |  21 --
 .../storage/common/TestRowKeysAsString.java     | 115 ++++++++++
 .../reader/TimelineEntityFilters.java           |  29 +--
 .../reader/TimelineReaderWebServices.java       | 227 +++++++------------
 .../reader/TimelineReaderWebServicesUtils.java  |   4 +-
 14 files changed, 445 insertions(+), 261 deletions(-)
----------------------------------------------------------------------

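A hedged sketch of the client-side pagination loop FROM_ID enables; the endpoint path, port and the fetch() helper are made up for illustration:

    // First page: newest entities first.
    String base = "http://localhost:8188/ws/v2/timeline/clusters/cluster1"
        + "/apps/application_1231111111_1111/entities/DAG";
    List<TimelineEntity> page = fetch(base + "?limit=4");  // fetch() is a stub

    // Each entity now carries FROM_ID in its info map; passing it back as
    // fromid resumes the listing at that entity (inclusive).
    Object fromId = page.get(page.size() - 1)
        .getInfo().get(TimelineReaderUtils.FROMID_KEY);
    List<TimelineEntity> next = fetch(base + "?limit=4&fromid=" + fromId);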

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c3bd8d6a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase-tests/src/test/java/org/apache/hadoop/yarn/server/timelineservice/reader/TestTimelineReaderWebServicesHBaseStorage.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase-tests/src/test/java/org/apache/hadoop/yarn/server/timelineservice/reader/TestTimelineReaderWebServicesHBaseStorage.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase-tests/src/test/java/org/apache/hadoop/yarn/server/timelineservice/reader/TestTimelineReaderWebServicesHBaseStorage.java
index d0f674f..b2fe267 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase-tests/src/test/java/org/apache/hadoop/yarn/server/timelineservice/reader/TestTimelineReaderWebServicesHBaseStorage.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase-tests/src/test/java/org/apache/hadoop/yarn/server/timelineservice/reader/TestTimelineReaderWebServicesHBaseStorage.java
@@ -825,7 +825,7 @@ public class TestTimelineReaderWebServicesHBaseStorage
       assertEquals(2, entities1.size());
       for (TimelineEntity entity : entities1) {
         assertNotNull(entity.getInfo());
-        assertEquals(1, entity.getInfo().size());
+        assertEquals(2, entity.getInfo().size());
         String uid =
             (String) entity.getInfo().get(TimelineReaderManager.UID_KEY);
         assertNotNull(uid);
@@ -853,7 +853,7 @@ public class TestTimelineReaderWebServicesHBaseStorage
       assertEquals(2, entities2.size());
       for (TimelineEntity entity : entities2) {
         assertNotNull(entity.getInfo());
-        assertEquals(1, entity.getInfo().size());
+        assertEquals(2, entity.getInfo().size());
         String uid =
             (String) entity.getInfo().get(TimelineReaderManager.UID_KEY);
         assertNotNull(uid);
@@ -1417,8 +1417,9 @@ public class TestTimelineReaderWebServicesHBaseStorage
         infoCnt += entity.getInfo().size();
         assertEquals("entity2", entity.getId());
       }
-      // Includes UID in info field even if fields not specified as INFO.
-      assertEquals(1, infoCnt);
+      // Includes UID and FROM_ID in info field even if fields not specified as
+      // INFO.
+      assertEquals(2, infoCnt);
 
       // infofilters=(info1 eq cluster1 AND info4 eq 35000) OR
       // (info1 eq cluster2 AND info2 eq 2.0)
@@ -1436,8 +1437,8 @@ public class TestTimelineReaderWebServicesHBaseStorage
         infoCnt += entity.getInfo().size();
         assertEquals("entity2", entity.getId());
       }
-      // Includes UID in info field.
-      assertEquals(4, infoCnt);
+      // Includes UID and FROM_ID in info field.
+      assertEquals(5, infoCnt);
 
       // Test for behavior when compare op is ne(not equals) vs ene
       // (exists and not equals). info3 does not exist for entity2. For ne,
@@ -2171,8 +2172,8 @@ public class TestTimelineReaderWebServicesHBaseStorage
       // verify for entity-10 to entity-7 in descending order.
       TimelineEntity entity = verifyPaginatedEntites(entities, limit, 10);
 
-      queryParam = "?limit=" + limit + "&&fromidprefix=" + entity.getIdPrefix()
-          + "&&fromid=" + entity.getId();
+      queryParam = "?limit=" + limit + "&fromid="
+          + entity.getInfo().get(TimelineReaderUtils.FROMID_KEY);
       uri = URI.create(resourceUri + queryParam);
       resp = getResponse(client, uri);
       entities = resp.getEntity(new GenericType<List<TimelineEntity>>() {
@@ -2180,7 +2181,8 @@ public class TestTimelineReaderWebServicesHBaseStorage
       // verify for entity-7 to entity-4 in descending order.
       entity = verifyPaginatedEntites(entities, limit, 7);
 
-      queryParam = "?limit=" + limit + "&&fromidprefix=" + entity.getIdPrefix();
+      queryParam = "?limit=" + limit + "&fromid="
+          + entity.getInfo().get(TimelineReaderUtils.FROMID_KEY);
       uri = URI.create(resourceUri + queryParam);
       resp = getResponse(client, uri);
       entities = resp.getEntity(new GenericType<List<TimelineEntity>>() {
@@ -2188,7 +2190,8 @@ public class TestTimelineReaderWebServicesHBaseStorage
       // verify for entity-4 to entity-1 in descending order.
       entity = verifyPaginatedEntites(entities, limit, 4);
 
-      queryParam = "?limit=" + limit + "&&fromidprefix=" + entity.getIdPrefix();
+      queryParam = "?limit=" + limit + "&fromid="
+          + entity.getInfo().get(TimelineReaderUtils.FROMID_KEY);
       uri = URI.create(resourceUri + queryParam);
       resp = getResponse(client, uri);
       entities = resp.getEntity(new GenericType<List<TimelineEntity>>() {
@@ -2264,7 +2267,8 @@ public class TestTimelineReaderWebServicesHBaseStorage
       TimelineEntity entity10 = entities.get(limit - 1);
 
       uri =
-          URI.create(resourceUri + queryParam + "&fromid=" + entity10.getId());
+          URI.create(resourceUri + queryParam + "&fromid="
+              + entity10.getInfo().get(TimelineReaderUtils.FROMID_KEY));
       resp = getResponse(client, uri);
       entities = resp.getEntity(new GenericType<List<TimelineEntity>>() {
       });
@@ -2308,7 +2312,8 @@ public class TestTimelineReaderWebServicesHBaseStorage
       TimelineEntity entity3 = entities.get(limit - 1);
 
       uri =
-          URI.create(resourceUri + queryParam + "&fromid=" + entity3.getId());
+          URI.create(resourceUri + queryParam + "&fromid="
+              + entity3.getInfo().get(TimelineReaderUtils.FROMID_KEY));
       resp = getResponse(client, uri);
       entities = resp.getEntity(new GenericType<List<TimelineEntity>>() {
       });
@@ -2352,7 +2357,7 @@ public class TestTimelineReaderWebServicesHBaseStorage
       TimelineEntity entity2 = entities.get(limit - 1);
 
       uri = URI.create(resourceUri + queryParam + "&fromid="
-          + entity2.getInfo().get("SYSTEM_INFO_FLOW_RUN_ID"));
+          + entity2.getInfo().get(TimelineReaderUtils.FROMID_KEY));
       resp = getResponse(client, uri);
       entities = resp.getEntity(new GenericType<List<TimelineEntity>>() {
       });
@@ -2362,7 +2367,7 @@ public class TestTimelineReaderWebServicesHBaseStorage
       assertEquals(entity3, entities.get(1));
 
       uri = URI.create(resourceUri + queryParam + "&fromid="
-          + entity3.getInfo().get("SYSTEM_INFO_FLOW_RUN_ID"));
+          + entity3.getInfo().get(TimelineReaderUtils.FROMID_KEY));
       resp = getResponse(client, uri);
       entities = resp.getEntity(new GenericType<List<TimelineEntity>>() {
       });

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c3bd8d6a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase-tests/src/test/java/org/apache/hadoop/yarn/server/timelineservice/storage/TestHBaseTimelineStorageApps.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase-tests/src/test/java/org/apache/hadoop/yarn/server/timelineservice/storage/TestHBaseTimelineStorageApps.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase-tests/src/test/java/org/apache/hadoop/yarn/server/timelineservice/storage/TestHBaseTimelineStorageApps.java
index 9216d0a..4b1147d 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase-tests/src/test/java/org/apache/hadoop/yarn/server/timelineservice/storage/TestHBaseTimelineStorageApps.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase-tests/src/test/java/org/apache/hadoop/yarn/server/timelineservice/storage/TestHBaseTimelineStorageApps.java
@@ -389,6 +389,8 @@ public class TestHBaseTimelineStorageApps {
           e1.getType());
       assertEquals(cTime, e1.getCreatedTime());
       Map<String, Object> infoMap2 = e1.getInfo();
+      // fromid key is added by storage. Remove it for comparison.
+      infoMap2.remove("FROM_ID");
       assertEquals(infoMap, infoMap2);
 
       Map<String, Set<String>> isRelatedTo2 = e1.getIsRelatedToEntities();
@@ -448,6 +450,9 @@ public class TestHBaseTimelineStorageApps {
       assertEquals(TimelineEntityType.YARN_APPLICATION.toString(),
           e1.getType());
       assertEquals(cTime, e1.getCreatedTime());
+      infoMap2 = e1.getInfo();
+      // fromid key is added by storage. Remove it for comparison.
+      infoMap2.remove("FROM_ID");
       assertEquals(infoMap, e1.getInfo());
       assertEquals(isRelatedTo, e1.getIsRelatedToEntities());
       assertEquals(relatesTo, e1.getRelatesToEntities());
@@ -680,7 +685,7 @@ public class TestHBaseTimelineStorageApps {
     }
     assertEquals(5, cfgCnt);
     assertEquals(3, metricCnt);
-    assertEquals(5, infoCnt);
+    assertEquals(8, infoCnt);
     assertEquals(4, eventCnt);
     assertEquals(4, relatesToCnt);
     assertEquals(4, isRelatedToCnt);
@@ -744,7 +749,8 @@ public class TestHBaseTimelineStorageApps {
         TimelineEntityType.YARN_APPLICATION.toString(), null),
         new TimelineDataToRetrieve());
     assertNotNull(e1);
-    assertTrue(e1.getInfo().isEmpty() && e1.getConfigs().isEmpty() &&
+    assertEquals(1, e1.getInfo().size());
+    assertTrue(e1.getConfigs().isEmpty() &&
         e1.getMetrics().isEmpty() && e1.getIsRelatedToEntities().isEmpty() &&
         e1.getRelatesToEntities().isEmpty());
     Set<TimelineEntity> es1 = reader.getEntities(
@@ -755,7 +761,8 @@ public class TestHBaseTimelineStorageApps {
         new TimelineDataToRetrieve());
     assertEquals(3, es1.size());
     for (TimelineEntity e : es1) {
-      assertTrue(e.getInfo().isEmpty() && e.getConfigs().isEmpty() &&
+      assertEquals(1, e1.getInfo().size());
+      assertTrue(e.getConfigs().isEmpty() &&
           e.getMetrics().isEmpty() && e.getIsRelatedToEntities().isEmpty() &&
           e.getRelatesToEntities().isEmpty());
     }
@@ -788,7 +795,7 @@ public class TestHBaseTimelineStorageApps {
       isRelatedToCnt += entity.getIsRelatedToEntities().size();
       infoCnt += entity.getInfo().size();
     }
-    assertEquals(0, infoCnt);
+    assertEquals(3, infoCnt);
     assertEquals(4, isRelatedToCnt);
     assertEquals(3, metricsCnt);
   }
@@ -1770,7 +1777,7 @@ public class TestHBaseTimelineStorageApps {
     for (TimelineEntity entity : entities) {
       infoCnt += entity.getInfo().size();
     }
-    assertEquals(5, infoCnt);
+    assertEquals(7, infoCnt);
 
     TimelineFilterList infoFilterList1 = new TimelineFilterList(
         new TimelineKeyValueFilter(
@@ -1787,7 +1794,7 @@ public class TestHBaseTimelineStorageApps {
     for (TimelineEntity entity : entities) {
       infoCnt += entity.getInfo().size();
     }
-    assertEquals(3, infoCnt);
+    assertEquals(4, infoCnt);
 
     TimelineFilterList infoFilterList2 = new TimelineFilterList(
         new TimelineKeyValueFilter(

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c3bd8d6a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase-tests/src/test/java/org/apache/hadoop/yarn/server/timelineservice/storage/TestHBaseTimelineStorageEntities.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase-tests/src/test/java/org/apache/hadoop/yarn/server/timelineservice/storage/TestHBaseTimelineStorageEntities.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase-tests/src/test/java/org/apache/hadoop/yarn/server/timelineservice/storage/TestHBaseTimelineStorageEntities.java
index 0ba841f..1237fb3 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase-tests/src/test/java/org/apache/hadoop/yarn/server/timelineservice/storage/TestHBaseTimelineStorageEntities.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase-tests/src/test/java/org/apache/hadoop/yarn/server/timelineservice/storage/TestHBaseTimelineStorageEntities.java
@@ -311,6 +311,8 @@ public class TestHBaseTimelineStorageEntities {
       assertEquals(type, e1.getType());
       assertEquals(cTime, e1.getCreatedTime());
       Map<String, Object> infoMap2 = e1.getInfo();
+      // fromid key is added by storage. Remove it for comparison.
+      infoMap2.remove("FROM_ID");
       assertEquals(infoMap, infoMap2);
 
       Map<String, Set<String>> isRelatedTo2 = e1.getIsRelatedToEntities();
@@ -336,7 +338,10 @@ public class TestHBaseTimelineStorageEntities {
       assertEquals(id, e1.getId());
       assertEquals(type, e1.getType());
       assertEquals(cTime, e1.getCreatedTime());
-      assertEquals(infoMap, e1.getInfo());
+      infoMap2 = e1.getInfo();
+      // fromid key is added by storage. Remove it for comparison.
+      infoMap2.remove("FROM_ID");
+      assertEquals(infoMap, infoMap2);
       assertEquals(isRelatedTo, e1.getIsRelatedToEntities());
       assertEquals(relatesTo, e1.getRelatesToEntities());
       assertEquals(conf, e1.getConfigs());
@@ -573,7 +578,7 @@ public class TestHBaseTimelineStorageEntities {
     }
     assertEquals(5, cfgCnt);
     assertEquals(3, metricCnt);
-    assertEquals(5, infoCnt);
+    assertEquals(8, infoCnt);
     assertEquals(4, eventCnt);
     assertEquals(4, relatesToCnt);
     assertEquals(4, isRelatedToCnt);
@@ -1126,7 +1131,8 @@ public class TestHBaseTimelineStorageEntities {
         1002345678919L, "application_1231111111_1111", "world", "hello"),
         new TimelineDataToRetrieve());
     assertNotNull(e1);
-    assertTrue(e1.getInfo().isEmpty() && e1.getConfigs().isEmpty() &&
+    assertEquals(1, e1.getInfo().size());
+    assertTrue(e1.getConfigs().isEmpty() &&
         e1.getMetrics().isEmpty() && e1.getIsRelatedToEntities().isEmpty() &&
         e1.getRelatesToEntities().isEmpty());
     Set<TimelineEntity> es1 = reader.getEntities(
@@ -1136,9 +1142,10 @@ public class TestHBaseTimelineStorageEntities {
         new TimelineDataToRetrieve());
     assertEquals(3, es1.size());
     for (TimelineEntity e : es1) {
-      assertTrue(e.getInfo().isEmpty() && e.getConfigs().isEmpty() &&
+      assertTrue(e.getConfigs().isEmpty() &&
           e.getMetrics().isEmpty() && e.getIsRelatedToEntities().isEmpty() &&
           e.getRelatesToEntities().isEmpty());
+      assertEquals(1, e.getInfo().size());
     }
   }
 
@@ -1167,7 +1174,7 @@ public class TestHBaseTimelineStorageEntities {
       isRelatedToCnt += entity.getIsRelatedToEntities().size();
       infoCnt += entity.getInfo().size();
     }
-    assertEquals(0, infoCnt);
+    assertEquals(3, infoCnt);
     assertEquals(4, isRelatedToCnt);
     assertEquals(3, metricsCnt);
   }
@@ -1603,7 +1610,7 @@ public class TestHBaseTimelineStorageEntities {
     for (TimelineEntity entity : entities) {
       infoCnt += entity.getInfo().size();
     }
-    assertEquals(5, infoCnt);
+    assertEquals(7, infoCnt);
 
     TimelineFilterList infoFilterList1 = new TimelineFilterList(
         new TimelineKeyValueFilter(
@@ -1619,7 +1626,7 @@ public class TestHBaseTimelineStorageEntities {
     for (TimelineEntity entity : entities) {
       infoCnt += entity.getInfo().size();
     }
-    assertEquals(3, infoCnt);
+    assertEquals(4, infoCnt);
 
     TimelineFilterList infoFilterList2 = new TimelineFilterList(
         new TimelineKeyValueFilter(

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c3bd8d6a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/application/ApplicationRowKey.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/application/ApplicationRowKey.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/application/ApplicationRowKey.java
index da62fdf..e89a6a7 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/application/ApplicationRowKey.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/application/ApplicationRowKey.java
@@ -18,9 +18,13 @@
 
 package org.apache.hadoop.yarn.server.timelineservice.storage.application;
 
+import java.util.List;
+
 import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.yarn.server.timelineservice.reader.TimelineReaderUtils;
 import org.apache.hadoop.yarn.server.timelineservice.storage.common.AppIdKeyConverter;
 import org.apache.hadoop.yarn.server.timelineservice.storage.common.KeyConverter;
+import org.apache.hadoop.yarn.server.timelineservice.storage.common.KeyConverterToString;
 import org.apache.hadoop.yarn.server.timelineservice.storage.common.LongConverter;
 import org.apache.hadoop.yarn.server.timelineservice.storage.common.Separator;
 
@@ -33,7 +37,7 @@ public class ApplicationRowKey {
   private final String flowName;
   private final Long flowRunId;
   private final String appId;
-  private final KeyConverter<ApplicationRowKey> appRowKeyConverter =
+  private final ApplicationRowKeyConverter appRowKeyConverter =
       new ApplicationRowKeyConverter();
 
   public ApplicationRowKey(String clusterId, String userId, String flowName,
@@ -86,6 +90,24 @@ public class ApplicationRowKey {
   }
 
   /**
+   * Constructs a row key for the application table as follows:
+   * {@code clusterId!userName!flowName!flowRunId!appId}.
+   * @return String representation of row key.
+   */
+  public String getRowKeyAsString() {
+    return appRowKeyConverter.encodeAsString(this);
+  }
+
+  /**
+   * Given the encoded row key as string, returns the row key as an object.
+   * @param encodedRowKey String representation of row key.
+   * @return An <cite>ApplicationRowKey</cite> object.
+   */
+  public static ApplicationRowKey parseRowKeyFromString(String encodedRowKey) {
+    return new ApplicationRowKeyConverter().decodeFromString(encodedRowKey);
+  }
+
+  /**
    * Encodes and decodes row key for application table. The row key is of the
    * form: clusterId!userName!flowName!flowRunId!appId. flowRunId is a long,
    * appId is encoded and decoded using {@link AppIdKeyConverter} and rest are
@@ -93,7 +115,7 @@ public class ApplicationRowKey {
    * <p>
    */
   final private static class ApplicationRowKeyConverter implements
-      KeyConverter<ApplicationRowKey> {
+      KeyConverter<ApplicationRowKey>, KeyConverterToString<ApplicationRowKey> {
 
     private final KeyConverter<String> appIDKeyConverter =
         new AppIdKeyConverter();
@@ -201,6 +223,29 @@ public class ApplicationRowKey {
       return new ApplicationRowKey(clusterId, userId, flowName, flowRunId,
           appId);
     }
+
+    @Override
+    public String encodeAsString(ApplicationRowKey key) {
+      if (key.clusterId == null || key.userId == null || key.flowName == null
+          || key.flowRunId == null || key.appId == null) {
+        throw new IllegalArgumentException("Incomplete application row key.");
+      }
+      return TimelineReaderUtils
+          .joinAndEscapeStrings(new String[] {key.clusterId, key.userId,
+              key.flowName, key.flowRunId.toString(), key.appId});
+    }
+
+    @Override
+    public ApplicationRowKey decodeFromString(String encodedRowKey) {
+      List<String> split = TimelineReaderUtils.split(encodedRowKey);
+      if (split == null || split.size() != 5) {
+        throw new IllegalArgumentException(
+            "Invalid row key for application table.");
+      }
+      Long flowRunId = Long.valueOf(split.get(3));
+      return new ApplicationRowKey(split.get(0), split.get(1), split.get(2),
+          flowRunId, split.get(4));
+    }
   }
 
 }
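
The new string form gives REST clients an opaque pagination cursor. A minimal
round-trip sketch with hypothetical values (assuming '!' as
TimelineReaderUtils.DEFAULT_DELIMITER_CHAR):

    ApplicationRowKey original = new ApplicationRowKey("cluster1", "user1",
        "flow_name", 1002345678919L, "application_1231111111_1111");
    String fromId = original.getRowKeyAsString();
    // e.g. "cluster1!user1!flow_name!1002345678919!application_1231111111_1111"
    ApplicationRowKey parsed = ApplicationRowKey.parseRowKeyFromString(fromId);
    // parsed.getFlowRunId() and the other components equal the originals.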

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c3bd8d6a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/entity/EntityRowKey.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/entity/EntityRowKey.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/entity/EntityRowKey.java
index a8f1d0c..7bf02f2 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/entity/EntityRowKey.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/entity/EntityRowKey.java
@@ -17,9 +17,13 @@
  */
 package org.apache.hadoop.yarn.server.timelineservice.storage.entity;
 
+import java.util.List;
+
 import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.yarn.server.timelineservice.reader.TimelineReaderUtils;
 import org.apache.hadoop.yarn.server.timelineservice.storage.common.AppIdKeyConverter;
 import org.apache.hadoop.yarn.server.timelineservice.storage.common.KeyConverter;
+import org.apache.hadoop.yarn.server.timelineservice.storage.common.KeyConverterToString;
 import org.apache.hadoop.yarn.server.timelineservice.storage.common.LongConverter;
 import org.apache.hadoop.yarn.server.timelineservice.storage.common.Separator;
 
@@ -35,7 +39,7 @@ public class EntityRowKey {
   private final String entityType;
   private final Long entityIdPrefix;
   private final String entityId;
-  private final KeyConverter<EntityRowKey> entityRowKeyConverter =
+  private final EntityRowKeyConverter entityRowKeyConverter =
       new EntityRowKeyConverter();
 
   public EntityRowKey(String clusterId, String userId, String flowName,
@@ -96,7 +100,6 @@ public class EntityRowKey {
 
   /**
    * Given the raw row key as bytes, returns the row key as an object.
-   *
    * @param rowKey byte representation of row key.
    * @return An <cite>EntityRowKey</cite> object.
    */
@@ -105,6 +108,27 @@ public class EntityRowKey {
   }
 
   /**
+   * Constructs a row key for the entity table as follows:
+   * <p>
+   * {@code clusterId!userId!flowName!flowRunId!appId!
+   * entityType!entityIdPrefix!entityId}.
+   * </p>
+   * @return String representation of row key.
+   */
+  public String getRowKeyAsString() {
+    return entityRowKeyConverter.encodeAsString(this);
+  }
+
+  /**
+   * Given the encoded row key as string, returns the row key as an object.
+   * @param encodedRowKey String representation of row key.
+   * @return An <cite>EntityRowKey</cite> object.
+   */
+  public static EntityRowKey parseRowKeyFromString(String encodedRowKey) {
+    return new EntityRowKeyConverter().decodeFromString(encodedRowKey);
+  }
+
+  /**
    * Encodes and decodes row key for entity table. The row key is of the form :
    * userName!clusterId!flowName!flowRunId!appId!entityType!entityId. flowRunId
    * is a long, appId is encoded/decoded using {@link AppIdKeyConverter} and
@@ -112,7 +136,7 @@ public class EntityRowKey {
    * <p>
    */
   final private static class EntityRowKeyConverter implements
-      KeyConverter<EntityRowKey> {
+      KeyConverter<EntityRowKey>, KeyConverterToString<EntityRowKey> {
 
     private final AppIdKeyConverter appIDKeyConverter = new AppIdKeyConverter();
 
@@ -245,5 +269,31 @@ public class EntityRowKey {
       return new EntityRowKey(clusterId, userId, flowName, flowRunId, appId,
           entityType, entityPrefixId, entityId);
     }
+
+    @Override
+    public String encodeAsString(EntityRowKey key) {
+      if (key.clusterId == null || key.userId == null || key.flowName == null
+          || key.flowRunId == null || key.appId == null
+          || key.entityType == null || key.entityIdPrefix == null
+          || key.entityId == null) {
+        throw new IllegalArgumentException("Incomplete entity row key.");
+      }
+      return TimelineReaderUtils
+          .joinAndEscapeStrings(new String[] {key.clusterId, key.userId,
+              key.flowName, key.flowRunId.toString(), key.appId, key.entityType,
+              key.entityIdPrefix.toString(), key.entityId});
+    }
+
+    @Override
+    public EntityRowKey decodeFromString(String encodedRowKey) {
+      List<String> split = TimelineReaderUtils.split(encodedRowKey);
+      if (split == null || split.size() != 8) {
+        throw new IllegalArgumentException("Invalid row key for entity table.");
+      }
+      Long flowRunId = Long.valueOf(split.get(3));
+      Long entityIdPrefix = Long.valueOf(split.get(6));
+      return new EntityRowKey(split.get(0), split.get(1), split.get(2),
+          flowRunId, split.get(4), split.get(5), entityIdPrefix, split.get(7));
+    }
   }
 }
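
Since the encoded form is delimiter-separated, entity types and ids that
themselves contain the delimiter must survive the round trip; the escaping in
TimelineReaderUtils.joinAndEscapeStrings/split guarantees that, as the new
TestRowKeysAsString below exercises. A hedged sketch:

    char del = TimelineReaderUtils.DEFAULT_DELIMITER_CHAR;
    EntityRowKey key = new EntityRowKey("cluster1", "user1", "flow_name",
        1002345678919L, "application_1231111111_1111",
        "entity" + del + "Type", 54321L, "entity" + del + "id");
    EntityRowKey decoded =
        EntityRowKey.parseRowKeyFromString(key.getRowKeyAsString());
    // The embedded delimiters are escaped, not split on:
    // decoded.getEntityType() is "entity" + del + "Type" and
    // decoded.getEntityId() is "entity" + del + "id".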

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c3bd8d6a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/flow/FlowRunRowKey.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/flow/FlowRunRowKey.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/flow/FlowRunRowKey.java
index 8fda9a8..7ce91cf 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/flow/FlowRunRowKey.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/flow/FlowRunRowKey.java
@@ -17,8 +17,12 @@
  */
 package org.apache.hadoop.yarn.server.timelineservice.storage.flow;
 
+import java.util.List;
+
 import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.yarn.server.timelineservice.reader.TimelineReaderUtils;
 import org.apache.hadoop.yarn.server.timelineservice.storage.common.KeyConverter;
+import org.apache.hadoop.yarn.server.timelineservice.storage.common.KeyConverterToString;
 import org.apache.hadoop.yarn.server.timelineservice.storage.common.LongConverter;
 import org.apache.hadoop.yarn.server.timelineservice.storage.common.Separator;
 
@@ -70,7 +74,6 @@ public class FlowRunRowKey {
 
   /**
    * Given the raw row key as bytes, returns the row key as an object.
-   *
    * @param rowKey Byte representation of row key.
    * @return A <cite>FlowRunRowKey</cite> object.
    */
@@ -79,6 +82,24 @@ public class FlowRunRowKey {
   }
 
   /**
+   * Constructs a row key for the flow run table as follows:
+   * {@code clusterId!userId!flowName!flowRunId}.
+   * @return String representation of row key.
+   */
+  public String getRowKeyAsString() {
+    return flowRunRowKeyConverter.encodeAsString(this);
+  }
+
+  /**
+   * Given the encoded row key as string, returns the row key as an object.
+   * @param encodedRowKey String representation of row key.
+   * @return A <cite>FlowRunRowKey</cite> object.
+   */
+  public static FlowRunRowKey parseRowKeyFromString(String encodedRowKey) {
+    return new FlowRunRowKeyConverter().decodeFromString(encodedRowKey);
+  }
+
+  /**
    * returns the Flow Key as a verbose String output.
    * @return String
    */
@@ -101,7 +122,7 @@ public class FlowRunRowKey {
    * <p>
    */
   final private static class FlowRunRowKeyConverter implements
-      KeyConverter<FlowRunRowKey> {
+      KeyConverter<FlowRunRowKey>, KeyConverterToString<FlowRunRowKey> {
 
     private FlowRunRowKeyConverter() {
     }
@@ -186,5 +207,27 @@ public class FlowRunRowKey {
           LongConverter.invertLong(Bytes.toLong(rowKeyComponents[3]));
       return new FlowRunRowKey(clusterId, userId, flowName, flowRunId);
     }
+
+    @Override
+    public String encodeAsString(FlowRunRowKey key) {
+      if (key.clusterId == null || key.userId == null || key.flowName == null
+          || key.flowRunId == null) {
+        throw new IllegalArgumentException("Incomplete flow run row key.");
+      }
+      return TimelineReaderUtils.joinAndEscapeStrings(new String[] {
+          key.clusterId, key.userId, key.flowName, key.flowRunId.toString()});
+    }
+
+    @Override
+    public FlowRunRowKey decodeFromString(String encodedRowKey) {
+      List<String> split = TimelineReaderUtils.split(encodedRowKey);
+      if (split == null || split.size() != 4) {
+        throw new IllegalArgumentException(
+            "Invalid row key for flow run table.");
+      }
+      Long flowRunId = Long.valueOf(split.get(3));
+      return new FlowRunRowKey(split.get(0), split.get(1), split.get(2),
+          flowRunId);
+    }
   }
 }
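
A malformed cursor fails fast in decodeFromString, and the entity readers
below translate that into an HTTP 400. A sketch of the failure path, with an
illustrative three-component string where four components are required:

    try {
      FlowRunRowKey.parseRowKeyFromString("cluster1!user1!flow_name");
    } catch (IllegalArgumentException e) {
      // "Invalid row key for flow run table."
    }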

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c3bd8d6a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/reader/ApplicationEntityReader.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/reader/ApplicationEntityReader.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/reader/ApplicationEntityReader.java
index 4e8286d..b4bb005 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/reader/ApplicationEntityReader.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/reader/ApplicationEntityReader.java
@@ -39,6 +39,7 @@ import org.apache.hadoop.yarn.api.records.timelineservice.TimelineEntityType;
 import org.apache.hadoop.yarn.server.timelineservice.reader.TimelineDataToRetrieve;
 import org.apache.hadoop.yarn.server.timelineservice.reader.TimelineEntityFilters;
 import org.apache.hadoop.yarn.server.timelineservice.reader.TimelineReaderContext;
+import org.apache.hadoop.yarn.server.timelineservice.reader.TimelineReaderUtils;
 import org.apache.hadoop.yarn.server.timelineservice.reader.filter.TimelineFilterList;
 import org.apache.hadoop.yarn.server.timelineservice.reader.filter.TimelineFilterUtils;
 import org.apache.hadoop.yarn.server.timelineservice.storage.TimelineReader.Field;
@@ -48,11 +49,11 @@ import org.apache.hadoop.yarn.server.timelineservice.storage.application.Applica
 import org.apache.hadoop.yarn.server.timelineservice.storage.application.ApplicationRowKey;
 import org.apache.hadoop.yarn.server.timelineservice.storage.application.ApplicationRowKeyPrefix;
 import org.apache.hadoop.yarn.server.timelineservice.storage.application.ApplicationTable;
-import org.apache.hadoop.yarn.server.timelineservice.storage.apptoflow.AppToFlowRowKey;
 import org.apache.hadoop.yarn.server.timelineservice.storage.common.BaseTable;
 import org.apache.hadoop.yarn.server.timelineservice.storage.common.HBaseTimelineStorageUtils;
 import org.apache.hadoop.yarn.server.timelineservice.storage.common.RowKeyPrefix;
 import org.apache.hadoop.yarn.server.timelineservice.storage.common.TimelineStorageUtils;
+import org.apache.hadoop.yarn.webapp.BadRequestException;
 
 import com.google.common.base.Preconditions;
 
@@ -372,18 +373,17 @@ class ApplicationEntityReader extends GenericEntityReader {
           context.getFlowRunId());
       scan.setRowPrefixFilter(applicationRowKeyPrefix.getRowKeyPrefix());
     } else {
-      Long flowRunId = context.getFlowRunId();
-      if (flowRunId == null) {
-        AppToFlowRowKey appToFlowRowKey = new AppToFlowRowKey(
-            getFilters().getFromId());
-        FlowContext flowContext = lookupFlowContext(appToFlowRowKey,
-            context.getClusterId(), hbaseConf, conn);
-        flowRunId = flowContext.getFlowRunId();
+      ApplicationRowKey applicationRowKey = null;
+      try {
+        applicationRowKey =
+            ApplicationRowKey.parseRowKeyFromString(getFilters().getFromId());
+      } catch (IllegalArgumentException e) {
+        throw new BadRequestException("Invalid fromid filter is provided.");
+      }
+      if (!context.getClusterId().equals(applicationRowKey.getClusterId())) {
+        throw new BadRequestException(
+            "fromid doesn't belong to clusterId=" + context.getClusterId());
       }
-
-      ApplicationRowKey applicationRowKey =
-          new ApplicationRowKey(context.getClusterId(), context.getUserId(),
-              context.getFlowName(), flowRunId, getFilters().getFromId());
 
       // set start row
       scan.setStartRow(applicationRowKey.getRowKey());
@@ -497,6 +497,10 @@ class ApplicationEntityReader extends GenericEntityReader {
     if (hasField(fieldsToRetrieve, Field.METRICS)) {
       readMetrics(entity, result, ApplicationColumnPrefix.METRIC);
     }
+
+    ApplicationRowKey rowKey = ApplicationRowKey.parseRowKey(result.getRow());
+    entity.getInfo().put(TimelineReaderUtils.FROMID_KEY,
+        rowKey.getRowKeyAsString());
     return entity;
   }
 }
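
With the reader stamping every returned entity with a FROM_ID info value, a
caller pages through applications by echoing the last entity's value back as
the fromid filter. A client-side sketch, assuming a hypothetical
fetchPage(String fromId) helper that issues the query:

    String cursor = null;
    while (true) {
      List<TimelineEntity> page = fetchPage(cursor); // hypothetical helper
      if (page.isEmpty() || (cursor != null && page.size() == 1)) {
        break; // only the inclusive cursor entity came back: done
      }
      // process page here, skipping the first element when cursor != null
      for (TimelineEntity app : page) {
        cursor = (String) app.getInfo().get(TimelineReaderUtils.FROMID_KEY);
      }
    }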

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c3bd8d6a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/reader/FlowRunEntityReader.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/reader/FlowRunEntityReader.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/reader/FlowRunEntityReader.java
index cedf96a..af043b3 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/reader/FlowRunEntityReader.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/reader/FlowRunEntityReader.java
@@ -39,6 +39,7 @@ import org.apache.hadoop.yarn.api.records.timelineservice.TimelineEntity;
 import org.apache.hadoop.yarn.server.timelineservice.reader.TimelineDataToRetrieve;
 import org.apache.hadoop.yarn.server.timelineservice.reader.TimelineEntityFilters;
 import org.apache.hadoop.yarn.server.timelineservice.reader.TimelineReaderContext;
+import org.apache.hadoop.yarn.server.timelineservice.reader.TimelineReaderUtils;
 import org.apache.hadoop.yarn.server.timelineservice.reader.filter.TimelineFilterList;
 import org.apache.hadoop.yarn.server.timelineservice.reader.filter.TimelineFilterUtils;
 import org.apache.hadoop.yarn.server.timelineservice.storage.TimelineReader.Field;
@@ -217,11 +218,17 @@ class FlowRunEntityReader extends TimelineEntityReader {
           context.getUserId(), context.getFlowName());
       scan.setRowPrefixFilter(flowRunRowKeyPrefix.getRowKeyPrefix());
     } else {
-
-      FlowRunRowKey flowRunRowKey =
-          new FlowRunRowKey(context.getClusterId(), context.getUserId(),
-              context.getFlowName(), Long.parseLong(getFilters().getFromId()));
-
+      FlowRunRowKey flowRunRowKey = null;
+      try {
+        flowRunRowKey =
+            FlowRunRowKey.parseRowKeyFromString(getFilters().getFromId());
+      } catch (IllegalArgumentException e) {
+        throw new BadRequestException("Invalid fromid filter is provided.");
+      }
+      if (!context.getClusterId().equals(flowRunRowKey.getClusterId())) {
+        throw new BadRequestException(
+            "fromid doesn't belong to clusterId=" + context.getClusterId());
+      }
       // set start row
       scan.setStartRow(flowRunRowKey.getRowKey());
 
@@ -247,16 +254,11 @@ class FlowRunEntityReader extends TimelineEntityReader {
 
   @Override
   protected TimelineEntity parseEntity(Result result) throws IOException {
-    TimelineReaderContext context = getContext();
     FlowRunEntity flowRun = new FlowRunEntity();
-    flowRun.setUser(context.getUserId());
-    flowRun.setName(context.getFlowName());
-    if (isSingleEntityRead()) {
-      flowRun.setRunId(context.getFlowRunId());
-    } else {
-      FlowRunRowKey rowKey = FlowRunRowKey.parseRowKey(result.getRow());
-      flowRun.setRunId(rowKey.getFlowRunId());
-    }
+    FlowRunRowKey rowKey = FlowRunRowKey.parseRowKey(result.getRow());
+    flowRun.setRunId(rowKey.getFlowRunId());
+    flowRun.setUser(rowKey.getUserId());
+    flowRun.setName(rowKey.getFlowName());
 
     // read the start time
     Long startTime = (Long) FlowRunColumn.MIN_START_TIME.readResult(result);
@@ -285,6 +287,8 @@ class FlowRunEntityReader extends TimelineEntityReader {
 
     // set the id
     flowRun.setId(flowRun.getId());
+    flowRun.getInfo().put(TimelineReaderUtils.FROMID_KEY,
+        rowKey.getRowKeyAsString());
     return flowRun;
   }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c3bd8d6a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/reader/GenericEntityReader.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/reader/GenericEntityReader.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/reader/GenericEntityReader.java
index f6904c5..39013d9 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/reader/GenericEntityReader.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/reader/GenericEntityReader.java
@@ -40,6 +40,7 @@ import org.apache.hadoop.yarn.api.records.timelineservice.TimelineEntity;
 import org.apache.hadoop.yarn.server.timelineservice.reader.TimelineDataToRetrieve;
 import org.apache.hadoop.yarn.server.timelineservice.reader.TimelineEntityFilters;
 import org.apache.hadoop.yarn.server.timelineservice.reader.TimelineReaderContext;
+import org.apache.hadoop.yarn.server.timelineservice.reader.TimelineReaderUtils;
 import org.apache.hadoop.yarn.server.timelineservice.reader.filter.TimelineFilterList;
 import org.apache.hadoop.yarn.server.timelineservice.reader.filter.TimelineFilterUtils;
 import org.apache.hadoop.yarn.server.timelineservice.storage.TimelineReader.Field;
@@ -56,6 +57,7 @@ import org.apache.hadoop.yarn.server.timelineservice.storage.entity.EntityColumn
 import org.apache.hadoop.yarn.server.timelineservice.storage.entity.EntityRowKey;
 import org.apache.hadoop.yarn.server.timelineservice.storage.entity.EntityRowKeyPrefix;
 import org.apache.hadoop.yarn.server.timelineservice.storage.entity.EntityTable;
+import org.apache.hadoop.yarn.webapp.BadRequestException;
 
 import com.google.common.base.Preconditions;
 
@@ -475,19 +477,27 @@ class GenericEntityReader extends TimelineEntityReader {
     TimelineReaderContext context = getContext();
     RowKeyPrefix<EntityRowKey> entityRowKeyPrefix = null;
     // default mode, will always scans from beginning of entity type.
-    if (getFilters() == null || getFilters().getFromIdPrefix() == null) {
+    if (getFilters() == null || getFilters().getFromId() == null) {
       entityRowKeyPrefix = new EntityRowKeyPrefix(context.getClusterId(),
           context.getUserId(), context.getFlowName(), context.getFlowRunId(),
           context.getAppId(), context.getEntityType(), null, null);
       scan.setRowPrefixFilter(entityRowKeyPrefix.getRowKeyPrefix());
     } else { // pagination mode, will scan from given entityIdPrefix!enitityId
-      entityRowKeyPrefix = new EntityRowKeyPrefix(context.getClusterId(),
-          context.getUserId(), context.getFlowName(), context.getFlowRunId(),
-          context.getAppId(), context.getEntityType(),
-          getFilters().getFromIdPrefix(), getFilters().getFromId());
+
+      EntityRowKey entityRowKey = null;
+      try {
+        entityRowKey =
+            EntityRowKey.parseRowKeyFromString(getFilters().getFromId());
+      } catch (IllegalArgumentException e) {
+        throw new BadRequestException("Invalid fromid filter is provided.");
+      }
+      if (!context.getClusterId().equals(entityRowKey.getClusterId())) {
+        throw new BadRequestException(
+            "fromid doesn't belong to clusterId=" + context.getClusterId());
+      }
 
       // set start row
-      scan.setStartRow(entityRowKeyPrefix.getRowKeyPrefix());
+      scan.setStartRow(entityRowKey.getRowKey());
 
       // get the bytes for stop row
       entityRowKeyPrefix = new EntityRowKeyPrefix(context.getClusterId(),
@@ -599,6 +609,9 @@ class GenericEntityReader extends TimelineEntityReader {
     if (hasField(fieldsToRetrieve, Field.METRICS)) {
       readMetrics(entity, result, EntityColumnPrefix.METRIC);
     }
+
+    entity.getInfo().put(TimelineReaderUtils.FROMID_KEY,
+        parseRowKey.getRowKeyAsString());
     return entity;
   }
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c3bd8d6a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/test/java/org/apache/hadoop/yarn/server/timelineservice/storage/common/TestRowKeys.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/test/java/org/apache/hadoop/yarn/server/timelineservice/storage/common/TestRowKeys.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/test/java/org/apache/hadoop/yarn/server/timelineservice/storage/common/TestRowKeys.java
index bac5f85..cbd2273 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/test/java/org/apache/hadoop/yarn/server/timelineservice/storage/common/TestRowKeys.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/test/java/org/apache/hadoop/yarn/server/timelineservice/storage/common/TestRowKeys.java
@@ -23,7 +23,6 @@ import static org.junit.Assert.assertTrue;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.yarn.api.records.ApplicationId;
 import org.apache.hadoop.yarn.api.records.timelineservice.TimelineEntity;
-import org.apache.hadoop.yarn.server.timelineservice.reader.TimelineReaderUtils;
 import org.apache.hadoop.yarn.server.timelineservice.storage.application.ApplicationRowKey;
 import org.apache.hadoop.yarn.server.timelineservice.storage.application.ApplicationRowKeyPrefix;
 import org.apache.hadoop.yarn.server.timelineservice.storage.apptoflow.AppToFlowRowKey;
@@ -226,26 +225,6 @@ public class TestRowKeys {
   }
 
   @Test
-  public void testFlowActivityRowKeyAsString() {
-    String cluster = "cl" + TimelineReaderUtils.DEFAULT_DELIMITER_CHAR + "uster"
-        + TimelineReaderUtils.DEFAULT_ESCAPE_CHAR;
-    String user = TimelineReaderUtils.DEFAULT_ESCAPE_CHAR + "user";
-    String fName = "dummy_" + TimelineReaderUtils.DEFAULT_DELIMITER_CHAR
-        + TimelineReaderUtils.DEFAULT_ESCAPE_CHAR + "flow"
-        + TimelineReaderUtils.DEFAULT_DELIMITER_CHAR;
-    Long ts = 1459900830000L;
-    Long dayTimestamp = HBaseTimelineStorageUtils.getTopOfTheDayTimestamp(ts);
-    String rowKeyAsString =
-        new FlowActivityRowKey(cluster, ts, user, fName).getRowKeyAsString();
-    FlowActivityRowKey rowKey =
-        FlowActivityRowKey.parseRowKeyFromString(rowKeyAsString);
-    assertEquals(cluster, rowKey.getClusterId());
-    assertEquals(dayTimestamp, rowKey.getDayTimestamp());
-    assertEquals(user, rowKey.getUserId());
-    assertEquals(fName, rowKey.getFlowName());
-  }
-
-  @Test
   public void testFlowRunRowKey() {
     byte[] byteRowKey =
         new FlowRunRowKey(CLUSTER, USER, FLOW_NAME, FLOW_RUN_ID).getRowKey();

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c3bd8d6a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/test/java/org/apache/hadoop/yarn/server/timelineservice/storage/common/TestRowKeysAsString.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/test/java/org/apache/hadoop/yarn/server/timelineservice/storage/common/TestRowKeysAsString.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/test/java/org/apache/hadoop/yarn/server/timelineservice/storage/common/TestRowKeysAsString.java
new file mode 100644
index 0000000..f0ef720
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/test/java/org/apache/hadoop/yarn/server/timelineservice/storage/common/TestRowKeysAsString.java
@@ -0,0 +1,115 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.yarn.server.timelineservice.storage.common;
+
+import static org.junit.Assert.assertEquals;
+
+import org.apache.hadoop.yarn.api.records.ApplicationId;
+import org.apache.hadoop.yarn.api.records.timelineservice.TimelineEntity;
+import org.apache.hadoop.yarn.server.timelineservice.reader.TimelineReaderUtils;
+import org.apache.hadoop.yarn.server.timelineservice.storage.application.ApplicationRowKey;
+import org.apache.hadoop.yarn.server.timelineservice.storage.entity.EntityRowKey;
+import org.apache.hadoop.yarn.server.timelineservice.storage.flow.FlowActivityRowKey;
+import org.apache.hadoop.yarn.server.timelineservice.storage.flow.FlowRunRowKey;
+import org.junit.Test;
+
+/**
+ * Test for row key as string.
+ */
+public class TestRowKeysAsString {
+
+  private final static String CLUSTER =
+      "cl" + TimelineReaderUtils.DEFAULT_DELIMITER_CHAR + "uster"
+          + TimelineReaderUtils.DEFAULT_ESCAPE_CHAR;
+  private final static String USER =
+      TimelineReaderUtils.DEFAULT_ESCAPE_CHAR + "user";
+  private final static String FLOW_NAME =
+      "dummy_" + TimelineReaderUtils.DEFAULT_DELIMITER_CHAR
+          + TimelineReaderUtils.DEFAULT_ESCAPE_CHAR + "flow"
+          + TimelineReaderUtils.DEFAULT_DELIMITER_CHAR;
+  private final static Long FLOW_RUN_ID = System.currentTimeMillis();
+  private final static String APPLICATION_ID =
+      ApplicationId.newInstance(System.currentTimeMillis(), 1).toString();
+
+  @Test(timeout = 10000)
+  public void testApplicationRow() {
+    String rowKeyAsString = new ApplicationRowKey(CLUSTER, USER, FLOW_NAME,
+        FLOW_RUN_ID, APPLICATION_ID).getRowKeyAsString();
+    ApplicationRowKey rowKey =
+        ApplicationRowKey.parseRowKeyFromString(rowKeyAsString);
+    assertEquals(CLUSTER, rowKey.getClusterId());
+    assertEquals(USER, rowKey.getUserId());
+    assertEquals(FLOW_NAME, rowKey.getFlowName());
+    assertEquals(FLOW_RUN_ID, rowKey.getFlowRunId());
+    assertEquals(APPLICATION_ID, rowKey.getAppId());
+  }
+
+  @Test(timeout = 10000)
+  public void testEntityRowKey() {
+    char del = TimelineReaderUtils.DEFAULT_DELIMITER_CHAR;
+    char esc = TimelineReaderUtils.DEFAULT_ESCAPE_CHAR;
+    String id = del + esc + "ent" + esc + del + "ity" + esc + del + esc + "id"
+        + esc + del + esc;
+    String type = "entity" + esc + del + esc + "Type";
+    TimelineEntity entity = new TimelineEntity();
+    entity.setId(id);
+    entity.setType(type);
+    entity.setIdPrefix(54321);
+
+    String rowKeyAsString =
+        new EntityRowKey(CLUSTER, USER, FLOW_NAME, FLOW_RUN_ID, APPLICATION_ID,
+            entity.getType(), entity.getIdPrefix(), entity.getId())
+                .getRowKeyAsString();
+    EntityRowKey rowKey = EntityRowKey.parseRowKeyFromString(rowKeyAsString);
+    assertEquals(CLUSTER, rowKey.getClusterId());
+    assertEquals(USER, rowKey.getUserId());
+    assertEquals(FLOW_NAME, rowKey.getFlowName());
+    assertEquals(FLOW_RUN_ID, rowKey.getFlowRunId());
+    assertEquals(APPLICATION_ID, rowKey.getAppId());
+    assertEquals(entity.getType(), rowKey.getEntityType());
+    assertEquals(entity.getIdPrefix(), rowKey.getEntityIdPrefix().longValue());
+    assertEquals(entity.getId(), rowKey.getEntityId());
+
+  }
+
+  @Test(timeout = 10000)
+  public void testFlowActivityRowKey() {
+    Long ts = 1459900830000L;
+    Long dayTimestamp = HBaseTimelineStorageUtils.getTopOfTheDayTimestamp(ts);
+    String rowKeyAsString = new FlowActivityRowKey(CLUSTER, ts, USER, FLOW_NAME)
+        .getRowKeyAsString();
+    FlowActivityRowKey rowKey =
+        FlowActivityRowKey.parseRowKeyFromString(rowKeyAsString);
+    assertEquals(CLUSTER, rowKey.getClusterId());
+    assertEquals(dayTimestamp, rowKey.getDayTimestamp());
+    assertEquals(USER, rowKey.getUserId());
+    assertEquals(FLOW_NAME, rowKey.getFlowName());
+  }
+
+  @Test(timeout = 10000)
+  public void testFlowRunRowKey() {
+    String rowKeyAsString =
+        new FlowRunRowKey(CLUSTER, USER, FLOW_NAME, FLOW_RUN_ID)
+            .getRowKeyAsString();
+    FlowRunRowKey rowKey = FlowRunRowKey.parseRowKeyFromString(rowKeyAsString);
+    assertEquals(CLUSTER, rowKey.getClusterId());
+    assertEquals(USER, rowKey.getUserId());
+    assertEquals(FLOW_NAME, rowKey.getFlowName());
+    assertEquals(FLOW_RUN_ID, rowKey.getFlowRunId());
+  }
+}
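
The tests above lean on the escaping contract of the TimelineReaderUtils
helpers: component strings may contain the delimiter and escape characters
and still round-trip. A hedged sketch of that contract:

    char del = TimelineReaderUtils.DEFAULT_DELIMITER_CHAR;
    char esc = TimelineReaderUtils.DEFAULT_ESCAPE_CHAR;
    String joined = TimelineReaderUtils.joinAndEscapeStrings(
        new String[] {"a" + del + "b", esc + "c"});
    List<String> parts = TimelineReaderUtils.split(joined);
    // parts is again ["a" + del + "b", esc + "c"].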

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c3bd8d6a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/reader/TimelineEntityFilters.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/reader/TimelineEntityFilters.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/reader/TimelineEntityFilters.java
index 79a83c6..dc3e3ec 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/reader/TimelineEntityFilters.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/reader/TimelineEntityFilters.java
@@ -99,19 +99,10 @@ import org.apache.hadoop.yarn.server.timelineservice.reader.filter.TimelineKeyVa
  * filter list, event filters can be evaluated with logical AND/OR and we can
  * create a hierarchy of these {@link TimelineExistsFilter} objects. If null or
  * empty, the filter is not applied.</li>
- * <li><b>fromIdPrefix</b> - If specified, retrieve entities with an id prefix
- * greater than or equal to the specified fromIdPrefix. If fromIdPrefix is same
- * for all entities of a given entity type, then the user must provide fromId as
- * a filter to denote the start entity from which further entities will be
- * fetched. fromIdPrefix is mandatory even in the case the entity id prefix is
- * not used and should be set to 0.</li>
- * <li><b>fromId</b> - If specified along with fromIdPrefix, retrieve entities
- * with an id prefix greater than or equal to specified id prefix in
- * fromIdPrefix and entity id lexicographically greater than or equal to entity
- * id specified in fromId. Please note than fromIdPrefix is mandatory if fromId
- * is specified, otherwise, the filter will be ignored. It is recommended to
- * provide both fromIdPrefix and fromId filters for more accurate results as id
- * prefix may not be unique for an entity.</li>
+ * <li><b>fromId</b> - If specified, retrieve the next set of entities from the
+ * given fromId. The set of entities retrieved is inclusive of the specified
+ * fromId. fromId should be taken from the value associated with the FROM_ID
+ * info key in the earlier entity response.</li>
  * </ul>
  */
 @Private
@@ -126,7 +117,6 @@ public class TimelineEntityFilters {
   private TimelineFilterList configFilters;
   private TimelineFilterList metricFilters;
   private TimelineFilterList eventFilters;
-  private Long fromIdPrefix;
   private String fromId;
   private static final long DEFAULT_BEGIN_TIME = 0L;
   private static final long DEFAULT_END_TIME = Long.MAX_VALUE;
@@ -146,11 +136,10 @@ public class TimelineEntityFilters {
       TimelineFilterList entityInfoFilters,
       TimelineFilterList entityConfigFilters,
       TimelineFilterList entityMetricFilters,
-      TimelineFilterList entityEventFilters, Long fromidprefix, String fromid) {
+      TimelineFilterList entityEventFilters, String fromid) {
     this(entityLimit, timeBegin, timeEnd, entityRelatesTo, entityIsRelatedTo,
         entityInfoFilters, entityConfigFilters, entityMetricFilters,
         entityEventFilters);
-    this.fromIdPrefix = fromidprefix;
     this.fromId = fromid;
   }
 
@@ -276,12 +265,4 @@ public class TimelineEntityFilters {
   public void setFromId(String fromId) {
     this.fromId = fromId;
   }
-
-  public Long getFromIdPrefix() {
-    return fromIdPrefix;
-  }
-
-  public void setFromIdPrefix(Long fromIdPrefix) {
-    this.fromIdPrefix = fromIdPrefix;
-  }
 }
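
A hedged sketch of the narrowed constructor in use (assuming Long types for
the limit and created-time arguments, and previousFromId holding the FROM_ID
info value from the last response):

    TimelineEntityFilters filters = new TimelineEntityFilters(
        10L,                     // limit
        null, null,              // createdTimeBegin/End: library defaults
        null, null,              // relatesTo / isRelatedTo
        null, null, null, null,  // info, config, metric, event filter lists
        previousFromId);         // single opaque cursor replacing the pair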

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c3bd8d6a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/reader/TimelineReaderWebServices.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/reader/TimelineReaderWebServices.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/reader/TimelineReaderWebServices.java
index 290c255..94ac948 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/reader/TimelineReaderWebServices.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/reader/TimelineReaderWebServices.java
@@ -265,20 +265,10 @@ public class TimelineReaderWebServices {
    *     or has a value less than 1, and metrics have to be retrieved, then
    *     metricsLimit will be considered as 1 i.e. latest single value of
    *     metric(s) will be returned. (Optional query param).
-   * @param fromIdPrefix If specified, retrieve entities with an id prefix
-   *     greater than or equal to the specified fromIdPrefix. If fromIdPrefix
-   *     is same for all entities of a given entity type, then the user must
-   *     provide fromId as a filter to denote the start entity from which
-   *     further entities will be fetched. fromIdPrefix is mandatory even
-   *     in the case the entity id prefix is not used and should be set to 0.
-   * @param fromId If specified along with fromIdPrefix, retrieve entities with
-   *     an id prefix greater than or equal to specified id prefix in
-   *     fromIdPrefix and entity id lexicographically greater than or equal
-   *     to entity id specified in fromId. Please note than fromIdPrefix is
-   *     mandatory if fromId is specified, otherwise, the filter will be
-   *     ignored. It is recommended to provide both fromIdPrefix and fromId
-   *     filters for more accurate results as id prefix may not be unique
-   *     for an entity.
+   * @param fromId If specified, retrieve the next set of entities from the
+   *     given fromId. The set of entities retrieved is inclusive of the
+   *     specified fromId. fromId should be taken from the value associated
+   *     with the FROM_ID info key in the earlier entity response.
    *
    * @return If successful, a HTTP 200(OK) response having a JSON representing
    *     a set of <cite>TimelineEntity</cite> instances of the given entity type
@@ -310,7 +300,6 @@ public class TimelineReaderWebServices {
       @QueryParam("metricstoretrieve") String metricsToRetrieve,
       @QueryParam("fields") String fields,
       @QueryParam("metricslimit") String metricsLimit,
-      @QueryParam("fromidprefix") String fromIdPrefix,
       @QueryParam("fromid") String fromId) {
     String url = req.getRequestURI() +
         (req.getQueryString() == null ? "" :
@@ -335,7 +324,7 @@ public class TimelineReaderWebServices {
           TimelineReaderWebServicesUtils.createTimelineEntityFilters(
           limit, createdTimeStart, createdTimeEnd, relatesTo, isRelatedTo,
               infofilters, conffilters, metricfilters, eventfilters,
-              fromIdPrefix, fromId),
+              fromId),
           TimelineReaderWebServicesUtils.createTimelineDataToRetrieve(
           confsToRetrieve, metricsToRetrieve, fields, metricsLimit));
     } catch (Exception e) {
@@ -418,20 +407,10 @@ public class TimelineReaderWebServices {
    *     or has a value less than 1, and metrics have to be retrieved, then
    *     metricsLimit will be considered as 1 i.e. latest single value of
    *     metric(s) will be returned. (Optional query param).
-   * @param fromIdPrefix If specified, retrieve entities with an id prefix
-   *     greater than or equal to the specified fromIdPrefix. If fromIdPrefix
-   *     is same for all entities of a given entity type, then the user must
-   *     provide fromId as a filter to denote the start entity from which
-   *     further entities will be fetched. fromIdPrefix is mandatory even
-   *     in the case the entity id prefix is not used and should be set to 0.
-   * @param fromId If specified along with fromIdPrefix, retrieve entities with
-   *     an id prefix greater than or equal to specified id prefix in
-   *     fromIdPrefix and entity id lexicographically greater than or equal
-   *     to entity id specified in fromId. Please note than fromIdPrefix is
-   *     mandatory if fromId is specified, otherwise, the filter will be
-   *     ignored. It is recommended to provide both fromIdPrefix and fromId
-   *     filters for more accurate results as id prefix may not be unique
-   *     for an entity.
+   * @param fromId If specified, retrieve the next set of entities from the
+   *     given fromId. The set of entities retrieved is inclusive of the
+   *     specified fromId. fromId should be taken from the value associated
+   *     with the FROM_ID info key in the earlier entity response.
    *
    * @return If successful, a HTTP 200(OK) response having a JSON representing
    *     a set of <cite>TimelineEntity</cite> instances of the given entity type
@@ -468,12 +447,11 @@ public class TimelineReaderWebServices {
       @QueryParam("metricstoretrieve") String metricsToRetrieve,
       @QueryParam("fields") String fields,
       @QueryParam("metricslimit") String metricsLimit,
-      @QueryParam("fromidprefix") String fromIdPrefix,
       @QueryParam("fromid") String fromId) {
     return getEntities(req, res, null, appId, entityType, userId, flowName,
         flowRunId, limit, createdTimeStart, createdTimeEnd, relatesTo,
         isRelatedTo, infofilters, conffilters, metricfilters, eventfilters,
-        confsToRetrieve, metricsToRetrieve, fields, metricsLimit, fromIdPrefix,
+        confsToRetrieve, metricsToRetrieve, fields, metricsLimit,
         fromId);
   }
 
@@ -545,20 +523,10 @@ public class TimelineReaderWebServices {
    *     or has a value less than 1, and metrics have to be retrieved, then
    *     metricsLimit will be considered as 1 i.e. latest single value of
    *     metric(s) will be returned. (Optional query param).
-   * @param fromIdPrefix If specified, retrieve entities with an id prefix
-   *     greater than or equal to the specified fromIdPrefix. If fromIdPrefix
-   *     is same for all entities of a given entity type, then the user must
-   *     provide fromId as a filter to denote the start entity from which
-   *     further entities will be fetched. fromIdPrefix is mandatory even
-   *     in the case the entity id prefix is not used and should be set to 0.
-   * @param fromId If specified along with fromIdPrefix, retrieve entities with
-   *     an id prefix greater than or equal to specified id prefix in
-   *     fromIdPrefix and entity id lexicographically greater than or equal
-   *     to entity id specified in fromId. Please note than fromIdPrefix is
-   *     mandatory if fromId is specified, otherwise, the filter will be
-   *     ignored. It is recommended to provide both fromIdPrefix and fromId
-   *     filters for more accurate results as id prefix may not be unique
-   *     for an entity.
+   * @param fromId If specified, retrieve the next set of entities from the
+   *     given fromId. The set of entities retrieved is inclusive of the
+   *     specified fromId. fromId should be taken from the value associated
+   *     with the FROM_ID info key in the earlier entity response.
    *
    * @return If successful, a HTTP 200(OK) response having a JSON representing
    *     a set of <cite>TimelineEntity</cite> instances of the given entity type
@@ -596,7 +564,6 @@ public class TimelineReaderWebServices {
       @QueryParam("metricstoretrieve") String metricsToRetrieve,
       @QueryParam("fields") String fields,
       @QueryParam("metricslimit") String metricsLimit,
-      @QueryParam("fromidprefix") String fromIdPrefix,
       @QueryParam("fromid") String fromId) {
     String url = req.getRequestURI() +
         (req.getQueryString() == null ? "" :
@@ -617,7 +584,7 @@ public class TimelineReaderWebServices {
           TimelineReaderWebServicesUtils.createTimelineEntityFilters(
           limit, createdTimeStart, createdTimeEnd, relatesTo, isRelatedTo,
               infofilters, conffilters, metricfilters, eventfilters,
-              fromIdPrefix, fromId),
+              fromId),
           TimelineReaderWebServicesUtils.createTimelineDataToRetrieve(
           confsToRetrieve, metricsToRetrieve, fields, metricsLimit));
     } catch (Exception e) {
@@ -1098,9 +1065,10 @@ public class TimelineReaderWebServices {
    *     METRICS makes sense for flow runs hence only ALL or METRICS are
    *     supported as fields for fetching flow runs. Other fields will lead to
    *     HTTP 400 (Bad Request) response. (Optional query param).
-   * @param fromId Defines the flow run id. If specified, retrieve the next
-   *     set of flow runs from the given id. The set of flow runs retrieved
-   *     is inclusive of specified fromId.
+   * @param fromId If specified, retrieve the next set of flow run entities
+   *     from the given fromId. The set of entities retrieved is inclusive
+   *     of the specified fromId. fromId should be taken from the value
+   *     associated with the FROM_ID info key in the earlier entity response.
    *
    * @return If successful, a HTTP 200(OK) response having a JSON representing a
    *     set of <cite>FlowRunEntity</cite> instances for the given flow are
@@ -1145,7 +1113,7 @@ public class TimelineReaderWebServices {
       entities = timelineReaderManager.getEntities(context,
           TimelineReaderWebServicesUtils.createTimelineEntityFilters(
           limit, createdTimeStart, createdTimeEnd, null, null, null,
-              null, null, null, null, fromId),
+              null, null, null, fromId),
           TimelineReaderWebServicesUtils.createTimelineDataToRetrieve(
           null, metricsToRetrieve, fields, null));
     } catch (Exception e) {
@@ -1188,9 +1156,10 @@ public class TimelineReaderWebServices {
    *     METRICS makes sense for flow runs hence only ALL or METRICS are
    *     supported as fields for fetching flow runs. Other fields will lead to
    *     HTTP 400 (Bad Request) response. (Optional query param).
-   * @param fromId Defines the flow run id. If specified, retrieve the next
-   *     set of flow runs from the given id. The set of flow runs retrieved
-   *     is inclusive of specified fromId.
+   * @param fromId If specified, retrieve the next set of flow run entities
+   *     from the given fromId. The set of entities retrieved is inclusive
+   *     of the specified fromId. fromId should be taken from the value
+   *     associated with the FROM_ID info key in the earlier entity response.
    *
    * @return If successful, a HTTP 200(OK) response having a JSON representing a
    *     set of <cite>FlowRunEntity</cite> instances for the given flow are
@@ -1247,9 +1216,10 @@ public class TimelineReaderWebServices {
    *     METRICS makes sense for flow runs hence only ALL or METRICS are
    *     supported as fields for fetching flow runs. Other fields will lead to
    *     HTTP 400 (Bad Request) response. (Optional query param).
-   * @param fromId Defines the flow run id. If specified, retrieve the next
-   *     set of flow runs from the given id. The set of flow runs retrieved
-   *     is inclusive of specified fromId.
+   * @param fromId If specified, retrieve the next set of flow run entities
+   *     from the given fromId. The set of entities retrieved is inclusive
+   *     of the specified fromId. fromId should be taken from the value
+   *     associated with the FROM_ID info key in the earlier entity response.
    *
    * @return If successful, a HTTP 200(OK) response having a JSON representing a
    *     set of <cite>FlowRunEntity</cite> instances for the given flow are
@@ -1293,7 +1263,7 @@ public class TimelineReaderWebServices {
               TimelineEntityType.YARN_FLOW_RUN.toString(), null, null),
           TimelineReaderWebServicesUtils.createTimelineEntityFilters(
           limit, createdTimeStart, createdTimeEnd, null, null, null,
-              null, null, null, null, fromId),
+              null, null, null, fromId),
           TimelineReaderWebServicesUtils.createTimelineDataToRetrieve(
           null, metricsToRetrieve, fields, null));
     } catch (Exception e) {
@@ -1423,7 +1393,7 @@ public class TimelineReaderWebServices {
       DateRange range = parseDateRange(dateRange);
       TimelineEntityFilters entityFilters =
           TimelineReaderWebServicesUtils.createTimelineEntityFilters(
-              limit, null, null, null, null, null, null, null, null, null,
+              limit, null, null, null, null, null, null, null, null,
               fromId);
       entityFilters.setCreatedTimeBegin(range.dateStart);
       entityFilters.setCreatedTimeEnd(range.dateEnd);
@@ -1744,9 +1714,10 @@ public class TimelineReaderWebServices {
    *     or has a value less than 1, and metrics have to be retrieved, then
    *     metricsLimit will be considered as 1 i.e. latest single value of
    *     metric(s) will be returned. (Optional query param).
-   * @param fromId Defines the application id. If specified, retrieve the next
-   *     set of applications from the given id. The set of applications
-   *     retrieved is inclusive of specified fromId.
+   * @param fromId If specified, retrieve the next set of applications
+   *     from the given fromId. The set of applications retrieved is inclusive
+   *     of the specified fromId. fromId should be taken from the value
+   *     associated with the FROM_ID info key in the earlier entity response.
    *
    * @return If successful, a HTTP 200(OK) response having a JSON representing
    *     a set of <cite>TimelineEntity</cite> instances representing apps is
@@ -1799,7 +1770,7 @@ public class TimelineReaderWebServices {
       entities = timelineReaderManager.getEntities(context,
           TimelineReaderWebServicesUtils.createTimelineEntityFilters(
           limit, createdTimeStart, createdTimeEnd, relatesTo, isRelatedTo,
-              infofilters, conffilters, metricfilters, eventfilters, null,
+              infofilters, conffilters, metricfilters, eventfilters,
               fromId),
           TimelineReaderWebServicesUtils.createTimelineDataToRetrieve(
           confsToRetrieve, metricsToRetrieve, fields, metricsLimit));
@@ -1876,9 +1847,10 @@ public class TimelineReaderWebServices {
    *     or has a value less than 1, and metrics have to be retrieved, then
    *     metricsLimit will be considered as 1 i.e. latest single value of
    *     metric(s) will be returned. (Optional query param).
-   * @param fromId Defines the application id. If specified, retrieve the next
-   *     set of applications from the given id. The set of applications
-   *     retrieved is inclusive of specified fromId.
+   * @param fromId If specified, retrieve the next set of applications
+   *     starting from the given fromId, inclusive. fromId should be the value
+   *     associated with the FROM_ID info key in an entity response returned
+   *     by an earlier request.
    *
    * @return If successful, a HTTP 200(OK) response having a JSON representing
    *     a set of <cite>TimelineEntity</cite> instances representing apps is
@@ -1916,7 +1888,7 @@ public class TimelineReaderWebServices {
         TimelineEntityType.YARN_APPLICATION.toString(), userId, flowName,
         flowRunId, limit, createdTimeStart, createdTimeEnd, relatesTo,
         isRelatedTo, infofilters, conffilters, metricfilters, eventfilters,
-        confsToRetrieve, metricsToRetrieve, fields, metricsLimit, null, fromId);
+        confsToRetrieve, metricsToRetrieve, fields, metricsLimit, fromId);
   }
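
A note on the new contract: paging is now driven by a single opaque
cursor. A client reads the FROM_ID info value off the last entity of one
page and echoes it back as the fromid query param of the next request.
The sketch below is a minimal client-side pager, assuming the info key is
exposed under the string "FROM_ID" and that fetchFlowRuns() and process()
are hypothetical transport and consumer helpers, not part of this patch:

    import java.util.List;
    import org.apache.hadoop.yarn.api.records.timelineservice.FlowRunEntity;

    public abstract class FlowRunPager {
      private static final int LIMIT = 100;

      // Hypothetical transport and consumer; not part of this patch.
      abstract List<FlowRunEntity> fetchFlowRuns(String url);
      abstract void process(FlowRunEntity run);

      /** Pages through every flow run under the given reader base URL. */
      void pageAll(String base) {
        String fromId = null;
        List<FlowRunEntity> page;
        do {
          String url = base + "/runs?limit=" + LIMIT
              + (fromId == null ? "" : "&fromid=" + fromId);
          page = fetchFlowRuns(url);
          // fromid is inclusive, so from the second page onwards the
          // first entity repeats the previous page's last one; skip it.
          for (int i = (fromId == null) ? 0 : 1; i < page.size(); i++) {
            process(page.get(i));
          }
          fromId = page.isEmpty() ? null
              : (String) page.get(page.size() - 1).getInfo().get("FROM_ID");
        } while (page.size() == LIMIT);
      }
    }

The loop stops once a page comes back short of the requested limit;
skipping index 0 on later pages avoids double-processing the inclusive
boundary entity.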
 
   /**
@@ -1980,9 +1952,10 @@ public class TimelineReaderWebServices {
    *     or has a value less than 1, and metrics have to be retrieved, then
    *     metricsLimit will be considered as 1 i.e. latest single value of
    *     metric(s) will be returned. (Optional query param).
-   * @param fromId Defines the application id. If specified, retrieve the next
-   *     set of applications from the given id. The set of applications
-   *     retrieved is inclusive of specified fromId.
+   * @param fromId If specified, retrieve the next set of applications
+   *     starting from the given fromId, inclusive. fromId should be the value
+   *     associated with the FROM_ID info key in an entity response returned
+   *     by an earlier request.
    *
    * @return If successful, a HTTP 200(OK) response having a JSON representing
    *     a set of <cite>TimelineEntity</cite> instances representing apps is
@@ -2022,7 +1995,7 @@ public class TimelineReaderWebServices {
         TimelineEntityType.YARN_APPLICATION.toString(), userId, flowName,
         flowRunId, limit, createdTimeStart, createdTimeEnd, relatesTo,
         isRelatedTo, infofilters, conffilters, metricfilters, eventfilters,
-        confsToRetrieve, metricsToRetrieve, fields, metricsLimit, null, fromId);
+        confsToRetrieve, metricsToRetrieve, fields, metricsLimit, fromId);
   }
 
   /**
@@ -2083,9 +2056,10 @@ public class TimelineReaderWebServices {
    *     or has a value less than 1, and metrics have to be retrieved, then
    *     metricsLimit will be considered as 1 i.e. latest single value of
    *     metric(s) will be returned. (Optional query param).
-   * @param fromId Defines the application id. If specified, retrieve the next
-   *     set of applications from the given id. The set of applications
-   *     retrieved is inclusive of specified fromId.
+   * @param fromId If specified, retrieve the next set of applications
+   *     starting from the given fromId, inclusive. fromId should be the value
+   *     associated with the FROM_ID info key in an entity response returned
+   *     by an earlier request.
    *
    * @return If successful, a HTTP 200(OK) response having a JSON representing
    *     a set of <cite>TimelineEntity</cite> instances representing apps is
@@ -2122,7 +2096,7 @@ public class TimelineReaderWebServices {
         TimelineEntityType.YARN_APPLICATION.toString(), userId, flowName,
         null, limit, createdTimeStart, createdTimeEnd, relatesTo, isRelatedTo,
         infofilters, conffilters, metricfilters, eventfilters,
-        confsToRetrieve, metricsToRetrieve, fields, metricsLimit, null, fromId);
+        confsToRetrieve, metricsToRetrieve, fields, metricsLimit, fromId);
   }
 
   /**
@@ -2184,9 +2158,10 @@ public class TimelineReaderWebServices {
    *     or has a value less than 1, and metrics have to be retrieved, then
    *     metricsLimit will be considered as 1 i.e. latest single value of
    *     metric(s) will be returned. (Optional query param).
-   * @param fromId Defines the application id. If specified, retrieve the next
-   *     set of applications from the given id. The set of applications
-   *     retrieved is inclusive of specified fromId.
+   * @param fromId If specified, retrieve the next set of applications
+   *     starting from the given fromId, inclusive. fromId should be the value
+   *     associated with the FROM_ID info key in an entity response returned
+   *     by an earlier request.
    *
    * @return If successful, a HTTP 200(OK) response having a JSON representing
    *     a set of <cite>TimelineEntity</cite> instances representing apps is
@@ -2224,7 +2199,7 @@ public class TimelineReaderWebServices {
         TimelineEntityType.YARN_APPLICATION.toString(), userId, flowName,
         null, limit, createdTimeStart, createdTimeEnd, relatesTo, isRelatedTo,
         infofilters, conffilters, metricfilters, eventfilters,
-        confsToRetrieve, metricsToRetrieve, fields, metricsLimit, null, fromId);
+        confsToRetrieve, metricsToRetrieve, fields, metricsLimit, fromId);
   }
 
   /**
@@ -2295,21 +2270,11 @@ public class TimelineReaderWebServices {
    *          have to be retrieved, then metricsLimit will be considered as 1
    *          i.e. latest single value of metric(s) will be returned. (Optional
    *          query param).
-   * @param fromIdPrefix If specified, retrieve entities with an id prefix
-   *          greater than or equal to the specified fromIdPrefix. If
-   *          fromIdPrefix is same for all entities of a given entity type, then
-   *          the user must provide fromId as a filter to denote the start
-   *          entity from which further entities will be fetched. fromIdPrefix
-   *          is mandatory even in the case the entity id prefix is not used and
-   *          should be set to 0.
-   * @param fromId If specified along with fromIdPrefix, retrieve entities with
-   *          an id prefix greater than or equal to specified id prefix in
-   *          fromIdPrefix and entity id lexicographically greater than or equal
-   *          to entity id specified in fromId. Please note than fromIdPrefix is
-   *          mandatory if fromId is specified, otherwise, the filter will be
-   *          ignored. It is recommended to provide both fromIdPrefix and fromId
-   *          filters for more accurate results as id prefix may not be unique
-   *          for an entity.
+   * @param fromId If specified, retrieve the next set of application-attempt
+   *         entities starting from the given fromId, inclusive. fromId should
+   *         be the value associated with the FROM_ID info key in an entity
+   *         response returned by an earlier request.
    *
    * @return If successful, a HTTP 200(OK) response having a JSON representing a
    *         set of <cite>TimelineEntity</cite> instances of the app-attempt
@@ -2343,13 +2308,12 @@ public class TimelineReaderWebServices {
       @QueryParam("metricstoretrieve") String metricsToRetrieve,
       @QueryParam("fields") String fields,
       @QueryParam("metricslimit") String metricsLimit,
-      @QueryParam("fromidprefix") String fromIdPrefix,
       @QueryParam("fromid") String fromId) {
 
     return getAppAttempts(req, res, null, appId, userId, flowName, flowRunId,
         limit, createdTimeStart, createdTimeEnd, relatesTo, isRelatedTo,
         infofilters, conffilters, metricfilters, eventfilters, confsToRetrieve,
-        metricsToRetrieve, fields, metricsLimit, fromIdPrefix, fromId);
+        metricsToRetrieve, fields, metricsLimit, fromId);
   }
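
With fromidprefix gone, the earlier two-part cursor (an entity id prefix
plus an entity id) collapses into the single fromid query param carrying
the opaque FROM_ID value. Below is a before/after sketch of the URLs a
caller would build; the host, ids and cursor values are illustrative
rather than taken from this patch:

    // Pre-patch: two cursor keys; fromidprefix was mandatory and had to be
    // set to 0 whenever the entity id prefix was unused.
    String appId = "application_1503378922354_0001";          // illustrative
    String before = "http://reader:8188/ws/v2/timeline/apps/" + appId
        + "/appattempts?fromidprefix=0&fromid=appattempt_01_000002";

    // Post-patch: one opaque cursor, echoed verbatim from the FROM_ID info
    // value of an earlier response.
    String fromId = "FROM_ID-value-from-previous-response";   // illustrative
    String after = "http://reader:8188/ws/v2/timeline/apps/" + appId
        + "/appattempts?fromid=" + fromId;

Callers no longer reason about id prefixes at all; ordering and cursor
layout become entirely the reader's concern.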
 
   /**
@@ -2421,21 +2385,11 @@ public class TimelineReaderWebServices {
    *          have to be retrieved, then metricsLimit will be considered as 1
    *          i.e. latest single value of metric(s) will be returned. (Optional
    *          query param).
-   * @param fromIdPrefix If specified, retrieve entities with an id prefix
-   *          greater than or equal to the specified fromIdPrefix. If
-   *          fromIdPrefix is same for all entities of a given entity type, then
-   *          the user must provide fromId as a filter to denote the start
-   *          entity from which further entities will be fetched. fromIdPrefix
-   *          is mandatory even in the case the entity id prefix is not used and
-   *          should be set to 0.
-   * @param fromId If specified along with fromIdPrefix, retrieve entities with
-   *          an id prefix greater than or equal to specified id prefix in
-   *          fromIdPrefix and entity id lexicographically greater than or equal
-   *          to entity id specified in fromId. Please note than fromIdPrefix is
-   *          mandatory if fromId is specified, otherwise, the filter will be
-   *          ignored. It is recommended to provide both fromIdPrefix and fromId
-   *          filters for more accurate results as id prefix may not be unique
-   *          for an entity.
+   * @param fromId If specified, retrieve the next set of application-attempt
+   *         entities starting from the given fromId, inclusive. fromId should
+   *         be the value associated with the FROM_ID info key in an entity
+   *         response returned by an earlier request.
    *
    * @return If successful, a HTTP 200(OK) response having a JSON representing a
    *         set of <cite>TimelineEntity</cite> instances of the app-attempts
@@ -2470,7 +2424,6 @@ public class TimelineReaderWebServices {
       @QueryParam("metricstoretrieve") String metricsToRetrieve,
       @QueryParam("fields") String fields,
       @QueryParam("metricslimit") String metricsLimit,
-      @QueryParam("fromidprefix") String fromIdPrefix,
       @QueryParam("fromid") String fromId) {
 
     return getEntities(req, res, clusterId, appId,
@@ -2478,7 +2431,7 @@ public class TimelineReaderWebServices {
         flowName, flowRunId, limit, createdTimeStart, createdTimeEnd, relatesTo,
         isRelatedTo, infofilters, conffilters, metricfilters, eventfilters,
         confsToRetrieve, metricsToRetrieve, fields, metricsLimit,
-        fromIdPrefix, fromId);
+        fromId);
   }
 
   /**
@@ -2700,21 +2653,11 @@ public class TimelineReaderWebServices {
    *          have to be retrieved, then metricsLimit will be considered as 1
    *          i.e. latest single value of metric(s) will be returned. (Optional
    *          query param).
-   * @param fromIdPrefix If specified, retrieve entities with an id prefix
-   *          greater than or equal to the specified fromIdPrefix. If
-   *          fromIdPrefix is same for all entities of a given entity type, then
-   *          the user must provide fromId as a filter to denote the start
-   *          entity from which further entities will be fetched. fromIdPrefix
-   *          is mandatory even in the case the entity id prefix is not used and
-   *          should be set to 0.
-   * @param fromId If specified along with fromIdPrefix, retrieve entities with
-   *          an id prefix greater than or equal to specified id prefix in
-   *          fromIdPrefix and entity id lexicographically greater than or equal
-   *          to entity id specified in fromId. Please note than fromIdPrefix is
-   *          mandatory if fromId is specified, otherwise, the filter will be
-   *          ignored. It is recommended to provide both fromIdPrefix and fromId
-   *          filters for more accurate results as id prefix may not be unique
-   *          for an entity.
+   * @param fromId If specified, retrieve the next set of container
+   *         entities starting from the given fromId, inclusive. fromId should
+   *         be the value associated with the FROM_ID info key in an entity
+   *         response returned by an earlier request.
    *
    * @return If successful, a HTTP 200(OK) response having a JSON representing a
    *         set of <cite>TimelineEntity</cite> instances of the containers
@@ -2749,12 +2692,11 @@ public class TimelineReaderWebServices {
       @QueryParam("metricstoretrieve") String metricsToRetrieve,
       @QueryParam("fields") String fields,
       @QueryParam("metricslimit") String metricsLimit,
-      @QueryParam("fromidprefix") String fromIdPrefix,
       @QueryParam("fromid") String fromId) {
     return getContainers(req, res, null, appId, appattemptId, userId, flowName,
         flowRunId, limit, createdTimeStart, createdTimeEnd, relatesTo,
         isRelatedTo, infofilters, conffilters, metricfilters, eventfilters,
-        confsToRetrieve, metricsToRetrieve, fields, metricsLimit, fromIdPrefix,
+        confsToRetrieve, metricsToRetrieve, fields, metricsLimit,
         fromId);
   }
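
Since FROM_ID values are composite keys assembled by the reader, clients
should treat them as opaque strings and percent-encode them before placing
them in a query. A small hedged sketch; the cursor shape shown is an
assumption for illustration, not something this patch defines:

    import java.io.UnsupportedEncodingException;
    import java.net.URLEncoder;

    public class FromIdEncoding {
      static String nextContainersUrl(String base, String fromId)
          throws UnsupportedEncodingException {
        // Only echo back what the server returned; never assemble a
        // FROM_ID by hand. Encoding keeps delimiter characters safe.
        return base + "/containers?fromid="
            + URLEncoder.encode(fromId, "UTF-8");
      }

      public static void main(String[] args) throws Exception {
        String fromId = "cluster1!user1!flow1!12345";   // assumed shape
        System.out.println(nextContainersUrl(
            "http://reader:8188/ws/v2/timeline/apps/app_1", fromId));
      }
    }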
 
@@ -2829,21 +2771,11 @@ public class TimelineReaderWebServices {
    *          have to be retrieved, then metricsLimit will be considered as 1
    *          i.e. latest single value of metric(s) will be returned. (Optional
    *          query param).
-   * @param fromIdPrefix If specified, retrieve entities with an id prefix
-   *          greater than or equal to the specified fromIdPrefix. If
-   *          fromIdPrefix is same for all entities of a given entity type, then
-   *          the user must provide fromId as a filter to denote the start
-   *          entity from which further entities will be fetched. fromIdPrefix
-   *          is mandatory even in the case the entity id prefix is not used and
-   *          should be set to 0.
-   * @param fromId If specified along with fromIdPrefix, retrieve entities with
-   *          an id prefix greater than or equal to specified id prefix in
-   *          fromIdPrefix and entity id lexicographically greater than or equal
-   *          to entity id specified in fromId. Please note than fromIdPrefix is
-   *          mandatory if fromId is specified, otherwise, the filter will be
-   *          ignored. It is recommended to provide both fromIdPrefix and fromId
-   *          filters for more accurate results as id prefix may not be unique
-   *          for an entity.
+   * @param fromId If specified, retrieve the next set of container
+   *         entities starting from the given fromId, inclusive. fromId should
+   *         be the value associated with the FROM_ID info key in an entity
+   *         response returned by an earlier request.
    *
    * @return If successful, a HTTP 200(OK) response having a JSON representing a
    *         set of <cite>TimelineEntity</cite> instances of the containers
@@ -2880,7 +2812,6 @@ public class TimelineReaderWebServices {
       @QueryParam("metricstoretrieve") String metricsToRetrieve,
       @QueryParam("fields") String fields,
       @QueryParam("metricslimit") String metricsLimit,
-      @QueryParam("fromidprefix") String fromIdPrefix,
       @QueryParam("fromid") String fromId) {
 
     String entityType = TimelineEntityType.YARN_CONTAINER.toString();
@@ -2899,7 +2830,7 @@ public class TimelineReaderWebServices {
     return getEntities(req, res, clusterId, appId, entityType, userId, flowName,
         flowRunId, limit, createdTimeStart, createdTimeEnd, relatesTo,
         isRelatedTo, infofilter, conffilters, metricfilters, eventfilters,
-        confsToRetrieve, metricsToRetrieve, fields, metricsLimit, fromIdPrefix,
+        confsToRetrieve, metricsToRetrieve, fields, metricsLimit,
         fromId);
   }
 

