Posted to common-commits@hadoop.apache.org by ha...@apache.org on 2018/02/23 19:44:42 UTC

[01/50] [abbrv] hadoop git commit: YARN-7918. Fix TestAMRMClientPlacementConstraints. (Gergely Novák via asuresh)

Repository: hadoop
Updated Branches:
  refs/heads/HDFS-12996 0898ff42e -> 59cf75887


YARN-7918. Fix TestAMRMClientPlacementConstraints. (Gergely Novák via asuresh)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/a1e56a62
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/a1e56a62
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/a1e56a62

Branch: refs/heads/HDFS-12996
Commit: a1e56a62863d8d494af309ec5f476c4b7e4d5ef9
Parents: 31db977
Author: Arun Suresh <as...@apache.org>
Authored: Sat Feb 17 03:24:55 2018 -0800
Committer: Arun Suresh <as...@apache.org>
Committed: Sat Feb 17 03:24:55 2018 -0800

----------------------------------------------------------------------
 .../constraint/processor/PlacementConstraintProcessor.java        | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/a1e56a62/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/constraint/processor/PlacementConstraintProcessor.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/constraint/processor/PlacementConstraintProcessor.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/constraint/processor/PlacementConstraintProcessor.java
index f089a19..cf944a6 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/constraint/processor/PlacementConstraintProcessor.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/constraint/processor/PlacementConstraintProcessor.java
@@ -17,6 +17,7 @@
  */
 package org.apache.hadoop.yarn.server.resourcemanager.scheduler.constraint.processor;
 
+import com.google.common.collect.Lists;
 import org.apache.hadoop.yarn.ams.ApplicationMasterServiceContext;
 import org.apache.hadoop.yarn.ams.ApplicationMasterServiceProcessor;
 import org.apache.hadoop.yarn.ams.ApplicationMasterServiceUtils;
@@ -329,7 +330,7 @@ public class PlacementConstraintProcessor extends AbstractPlacementProcessor {
     if (!isAdded) {
       BatchedRequests br = new BatchedRequests(iteratorType,
           schedulerResponse.getApplicationId(),
-          Collections.singleton(schedulerResponse.getSchedulingRequest()),
+          Lists.newArrayList(schedulerResponse.getSchedulingRequest()),
           placementAttempt + 1);
       reqsToRetry.add(br);
       br.addToBlacklist(
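
For context on this one-line change: java.util.Collections.singleton returns
an immutable Set, so any later attempt to add a request to the batch throws
UnsupportedOperationException, while Guava's Lists.newArrayList returns a
plain mutable ArrayList. A minimal, self-contained sketch of that difference
(SchedulingRequest and the BatchedRequests internals are elided; presumably
the batch is mutated after construction, which is what the test exercised):

    import java.util.Collection;
    import java.util.Collections;
    import com.google.common.collect.Lists;

    public class SingletonVsArrayList {
      public static void main(String[] args) {
        Collection<String> immutable = Collections.singleton("req-1");
        try {
          immutable.add("req-2");   // Set from singleton() rejects mutation
        } catch (UnsupportedOperationException e) {
          System.out.println("singleton(): " + e);
        }

        Collection<String> mutable = Lists.newArrayList("req-1");
        mutable.add("req-2");       // plain ArrayList, mutation succeeds
        System.out.println("newArrayList(): " + mutable);
      }
    }

(Requires Guava on the classpath for Lists.newArrayList.)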


---------------------------------------------------------------------
To unsubscribe, e-mail: common-commits-unsubscribe@hadoop.apache.org
For additional commands, e-mail: common-commits-help@hadoop.apache.org


[28/50] [abbrv] hadoop git commit: HDFS-13159. TestTruncateQuotaUpdate fails in trunk. Contributed by Nanda kumar.

Posted by ha...@apache.org.
HDFS-13159. TestTruncateQuotaUpdate fails in trunk. Contributed by Nanda kumar.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/9028ccaf
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/9028ccaf
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/9028ccaf

Branch: refs/heads/HDFS-12996
Commit: 9028ccaf838621808e5e26a9fa933d28799538dd
Parents: 7280c5a
Author: Arpit Agarwal <ar...@apache.org>
Authored: Tue Feb 20 10:57:35 2018 -0800
Committer: Arpit Agarwal <ar...@apache.org>
Committed: Tue Feb 20 11:40:20 2018 -0800

----------------------------------------------------------------------
 .../hdfs/server/namenode/TestTruncateQuotaUpdate.java     | 10 ++++++----
 1 file changed, 6 insertions(+), 4 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/9028ccaf/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestTruncateQuotaUpdate.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestTruncateQuotaUpdate.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestTruncateQuotaUpdate.java
index fcdd650..f200d5e 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestTruncateQuotaUpdate.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestTruncateQuotaUpdate.java
@@ -22,6 +22,8 @@ import org.apache.hadoop.fs.permission.PermissionStatus;
 import org.apache.hadoop.hdfs.protocol.Block;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoContiguous;
+import org.apache.hadoop.hdfs.server.namenode.snapshot.DiffList;
+import org.apache.hadoop.hdfs.server.namenode.snapshot.DiffListByArrayList;
 import org.apache.hadoop.hdfs.server.namenode.snapshot.FileDiff;
 import org.apache.hadoop.hdfs.server.namenode.snapshot.FileDiffList;
 import org.apache.hadoop.hdfs.server.namenode.snapshot.FileWithSnapshotFeature;
@@ -156,11 +158,11 @@ public class TestTruncateQuotaUpdate {
     FileDiff diff = mock(FileDiff.class);
     when(diff.getBlocks()).thenReturn(blocks);
     FileDiffList diffList = new FileDiffList();
-    Whitebox.setInternalState(diffList, "diffs", new ArrayList<FileDiff>());
+    Whitebox.setInternalState(diffList, "diffs", new DiffListByArrayList<>(0));
     @SuppressWarnings("unchecked")
-    ArrayList<FileDiff> diffs = ((ArrayList<FileDiff>)Whitebox.getInternalState
-        (diffList, "diffs"));
-    diffs.add(diff);
+    DiffList<FileDiff> diffs = (DiffList<FileDiff>)Whitebox.getInternalState(
+        diffList, "diffs");
+    diffs.addFirst(diff);
     FileWithSnapshotFeature sf = new FileWithSnapshotFeature(diffList);
     file.addFeature(sf);
   }
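
A note on the mechanics here: the test injects the private "diffs" field via
Whitebox, and after the DiffList refactoring that field is a DiffList rather
than an ArrayList, hence the new DiffListByArrayList value and the addFirst
call. A rough sketch of what such reflective state injection does under the
hood (an assumption for illustration, not the actual Whitebox source):

    import java.lang.reflect.Field;

    final class ReflectiveStateSketch {
      // Find a (possibly private, possibly inherited) field by name and
      // overwrite it; walking superclasses matters when the field, like
      // "diffs", is declared in a parent class of the concrete type.
      static void setInternalState(Object target, String name, Object value)
          throws ReflectiveOperationException {
        for (Class<?> c = target.getClass(); c != null; c = c.getSuperclass()) {
          try {
            Field f = c.getDeclaredField(name);
            f.setAccessible(true);   // bypass private access for the test
            f.set(target, value);
            return;
          } catch (NoSuchFieldException ignored) {
            // keep walking up the class hierarchy
          }
        }
        throw new NoSuchFieldException(name);
      }
    }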




[41/50] [abbrv] hadoop git commit: YARN-7916. Remove call to docker logs on failure in container-executor. Contributed by Shane Kumpf

Posted by ha...@apache.org.
YARN-7916. Remove call to docker logs on failure in container-executor. Contributed by Shane Kumpf


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/3132709b
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/3132709b
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/3132709b

Branch: refs/heads/HDFS-12996
Commit: 3132709b46a35f70cf5278f3ace677e6e18a1d03
Parents: 2bc3351
Author: Jason Lowe <jl...@apache.org>
Authored: Wed Feb 21 16:54:02 2018 -0600
Committer: Jason Lowe <jl...@apache.org>
Committed: Wed Feb 21 16:54:02 2018 -0600

----------------------------------------------------------------------
 .../impl/container-executor.c                   | 35 --------------------
 1 file changed, 35 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/3132709b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/container-executor.c
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/container-executor.c b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/container-executor.c
index 035c694..751949e 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/container-executor.c
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/container-executor.c
@@ -1435,20 +1435,16 @@ int launch_docker_container_as_user(const char * user, const char *app_id,
   char *exit_code_file = NULL;
   char *docker_command_with_binary = NULL;
   char *docker_wait_command = NULL;
-  char *docker_logs_command = NULL;
   char *docker_inspect_command = NULL;
   char *docker_rm_command = NULL;
   char *docker_inspect_exitcode_command = NULL;
   int container_file_source =-1;
   int cred_file_source = -1;
-  int BUFFER_SIZE = 4096;
-  char buffer[BUFFER_SIZE];
 
   size_t command_size = MIN(sysconf(_SC_ARG_MAX), 128*1024);
 
   docker_command_with_binary = (char *) alloc_and_clear_memory(command_size, sizeof(char));
   docker_wait_command = (char *) alloc_and_clear_memory(command_size, sizeof(char));
-  docker_logs_command = (char *) alloc_and_clear_memory(command_size, sizeof(char));
   docker_inspect_command = (char *) alloc_and_clear_memory(command_size, sizeof(char));
   docker_rm_command = (char *) alloc_and_clear_memory(command_size, sizeof(char));
   docker_inspect_exitcode_command = (char *) alloc_and_clear_memory(command_size, sizeof(char));
@@ -1600,36 +1596,6 @@ int launch_docker_container_as_user(const char * user, const char *app_id,
     goto cleanup;
   }
   fprintf(LOGFILE, "Exit code from docker inspect: %d\n", exit_code);
-  if(exit_code != 0) {
-    fprintf(ERRORFILE, "Docker container exit code was not zero: %d\n",
-    exit_code);
-    snprintf(docker_logs_command, command_size, "%s logs --tail=250 %s",
-      docker_binary, container_id);
-    FILE* logs = popen(docker_logs_command, "r");
-    if(logs != NULL) {
-      clearerr(logs);
-      res = fread(buffer, BUFFER_SIZE, 1, logs);
-      if(res < 1) {
-        fprintf(ERRORFILE, "%s %d %d\n",
-          "Unable to read from docker logs(ferror, feof):", ferror(logs), feof(logs));
-        fflush(ERRORFILE);
-      }
-      else {
-        fprintf(ERRORFILE, "%s\n", buffer);
-        fflush(ERRORFILE);
-      }
-    }
-    else {
-      fprintf(ERRORFILE, "%s\n", "Failed to get output of docker logs");
-      fprintf(ERRORFILE, "Command was '%s'\n", docker_logs_command);
-      fprintf(ERRORFILE, "%s\n", strerror(errno));
-      fflush(ERRORFILE);
-    }
-    if(pclose(logs) != 0) {
-      fprintf(ERRORFILE, "%s\n", "Failed to fetch docker logs");
-      fflush(ERRORFILE);
-    }
-  }
 
 cleanup:
 
@@ -1662,7 +1628,6 @@ cleanup:
   free(cred_file_dest);
   free(docker_command_with_binary);
   free(docker_wait_command);
-  free(docker_logs_command);
   free(docker_inspect_command);
   free(docker_rm_command);
   return exit_code;




[14/50] [abbrv] hadoop git commit: YARN-7919. Refactor timelineservice-hbase module into submodules. Contributed by Haibo Chen.

Posted by ha...@apache.org.
http://git-wip-us.apache.org/repos/asf/hadoop/blob/9af30d46/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-common/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/application/ApplicationRowKey.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-common/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/application/ApplicationRowKey.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-common/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/application/ApplicationRowKey.java
new file mode 100644
index 0000000..e89a6a7
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-common/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/application/ApplicationRowKey.java
@@ -0,0 +1,251 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.server.timelineservice.storage.application;
+
+import java.util.List;
+
+import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.yarn.server.timelineservice.reader.TimelineReaderUtils;
+import org.apache.hadoop.yarn.server.timelineservice.storage.common.AppIdKeyConverter;
+import org.apache.hadoop.yarn.server.timelineservice.storage.common.KeyConverter;
+import org.apache.hadoop.yarn.server.timelineservice.storage.common.KeyConverterToString;
+import org.apache.hadoop.yarn.server.timelineservice.storage.common.LongConverter;
+import org.apache.hadoop.yarn.server.timelineservice.storage.common.Separator;
+
+/**
+ * Represents a rowkey for the application table.
+ */
+public class ApplicationRowKey {
+  private final String clusterId;
+  private final String userId;
+  private final String flowName;
+  private final Long flowRunId;
+  private final String appId;
+  private final ApplicationRowKeyConverter appRowKeyConverter =
+      new ApplicationRowKeyConverter();
+
+  public ApplicationRowKey(String clusterId, String userId, String flowName,
+      Long flowRunId, String appId) {
+    this.clusterId = clusterId;
+    this.userId = userId;
+    this.flowName = flowName;
+    this.flowRunId = flowRunId;
+    this.appId = appId;
+  }
+
+  public String getClusterId() {
+    return clusterId;
+  }
+
+  public String getUserId() {
+    return userId;
+  }
+
+  public String getFlowName() {
+    return flowName;
+  }
+
+  public Long getFlowRunId() {
+    return flowRunId;
+  }
+
+  public String getAppId() {
+    return appId;
+  }
+
+  /**
+   * Constructs a row key for the application table as follows:
+   * {@code clusterId!userName!flowName!flowRunId!AppId}.
+   *
+   * @return byte array with the row key
+   */
+  public byte[] getRowKey() {
+    return appRowKeyConverter.encode(this);
+  }
+
+  /**
+   * Given the raw row key as bytes, returns the row key as an object.
+   *
+   * @param rowKey Byte representation of row key.
+   * @return An <cite>ApplicationRowKey</cite> object.
+   */
+  public static ApplicationRowKey parseRowKey(byte[] rowKey) {
+    return new ApplicationRowKeyConverter().decode(rowKey);
+  }
+
+  /**
+   * Constructs a row key for the application table as follows:
+   * {@code clusterId!userName!flowName!flowRunId!AppId}.
+   * @return String representation of row key.
+   */
+  public String getRowKeyAsString() {
+    return appRowKeyConverter.encodeAsString(this);
+  }
+
+  /**
+   * Given the encoded row key as string, returns the row key as an object.
+   * @param encodedRowKey String representation of row key.
+   * @return An <cite>ApplicationRowKey</cite> object.
+   */
+  public static ApplicationRowKey parseRowKeyFromString(String encodedRowKey) {
+    return new ApplicationRowKeyConverter().decodeFromString(encodedRowKey);
+  }
+
+  /**
+   * Encodes and decodes the row key for the application table. The row key
+   * is of the form clusterId!userName!flowName!flowRunId!appId. flowRunId is
+   * a long, appId is encoded and decoded using {@link AppIdKeyConverter},
+   * and the rest are strings.
+   */
+  private static final class ApplicationRowKeyConverter implements
+      KeyConverter<ApplicationRowKey>, KeyConverterToString<ApplicationRowKey> {
+
+    private final KeyConverter<String> appIDKeyConverter =
+        new AppIdKeyConverter();
+
+    /**
+     * Intended for use in ApplicationRowKey only.
+     */
+    private ApplicationRowKeyConverter() {
+    }
+
+    /**
+     * The application row key is of the form
+     * clusterId!userName!flowName!flowRunId!appId, with each segment separated
+     * by !. The sizes below indicate the size of each of these segments in
+     * sequence. clusterId, userName and flowName are strings. flowRunId is a
+     * long, hence 8 bytes in size. The app id is represented as 12 bytes: the
+     * cluster timestamp part takes 8 bytes (long) and the sequence id takes 4
+     * bytes (int). Strings are variable in size (i.e. they end whenever the
+     * separator is encountered). This is used while decoding and helps in
+     * determining where to split.
+     */
+    private static final int[] SEGMENT_SIZES = {Separator.VARIABLE_SIZE,
+        Separator.VARIABLE_SIZE, Separator.VARIABLE_SIZE, Bytes.SIZEOF_LONG,
+        AppIdKeyConverter.getKeySize() };
+
+    /*
+     * (non-Javadoc)
+     *
+     * Encodes an ApplicationRowKey object into a byte array with each
+     * component/field in ApplicationRowKey separated by Separator#QUALIFIERS.
+     * This leads to an application table row key of the form
+     * clusterId!userName!flowName!flowRunId!appId. If the flowRunId in the
+     * passed ApplicationRowKey object is null (and the fields preceding it,
+     * i.e. clusterId, userId and flowName, are not null), this returns a row
+     * key prefix of the form clusterId!userName!flowName!, and if the appId
+     * is null (and the other 4 components are all not null), this returns a
+     * row key prefix of the form clusterId!userName!flowName!flowRunId!. The
+     * flowRunId is inverted while encoding, as this helps maintain a
+     * descending order for row keys in the application table.
+     *
+     * @see
+     * org.apache.hadoop.yarn.server.timelineservice.storage.common
+     * .KeyConverter#encode(java.lang.Object)
+     */
+    @Override
+    public byte[] encode(ApplicationRowKey rowKey) {
+      byte[] cluster =
+          Separator.encode(rowKey.getClusterId(), Separator.SPACE,
+              Separator.TAB, Separator.QUALIFIERS);
+      byte[] user =
+          Separator.encode(rowKey.getUserId(), Separator.SPACE, Separator.TAB,
+              Separator.QUALIFIERS);
+      byte[] flow =
+          Separator.encode(rowKey.getFlowName(), Separator.SPACE,
+              Separator.TAB, Separator.QUALIFIERS);
+      byte[] first = Separator.QUALIFIERS.join(cluster, user, flow);
+      // Note that flowRunId is a long, so we can't encode them all at the same
+      // time.
+      if (rowKey.getFlowRunId() == null) {
+        return Separator.QUALIFIERS.join(first, Separator.EMPTY_BYTES);
+      }
+      byte[] second =
+          Bytes.toBytes(LongConverter.invertLong(
+              rowKey.getFlowRunId()));
+      if (rowKey.getAppId() == null || rowKey.getAppId().isEmpty()) {
+        return Separator.QUALIFIERS.join(first, second, Separator.EMPTY_BYTES);
+      }
+      byte[] third = appIDKeyConverter.encode(rowKey.getAppId());
+      return Separator.QUALIFIERS.join(first, second, third);
+    }
+
+    /*
+     * (non-Javadoc)
+     *
+     * Decodes an application row key of the form
+     * clusterId!userName!flowName!flowRunId!appId represented in byte format
+     * and converts it into an ApplicationRowKey object. flowRunId is inverted
+     * while decoding, as it was inverted while encoding.
+     *
+     * @see
+     * org.apache.hadoop.yarn.server.timelineservice.storage.common
+     * .KeyConverter#decode(byte[])
+     */
+    @Override
+    public ApplicationRowKey decode(byte[] rowKey) {
+      byte[][] rowKeyComponents =
+          Separator.QUALIFIERS.split(rowKey, SEGMENT_SIZES);
+      if (rowKeyComponents.length != 5) {
+        throw new IllegalArgumentException("the row key is not valid for "
+            + "an application");
+      }
+      String clusterId =
+          Separator.decode(Bytes.toString(rowKeyComponents[0]),
+              Separator.QUALIFIERS, Separator.TAB, Separator.SPACE);
+      String userId =
+          Separator.decode(Bytes.toString(rowKeyComponents[1]),
+              Separator.QUALIFIERS, Separator.TAB, Separator.SPACE);
+      String flowName =
+          Separator.decode(Bytes.toString(rowKeyComponents[2]),
+              Separator.QUALIFIERS, Separator.TAB, Separator.SPACE);
+      Long flowRunId =
+          LongConverter.invertLong(Bytes.toLong(rowKeyComponents[3]));
+      String appId = appIDKeyConverter.decode(rowKeyComponents[4]);
+      return new ApplicationRowKey(clusterId, userId, flowName, flowRunId,
+          appId);
+    }
+
+    @Override
+    public String encodeAsString(ApplicationRowKey key) {
+      if (key.clusterId == null || key.userId == null || key.flowName == null
+          || key.flowRunId == null || key.appId == null) {
+        throw new IllegalArgumentException();
+      }
+      return TimelineReaderUtils
+          .joinAndEscapeStrings(new String[] {key.clusterId, key.userId,
+              key.flowName, key.flowRunId.toString(), key.appId});
+    }
+
+    @Override
+    public ApplicationRowKey decodeFromString(String encodedRowKey) {
+      List<String> split = TimelineReaderUtils.split(encodedRowKey);
+      if (split == null || split.size() != 5) {
+        throw new IllegalArgumentException(
+            "Invalid row key for application table.");
+      }
+      Long flowRunId = Long.valueOf(split.get(3));
+      return new ApplicationRowKey(split.get(0), split.get(1), split.get(2),
+          flowRunId, split.get(4));
+    }
+  }
+
+}
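
The descending ordering that the encode() comment mentions relies on storing
the flow run id inverted. A small sketch of the idea, assuming
LongConverter.invertLong maps a value x to Long.MAX_VALUE - x (the inversion
helper is paraphrased here, not quoted from Hadoop):

    import java.nio.ByteBuffer;
    import java.util.Arrays;

    public class InvertLongSketch {
      // Assumed inversion: reverses the order of non-negative longs.
      static long invertLong(long x) {
        return Long.MAX_VALUE - x;
      }

      // Big-endian encoding, matching how HBase compares row keys bytewise.
      static byte[] toBytes(long x) {
        return ByteBuffer.allocate(Long.BYTES).putLong(x).array();
      }

      public static void main(String[] args) {
        long olderRun = 1000L, newerRun = 2000L;
        // With inversion, the newer run's bytes sort first, so scans see
        // the most recent flow run before older ones. Prints "true".
        System.out.println(Arrays.compareUnsigned(
            toBytes(invertLong(newerRun)), toBytes(invertLong(olderRun))) < 0);
      }
    }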

http://git-wip-us.apache.org/repos/asf/hadoop/blob/9af30d46/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-common/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/application/ApplicationRowKeyPrefix.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-common/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/application/ApplicationRowKeyPrefix.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-common/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/application/ApplicationRowKeyPrefix.java
new file mode 100644
index 0000000..f61b0e9
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-common/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/application/ApplicationRowKeyPrefix.java
@@ -0,0 +1,69 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.yarn.server.timelineservice.storage.application;
+
+import org.apache.hadoop.yarn.server.timelineservice.storage.common.RowKeyPrefix;
+
+/**
+ * Represents a partial rowkey (without the appId, or without the flowRunId
+ * and appId) for the application table.
+ */
+public class ApplicationRowKeyPrefix extends ApplicationRowKey implements
+    RowKeyPrefix<ApplicationRowKey> {
+
+  /**
+   * Creates a prefix which generates the following rowKeyPrefixes for the
+   * application table: {@code clusterId!userName!flowName!}.
+   *
+   * @param clusterId the cluster on which applications ran
+   * @param userId the user that ran applications
+   * @param flowName the name of the flow that was run by the user on the
+   *          cluster
+   */
+  public ApplicationRowKeyPrefix(String clusterId, String userId,
+      String flowName) {
+    super(clusterId, userId, flowName, null, null);
+  }
+
+  /**
+   * Creates a prefix which generates the following rowKeyPrefixes for the
+   * application table: {@code clusterId!userName!flowName!flowRunId!}.
+   *
+   * @param clusterId identifying the cluster
+   * @param userId identifying the user
+   * @param flowName identifying the flow
+   * @param flowRunId identifying the instance of this flow
+   */
+  public ApplicationRowKeyPrefix(String clusterId, String userId,
+      String flowName, Long flowRunId) {
+    super(clusterId, userId, flowName, flowRunId, null);
+  }
+
+  /*
+   * (non-Javadoc)
+   *
+   * @see
+   * org.apache.hadoop.yarn.server.timelineservice.storage.application.
+   * RowKeyPrefix#getRowKeyPrefix()
+   */
+  @Override
+  public byte[] getRowKeyPrefix() {
+    return super.getRowKey();
+  }
+
+}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/9af30d46/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-common/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/application/ApplicationTable.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-common/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/application/ApplicationTable.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-common/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/application/ApplicationTable.java
new file mode 100644
index 0000000..16ab5fa
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-common/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/application/ApplicationTable.java
@@ -0,0 +1,60 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.server.timelineservice.storage.application;
+
+import org.apache.hadoop.yarn.server.timelineservice.storage.common.BaseTable;
+
+/**
+ * The application table has column families info, config and metrics. Info
+ * stores information about a YARN application entity, config stores
+ * configuration data of a YARN application, metrics stores the metrics of a
+ * YARN application. This table is entirely analogous to the entity table but
+ * created for better performance.
+ *
+ * Example application table record:
+ *
+ * <pre>
+ * |-------------------------------------------------------------------------|
+ * |  Row       | Column Family                | Column Family| Column Family|
+ * |  key       | info                         | metrics      | config       |
+ * |-------------------------------------------------------------------------|
+ * | clusterId! | id:appId                     | metricId1:   | configKey1:  |
+ * | userName!  |                              | metricValue1 | configValue1 |
+ * | flowName!  | created_time:                | @timestamp1  |              |
+ * | flowRunId! | 1392993084018                |              | configKey2:  |
+ * | AppId      |                              | metriciD1:   | configValue2 |
+ * |            | i!infoKey:                   | metricValue2 |              |
+ * |            | infoValue                    | @timestamp2  |              |
+ * |            |                              |              |              |
+ * |            | r!relatesToKey:              | metricId2:   |              |
+ * |            | id3=id4=id5                  | metricValue1 |              |
+ * |            |                              | @timestamp2  |              |
+ * |            | s!isRelatedToKey:            |              |              |
+ * |            | id7=id9=id6                  |              |              |
+ * |            |                              |              |              |
+ * |            | e!eventId=timestamp=infoKey: |              |              |
+ * |            | eventInfoValue               |              |              |
+ * |            |                              |              |              |
+ * |            | flowVersion:                 |              |              |
+ * |            | versionValue                 |              |              |
+ * |-------------------------------------------------------------------------|
+ * </pre>
+ */
+public final class ApplicationTable extends BaseTable<ApplicationTable> {
+}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/9af30d46/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-common/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/application/package-info.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-common/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/application/package-info.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-common/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/application/package-info.java
new file mode 100644
index 0000000..03f508f
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-common/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/application/package-info.java
@@ -0,0 +1,28 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/**
+ * Package org.apache.hadoop.yarn.server.timelineservice.storage.application
+ * contains classes related to implementation for application table.
+ */
+@InterfaceAudience.Private
+@InterfaceStability.Unstable
+package org.apache.hadoop.yarn.server.timelineservice.storage.application;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/9af30d46/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-common/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/apptoflow/AppToFlowColumn.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-common/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/apptoflow/AppToFlowColumn.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-common/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/apptoflow/AppToFlowColumn.java
new file mode 100644
index 0000000..0065f07
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-common/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/apptoflow/AppToFlowColumn.java
@@ -0,0 +1,95 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.yarn.server.timelineservice.storage.apptoflow;
+
+
+import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.yarn.server.timelineservice.storage.common.Column;
+import org.apache.hadoop.yarn.server.timelineservice.storage.common.ColumnFamily;
+import org.apache.hadoop.yarn.server.timelineservice.storage.common.GenericConverter;
+import org.apache.hadoop.yarn.server.timelineservice.storage.common.Separator;
+import org.apache.hadoop.yarn.server.timelineservice.storage.common.ValueConverter;
+import org.apache.hadoop.yarn.server.timelineservice.storage.flow.Attribute;
+
+/**
+ * Identifies fully qualified columns for the {@link AppToFlowTable}.
+ */
+public enum AppToFlowColumn implements Column<AppToFlowTable> {
+
+  /**
+   * The flow ID.
+   */
+  FLOW_ID(AppToFlowColumnFamily.MAPPING, "flow_id"),
+
+  /**
+   * The flow run ID.
+   */
+  FLOW_RUN_ID(AppToFlowColumnFamily.MAPPING, "flow_run_id"),
+
+  /**
+   * The user.
+   */
+  USER_ID(AppToFlowColumnFamily.MAPPING, "user_id");
+
+  private final ColumnFamily<AppToFlowTable> columnFamily;
+  private final String columnQualifier;
+  private final byte[] columnQualifierBytes;
+  private final ValueConverter valueConverter;
+
+  AppToFlowColumn(ColumnFamily<AppToFlowTable> columnFamily,
+      String columnQualifier) {
+    this.columnFamily = columnFamily;
+    this.columnQualifier = columnQualifier;
+    // Future-proof by ensuring the right column prefix hygiene.
+    this.columnQualifierBytes =
+        Bytes.toBytes(Separator.SPACE.encode(columnQualifier));
+    this.valueConverter = GenericConverter.getInstance();
+  }
+
+  /**
+   * @return the column name value
+   */
+  private String getColumnQualifier() {
+    return columnQualifier;
+  }
+
+  @Override
+  public byte[] getColumnQualifierBytes() {
+    return columnQualifierBytes.clone();
+  }
+
+  @Override
+  public byte[] getColumnFamilyBytes() {
+    return columnFamily.getBytes();
+  }
+
+  @Override
+  public ValueConverter getValueConverter() {
+    return valueConverter;
+  }
+
+  @Override
+  public Attribute[] getCombinedAttrsWithAggr(Attribute... attributes) {
+    return attributes;
+  }
+
+  @Override
+  public boolean supplementCellTimestamp() {
+    return false;
+  }
+}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/9af30d46/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-common/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/apptoflow/AppToFlowColumnFamily.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-common/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/apptoflow/AppToFlowColumnFamily.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-common/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/apptoflow/AppToFlowColumnFamily.java
new file mode 100644
index 0000000..f3f045e
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-common/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/apptoflow/AppToFlowColumnFamily.java
@@ -0,0 +1,51 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.yarn.server.timelineservice.storage.apptoflow;
+
+import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.yarn.server.timelineservice.storage.common.ColumnFamily;
+import org.apache.hadoop.yarn.server.timelineservice.storage.common.Separator;
+
+/**
+ * Represents the app_flow table column families.
+ */
+public enum AppToFlowColumnFamily implements ColumnFamily<AppToFlowTable> {
+  /**
+   * Mapping column family houses known columns such as flowName and flowRunId.
+   */
+  MAPPING("m");
+
+  /**
+   * Byte representation of this column family.
+   */
+  private final byte[] bytes;
+
+  /**
+   * @param value create a column family with this name. Must be lower case and
+   *          without spaces.
+   */
+  AppToFlowColumnFamily(String value) {
+    // column families should be lower case and not contain any spaces.
+    this.bytes = Bytes.toBytes(Separator.SPACE.encode(value));
+  }
+
+  public byte[] getBytes() {
+    return Bytes.copy(bytes);
+  }
+
+}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/9af30d46/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-common/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/apptoflow/AppToFlowColumnPrefix.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-common/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/apptoflow/AppToFlowColumnPrefix.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-common/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/apptoflow/AppToFlowColumnPrefix.java
new file mode 100644
index 0000000..9540129
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-common/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/apptoflow/AppToFlowColumnPrefix.java
@@ -0,0 +1,105 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.server.timelineservice.storage.apptoflow;
+
+import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.yarn.server.timelineservice.storage.common.ColumnFamily;
+import org.apache.hadoop.yarn.server.timelineservice.storage.common.ColumnHelper;
+import org.apache.hadoop.yarn.server.timelineservice.storage.common.ColumnPrefix;
+import org.apache.hadoop.yarn.server.timelineservice.storage.common.GenericConverter;
+import org.apache.hadoop.yarn.server.timelineservice.storage.common.Separator;
+import org.apache.hadoop.yarn.server.timelineservice.storage.common.ValueConverter;
+import org.apache.hadoop.yarn.server.timelineservice.storage.flow.Attribute;
+
+/**
+ * Identifies partially qualified columns for the app-to-flow table.
+ */
+public enum AppToFlowColumnPrefix implements ColumnPrefix<AppToFlowTable> {
+
+  /**
+   * The flow name.
+   */
+  FLOW_NAME(AppToFlowColumnFamily.MAPPING, "flow_name"),
+
+  /**
+   * The flow run ID.
+   */
+  FLOW_RUN_ID(AppToFlowColumnFamily.MAPPING, "flow_run_id"),
+
+  /**
+   * The user.
+   */
+  USER_ID(AppToFlowColumnFamily.MAPPING, "user_id");
+
+  private final ColumnFamily<AppToFlowTable> columnFamily;
+  private final String columnPrefix;
+  private final byte[] columnPrefixBytes;
+  private final ValueConverter valueConverter;
+
+  AppToFlowColumnPrefix(ColumnFamily<AppToFlowTable> columnFamily,
+      String columnPrefix) {
+    this.columnFamily = columnFamily;
+    this.columnPrefix = columnPrefix;
+    if (columnPrefix == null) {
+      this.columnPrefixBytes = null;
+    } else {
+      // Future-proof by ensuring the right column prefix hygiene.
+      this.columnPrefixBytes =
+          Bytes.toBytes(Separator.SPACE.encode(columnPrefix));
+    }
+    this.valueConverter = GenericConverter.getInstance();
+  }
+
+  @Override
+  public byte[] getColumnPrefixBytes(String qualifierPrefix) {
+    return ColumnHelper.getColumnQualifier(
+        columnPrefixBytes, qualifierPrefix);
+  }
+
+  @Override
+  public byte[] getColumnPrefixBytes(byte[] qualifierPrefix) {
+    return ColumnHelper.getColumnQualifier(
+        columnPrefixBytes, qualifierPrefix);
+  }
+
+  @Override
+  public byte[] getColumnPrefixInBytes() {
+    return columnPrefixBytes != null ? columnPrefixBytes.clone() : null;
+  }
+
+  @Override
+  public byte[] getColumnFamilyBytes() {
+    return columnFamily.getBytes();
+  }
+
+  @Override
+  public ValueConverter getValueConverter() {
+    return valueConverter;
+  }
+
+  @Override
+  public Attribute[] getCombinedAttrsWithAggr(Attribute... attributes) {
+    return attributes;
+  }
+
+  @Override
+  public boolean supplementCellTimeStamp() {
+    return false;
+  }
+}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/9af30d46/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-common/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/apptoflow/AppToFlowRowKey.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-common/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/apptoflow/AppToFlowRowKey.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-common/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/apptoflow/AppToFlowRowKey.java
new file mode 100644
index 0000000..146c475
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-common/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/apptoflow/AppToFlowRowKey.java
@@ -0,0 +1,58 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.yarn.server.timelineservice.storage.apptoflow;
+
+import org.apache.hadoop.yarn.server.timelineservice.storage.common.AppIdKeyConverter;
+import org.apache.hadoop.yarn.server.timelineservice.storage.common.KeyConverter;
+
+/**
+ * Represents a row key for the app_flow table; the row key is the app id.
+ */
+public class AppToFlowRowKey {
+  private final String appId;
+  private final KeyConverter<String> appIdKeyConverter =
+      new AppIdKeyConverter();
+
+  public AppToFlowRowKey(String appId) {
+    this.appId = appId;
+  }
+
+  public String getAppId() {
+    return appId;
+  }
+
+  /**
+   * Constructs a row key for the app_flow table.
+   *
+   * @return byte array with the row key
+   */
+  public byte[] getRowKey() {
+    return appIdKeyConverter.encode(appId);
+  }
+
+  /**
+   * Given the raw row key as bytes, returns the row key as an object.
+   *
+   * @param rowKey a rowkey represented as a byte array.
+   * @return an <cite>AppToFlowRowKey</cite> object.
+   */
+  public static AppToFlowRowKey parseRowKey(byte[] rowKey) {
+    String appId = new AppIdKeyConverter().decode(rowKey);
+    return new AppToFlowRowKey(appId);
+  }
+}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/9af30d46/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-common/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/apptoflow/AppToFlowTable.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-common/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/apptoflow/AppToFlowTable.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-common/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/apptoflow/AppToFlowTable.java
new file mode 100644
index 0000000..e184288
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-common/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/apptoflow/AppToFlowTable.java
@@ -0,0 +1,60 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.server.timelineservice.storage.apptoflow;
+
+import org.apache.hadoop.yarn.server.timelineservice.storage.common.BaseTable;
+
+/**
+ * The app_flow table has the column family mapping, which stores the
+ * appId to flowName and flowRunId mapping information.
+ *
+ * Example app_flow table record:
+ *
+ * <pre>
+ * |--------------------------------------|
+ * |  Row       | Column Family           |
+ * |  key       | mapping                 |
+ * |--------------------------------------|
+ * | appId      | flow_name!cluster1:     |
+ * |            | foo@daily_hive_report   |
+ * |            |                         |
+ * |            | flow_run_id!cluster1:   |
+ * |            | 1452828720457           |
+ * |            |                         |
+ * |            | user_id!cluster1:       |
+ * |            | admin                   |
+ * |            |                         |
+ * |            | flow_name!cluster2:     |
+ * |            | bar@ad_hoc_query        |
+ * |            |                         |
+ * |            | flow_run_id!cluster2:   |
+ * |            | 1452828498752           |
+ * |            |                         |
+ * |            | user_id!cluster2:       |
+ * |            | joe                     |
+ * |            |                         |
+ * |--------------------------------------|
+ * </pre>
+ *
+ * It is possible (although unlikely) in a multi-cluster environment that
+ * there may be more than one application for a given app id. Different
+ * clusters are recorded as different sets of columns.
+ */
+public final class AppToFlowTable extends BaseTable<AppToFlowTable> {
+}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/9af30d46/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-common/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/apptoflow/package-info.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-common/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/apptoflow/package-info.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-common/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/apptoflow/package-info.java
new file mode 100644
index 0000000..f01d982
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-common/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/apptoflow/package-info.java
@@ -0,0 +1,28 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/**
+ * Package org.apache.hadoop.yarn.server.timelineservice.storage.apptoflow
+ * contains classes related to implementation for app to flow table.
+ */
+@InterfaceAudience.Private
+@InterfaceStability.Unstable
+package org.apache.hadoop.yarn.server.timelineservice.storage.apptoflow;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hadoop/blob/9af30d46/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-common/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/common/AppIdKeyConverter.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-common/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/common/AppIdKeyConverter.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-common/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/common/AppIdKeyConverter.java
new file mode 100644
index 0000000..d934f74
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-common/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/common/AppIdKeyConverter.java
@@ -0,0 +1,97 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.server.timelineservice.storage.common;
+
+import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.yarn.api.records.ApplicationId;
+
+/**
+ * Encodes and decodes {@link ApplicationId} for row keys.
+ * The app ID is stored in the row key as 12 bytes: the cluster timestamp
+ * section of the app id (long, 8 bytes) followed by the sequence id section
+ * (int, 4 bytes).
+ */
+public final class AppIdKeyConverter implements KeyConverter<String> {
+
+  public AppIdKeyConverter() {
+  }
+
+  /*
+   * (non-Javadoc)
+   *
+   * Converts/encodes a string app Id into a byte representation for (row) keys.
+   * For conversion, we extract the cluster timestamp and sequence id from the
+   * string app id (calls ApplicationId#fromString(String) for conversion) and
+   * then store them in a byte array of length 12 (8 bytes (long) for the
+   * cluster timestamp followed by 4 bytes (int) for the sequence id). Both
+   * cluster timestamp and sequence id are inverted so that the most recent
+   * cluster timestamp and highest sequence id appear first in the table (i.e.
+   * application ids appear in descending order).
+   *
+   * @see
+   * org.apache.hadoop.yarn.server.timelineservice.storage.common.KeyConverter
+   * #encode(java.lang.Object)
+   */
+  @Override
+  public byte[] encode(String appIdStr) {
+    ApplicationId appId = ApplicationId.fromString(appIdStr);
+    byte[] appIdBytes = new byte[getKeySize()];
+    byte[] clusterTs = Bytes.toBytes(
+        LongConverter.invertLong(appId.getClusterTimestamp()));
+    System.arraycopy(clusterTs, 0, appIdBytes, 0, Bytes.SIZEOF_LONG);
+    byte[] seqId = Bytes.toBytes(
+        HBaseTimelineSchemaUtils.invertInt(appId.getId()));
+    System.arraycopy(seqId, 0, appIdBytes, Bytes.SIZEOF_LONG, Bytes.SIZEOF_INT);
+    return appIdBytes;
+  }
+
+  /*
+   * (non-Javadoc)
+   *
+   * Converts/decodes a 12 byte representation of app id for (row) keys to an
+   * app id in string format which can be returned back to client.
+   * For decoding, 12 bytes are interpreted as 8 bytes of inverted cluster
+   * timestamp(long) followed by 4 bytes of inverted sequence id(int). Calls
+   * ApplicationId#toString to generate string representation of app id.
+   *
+   * @see
+   * org.apache.hadoop.yarn.server.timelineservice.storage.common.KeyConverter
+   * #decode(byte[])
+   */
+  @Override
+  public String decode(byte[] appIdBytes) {
+    if (appIdBytes.length != getKeySize()) {
+      throw new IllegalArgumentException("Invalid app id in byte format");
+    }
+    long clusterTs = LongConverter.invertLong(
+        Bytes.toLong(appIdBytes, 0, Bytes.SIZEOF_LONG));
+    int seqId = HBaseTimelineSchemaUtils.invertInt(
+        Bytes.toInt(appIdBytes, Bytes.SIZEOF_LONG, Bytes.SIZEOF_INT));
+    return HBaseTimelineSchemaUtils.convertApplicationIdToString(
+        ApplicationId.newInstance(clusterTs, seqId));
+  }
+
+  /**
+   * Returns the size of app id after encoding.
+   *
+   * @return size of app id after encoding.
+   */
+  public static int getKeySize() {
+    return Bytes.SIZEOF_LONG + Bytes.SIZEOF_INT;
+  }
+}
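
For illustration, a round trip through AppIdKeyConverter could look like the
sketch below (the wrapper class name and the sample cluster timestamp are
made up for the example; they are not part of this patch):

import org.apache.hadoop.yarn.api.records.ApplicationId;
import org.apache.hadoop.yarn.server.timelineservice.storage.common.AppIdKeyConverter;

public class AppIdKeyConverterExample {
  public static void main(String[] args) {
    AppIdKeyConverter converter = new AppIdKeyConverter();
    // A sample app id: cluster timestamp plus sequence id 1.
    String appId = ApplicationId.newInstance(1519418682000L, 1).toString();
    // 12 bytes: inverted cluster timestamp (long) + inverted sequence id (int).
    byte[] rowKeyPart = converter.encode(appId);
    System.out.println(rowKeyPart.length == AppIdKeyConverter.getKeySize()); // true
    // Decoding restores the original string form of the app id.
    System.out.println(converter.decode(rowKeyPart).equals(appId)); // true
  }
}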

http://git-wip-us.apache.org/repos/asf/hadoop/blob/9af30d46/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-common/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/common/BaseTable.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-common/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/common/BaseTable.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-common/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/common/BaseTable.java
new file mode 100644
index 0000000..433b352
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-common/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/common/BaseTable.java
@@ -0,0 +1,27 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.server.timelineservice.storage.common;
+
+
+/**
+ * The base type of tables.
+ * @param <T> table type
+ */
+public abstract class BaseTable<T> {
+}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/9af30d46/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-common/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/common/Column.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-common/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/common/Column.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-common/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/common/Column.java
new file mode 100644
index 0000000..2b50252
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-common/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/common/Column.java
@@ -0,0 +1,56 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.yarn.server.timelineservice.storage.common;
+
+import org.apache.hadoop.yarn.server.timelineservice.storage.flow.Attribute;
+
+/**
+ * A Column represents the way to store a fully qualified column in a specific
+ * table.
+ */
+public interface Column<T extends BaseTable<T>> {
+  /**
+   * Returns column family name(as bytes) associated with this column.
+   * @return a byte array encoding column family for this column qualifier.
+   */
+  byte[] getColumnFamilyBytes();
+
+  /**
+   * Get byte representation for this column qualifier.
+   * @return a byte array representing column qualifier.
+   */
+  byte[] getColumnQualifierBytes();
+
+  /**
+   * Returns value converter implementation associated with this column.
+   * @return a {@link ValueConverter} implementation.
+   */
+  ValueConverter getValueConverter();
+
+  /**
+   * Return the attributes combined with the aggregation attribute, if any.
+   * @return an array of Attributes
+   */
+  Attribute[] getCombinedAttrsWithAggr(Attribute... attributes);
+
+  /**
+   * Return true if the cell timestamp needs to be supplemented.
+   * @return true if the cell timestamp needs to be supplemented
+   */
+  boolean supplementCellTimestamp();
+}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hadoop/blob/9af30d46/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-common/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/common/ColumnFamily.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-common/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/common/ColumnFamily.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-common/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/common/ColumnFamily.java
new file mode 100644
index 0000000..c5c8cb4
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-common/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/common/ColumnFamily.java
@@ -0,0 +1,34 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.yarn.server.timelineservice.storage.common;
+
+/**
+ * Type safe column family.
+ *
+ * @param <T> refers to the table for which this column family is used for.
+ */
+public interface ColumnFamily<T extends BaseTable<T>> {
+
+  /**
+   * Keep a local copy if you need to avoid the overhead of repeated cloning.
+   *
+   * @return a clone of the byte representation of the column family.
+   */
+  byte[] getBytes();
+
+}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hadoop/blob/9af30d46/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-common/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/common/ColumnHelper.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-common/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/common/ColumnHelper.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-common/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/common/ColumnHelper.java
new file mode 100644
index 0000000..b173ef2
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-common/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/common/ColumnHelper.java
@@ -0,0 +1,101 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.yarn.server.timelineservice.storage.common;
+
+import org.apache.hadoop.hbase.util.Bytes;
+
+/**
+ * This class is meant to be used only by explicit Column implementations,
+ * and not by clients writing directly.
+ */
+public final class ColumnHelper {
+
+  private ColumnHelper() {
+  }
+
+
+  /**
+   * @param columnPrefixBytes The byte representation for the column prefix.
+   *          Should not contain {@link Separator#QUALIFIERS}.
+   * @param qualifier for the remainder of the column.
+   *          {@link Separator#QUALIFIERS} is permissible in the qualifier
+   *          as it is joined only with the column prefix bytes.
+   * @return fully sanitized column qualifier that is a combination of prefix
+   *         and qualifier. If prefix is null, the result is simply the encoded
+   *         qualifier without any separator.
+   */
+  public static byte[] getColumnQualifier(byte[] columnPrefixBytes,
+      String qualifier) {
+
+    // We don't want column names to have spaces / tabs.
+    byte[] encodedQualifier =
+        Separator.encode(qualifier, Separator.SPACE, Separator.TAB);
+    if (columnPrefixBytes == null) {
+      return encodedQualifier;
+    }
+
+    // Join the encoded qualifier on to the column prefix.
+    byte[] columnQualifier =
+        Separator.QUALIFIERS.join(columnPrefixBytes, encodedQualifier);
+    return columnQualifier;
+  }
+
+  /**
+   * @param columnPrefixBytes The byte representation for the column prefix.
+   *          Should not contain {@link Separator#QUALIFIERS}.
+   * @param qualifier for the remainder of the column.
+   * @return fully sanitized column qualifier that is a combination of prefix
+   *         and qualifier. If prefix is null, the result is simply the encoded
+   *         qualifier without any separator.
+   */
+  public static byte[] getColumnQualifier(byte[] columnPrefixBytes,
+      long qualifier) {
+
+    if (columnPrefixBytes == null) {
+      return Bytes.toBytes(qualifier);
+    }
+
+    // Tag the qualifier bytes on to the column prefix.
+    byte[] columnQualifier =
+        Separator.QUALIFIERS.join(columnPrefixBytes, Bytes.toBytes(qualifier));
+    return columnQualifier;
+  }
+
+  /**
+   * @param columnPrefixBytes The byte representation for the column prefix.
+   *          Should not contain {@link Separator#QUALIFIERS}.
+   * @param qualifier the byte representation for the remainder of the column.
+   * @return fully sanitized column qualifier that is a combination of prefix
+   *         and qualifier. If prefix is null, the result is simply the encoded
+   *         qualifier without any separator.
+   */
+  public static byte[] getColumnQualifier(byte[] columnPrefixBytes,
+      byte[] qualifier) {
+
+    if (columnPrefixBytes == null) {
+      return qualifier;
+    }
+
+    byte[] columnQualifier =
+        Separator.QUALIFIERS.join(columnPrefixBytes, qualifier);
+    return columnQualifier;
+  }
+
+}
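
For illustration (sketch only; the wrapper class is made up and not part of
this patch), the joining behaviour of ColumnHelper:

import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.yarn.server.timelineservice.storage.common.ColumnHelper;

public class ColumnHelperExample {
  public static void main(String[] args) {
    byte[] prefix = Bytes.toBytes("e");
    // With a prefix: the encoded qualifier is joined onto the prefix using
    // Separator.QUALIFIERS.
    byte[] qualified = ColumnHelper.getColumnQualifier(prefix, "someEventId");
    // With a null prefix: the encoded qualifier is returned on its own.
    byte[] bare = ColumnHelper.getColumnQualifier(null, "someEventId");
    System.out.println(qualified.length > bare.length); // true
  }
}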

http://git-wip-us.apache.org/repos/asf/hadoop/blob/9af30d46/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-common/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/common/ColumnPrefix.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-common/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/common/ColumnPrefix.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-common/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/common/ColumnPrefix.java
new file mode 100644
index 0000000..cbcd936
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-common/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/common/ColumnPrefix.java
@@ -0,0 +1,71 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.yarn.server.timelineservice.storage.common;
+
+import org.apache.hadoop.yarn.server.timelineservice.storage.flow.Attribute;
+
+/**
+ * Used to represent a partially qualified column, where the actual column name
+ * will be composed of a prefix and the remainder of the column qualifier. The
+ * prefix can be null, in which case the column qualifier will be completely
+ * determined when the values are stored.
+ */
+public interface ColumnPrefix<T extends BaseTable<T>> {
+
+  /**
+   * @param qualifierPrefix Column qualifier or prefix of qualifier.
+   * @return a byte array encoding column prefix and qualifier/prefix passed.
+   */
+  byte[] getColumnPrefixBytes(String qualifierPrefix);
+
+  /**
+   * @param qualifierPrefix Column qualifier or prefix of qualifier.
+   * @return a byte array encoding column prefix and qualifier/prefix passed.
+   */
+  byte[] getColumnPrefixBytes(byte[] qualifierPrefix);
+
+  /**
+   * Get the column prefix in bytes.
+   * @return column prefix in bytes
+   */
+  byte[] getColumnPrefixInBytes();
+
+  /**
+   * Returns column family name(as bytes) associated with this column prefix.
+   * @return a byte array encoding column family for this prefix.
+   */
+  byte[] getColumnFamilyBytes();
+
+  /**
+   * Returns value converter implementation associated with this column prefix.
+   * @return a {@link ValueConverter} implementation.
+   */
+  ValueConverter getValueConverter();
+
+  /**
+   * Return the attributes combined with the aggregation attribute, if any.
+   * @return an array of Attributes
+   */
+  Attribute[] getCombinedAttrsWithAggr(Attribute... attributes);
+
+  /**
+   * Return true if the cell timestamp needs to be supplemented.
+   * @return true if the cell timestamp needs to be supplemented
+   */
+  boolean supplementCellTimeStamp();
+}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hadoop/blob/9af30d46/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-common/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/common/EventColumnName.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-common/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/common/EventColumnName.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-common/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/common/EventColumnName.java
new file mode 100644
index 0000000..8445575
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-common/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/common/EventColumnName.java
@@ -0,0 +1,63 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.yarn.server.timelineservice.storage.common;
+
+/**
+ * Encapsulates information about Event column names for application and entity
+ * tables. Used while encoding/decoding event column names.
+ */
+public class EventColumnName {
+
+  private final String id;
+  private final Long timestamp;
+  private final String infoKey;
+  private final KeyConverter<EventColumnName> eventColumnNameConverter =
+      new EventColumnNameConverter();
+
+  public EventColumnName(String id, Long timestamp, String infoKey) {
+    this.id = id;
+    this.timestamp = timestamp;
+    this.infoKey = infoKey;
+  }
+
+  public String getId() {
+    return id;
+  }
+
+  public Long getTimestamp() {
+    return timestamp;
+  }
+
+  public String getInfoKey() {
+    return infoKey;
+  }
+
+  /**
+   * @return a byte array with each component/field separated by
+   *         Separator#VALUES. This leads to an event column name of the form
+   *         eventId=timestamp=infokey. If both timestamp and infokey are null,
+   *         then a column prefix of the form eventId= is returned. If only
+   *         infokey is null, then a qualifier of the form eventId=timestamp=
+   *         is returned. These prefix forms are useful for queries that intend
+   *         to retrieve more than one specific column name.
+   */
+  public byte[] getColumnQualifier() {
+    return eventColumnNameConverter.encode(this);
+  }
+
+}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/9af30d46/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-common/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/common/EventColumnNameConverter.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-common/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/common/EventColumnNameConverter.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-common/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/common/EventColumnNameConverter.java
new file mode 100644
index 0000000..d3ef897
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-common/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/common/EventColumnNameConverter.java
@@ -0,0 +1,99 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with this
+ * work for additional information regarding copyright ownership. The ASF
+ * licenses this file to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations under
+ * the License.
+ */
+
+package org.apache.hadoop.yarn.server.timelineservice.storage.common;
+
+import org.apache.hadoop.hbase.util.Bytes;
+
+/**
+ * Encodes and decodes event column names for application and entity tables.
+ * The event column name is of the form: eventId=timestamp=infokey.
+ * If no info is associated with the event, the event column name is of the
+ * form: eventId=timestamp=
+ * The event timestamp is a long and the rest are strings.
+ * Column prefixes are not part of the event column name passed for encoding;
+ * they are added later, if required, by the associated ColumnPrefix
+ * implementations.
+ */
+public final class EventColumnNameConverter
+    implements KeyConverter<EventColumnName> {
+
+  public EventColumnNameConverter() {
+  }
+
+  // eventId=timestamp=infokey are of types String, Long, String.
+  // Strings are variable in size (i.e. end whenever separator is encountered).
+  // This is used while decoding and helps in determining where to split.
+  private static final int[] SEGMENT_SIZES = {
+      Separator.VARIABLE_SIZE, Bytes.SIZEOF_LONG, Separator.VARIABLE_SIZE };
+
+  /*
+   * (non-Javadoc)
+   *
+   * Encodes EventColumnName into a byte array with each component/field in
+   * EventColumnName separated by Separator#VALUES. This leads to an event
+   * column name of the form eventId=timestamp=infokey.
+   * If the timestamp in the passed EventColumnName object is null (eventId is
+   * not null), this returns a column prefix of the form eventId=. If the
+   * infokey in EventColumnName is null (the other 2 components are not null),
+   * this returns a column name of the form eventId=timestamp=
+   *
+   * @see
+   * org.apache.hadoop.yarn.server.timelineservice.storage.common.KeyConverter
+   * #encode(java.lang.Object)
+   */
+  @Override
+  public byte[] encode(EventColumnName key) {
+    byte[] first = Separator.encode(key.getId(), Separator.SPACE, Separator.TAB,
+        Separator.VALUES);
+    if (key.getTimestamp() == null) {
+      return Separator.VALUES.join(first, Separator.EMPTY_BYTES);
+    }
+    byte[] second = Bytes.toBytes(
+        LongConverter.invertLong(key.getTimestamp()));
+    if (key.getInfoKey() == null) {
+      return Separator.VALUES.join(first, second, Separator.EMPTY_BYTES);
+    }
+    return Separator.VALUES.join(first, second, Separator.encode(
+        key.getInfoKey(), Separator.SPACE, Separator.TAB, Separator.VALUES));
+  }
+
+  /*
+   * (non-Javadoc)
+   *
+   * Decodes an event column name of the form eventId=timestamp= or
+   * eventId=timestamp=infoKey represented in byte format and converts it into
+   * an EventColumnName object.
+   *
+   * @see
+   * org.apache.hadoop.yarn.server.timelineservice.storage.common.KeyConverter
+   * #decode(byte[])
+   */
+  @Override
+  public EventColumnName decode(byte[] bytes) {
+    byte[][] components = Separator.VALUES.split(bytes, SEGMENT_SIZES);
+    if (components.length != 3) {
+      throw new IllegalArgumentException("the column name is not valid");
+    }
+    String id = Separator.decode(Bytes.toString(components[0]),
+        Separator.VALUES, Separator.TAB, Separator.SPACE);
+    Long ts = LongConverter.invertLong(Bytes.toLong(components[1]));
+    String infoKey = components[2].length == 0 ? null :
+        Separator.decode(Bytes.toString(components[2]),
+            Separator.VALUES, Separator.TAB, Separator.SPACE);
+    return new EventColumnName(id, ts, infoKey);
+  }
+}
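
The three qualifier forms described above, as a sketch (the event id,
timestamp and info key values are made up):

import org.apache.hadoop.yarn.server.timelineservice.storage.common.EventColumnName;
import org.apache.hadoop.yarn.server.timelineservice.storage.common.EventColumnNameConverter;

public class EventColumnNameExample {
  public static void main(String[] args) {
    EventColumnNameConverter conv = new EventColumnNameConverter();
    // Full column name: eventId=invertedTimestamp=infokey
    byte[] full = conv.encode(
        new EventColumnName("CONTAINER_LAUNCHED", 1519418682000L, "hostname"));
    // Info key absent: eventId=invertedTimestamp=
    byte[] noInfo = conv.encode(
        new EventColumnName("CONTAINER_LAUNCHED", 1519418682000L, null));
    // Timestamp absent too: the prefix form eventId=
    byte[] prefixOnly = conv.encode(
        new EventColumnName("CONTAINER_LAUNCHED", null, null));
    // Decoding a full name restores all three components.
    EventColumnName back = conv.decode(full);
    System.out.println(back.getId() + "/" + back.getTimestamp()
        + "/" + back.getInfoKey());
  }
}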

http://git-wip-us.apache.org/repos/asf/hadoop/blob/9af30d46/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-common/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/common/GenericConverter.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-common/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/common/GenericConverter.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-common/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/common/GenericConverter.java
new file mode 100644
index 0000000..c34bfcb
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-common/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/common/GenericConverter.java
@@ -0,0 +1,48 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.server.timelineservice.storage.common;
+
+import java.io.IOException;
+
+import org.apache.hadoop.yarn.server.timeline.GenericObjectMapper;
+
+/**
+ * Uses GenericObjectMapper to encode objects as bytes and decode bytes as
+ * objects.
+ */
+public final class GenericConverter implements ValueConverter {
+  private static final GenericConverter INSTANCE = new GenericConverter();
+
+  private GenericConverter() {
+  }
+
+  public static GenericConverter getInstance() {
+    return INSTANCE;
+  }
+
+  @Override
+  public byte[] encodeValue(Object value) throws IOException {
+    return GenericObjectMapper.write(value);
+  }
+
+  @Override
+  public Object decodeValue(byte[] bytes) throws IOException {
+    return GenericObjectMapper.read(bytes);
+  }
+}
\ No newline at end of file
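
A round-trip sketch (the sample value is made up):

import org.apache.hadoop.yarn.server.timelineservice.storage.common.GenericConverter;

public class GenericConverterExample {
  public static void main(String[] args) throws Exception {
    GenericConverter gc = GenericConverter.getInstance();
    // GenericObjectMapper special-cases common types such as Long and String.
    byte[] bytes = gc.encodeValue(123L);
    System.out.println(gc.decodeValue(bytes)); // 123
  }
}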

http://git-wip-us.apache.org/repos/asf/hadoop/blob/9af30d46/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-common/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/common/HBaseTimelineSchemaUtils.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-common/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/common/HBaseTimelineSchemaUtils.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-common/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/common/HBaseTimelineSchemaUtils.java
new file mode 100644
index 0000000..e5f92cc
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-common/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/common/HBaseTimelineSchemaUtils.java
@@ -0,0 +1,156 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with this
+ * work for additional information regarding copyright ownership. The ASF
+ * licenses this file to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations under
+ * the License.
+ */
+
+package org.apache.hadoop.yarn.server.timelineservice.storage.common;
+
+import org.apache.hadoop.yarn.api.records.ApplicationId;
+import org.apache.hadoop.yarn.server.timelineservice.storage.flow.AggregationOperation;
+import org.apache.hadoop.yarn.server.timelineservice.storage.flow.Attribute;
+
+import java.text.NumberFormat;
+
+/**
+ * Utility functions used in the HBase TimelineService common module.
+ */
+public final class HBaseTimelineSchemaUtils {
+  /** milliseconds in one day. */
+  public static final long MILLIS_ONE_DAY = 86400000L;
+
+  private static final ThreadLocal<NumberFormat> APP_ID_FORMAT =
+      new ThreadLocal<NumberFormat>() {
+        @Override
+        public NumberFormat initialValue() {
+          NumberFormat fmt = NumberFormat.getInstance();
+          fmt.setGroupingUsed(false);
+          fmt.setMinimumIntegerDigits(4);
+          return fmt;
+        }
+      };
+
+  private HBaseTimelineSchemaUtils() {
+  }
+
+  /**
+   * Combines the input array of attributes and the input aggregation operation
+   * into a new array of attributes.
+   *
+   * @param attributes Attributes to be combined.
+   * @param aggOp Aggregation operation.
+   * @return array of combined attributes.
+   */
+  public static Attribute[] combineAttributes(Attribute[] attributes,
+      AggregationOperation aggOp) {
+    int newLength = getNewLengthCombinedAttributes(attributes, aggOp);
+    Attribute[] combinedAttributes = new Attribute[newLength];
+
+    if (attributes != null) {
+      System.arraycopy(attributes, 0, combinedAttributes, 0, attributes.length);
+    }
+
+    if (aggOp != null) {
+      Attribute a2 = aggOp.getAttribute();
+      combinedAttributes[newLength - 1] = a2;
+    }
+    return combinedAttributes;
+  }
+
+  /**
+   * Returns a number for the new array size. The new array is the combination
+   * of input array of attributes and the input aggregation operation.
+   *
+   * @param attributes Attributes.
+   * @param aggOp Aggregation operation.
+   * @return the size for the new array
+   */
+  private static int getNewLengthCombinedAttributes(Attribute[] attributes,
+      AggregationOperation aggOp) {
+    int oldLength = getAttributesLength(attributes);
+    int aggLength = getAppOpLength(aggOp);
+    return oldLength + aggLength;
+  }
+
+  private static int getAppOpLength(AggregationOperation aggOp) {
+    if (aggOp != null) {
+      return 1;
+    }
+    return 0;
+  }
+
+  private static int getAttributesLength(Attribute[] attributes) {
+    if (attributes != null) {
+      return attributes.length;
+    }
+    return 0;
+  }
+
+  /**
+   * Converts an int into its inverse to be used in (row) keys
+   * where we want to have the largest int value at the top of the table
+   * (scans start at the largest int first).
+   *
+   * @param key value to be inverted so that the latest version will be first in
+   *          a scan.
+   * @return inverted int
+   */
+  public static int invertInt(int key) {
+    return Integer.MAX_VALUE - key;
+  }
+
+  /**
+   * Returns the timestamp of that day's start (midnight, 00:00:00)
+   * for a given input timestamp.
+   *
+   * @param ts Timestamp.
+   * @return timestamp of that day's beginning (midnight)
+   */
+  public static long getTopOfTheDayTimestamp(long ts) {
+    long dayTimestamp = ts - (ts % MILLIS_ONE_DAY);
+    return dayTimestamp;
+  }
+
+  /**
+   * Checks if the passed object is of an integral type (Short/Integer/Long).
+   *
+   * @param obj Object to be checked.
+   * @return true if object passed is of type Short or Integer or Long, false
+   * otherwise.
+   */
+  public static boolean isIntegralValue(Object obj) {
+    return (obj instanceof Short) || (obj instanceof Integer) ||
+        (obj instanceof Long);
+  }
+
+  /**
+   * A utility method that converts ApplicationId to string without using
+   * FastNumberFormat in order to avoid the incompatibility issue caused
+   * by mixing hadoop-common 2.5.1 and hadoop-yarn-api 3.0 in this module.
+   * This is a work-around implementation as discussed in YARN-6905.
+   *
+   * @param appId application id
+   * @return the string representation of the given application id
+   *
+   */
+  public static String convertApplicationIdToString(ApplicationId appId) {
+    StringBuilder sb = new StringBuilder(64);
+    sb.append(ApplicationId.appIdStrPrefix);
+    sb.append("_");
+    sb.append(appId.getClusterTimestamp());
+    sb.append('_');
+    sb.append(APP_ID_FORMAT.get().format(appId.getId()));
+    return sb.toString();
+  }
+}
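
A short sketch of the helpers above (sample values are made up):

import org.apache.hadoop.yarn.api.records.ApplicationId;
import org.apache.hadoop.yarn.server.timelineservice.storage.common.HBaseTimelineSchemaUtils;

public class SchemaUtilsExample {
  public static void main(String[] args) {
    // Inverted ints make the highest values sort first in an HBase scan.
    int inverted = HBaseTimelineSchemaUtils.invertInt(42);
    System.out.println(inverted == Integer.MAX_VALUE - 42); // true
    // Truncates an epoch-millis timestamp to midnight (UTC) of the same day.
    long dayStart =
        HBaseTimelineSchemaUtils.getTopOfTheDayTimestamp(1519418682000L);
    System.out.println(dayStart % HBaseTimelineSchemaUtils.MILLIS_ONE_DAY); // 0
    // FastNumberFormat-free string form: application_1519418682000_0042.
    System.out.println(HBaseTimelineSchemaUtils.convertApplicationIdToString(
        ApplicationId.newInstance(1519418682000L, 42)));
  }
}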

http://git-wip-us.apache.org/repos/asf/hadoop/blob/9af30d46/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-common/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/common/KeyConverter.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-common/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/common/KeyConverter.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-common/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/common/KeyConverter.java
new file mode 100644
index 0000000..4229e81
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-common/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/common/KeyConverter.java
@@ -0,0 +1,41 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.yarn.server.timelineservice.storage.common;
+
+/**
+ * Interface which has to be implemented for encoding and decoding row keys and
+ * columns.
+ */
+public interface KeyConverter<T> {
+  /**
+   * Encodes a key as a byte array.
+   *
+   * @param key key to be encoded.
+   * @return a byte array.
+   */
+  byte[] encode(T key);
+
+  /**
+   * Decodes a byte array and returns a key of type T.
+   *
+   * @param bytes byte representation
+   * @return an object (key) of type T which has been constructed after decoding
+   * the bytes.
+   */
+  T decode(byte[] bytes);
+}
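
A trivial implementation sketch (this class is hypothetical and not part of
this patch) showing the shape a KeyConverter takes:

import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.yarn.server.timelineservice.storage.common.KeyConverter;

public final class PlainStringKeyConverter implements KeyConverter<String> {
  // Encodes the key with HBase's UTF-8 helper; the real converters in this
  // module additionally escape separator characters via Separator.encode.
  @Override
  public byte[] encode(String key) {
    return Bytes.toBytes(key);
  }

  @Override
  public String decode(byte[] bytes) {
    return Bytes.toString(bytes);
  }
}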

http://git-wip-us.apache.org/repos/asf/hadoop/blob/9af30d46/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-common/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/common/KeyConverterToString.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-common/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/common/KeyConverterToString.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-common/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/common/KeyConverterToString.java
new file mode 100644
index 0000000..1f52a7b
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-common/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/common/KeyConverterToString.java
@@ -0,0 +1,38 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.yarn.server.timelineservice.storage.common;
+
+/**
+ * Interface which has to be implemented for encoding and decoding row keys or
+ * column qualifiers as strings.
+ */
+public interface KeyConverterToString<T> {
+  /**
+   * Encode key as string.
+   * @param key of type T to be encoded as string.
+   * @return encoded value as string.
+   */
+  String encodeAsString(T key);
+
+  /**
+   * Decode row key from string to a key of type T.
+   * @param encodedKey string representation of row key
+   * @return type T which has been constructed after decoding string.
+   */
+  T decodeFromString(String encodedKey);
+}


---------------------------------------------------------------------
To unsubscribe, e-mail: common-commits-unsubscribe@hadoop.apache.org
For additional commands, e-mail: common-commits-help@hadoop.apache.org


[40/50] [abbrv] hadoop git commit: HADOOP-6852. apparent bug in concatenated-bzip2 support (decoding). Contributed by Zsolt Venczel.

Posted by ha...@apache.org.
HADOOP-6852. apparent bug in concatenated-bzip2 support (decoding). Contributed by Zsolt Venczel.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/2bc3351e
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/2bc3351e
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/2bc3351e

Branch: refs/heads/HDFS-12996
Commit: 2bc3351eaf240ea685bcf5042d79f1554bf89e00
Parents: 92cbbfe
Author: Sean Mackrory <ma...@apache.org>
Authored: Wed Feb 21 12:53:18 2018 -0700
Committer: Sean Mackrory <ma...@apache.org>
Committed: Wed Feb 21 12:57:14 2018 -0700

----------------------------------------------------------------------
 .../hadoop-client-minicluster/pom.xml           |   1 +
 .../apache/hadoop/io/compress/BZip2Codec.java   |   3 +-
 .../mapred/TestConcatenatedCompressedInput.java |  84 +++++++++----------
 .../src/test/resources/testdata/concat.bz2      | Bin 0 -> 208 bytes
 .../src/test/resources/testdata/concat.gz       | Bin 0 -> 148 bytes
 .../testdata/testCompressThenConcat.txt.bz2     | Bin 0 -> 3056 bytes
 .../testdata/testCompressThenConcat.txt.gz      | Bin 0 -> 3413 bytes
 .../testdata/testConcatThenCompress.txt.bz2     | Bin 0 -> 2567 bytes
 .../testdata/testConcatThenCompress.txt.gz      | Bin 0 -> 2734 bytes
 9 files changed, 42 insertions(+), 46 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/2bc3351e/hadoop-client-modules/hadoop-client-minicluster/pom.xml
----------------------------------------------------------------------
diff --git a/hadoop-client-modules/hadoop-client-minicluster/pom.xml b/hadoop-client-modules/hadoop-client-minicluster/pom.xml
index 905d53a..a443648 100644
--- a/hadoop-client-modules/hadoop-client-minicluster/pom.xml
+++ b/hadoop-client-modules/hadoop-client-minicluster/pom.xml
@@ -615,6 +615,7 @@
                       <excludes>
                         <exclude>testjar/*</exclude>
                         <exclude>testshell/*</exclude>
+                        <exclude>testdata/*</exclude>
                       </excludes>
                     </filter>
                     <!-- Mockito tries to include its own unrelocated copy of hamcrest. :( -->

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2bc3351e/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/BZip2Codec.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/BZip2Codec.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/BZip2Codec.java
index 3c78cfc..99590ed 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/BZip2Codec.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/BZip2Codec.java
@@ -180,7 +180,8 @@ public class BZip2Codec implements Configurable, SplittableCompressionCodec {
       new DecompressorStream(in, decompressor,
                              conf.getInt(IO_FILE_BUFFER_SIZE_KEY,
                                  IO_FILE_BUFFER_SIZE_DEFAULT)) :
-      new BZip2CompressionInputStream(in);
+      new BZip2CompressionInputStream(
+              in, 0L, Long.MAX_VALUE, READ_MODE.BYBLOCK);
   }
 
   /**

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2bc3351e/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/TestConcatenatedCompressedInput.java
----------------------------------------------------------------------
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/TestConcatenatedCompressedInput.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/TestConcatenatedCompressedInput.java
index 977d083..af6b952 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/TestConcatenatedCompressedInput.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/TestConcatenatedCompressedInput.java
@@ -18,18 +18,6 @@
 
 package org.apache.hadoop.mapred;
 
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertFalse;
-import static org.junit.Assert.assertTrue;
-
-import java.io.ByteArrayInputStream;
-import java.io.FileInputStream;
-import java.io.IOException;
-import java.io.OutputStream;
-import java.util.ArrayList;
-import java.util.List;
-import java.util.zip.Inflater;
-
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.io.LongWritable;
@@ -42,16 +30,26 @@ import org.apache.hadoop.io.compress.zlib.ZlibFactory;
 import org.apache.hadoop.util.LineReader;
 import org.apache.hadoop.util.ReflectionUtils;
 import org.junit.After;
-import org.junit.Ignore;
 import org.junit.Test;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
-@Ignore
+import java.io.ByteArrayInputStream;
+import java.io.FileInputStream;
+import java.io.IOException;
+import java.io.OutputStream;
+import java.util.ArrayList;
+import java.util.List;
+import java.util.zip.Inflater;
+
+import static org.junit.Assert.*;
+
+/**
+ * Test class for concatenated {@link CompressionInputStream}.
+ */
 public class TestConcatenatedCompressedInput {
   private static final Logger LOG =
       LoggerFactory.getLogger(TestConcatenatedCompressedInput.class);
-  private static int MAX_LENGTH = 10000;
   private static JobConf defaultConf = new JobConf();
   private static FileSystem localFs = null;
 
@@ -85,13 +83,15 @@ public class TestConcatenatedCompressedInput {
   public void after() {
     ZlibFactory.loadNativeZLib();
   }
+
+  private static final String DEFAULT_WORK_DIR = "target/test-classes/testdata";
   private static Path workDir = localFs.makeQualified(new Path(
-      System.getProperty("test.build.data", "/tmp"),
+      System.getProperty("test.build.data", DEFAULT_WORK_DIR),
       "TestConcatenatedCompressedInput"));
 
   private static LineReader makeStream(String str) throws IOException {
-    return new LineReader(new ByteArrayInputStream(str.getBytes("UTF-8")),
-                          defaultConf);
+    return new LineReader(new ByteArrayInputStream(
+            str.getBytes("UTF-8")), defaultConf);
   }
 
   private static void writeFile(FileSystem fs, Path name,
@@ -190,7 +190,8 @@ public class TestConcatenatedCompressedInput {
 
     // copy prebuilt (correct!) version of concat.gz to HDFS
     final String fn = "concat" + gzip.getDefaultExtension();
-    Path fnLocal = new Path(System.getProperty("test.concat.data", "/tmp"), fn);
+    Path fnLocal = new Path(
+            System.getProperty("test.concat.data", DEFAULT_WORK_DIR), fn);
     Path fnHDFS  = new Path(workDir, fn);
     localFs.copyFromLocalFile(fnLocal, fnHDFS);
 
@@ -227,7 +228,7 @@ public class TestConcatenatedCompressedInput {
   @Test
   public void testPrototypeInflaterGzip() throws IOException {
     CompressionCodec gzip = new GzipCodec();  // used only for file extension
-    localFs.delete(workDir, true);            // localFs = FileSystem instance
+    localFs.delete(workDir, true); // localFs = FileSystem instance
 
     System.out.println(COLOR_BR_BLUE + "testPrototypeInflaterGzip() using " +
       "non-native/Java Inflater and manual gzip header/trailer parsing" +
@@ -235,7 +236,8 @@ public class TestConcatenatedCompressedInput {
 
     // copy prebuilt (correct!) version of concat.gz to HDFS
     final String fn = "concat" + gzip.getDefaultExtension();
-    Path fnLocal = new Path(System.getProperty("test.concat.data", "/tmp"), fn);
+    Path fnLocal = new Path(
+            System.getProperty("test.concat.data", DEFAULT_WORK_DIR), fn);
     Path fnHDFS  = new Path(workDir, fn);
     localFs.copyFromLocalFile(fnLocal, fnHDFS);
 
@@ -326,14 +328,16 @@ public class TestConcatenatedCompressedInput {
 
     // copy single-member test file to HDFS
     String fn1 = "testConcatThenCompress.txt" + gzip.getDefaultExtension();
-    Path fnLocal1 = new Path(System.getProperty("test.concat.data","/tmp"),fn1);
+    Path fnLocal1 = new Path(
+            System.getProperty("test.concat.data", DEFAULT_WORK_DIR), fn1);
     Path fnHDFS1  = new Path(workDir, fn1);
     localFs.copyFromLocalFile(fnLocal1, fnHDFS1);
 
     // copy multiple-member test file to HDFS
     // (actually in "seekable gzip" format, a la JIRA PIG-42)
     String fn2 = "testCompressThenConcat.txt" + gzip.getDefaultExtension();
-    Path fnLocal2 = new Path(System.getProperty("test.concat.data","/tmp"),fn2);
+    Path fnLocal2 = new Path(
+            System.getProperty("test.concat.data", DEFAULT_WORK_DIR), fn2);
     Path fnHDFS2  = new Path(workDir, fn2);
     localFs.copyFromLocalFile(fnLocal2, fnHDFS2);
 
@@ -439,7 +443,8 @@ public class TestConcatenatedCompressedInput {
     InputSplit[] splits = format.getSplits(jConf, 100);
     assertEquals("compressed splits == 2", 2, splits.length);
     FileSplit tmp = (FileSplit) splits[0];
-    if (tmp.getPath().getName().equals("testCompressThenConcat.txt.gz")) {
+    if (tmp.getPath()
+            .getName().equals("testdata/testCompressThenConcat.txt.gz")) {
       System.out.println("  (swapping)");
       splits[0] = splits[1];
       splits[1] = tmp;
@@ -481,7 +486,8 @@ public class TestConcatenatedCompressedInput {
 
     // copy prebuilt (correct!) version of concat.bz2 to HDFS
     final String fn = "concat" + bzip2.getDefaultExtension();
-    Path fnLocal = new Path(System.getProperty("test.concat.data", "/tmp"), fn);
+    Path fnLocal = new Path(
+            System.getProperty("test.concat.data", DEFAULT_WORK_DIR), fn);
     Path fnHDFS  = new Path(workDir, fn);
     localFs.copyFromLocalFile(fnLocal, fnHDFS);
 
@@ -531,13 +537,15 @@ public class TestConcatenatedCompressedInput {
 
     // copy single-member test file to HDFS
     String fn1 = "testConcatThenCompress.txt" + bzip2.getDefaultExtension();
-    Path fnLocal1 = new Path(System.getProperty("test.concat.data","/tmp"),fn1);
+    Path fnLocal1 = new Path(
+            System.getProperty("test.concat.data", DEFAULT_WORK_DIR), fn1);
     Path fnHDFS1  = new Path(workDir, fn1);
     localFs.copyFromLocalFile(fnLocal1, fnHDFS1);
 
     // copy multiple-member test file to HDFS
     String fn2 = "testCompressThenConcat.txt" + bzip2.getDefaultExtension();
-    Path fnLocal2 = new Path(System.getProperty("test.concat.data","/tmp"),fn2);
+    Path fnLocal2 = new Path(
+            System.getProperty("test.concat.data", DEFAULT_WORK_DIR), fn2);
     Path fnHDFS2  = new Path(workDir, fn2);
     localFs.copyFromLocalFile(fnLocal2, fnHDFS2);
 
@@ -549,21 +557,6 @@ public class TestConcatenatedCompressedInput {
     assertEquals("concat bytes available", 2567, in1.available());
     assertEquals("concat bytes available", 3056, in2.available());
 
-/*
-    // FIXME
-    // The while-loop below dies at the beginning of the 2nd concatenated
-    // member (after 17 lines successfully read) with:
-    //
-    //   java.io.IOException: bad block header
-    //   at org.apache.hadoop.io.compress.bzip2.CBZip2InputStream.initBlock(
-    //   CBZip2InputStream.java:527)
-    //
-    // It is not critical to concatenated-gzip support, HADOOP-6835, so it's
-    // simply commented out for now (and HADOOP-6852 filed).  If and when the
-    // latter issue is resolved--perhaps by fixing an error here--this code
-    // should be reenabled.  Note that the doMultipleBzip2BufferSizes() test
-    // below uses the same testCompressThenConcat.txt.bz2 file but works fine.
-
     CompressionInputStream cin2 = bzip2.createInputStream(in2);
     LineReader in = new LineReader(cin2);
     Text out = new Text();
@@ -578,7 +571,6 @@ public class TestConcatenatedCompressedInput {
                  5346, totalBytes);
     assertEquals("total uncompressed lines in concatenated test file",
                  84, lineNum);
- */
 
     // test CBZip2InputStream with lots of different input-buffer sizes
     doMultipleBzip2BufferSizes(jobConf);
@@ -645,7 +637,8 @@ public class TestConcatenatedCompressedInput {
 
   // this tests both files (testCompressThenConcat, testConcatThenCompress); all
   // should work with existing Java bzip2 decoder and any future native version
-  private static void doSingleBzip2BufferSize(JobConf jConf) throws IOException {
+  private static void doSingleBzip2BufferSize(JobConf jConf)
+          throws IOException {
     TextInputFormat format = new TextInputFormat();
     format.configure(jConf);
     format.setMinSplitSize(5500);  // work around 256-byte/22-splits issue
@@ -654,7 +647,8 @@ public class TestConcatenatedCompressedInput {
     InputSplit[] splits = format.getSplits(jConf, 100);
     assertEquals("compressed splits == 2", 2, splits.length);
     FileSplit tmp = (FileSplit) splits[0];
-    if (tmp.getPath().getName().equals("testCompressThenConcat.txt.gz")) {
+    if (tmp.getPath()
+            .getName().equals("testdata/testCompressThenConcat.txt.gz")) {
       System.out.println("  (swapping)");
       splits[0] = splits[1];
       splits[1] = tmp;

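As an aside for readers tracking HADOOP-6852: below is a minimal sketch, outside the patch, of the behavior the re-enabled loop exercises. It reads a multi-member bzip2 file through the codec and counts lines across member boundaries; the local path and the printout are illustrative assumptions, not part of the test.

    import java.io.InputStream;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.io.Text;
    import org.apache.hadoop.io.compress.BZip2Codec;
    import org.apache.hadoop.io.compress.CompressionInputStream;
    import org.apache.hadoop.util.LineReader;
    import org.apache.hadoop.util.ReflectionUtils;

    public class ConcatBzip2ReadSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        FileSystem fs = FileSystem.getLocal(conf);
        // hypothetical file holding several concatenated bzip2 members
        Path input = new Path("/tmp/testCompressThenConcat.txt.bz2");

        BZip2Codec codec = ReflectionUtils.newInstance(BZip2Codec.class, conf);
        InputStream raw = fs.open(input);
        CompressionInputStream cin = codec.createInputStream(raw);

        LineReader lines = new LineReader(cin);
        Text line = new Text();
        int lineCount = 0;
        long byteCount = 0;
        // with concatenation support this loop crosses member boundaries
        // instead of dying with "bad block header" at the second member
        for (int len = lines.readLine(line); len > 0;
             len = lines.readLine(line)) {
          byteCount += len;
          lineCount++;
        }
        lines.close();
        System.out.println("read " + lineCount + " lines, "
            + byteCount + " bytes");
      }
    }
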
http://git-wip-us.apache.org/repos/asf/hadoop/blob/2bc3351e/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/resources/testdata/concat.bz2
----------------------------------------------------------------------
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/resources/testdata/concat.bz2 b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/resources/testdata/concat.bz2
new file mode 100644
index 0000000..f31fb0c
Binary files /dev/null and b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/resources/testdata/concat.bz2 differ

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2bc3351e/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/resources/testdata/concat.gz
----------------------------------------------------------------------
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/resources/testdata/concat.gz b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/resources/testdata/concat.gz
new file mode 100644
index 0000000..53d5a07
Binary files /dev/null and b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/resources/testdata/concat.gz differ

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2bc3351e/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/resources/testdata/testCompressThenConcat.txt.bz2
----------------------------------------------------------------------
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/resources/testdata/testCompressThenConcat.txt.bz2 b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/resources/testdata/testCompressThenConcat.txt.bz2
new file mode 100644
index 0000000..a21c0e2
Binary files /dev/null and b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/resources/testdata/testCompressThenConcat.txt.bz2 differ

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2bc3351e/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/resources/testdata/testCompressThenConcat.txt.gz
----------------------------------------------------------------------
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/resources/testdata/testCompressThenConcat.txt.gz b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/resources/testdata/testCompressThenConcat.txt.gz
new file mode 100644
index 0000000..75e5f8c
Binary files /dev/null and b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/resources/testdata/testCompressThenConcat.txt.gz differ

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2bc3351e/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/resources/testdata/testConcatThenCompress.txt.bz2
----------------------------------------------------------------------
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/resources/testdata/testConcatThenCompress.txt.bz2 b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/resources/testdata/testConcatThenCompress.txt.bz2
new file mode 100644
index 0000000..5983e52
Binary files /dev/null and b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/resources/testdata/testConcatThenCompress.txt.bz2 differ

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2bc3351e/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/resources/testdata/testConcatThenCompress.txt.gz
----------------------------------------------------------------------
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/resources/testdata/testConcatThenCompress.txt.gz b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/resources/testdata/testConcatThenCompress.txt.gz
new file mode 100644
index 0000000..6e8eaa5
Binary files /dev/null and b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/resources/testdata/testConcatThenCompress.txt.gz differ




[20/50] [abbrv] hadoop git commit: YARN-7937. Fix http method name in Cluster Application Timeout Update API example request. Contributed by Charan Hebri.

Posted by ha...@apache.org.
YARN-7937. Fix http method name in Cluster Application Timeout Update API example request. Contributed by Charan Hebri.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/87bdde69
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/87bdde69
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/87bdde69

Branch: refs/heads/HDFS-12996
Commit: 87bdde69431c19a22d79a767071f6ea47e1ceb3d
Parents: 9af30d4
Author: Rohith Sharma K S <ro...@apache.org>
Authored: Sun Feb 18 14:01:23 2018 +0530
Committer: Rohith Sharma K S <ro...@apache.org>
Committed: Sun Feb 18 14:01:23 2018 +0530

----------------------------------------------------------------------
 .../hadoop-yarn-site/src/site/markdown/ResourceManagerRest.md    | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/87bdde69/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/ResourceManagerRest.md
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/ResourceManagerRest.md b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/ResourceManagerRest.md
index 09e4727..c43fe14 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/ResourceManagerRest.md
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/ResourceManagerRest.md
@@ -4366,7 +4366,7 @@ HTTP Request:
 
 ```json
       Accept: application/json
-      GET http://rm-http-address:port/ws/v1/cluster/apps/{appid}/timeout
+      PUT http://rm-http-address:port/ws/v1/cluster/apps/{appid}/timeout
       Content-Type: application/json
         {
         "timeout":
@@ -4404,7 +4404,7 @@ HTTP Request:
 
 ```xml
       Accept: application/xml
-      GET http://rm-http-address:port/ws/v1/cluster/apps/{appid}/timeout
+      PUT http://rm-http-address:port/ws/v1/cluster/apps/{appid}/timeout
       Content-Type: application/xml
         <?xml version="1.0" encoding="UTF-8" standalone="yes"?>
         <timeout>


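As a quick usage note on the corrected examples above, here is a minimal sketch, assuming a reachable RM at rm-host:8088 and a hypothetical application id, that issues the timeout update with PUT as the fixed documentation now shows.

    import java.io.OutputStream;
    import java.net.HttpURLConnection;
    import java.net.URL;
    import java.nio.charset.StandardCharsets;

    public class UpdateAppTimeoutSketch {
      public static void main(String[] args) throws Exception {
        String appId = "application_1518000000000_0001"; // hypothetical id
        URL url = new URL("http://rm-host:8088/ws/v1/cluster/apps/"
            + appId + "/timeout");
        HttpURLConnection conn = (HttpURLConnection) url.openConnection();
        conn.setRequestMethod("PUT");          // the fix: PUT, not GET
        conn.setRequestProperty("Accept", "application/json");
        conn.setRequestProperty("Content-Type", "application/json");
        conn.setDoOutput(true);

        // body mirrors the documented JSON example; the expiry time
        // below is an illustrative value
        String body = "{\"timeout\": {\"type\": \"LIFETIME\","
            + " \"expiryTime\": \"2018-02-28T10:00:00.000+0530\"}}";
        try (OutputStream out = conn.getOutputStream()) {
          out.write(body.getBytes(StandardCharsets.UTF_8));
        }
        System.out.println("HTTP " + conn.getResponseCode());
      }
    }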


[27/50] [abbrv] hadoop git commit: YARN-7940. Fixed a bug in ServiceAM ZooKeeper initialization. (Contributed by Billie Rinaldi)

Posted by ha...@apache.org.
YARN-7940. Fixed a bug in ServiceAM ZooKeeper initialization.
           (Contributed by Billie Rinaldi)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/7280c5af
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/7280c5af
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/7280c5af

Branch: refs/heads/HDFS-12996
Commit: 7280c5af82d36a9be15448293210d871f680f55e
Parents: 8896d20
Author: Eric Yang <ey...@apache.org>
Authored: Tue Feb 20 14:12:58 2018 -0500
Committer: Eric Yang <ey...@apache.org>
Committed: Tue Feb 20 14:12:58 2018 -0500

----------------------------------------------------------------------
 .../org/apache/hadoop/registry/client/impl/zk/CuratorService.java | 3 +++
 .../apache/hadoop/registry/client/impl/zk/RegistrySecurity.java   | 2 ++
 2 files changed, 5 insertions(+)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/7280c5af/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/registry/client/impl/zk/CuratorService.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/registry/client/impl/zk/CuratorService.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/registry/client/impl/zk/CuratorService.java
index c81a0ee..2eb7aa5 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/registry/client/impl/zk/CuratorService.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/registry/client/impl/zk/CuratorService.java
@@ -288,6 +288,9 @@ public class CuratorService extends CompositeService
       registrySecurity.applySecurityEnvironment(builder);
       //log them
       securityConnectionDiagnostics = buildSecurityDiagnostics();
+      if (LOG.isDebugEnabled()) {
+        LOG.debug(securityConnectionDiagnostics);
+      }
       framework = builder.build();
       framework.start();
     }

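A small aside on the pattern those added lines use: wrapping LOG.debug in an isDebugEnabled() check skips the call entirely unless DEBUG logging is active. A minimal illustrative sketch, not the CuratorService code itself:

    import org.slf4j.Logger;
    import org.slf4j.LoggerFactory;

    public class GuardedDebugSketch {
      private static final Logger LOG =
          LoggerFactory.getLogger(GuardedDebugSketch.class);

      static void logDiagnostics(String diagnostics) {
        if (LOG.isDebugEnabled()) {  // skip the call unless DEBUG is on
          LOG.debug(diagnostics);
        }
      }
    }
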
http://git-wip-us.apache.org/repos/asf/hadoop/blob/7280c5af/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/registry/client/impl/zk/RegistrySecurity.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/registry/client/impl/zk/RegistrySecurity.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/registry/client/impl/zk/RegistrySecurity.java
index 521d8a9..bb829d8 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/registry/client/impl/zk/RegistrySecurity.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/registry/client/impl/zk/RegistrySecurity.java
@@ -762,6 +762,8 @@ public class RegistrySecurity extends AbstractService {
           LOG.info(
               "Enabling ZK sasl client: jaasClientEntry = " + jaasClientEntry
                   + ", principal = " + principal + ", keytab = " + keytab);
+          break;
+
         default:
           clearZKSaslClientProperties();
           break;

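To make the bug visible in isolation, here is a minimal sketch with hypothetical names of the switch fall-through this one-line change fixes: without the break, the branch that enables the SASL client falls straight into the default branch and clears the properties it just set.

    public class FallThroughSketch {
      enum Mode { SASL, NONE }

      static boolean saslEnabled;

      static void apply(Mode mode) {
        switch (mode) {
          case SASL:
            saslEnabled = true;   // enable the ZK SASL client
            break;                // <-- the missing statement added here
          default:
            saslEnabled = false;  // stands in for clearZKSaslClientProperties()
            break;
        }
      }

      public static void main(String[] args) {
        apply(Mode.SASL);
        // prints true only because of the break above
        System.out.println("sasl enabled: " + saslEnabled);
      }
    }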



[02/50] [abbrv] hadoop git commit: YARN-7919. Refactor timelineservice-hbase module into submodules. Contributed by Haibo Chen.

Posted by ha...@apache.org.
http://git-wip-us.apache.org/repos/asf/hadoop/blob/9af30d46/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/subapplication/SubApplicationTable.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/subapplication/SubApplicationTable.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/subapplication/SubApplicationTable.java
deleted file mode 100644
index 785a243..0000000
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/subapplication/SubApplicationTable.java
+++ /dev/null
@@ -1,174 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.yarn.server.timelineservice.storage.subapplication;
-
-import java.io.IOException;
-
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hbase.HColumnDescriptor;
-import org.apache.hadoop.hbase.HTableDescriptor;
-import org.apache.hadoop.hbase.TableName;
-import org.apache.hadoop.hbase.client.Admin;
-import org.apache.hadoop.hbase.regionserver.BloomType;
-import org.apache.hadoop.yarn.conf.YarnConfiguration;
-import org.apache.hadoop.yarn.server.timelineservice.storage.common.BaseTable;
-import org.apache.hadoop.yarn.server.timelineservice.storage.common.TimelineHBaseSchemaConstants;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-/**
- * The sub application table has column families:
- * info, config and metrics.
- * Info stores information about a timeline entity object
- * config stores configuration data of a timeline entity object
- * metrics stores the metrics of a timeline entity object
- *
- * Example sub application table record:
- *
- * <pre>
- * |-------------------------------------------------------------------------|
- * |  Row          | Column Family             | Column Family| Column Family|
- * |  key          | info                      | metrics      | config       |
- * |-------------------------------------------------------------------------|
- * | subAppUserId! | id:entityId               | metricId1:   | configKey1:  |
- * | clusterId!    | type:entityType           | metricValue1 | configValue1 |
- * | entityType!   |                           | @timestamp1  |              |
- * | idPrefix!|    |                           |              | configKey2:  |
- * | entityId!     | created_time:             | metricId1:   | configValue2 |
- * | userId        | 1392993084018             | metricValue2 |              |
- * |               |                           | @timestamp2  |              |
- * |               | i!infoKey:                |              |              |
- * |               | infoValue                 | metricId1:   |              |
- * |               |                           | metricValue1 |              |
- * |               |                           | @timestamp2  |              |
- * |               | e!eventId=timestamp=      |              |              |
- * |               | infoKey:                  |              |              |
- * |               | eventInfoValue            |              |              |
- * |               |                           |              |              |
- * |               | r!relatesToKey:           |              |              |
- * |               | id3=id4=id5               |              |              |
- * |               |                           |              |              |
- * |               | s!isRelatedToKey          |              |              |
- * |               | id7=id9=id6               |              |              |
- * |               |                           |              |              |
- * |               | flowVersion:              |              |              |
- * |               | versionValue              |              |              |
- * |-------------------------------------------------------------------------|
- * </pre>
- */
-public class SubApplicationTable extends BaseTable<SubApplicationTable> {
-  /** sub app prefix. */
-  private static final String PREFIX =
-      YarnConfiguration.TIMELINE_SERVICE_PREFIX + "subapplication";
-
-  /** config param name that specifies the subapplication table name. */
-  public static final String TABLE_NAME_CONF_NAME = PREFIX + ".table.name";
-
-  /**
-   * config param name that specifies the TTL for metrics column family in
-   * subapplication table.
-   */
-  private static final String METRICS_TTL_CONF_NAME = PREFIX
-      + ".table.metrics.ttl";
-
-  /**
-   * config param name that specifies max-versions for
-   * metrics column family in subapplication table.
-   */
-  private static final String METRICS_MAX_VERSIONS =
-      PREFIX + ".table.metrics.max-versions";
-
-  /** default value for subapplication table name. */
-  public static final String DEFAULT_TABLE_NAME =
-      "timelineservice.subapplication";
-
-  /** default TTL is 30 days for metrics timeseries. */
-  private static final int DEFAULT_METRICS_TTL = 2592000;
-
-  /** default max number of versions. */
-  private static final int DEFAULT_METRICS_MAX_VERSIONS = 10000;
-
-  private static final Logger LOG = LoggerFactory.getLogger(
-      SubApplicationTable.class);
-
-  public SubApplicationTable() {
-    super(TABLE_NAME_CONF_NAME, DEFAULT_TABLE_NAME);
-  }
-
-  /*
-   * (non-Javadoc)
-   *
-   * @see
-   * org.apache.hadoop.yarn.server.timelineservice.storage.BaseTable#createTable
-   * (org.apache.hadoop.hbase.client.Admin,
-   * org.apache.hadoop.conf.Configuration)
-   */
-  public void createTable(Admin admin, Configuration hbaseConf)
-      throws IOException {
-
-    TableName table = getTableName(hbaseConf);
-    if (admin.tableExists(table)) {
-      // do not disable / delete existing table
-      // similar to the approach taken by map-reduce jobs when
-      // output directory exists
-      throw new IOException("Table " + table.getNameAsString()
-          + " already exists.");
-    }
-
-    HTableDescriptor subAppTableDescp = new HTableDescriptor(table);
-    HColumnDescriptor infoCF =
-        new HColumnDescriptor(SubApplicationColumnFamily.INFO.getBytes());
-    infoCF.setBloomFilterType(BloomType.ROWCOL);
-    subAppTableDescp.addFamily(infoCF);
-
-    HColumnDescriptor configCF =
-        new HColumnDescriptor(SubApplicationColumnFamily.CONFIGS.getBytes());
-    configCF.setBloomFilterType(BloomType.ROWCOL);
-    configCF.setBlockCacheEnabled(true);
-    subAppTableDescp.addFamily(configCF);
-
-    HColumnDescriptor metricsCF =
-        new HColumnDescriptor(SubApplicationColumnFamily.METRICS.getBytes());
-    subAppTableDescp.addFamily(metricsCF);
-    metricsCF.setBlockCacheEnabled(true);
-    // always keep 1 version (the latest)
-    metricsCF.setMinVersions(1);
-    metricsCF.setMaxVersions(
-        hbaseConf.getInt(METRICS_MAX_VERSIONS, DEFAULT_METRICS_MAX_VERSIONS));
-    metricsCF.setTimeToLive(hbaseConf.getInt(METRICS_TTL_CONF_NAME,
-        DEFAULT_METRICS_TTL));
-    subAppTableDescp.setRegionSplitPolicyClassName(
-        "org.apache.hadoop.hbase.regionserver.KeyPrefixRegionSplitPolicy");
-    subAppTableDescp.setValue("KeyPrefixRegionSplitPolicy.prefix_length",
-        TimelineHBaseSchemaConstants.USERNAME_SPLIT_KEY_PREFIX_LENGTH);
-    admin.createTable(subAppTableDescp,
-        TimelineHBaseSchemaConstants.getUsernameSplits());
-    LOG.info("Status of table creation for " + table.getNameAsString() + "="
-        + admin.tableExists(table));
-  }
-
-  /**
-   * @param metricsTTL time to live parameter for the metrics in this table.
-   * @param hbaseConf configuration in which to set the metrics TTL config
-   *          variable.
-   */
-  public void setMetricsTTL(int metricsTTL, Configuration hbaseConf) {
-    hbaseConf.setInt(METRICS_TTL_CONF_NAME, metricsTTL);
-  }
-
-}

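Since the relocated table class above documents its schema and creation logic in detail, here is a minimal sketch, with hypothetical table and family names, of the same HBase pattern: a bloom-filtered info column family plus a metrics family whose TTL and max-versions come from configuration.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HColumnDescriptor;
    import org.apache.hadoop.hbase.HTableDescriptor;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.regionserver.BloomType;

    public class MetricsTableSketch {
      public static void create(Admin admin, Configuration conf)
          throws Exception {
        TableName name = TableName.valueOf("demo.metricstable");
        if (admin.tableExists(name)) {
          return; // never disable or delete an existing table
        }
        HTableDescriptor desc = new HTableDescriptor(name);

        HColumnDescriptor info = new HColumnDescriptor("i".getBytes());
        info.setBloomFilterType(BloomType.ROWCOL);
        desc.addFamily(info);

        HColumnDescriptor metrics = new HColumnDescriptor("m".getBytes());
        metrics.setMinVersions(1);  // always keep the latest value
        metrics.setMaxVersions(conf.getInt("demo.metrics.max-versions", 10000));
        metrics.setTimeToLive(conf.getInt("demo.metrics.ttl", 2592000));
        desc.addFamily(metrics);

        admin.createTable(desc);
      }
    }
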
http://git-wip-us.apache.org/repos/asf/hadoop/blob/9af30d46/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/subapplication/package-info.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/subapplication/package-info.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/subapplication/package-info.java
deleted file mode 100644
index 52cc399..0000000
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/subapplication/package-info.java
+++ /dev/null
@@ -1,28 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-/**
- * Package org.apache.hadoop.yarn.server.timelineservice.storage.subapplication
- * contains classes related to implementation for subapplication table.
- */
-@InterfaceAudience.Private
-@InterfaceStability.Unstable
-package org.apache.hadoop.yarn.server.timelineservice.storage.subapplication;
-
-import org.apache.hadoop.classification.InterfaceAudience;
-import org.apache.hadoop.classification.InterfaceStability;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/9af30d46/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/test/java/org/apache/hadoop/yarn/server/timelineservice/storage/common/TestCustomApplicationIdConversion.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/test/java/org/apache/hadoop/yarn/server/timelineservice/storage/common/TestCustomApplicationIdConversion.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/test/java/org/apache/hadoop/yarn/server/timelineservice/storage/common/TestCustomApplicationIdConversion.java
deleted file mode 100644
index 73bc29e..0000000
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/test/java/org/apache/hadoop/yarn/server/timelineservice/storage/common/TestCustomApplicationIdConversion.java
+++ /dev/null
@@ -1,39 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.yarn.server.timelineservice.storage.common;
-
-import org.apache.hadoop.yarn.api.records.ApplicationId;
-import org.junit.Assert;
-import org.junit.Test;
-
-/**
- * Test for HBaseTimelineStorageUtils.convertApplicationIdToString(),
- * a custom conversion from ApplicationId to String that avoids the
- * incompatibility issue caused by mixing hadoop-common 2.5.1 and
- * hadoop-yarn-api 3.0. See YARN-6905.
- */
-public class TestCustomApplicationIdConversion {
-  @Test
-  public void testConvertAplicationIdToString() {
-    ApplicationId applicationId = ApplicationId.newInstance(0, 1);
-    String applicationIdStr =
-        HBaseTimelineStorageUtils.convertApplicationIdToString(applicationId);
-    Assert.assertEquals(applicationId,
-        ApplicationId.fromString(applicationIdStr));
-  }
-}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/9af30d46/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/test/java/org/apache/hadoop/yarn/server/timelineservice/storage/common/TestHBaseTimelineStorageUtils.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/test/java/org/apache/hadoop/yarn/server/timelineservice/storage/common/TestHBaseTimelineStorageUtils.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/test/java/org/apache/hadoop/yarn/server/timelineservice/storage/common/TestHBaseTimelineStorageUtils.java
deleted file mode 100644
index 402a89b..0000000
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/test/java/org/apache/hadoop/yarn/server/timelineservice/storage/common/TestHBaseTimelineStorageUtils.java
+++ /dev/null
@@ -1,33 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.yarn.server.timelineservice.storage.common;
-
-import org.junit.Test;
-
-/**
- * Unit tests for HBaseTimelineStorageUtils static methods.
- */
-public class TestHBaseTimelineStorageUtils {
-
-  @Test(expected=NullPointerException.class)
-  public void testGetTimelineServiceHBaseConfNullArgument() throws Exception {
-    HBaseTimelineStorageUtils.getTimelineServiceHBaseConf(null);
-  }
-
-}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/9af30d46/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/test/java/org/apache/hadoop/yarn/server/timelineservice/storage/common/TestKeyConverters.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/test/java/org/apache/hadoop/yarn/server/timelineservice/storage/common/TestKeyConverters.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/test/java/org/apache/hadoop/yarn/server/timelineservice/storage/common/TestKeyConverters.java
deleted file mode 100644
index 1bd363f..0000000
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/test/java/org/apache/hadoop/yarn/server/timelineservice/storage/common/TestKeyConverters.java
+++ /dev/null
@@ -1,134 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.yarn.server.timelineservice.storage.common;
-
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertNull;
-import static org.junit.Assert.assertTrue;
-
-import org.apache.hadoop.hbase.util.Bytes;
-import org.apache.hadoop.yarn.api.records.ApplicationId;
-import org.junit.Test;
-
-/**
- * Unit tests for key converters for various tables' row keys.
- *
- */
-public class TestKeyConverters {
-
-  @Test
-  public void testAppIdKeyConverter() {
-    AppIdKeyConverter appIdKeyConverter = new AppIdKeyConverter();
-    long currentTs = System.currentTimeMillis();
-    ApplicationId appId1 = ApplicationId.newInstance(currentTs, 1);
-    ApplicationId appId2 = ApplicationId.newInstance(currentTs, 2);
-    ApplicationId appId3 = ApplicationId.newInstance(currentTs + 300, 1);
-    String appIdStr1 = appId1.toString();
-    String appIdStr2 = appId2.toString();
-    String appIdStr3 = appId3.toString();
-    byte[] appIdBytes1 = appIdKeyConverter.encode(appIdStr1);
-    byte[] appIdBytes2 = appIdKeyConverter.encode(appIdStr2);
-    byte[] appIdBytes3 = appIdKeyConverter.encode(appIdStr3);
-    // App ids' should be encoded in a manner wherein descending order
-    // is maintained.
-    assertTrue(
-        "Ordering of app ids' is incorrect",
-        Bytes.compareTo(appIdBytes1, appIdBytes2) > 0
-            && Bytes.compareTo(appIdBytes1, appIdBytes3) > 0
-            && Bytes.compareTo(appIdBytes2, appIdBytes3) > 0);
-    String decodedAppId1 = appIdKeyConverter.decode(appIdBytes1);
-    String decodedAppId2 = appIdKeyConverter.decode(appIdBytes2);
-    String decodedAppId3 = appIdKeyConverter.decode(appIdBytes3);
-    assertTrue("Decoded app id is not same as the app id encoded",
-        appIdStr1.equals(decodedAppId1));
-    assertTrue("Decoded app id is not same as the app id encoded",
-        appIdStr2.equals(decodedAppId2));
-    assertTrue("Decoded app id is not same as the app id encoded",
-        appIdStr3.equals(decodedAppId3));
-  }
-
-  @Test
-  public void testEventColumnNameConverter() {
-    String eventId = "=foo_=eve=nt=";
-    byte[] valSepBytes = Bytes.toBytes(Separator.VALUES.getValue());
-    byte[] maxByteArr =
-        Bytes.createMaxByteArray(Bytes.SIZEOF_LONG - valSepBytes.length);
-    byte[] ts = Bytes.add(valSepBytes, maxByteArr);
-    Long eventTs = Bytes.toLong(ts);
-    byte[] byteEventColName =
-        new EventColumnName(eventId, eventTs, null).getColumnQualifier();
-    KeyConverter<EventColumnName> eventColumnNameConverter =
-        new EventColumnNameConverter();
-    EventColumnName eventColName =
-        eventColumnNameConverter.decode(byteEventColName);
-    assertEquals(eventId, eventColName.getId());
-    assertEquals(eventTs, eventColName.getTimestamp());
-    assertNull(eventColName.getInfoKey());
-
-    String infoKey = "f=oo_event_in=fo=_key";
-    byteEventColName =
-        new EventColumnName(eventId, eventTs, infoKey).getColumnQualifier();
-    eventColName = eventColumnNameConverter.decode(byteEventColName);
-    assertEquals(eventId, eventColName.getId());
-    assertEquals(eventTs, eventColName.getTimestamp());
-    assertEquals(infoKey, eventColName.getInfoKey());
-  }
-
-  @Test
-  public void testLongKeyConverter() {
-    LongKeyConverter longKeyConverter = new LongKeyConverter();
-    confirmLongKeyConverter(longKeyConverter, Long.MIN_VALUE);
-    confirmLongKeyConverter(longKeyConverter, -1234567890L);
-    confirmLongKeyConverter(longKeyConverter, -128L);
-    confirmLongKeyConverter(longKeyConverter, -127L);
-    confirmLongKeyConverter(longKeyConverter, -1L);
-    confirmLongKeyConverter(longKeyConverter, 0L);
-    confirmLongKeyConverter(longKeyConverter, 1L);
-    confirmLongKeyConverter(longKeyConverter, 127L);
-    confirmLongKeyConverter(longKeyConverter, 128L);
-    confirmLongKeyConverter(longKeyConverter, 1234567890L);
-    confirmLongKeyConverter(longKeyConverter, Long.MAX_VALUE);
-  }
-
-  private void confirmLongKeyConverter(LongKeyConverter longKeyConverter,
-      Long testValue) {
-    Long decoded = longKeyConverter.decode(longKeyConverter.encode(testValue));
-    assertEquals(testValue, decoded);
-  }
-
-  @Test
-  public void testStringKeyConverter() {
-    StringKeyConverter stringKeyConverter = new StringKeyConverter();
-    String phrase = "QuackAttack now!";
-
-    for (int i = 0; i < phrase.length(); i++) {
-      String sub = phrase.substring(i, phrase.length());
-      confirmStrignKeyConverter(stringKeyConverter, sub);
-      confirmStrignKeyConverter(stringKeyConverter, sub + sub);
-    }
-  }
-
-  private void confirmStrignKeyConverter(StringKeyConverter stringKeyConverter,
-      String testValue) {
-    String decoded =
-        stringKeyConverter.decode(stringKeyConverter.encode(testValue));
-    assertEquals(testValue, decoded);
-  }
-
-}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/9af30d46/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/test/java/org/apache/hadoop/yarn/server/timelineservice/storage/common/TestRowKeys.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/test/java/org/apache/hadoop/yarn/server/timelineservice/storage/common/TestRowKeys.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/test/java/org/apache/hadoop/yarn/server/timelineservice/storage/common/TestRowKeys.java
deleted file mode 100644
index 4770238..0000000
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/test/java/org/apache/hadoop/yarn/server/timelineservice/storage/common/TestRowKeys.java
+++ /dev/null
@@ -1,276 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.yarn.server.timelineservice.storage.common;
-
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertTrue;
-
-import org.apache.hadoop.hbase.util.Bytes;
-import org.apache.hadoop.yarn.api.records.ApplicationId;
-import org.apache.hadoop.yarn.api.records.timelineservice.TimelineEntity;
-import org.apache.hadoop.yarn.server.timelineservice.storage.application.ApplicationRowKey;
-import org.apache.hadoop.yarn.server.timelineservice.storage.application.ApplicationRowKeyPrefix;
-import org.apache.hadoop.yarn.server.timelineservice.storage.apptoflow.AppToFlowRowKey;
-import org.apache.hadoop.yarn.server.timelineservice.storage.entity.EntityRowKey;
-import org.apache.hadoop.yarn.server.timelineservice.storage.entity.EntityRowKeyPrefix;
-import org.apache.hadoop.yarn.server.timelineservice.storage.flow.FlowActivityRowKey;
-import org.apache.hadoop.yarn.server.timelineservice.storage.flow.FlowActivityRowKeyPrefix;
-import org.apache.hadoop.yarn.server.timelineservice.storage.flow.FlowRunRowKey;
-import org.apache.hadoop.yarn.server.timelineservice.storage.subapplication.SubApplicationRowKey;
-import org.junit.Test;
-
-
-/**
- * Class to test the row key structures for various tables.
- *
- */
-public class TestRowKeys {
-
-  private final static String QUALIFIER_SEP = Separator.QUALIFIERS.getValue();
-  private final static byte[] QUALIFIER_SEP_BYTES = Bytes
-      .toBytes(QUALIFIER_SEP);
-  private final static String CLUSTER = "cl" + QUALIFIER_SEP + "uster";
-  private final static String USER = QUALIFIER_SEP + "user";
-  private final static String SUB_APP_USER = QUALIFIER_SEP + "subAppUser";
-  private final static String FLOW_NAME = "dummy_" + QUALIFIER_SEP + "flow"
-      + QUALIFIER_SEP;
-  private final static Long FLOW_RUN_ID;
-  private final static String APPLICATION_ID;
-  static {
-    long runid = Long.MAX_VALUE - 900L;
-    byte[] longMaxByteArr = Bytes.toBytes(Long.MAX_VALUE);
-    byte[] byteArr = Bytes.toBytes(runid);
-    int sepByteLen = QUALIFIER_SEP_BYTES.length;
-    if (sepByteLen <= byteArr.length) {
-      for (int i = 0; i < sepByteLen; i++) {
-        byteArr[i] = (byte) (longMaxByteArr[i] - QUALIFIER_SEP_BYTES[i]);
-      }
-    }
-    FLOW_RUN_ID = Bytes.toLong(byteArr);
-    long clusterTs = System.currentTimeMillis();
-    byteArr = Bytes.toBytes(clusterTs);
-    if (sepByteLen <= byteArr.length) {
-      for (int i = 0; i < sepByteLen; i++) {
-        byteArr[byteArr.length - sepByteLen + i] =
-            (byte) (longMaxByteArr[byteArr.length - sepByteLen + i] -
-                QUALIFIER_SEP_BYTES[i]);
-      }
-    }
-    clusterTs = Bytes.toLong(byteArr);
-    int seqId = 222;
-    APPLICATION_ID = ApplicationId.newInstance(clusterTs, seqId).toString();
-  }
-
-  private static void verifyRowPrefixBytes(byte[] byteRowKeyPrefix) {
-    int sepLen = QUALIFIER_SEP_BYTES.length;
-    for (int i = 0; i < sepLen; i++) {
-      assertTrue(
-          "Row key prefix not encoded properly.",
-          byteRowKeyPrefix[byteRowKeyPrefix.length - sepLen + i] ==
-              QUALIFIER_SEP_BYTES[i]);
-    }
-  }
-
-  @Test
-  public void testApplicationRowKey() {
-    byte[] byteRowKey =
-        new ApplicationRowKey(CLUSTER, USER, FLOW_NAME, FLOW_RUN_ID,
-            APPLICATION_ID).getRowKey();
-    ApplicationRowKey rowKey = ApplicationRowKey.parseRowKey(byteRowKey);
-    assertEquals(CLUSTER, rowKey.getClusterId());
-    assertEquals(USER, rowKey.getUserId());
-    assertEquals(FLOW_NAME, rowKey.getFlowName());
-    assertEquals(FLOW_RUN_ID, rowKey.getFlowRunId());
-    assertEquals(APPLICATION_ID, rowKey.getAppId());
-
-    byte[] byteRowKeyPrefix =
-        new ApplicationRowKeyPrefix(CLUSTER, USER, FLOW_NAME, FLOW_RUN_ID)
-            .getRowKeyPrefix();
-    byte[][] splits =
-        Separator.QUALIFIERS.split(byteRowKeyPrefix,
-            new int[] {Separator.VARIABLE_SIZE, Separator.VARIABLE_SIZE,
-                Separator.VARIABLE_SIZE, Bytes.SIZEOF_LONG,
-                Separator.VARIABLE_SIZE});
-    assertEquals(5, splits.length);
-    assertEquals(0, splits[4].length);
-    assertEquals(FLOW_NAME,
-        Separator.QUALIFIERS.decode(Bytes.toString(splits[2])));
-    assertEquals(FLOW_RUN_ID,
-        (Long) LongConverter.invertLong(Bytes.toLong(splits[3])));
-    verifyRowPrefixBytes(byteRowKeyPrefix);
-
-    byteRowKeyPrefix =
-        new ApplicationRowKeyPrefix(CLUSTER, USER, FLOW_NAME).getRowKeyPrefix();
-    splits =
-        Separator.QUALIFIERS.split(byteRowKeyPrefix, new int[] {
-            Separator.VARIABLE_SIZE, Separator.VARIABLE_SIZE,
-            Separator.VARIABLE_SIZE, Separator.VARIABLE_SIZE });
-    assertEquals(4, splits.length);
-    assertEquals(0, splits[3].length);
-    assertEquals(FLOW_NAME,
-        Separator.QUALIFIERS.decode(Bytes.toString(splits[2])));
-    verifyRowPrefixBytes(byteRowKeyPrefix);
-  }
-
-  /**
-   * Tests the converters indirectly through the public methods of the
-   * corresponding rowkey.
-   */
-  @Test
-  public void testAppToFlowRowKey() {
-    byte[] byteRowKey = new AppToFlowRowKey(APPLICATION_ID).getRowKey();
-    AppToFlowRowKey rowKey = AppToFlowRowKey.parseRowKey(byteRowKey);
-    assertEquals(APPLICATION_ID, rowKey.getAppId());
-  }
-
-  @Test
-  public void testEntityRowKey() {
-    TimelineEntity entity = new TimelineEntity();
-    entity.setId("!ent!ity!!id!");
-    entity.setType("entity!Type");
-    entity.setIdPrefix(54321);
-
-    byte[] byteRowKey =
-        new EntityRowKey(CLUSTER, USER, FLOW_NAME, FLOW_RUN_ID, APPLICATION_ID,
-            entity.getType(), entity.getIdPrefix(),
-            entity.getId()).getRowKey();
-    EntityRowKey rowKey = EntityRowKey.parseRowKey(byteRowKey);
-    assertEquals(CLUSTER, rowKey.getClusterId());
-    assertEquals(USER, rowKey.getUserId());
-    assertEquals(FLOW_NAME, rowKey.getFlowName());
-    assertEquals(FLOW_RUN_ID, rowKey.getFlowRunId());
-    assertEquals(APPLICATION_ID, rowKey.getAppId());
-    assertEquals(entity.getType(), rowKey.getEntityType());
-    assertEquals(entity.getIdPrefix(), rowKey.getEntityIdPrefix().longValue());
-    assertEquals(entity.getId(), rowKey.getEntityId());
-
-    byte[] byteRowKeyPrefix =
-        new EntityRowKeyPrefix(CLUSTER, USER, FLOW_NAME, FLOW_RUN_ID,
-            APPLICATION_ID, entity.getType(), null, null)
-                .getRowKeyPrefix();
-    byte[][] splits =
-        Separator.QUALIFIERS.split(
-            byteRowKeyPrefix,
-            new int[] {Separator.VARIABLE_SIZE, Separator.VARIABLE_SIZE,
-                Separator.VARIABLE_SIZE, Bytes.SIZEOF_LONG,
-                AppIdKeyConverter.getKeySize(), Separator.VARIABLE_SIZE,
-                Bytes.SIZEOF_LONG, Separator.VARIABLE_SIZE });
-    assertEquals(7, splits.length);
-    assertEquals(APPLICATION_ID, new AppIdKeyConverter().decode(splits[4]));
-    assertEquals(entity.getType(),
-        Separator.QUALIFIERS.decode(Bytes.toString(splits[5])));
-    verifyRowPrefixBytes(byteRowKeyPrefix);
-
-    byteRowKeyPrefix =
-        new EntityRowKeyPrefix(CLUSTER, USER, FLOW_NAME, FLOW_RUN_ID,
-            APPLICATION_ID).getRowKeyPrefix();
-    splits =
-        Separator.QUALIFIERS.split(
-            byteRowKeyPrefix,
-            new int[] {Separator.VARIABLE_SIZE, Separator.VARIABLE_SIZE,
-                Separator.VARIABLE_SIZE, Bytes.SIZEOF_LONG,
-                AppIdKeyConverter.getKeySize(), Separator.VARIABLE_SIZE});
-    assertEquals(6, splits.length);
-    assertEquals(0, splits[5].length);
-    AppIdKeyConverter appIdKeyConverter = new AppIdKeyConverter();
-    assertEquals(APPLICATION_ID, appIdKeyConverter.decode(splits[4]));
-    verifyRowPrefixBytes(byteRowKeyPrefix);
-  }
-
-  @Test
-  public void testFlowActivityRowKey() {
-    Long ts = 1459900830000L;
-    Long dayTimestamp = HBaseTimelineStorageUtils.getTopOfTheDayTimestamp(ts);
-    byte[] byteRowKey =
-        new FlowActivityRowKey(CLUSTER, ts, USER, FLOW_NAME).getRowKey();
-    FlowActivityRowKey rowKey = FlowActivityRowKey.parseRowKey(byteRowKey);
-    assertEquals(CLUSTER, rowKey.getClusterId());
-    assertEquals(dayTimestamp, rowKey.getDayTimestamp());
-    assertEquals(USER, rowKey.getUserId());
-    assertEquals(FLOW_NAME, rowKey.getFlowName());
-
-    byte[] byteRowKeyPrefix =
-        new FlowActivityRowKeyPrefix(CLUSTER).getRowKeyPrefix();
-    byte[][] splits =
-        Separator.QUALIFIERS.split(byteRowKeyPrefix, new int[] {
-            Separator.VARIABLE_SIZE, Separator.VARIABLE_SIZE });
-    assertEquals(2, splits.length);
-    assertEquals(0, splits[1].length);
-    assertEquals(CLUSTER,
-        Separator.QUALIFIERS.decode(Bytes.toString(splits[0])));
-    verifyRowPrefixBytes(byteRowKeyPrefix);
-
-    byteRowKeyPrefix =
-        new FlowActivityRowKeyPrefix(CLUSTER, ts).getRowKeyPrefix();
-    splits =
-        Separator.QUALIFIERS.split(byteRowKeyPrefix,
-            new int[] {Separator.VARIABLE_SIZE, Bytes.SIZEOF_LONG,
-                Separator.VARIABLE_SIZE});
-    assertEquals(3, splits.length);
-    assertEquals(0, splits[2].length);
-    assertEquals(CLUSTER,
-        Separator.QUALIFIERS.decode(Bytes.toString(splits[0])));
-    assertEquals(ts,
-        (Long) LongConverter.invertLong(Bytes.toLong(splits[1])));
-    verifyRowPrefixBytes(byteRowKeyPrefix);
-  }
-
-  @Test
-  public void testFlowRunRowKey() {
-    byte[] byteRowKey =
-        new FlowRunRowKey(CLUSTER, USER, FLOW_NAME, FLOW_RUN_ID).getRowKey();
-    FlowRunRowKey rowKey = FlowRunRowKey.parseRowKey(byteRowKey);
-    assertEquals(CLUSTER, rowKey.getClusterId());
-    assertEquals(USER, rowKey.getUserId());
-    assertEquals(FLOW_NAME, rowKey.getFlowName());
-    assertEquals(FLOW_RUN_ID, rowKey.getFlowRunId());
-
-    byte[] byteRowKeyPrefix =
-        new FlowRunRowKey(CLUSTER, USER, FLOW_NAME, null).getRowKey();
-    byte[][] splits =
-        Separator.QUALIFIERS.split(byteRowKeyPrefix, new int[] {
-            Separator.VARIABLE_SIZE, Separator.VARIABLE_SIZE,
-            Separator.VARIABLE_SIZE, Separator.VARIABLE_SIZE });
-    assertEquals(4, splits.length);
-    assertEquals(0, splits[3].length);
-    assertEquals(FLOW_NAME,
-        Separator.QUALIFIERS.decode(Bytes.toString(splits[2])));
-    verifyRowPrefixBytes(byteRowKeyPrefix);
-  }
-
-  @Test
-  public void testSubAppRowKey() {
-    TimelineEntity entity = new TimelineEntity();
-    entity.setId("entity1");
-    entity.setType("DAG");
-    entity.setIdPrefix(54321);
-
-    byte[] byteRowKey =
-        new SubApplicationRowKey(SUB_APP_USER, CLUSTER,
-            entity.getType(), entity.getIdPrefix(),
-            entity.getId(), USER).getRowKey();
-    SubApplicationRowKey rowKey = SubApplicationRowKey.parseRowKey(byteRowKey);
-    assertEquals(CLUSTER, rowKey.getClusterId());
-    assertEquals(SUB_APP_USER, rowKey.getSubAppUserId());
-    assertEquals(entity.getType(), rowKey.getEntityType());
-    assertEquals(entity.getIdPrefix(), rowKey.getEntityIdPrefix().longValue());
-    assertEquals(entity.getId(), rowKey.getEntityId());
-    assertEquals(USER, rowKey.getUserId());
-  }
-
-}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/9af30d46/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/test/java/org/apache/hadoop/yarn/server/timelineservice/storage/common/TestRowKeysAsString.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/test/java/org/apache/hadoop/yarn/server/timelineservice/storage/common/TestRowKeysAsString.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/test/java/org/apache/hadoop/yarn/server/timelineservice/storage/common/TestRowKeysAsString.java
deleted file mode 100644
index 148cf56..0000000
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/test/java/org/apache/hadoop/yarn/server/timelineservice/storage/common/TestRowKeysAsString.java
+++ /dev/null
@@ -1,144 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.yarn.server.timelineservice.storage.common;
-
-import static org.junit.Assert.assertEquals;
-
-import org.apache.hadoop.yarn.api.records.ApplicationId;
-import org.apache.hadoop.yarn.api.records.timelineservice.TimelineEntity;
-import org.apache.hadoop.yarn.server.timelineservice.reader.TimelineReaderUtils;
-import org.apache.hadoop.yarn.server.timelineservice.storage.application.ApplicationRowKey;
-import org.apache.hadoop.yarn.server.timelineservice.storage.entity.EntityRowKey;
-import org.apache.hadoop.yarn.server.timelineservice.storage.flow.FlowActivityRowKey;
-import org.apache.hadoop.yarn.server.timelineservice.storage.flow.FlowRunRowKey;
-import org.apache.hadoop.yarn.server.timelineservice.storage.subapplication.SubApplicationRowKey;
-import org.junit.Test;
-
-/**
- * Test for row key as string.
- */
-public class TestRowKeysAsString {
-
-  private final static String CLUSTER =
-      "cl" + TimelineReaderUtils.DEFAULT_DELIMITER_CHAR + "uster"
-          + TimelineReaderUtils.DEFAULT_ESCAPE_CHAR;
-  private final static String USER =
-      TimelineReaderUtils.DEFAULT_ESCAPE_CHAR + "user";
-  private final static String SUB_APP_USER =
-      TimelineReaderUtils.DEFAULT_ESCAPE_CHAR + "subAppUser";
-
-  private final static String FLOW_NAME =
-      "dummy_" + TimelineReaderUtils.DEFAULT_DELIMITER_CHAR
-          + TimelineReaderUtils.DEFAULT_ESCAPE_CHAR + "flow"
-          + TimelineReaderUtils.DEFAULT_DELIMITER_CHAR;
-  private final static Long FLOW_RUN_ID = System.currentTimeMillis();
-  private final static String APPLICATION_ID =
-      ApplicationId.newInstance(System.currentTimeMillis(), 1).toString();
-
-  @Test(timeout = 10000)
-  public void testApplicationRow() {
-    String rowKeyAsString = new ApplicationRowKey(CLUSTER, USER, FLOW_NAME,
-        FLOW_RUN_ID, APPLICATION_ID).getRowKeyAsString();
-    ApplicationRowKey rowKey =
-        ApplicationRowKey.parseRowKeyFromString(rowKeyAsString);
-    assertEquals(CLUSTER, rowKey.getClusterId());
-    assertEquals(USER, rowKey.getUserId());
-    assertEquals(FLOW_NAME, rowKey.getFlowName());
-    assertEquals(FLOW_RUN_ID, rowKey.getFlowRunId());
-    assertEquals(APPLICATION_ID, rowKey.getAppId());
-  }
-
-  @Test(timeout = 10000)
-  public void testEntityRowKey() {
-    char del = TimelineReaderUtils.DEFAULT_DELIMITER_CHAR;
-    char esc = TimelineReaderUtils.DEFAULT_ESCAPE_CHAR;
-    String id = del + esc + "ent" + esc + del + "ity" + esc + del + esc + "id"
-        + esc + del + esc;
-    String type = "entity" + esc + del + esc + "Type";
-    TimelineEntity entity = new TimelineEntity();
-    entity.setId(id);
-    entity.setType(type);
-    entity.setIdPrefix(54321);
-
-    String rowKeyAsString =
-        new EntityRowKey(CLUSTER, USER, FLOW_NAME, FLOW_RUN_ID, APPLICATION_ID,
-            entity.getType(), entity.getIdPrefix(), entity.getId())
-                .getRowKeyAsString();
-    EntityRowKey rowKey = EntityRowKey.parseRowKeyFromString(rowKeyAsString);
-    assertEquals(CLUSTER, rowKey.getClusterId());
-    assertEquals(USER, rowKey.getUserId());
-    assertEquals(FLOW_NAME, rowKey.getFlowName());
-    assertEquals(FLOW_RUN_ID, rowKey.getFlowRunId());
-    assertEquals(APPLICATION_ID, rowKey.getAppId());
-    assertEquals(entity.getType(), rowKey.getEntityType());
-    assertEquals(entity.getIdPrefix(), rowKey.getEntityIdPrefix().longValue());
-    assertEquals(entity.getId(), rowKey.getEntityId());
-
-  }
-
-  @Test(timeout = 10000)
-  public void testFlowActivityRowKey() {
-    Long ts = 1459900830000L;
-    Long dayTimestamp = HBaseTimelineStorageUtils.getTopOfTheDayTimestamp(ts);
-    String rowKeyAsString = new FlowActivityRowKey(CLUSTER, ts, USER, FLOW_NAME)
-        .getRowKeyAsString();
-    FlowActivityRowKey rowKey =
-        FlowActivityRowKey.parseRowKeyFromString(rowKeyAsString);
-    assertEquals(CLUSTER, rowKey.getClusterId());
-    assertEquals(dayTimestamp, rowKey.getDayTimestamp());
-    assertEquals(USER, rowKey.getUserId());
-    assertEquals(FLOW_NAME, rowKey.getFlowName());
-  }
-
-  @Test(timeout = 10000)
-  public void testFlowRunRowKey() {
-    String rowKeyAsString =
-        new FlowRunRowKey(CLUSTER, USER, FLOW_NAME, FLOW_RUN_ID)
-            .getRowKeyAsString();
-    FlowRunRowKey rowKey = FlowRunRowKey.parseRowKeyFromString(rowKeyAsString);
-    assertEquals(CLUSTER, rowKey.getClusterId());
-    assertEquals(USER, rowKey.getUserId());
-    assertEquals(FLOW_NAME, rowKey.getFlowName());
-    assertEquals(FLOW_RUN_ID, rowKey.getFlowRunId());
-  }
-
-  @Test(timeout = 10000)
-  public void testSubApplicationRowKey() {
-    char del = TimelineReaderUtils.DEFAULT_DELIMITER_CHAR;
-    char esc = TimelineReaderUtils.DEFAULT_ESCAPE_CHAR;
-    String id = del + esc + "ent" + esc + del + "ity" + esc + del + esc + "id"
-        + esc + del + esc;
-    String type = "entity" + esc + del + esc + "Type";
-    TimelineEntity entity = new TimelineEntity();
-    entity.setId(id);
-    entity.setType(type);
-    entity.setIdPrefix(54321);
-
-    String rowKeyAsString = new SubApplicationRowKey(SUB_APP_USER, CLUSTER,
-        entity.getType(), entity.getIdPrefix(), entity.getId(), USER)
-            .getRowKeyAsString();
-    SubApplicationRowKey rowKey = SubApplicationRowKey
-        .parseRowKeyFromString(rowKeyAsString);
-    assertEquals(SUB_APP_USER, rowKey.getSubAppUserId());
-    assertEquals(CLUSTER, rowKey.getClusterId());
-    assertEquals(entity.getType(), rowKey.getEntityType());
-    assertEquals(entity.getIdPrefix(), rowKey.getEntityIdPrefix().longValue());
-    assertEquals(entity.getId(), rowKey.getEntityId());
-    assertEquals(USER, rowKey.getUserId());
-  }
-}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/9af30d46/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/test/java/org/apache/hadoop/yarn/server/timelineservice/storage/common/TestSeparator.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/test/java/org/apache/hadoop/yarn/server/timelineservice/storage/common/TestSeparator.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/test/java/org/apache/hadoop/yarn/server/timelineservice/storage/common/TestSeparator.java
deleted file mode 100644
index 7d37206..0000000
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/test/java/org/apache/hadoop/yarn/server/timelineservice/storage/common/TestSeparator.java
+++ /dev/null
@@ -1,215 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements. See the NOTICE file distributed with this
- * work for additional information regarding copyright ownership. The ASF
- * licenses this file to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations under
- * the License.
- */
-package org.apache.hadoop.yarn.server.timelineservice.storage.common;
-
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertNull;
-import static org.junit.Assert.assertTrue;
-import static org.junit.Assert.fail;
-
-import java.util.ArrayList;
-import java.util.Arrays;
-import java.util.List;
-
-import org.apache.hadoop.hbase.util.Bytes;
-import org.junit.Test;
-
-import com.google.common.collect.Iterables;
-
-public class TestSeparator {
-
-  private static String villain = "Dr. Heinz Doofenshmirtz";
-  private static String special =
-      ".   *   |   ?   +   \t   (   )   [   ]   {   }   ^   $  \\ \"  %";
-
-  /**
-   *
-   */
-  @Test
-  public void testEncodeDecodeString() {
-
-    for (Separator separator : Separator.values()) {
-      testEncodeDecode(separator, "");
-      testEncodeDecode(separator, " ");
-      testEncodeDecode(separator, "!");
-      testEncodeDecode(separator, "?");
-      testEncodeDecode(separator, "&");
-      testEncodeDecode(separator, "+");
-      testEncodeDecode(separator, "\t");
-      testEncodeDecode(separator, "Dr.");
-      testEncodeDecode(separator, "Heinz");
-      testEncodeDecode(separator, "Doofenshmirtz");
-      testEncodeDecode(separator, villain);
-      testEncodeDecode(separator, special);
-
-      assertNull(separator.encode(null));
-
-    }
-  }
-
-  private void testEncodeDecode(Separator separator, String token) {
-    String encoded = separator.encode(token);
-    String decoded = separator.decode(encoded);
-    String msg = "token:" + token + " separator:" + separator + ".";
-    assertEquals(msg, token, decoded);
-  }
-
-  @Test
-  public void testEncodeDecode() {
-    testEncodeDecode("Dr.", Separator.QUALIFIERS);
-    testEncodeDecode("Heinz", Separator.QUALIFIERS, Separator.QUALIFIERS);
-    testEncodeDecode("Doofenshmirtz", Separator.QUALIFIERS, null,
-        Separator.QUALIFIERS);
-    testEncodeDecode("&Perry", Separator.QUALIFIERS, Separator.VALUES, null);
-    testEncodeDecode("the ", Separator.QUALIFIERS, Separator.SPACE);
-    testEncodeDecode("Platypus...", (Separator) null);
-    testEncodeDecode("The what now ?!?", Separator.QUALIFIERS,
-        Separator.VALUES, Separator.SPACE);
-
-  }
-  @Test
-  public void testEncodedValues() {
-    testEncodeDecode("Double-escape %2$ and %9$ or %%2$ or %%3$, nor  %%%2$" +
-        "= no problem!",
-        Separator.QUALIFIERS, Separator.VALUES, Separator.SPACE, Separator.TAB);
-  }
-
-  @Test
-  public void testSplits() {
-    byte[] maxLongBytes = Bytes.toBytes(Long.MAX_VALUE);
-    byte[] maxIntBytes = Bytes.toBytes(Integer.MAX_VALUE);
-    for (Separator separator : Separator.values()) {
-      String str1 = "cl" + separator.getValue() + "us";
-      String str2 = separator.getValue() + "rst";
-      byte[] sepByteArr = Bytes.toBytes(separator.getValue());
-      byte[] longVal1Arr = Bytes.add(sepByteArr, Bytes.copy(maxLongBytes,
-          sepByteArr.length, Bytes.SIZEOF_LONG - sepByteArr.length));
-      byte[] intVal1Arr = Bytes.add(sepByteArr, Bytes.copy(maxIntBytes,
-          sepByteArr.length, Bytes.SIZEOF_INT - sepByteArr.length));
-      byte[] arr = separator.join(
-          Bytes.toBytes(separator.encode(str1)), longVal1Arr,
-          Bytes.toBytes(separator.encode(str2)), intVal1Arr);
-      int[] sizes = {Separator.VARIABLE_SIZE, Bytes.SIZEOF_LONG,
-          Separator.VARIABLE_SIZE, Bytes.SIZEOF_INT};
-      byte[][] splits = separator.split(arr, sizes);
-      assertEquals(4, splits.length);
-      assertEquals(str1, separator.decode(Bytes.toString(splits[0])));
-      assertEquals(Bytes.toLong(longVal1Arr), Bytes.toLong(splits[1]));
-      assertEquals(str2, separator.decode(Bytes.toString(splits[2])));
-      assertEquals(Bytes.toInt(intVal1Arr), Bytes.toInt(splits[3]));
-
-      longVal1Arr = Bytes.add(Bytes.copy(maxLongBytes, 0, Bytes.SIZEOF_LONG -
-          sepByteArr.length), sepByteArr);
-      intVal1Arr = Bytes.add(Bytes.copy(maxIntBytes, 0, Bytes.SIZEOF_INT -
-          sepByteArr.length), sepByteArr);
-      arr = separator.join(Bytes.toBytes(separator.encode(str1)), longVal1Arr,
-          Bytes.toBytes(separator.encode(str2)), intVal1Arr);
-      splits = separator.split(arr, sizes);
-      assertEquals(4, splits.length);
-      assertEquals(str1, separator.decode(Bytes.toString(splits[0])));
-      assertEquals(Bytes.toLong(longVal1Arr), Bytes.toLong(splits[1]));
-      assertEquals(str2, separator.decode(Bytes.toString(splits[2])));
-      assertEquals(Bytes.toInt(intVal1Arr), Bytes.toInt(splits[3]));
-
-      longVal1Arr = Bytes.add(sepByteArr, Bytes.copy(maxLongBytes,
-          sepByteArr.length, 4 - sepByteArr.length), sepByteArr);
-      longVal1Arr = Bytes.add(longVal1Arr, Bytes.copy(maxLongBytes, 4, 3 -
-              sepByteArr.length), sepByteArr);
-      arr = separator.join(Bytes.toBytes(separator.encode(str1)), longVal1Arr,
-          Bytes.toBytes(separator.encode(str2)), intVal1Arr);
-      splits = separator.split(arr, sizes);
-      assertEquals(4, splits.length);
-      assertEquals(str1, separator.decode(Bytes.toString(splits[0])));
-      assertEquals(Bytes.toLong(longVal1Arr), Bytes.toLong(splits[1]));
-      assertEquals(str2, separator.decode(Bytes.toString(splits[2])));
-      assertEquals(Bytes.toInt(intVal1Arr), Bytes.toInt(splits[3]));
-
-      arr = separator.join(Bytes.toBytes(separator.encode(str1)),
-          Bytes.toBytes(separator.encode(str2)), intVal1Arr, longVal1Arr);
-      int[] sizes1 = {Separator.VARIABLE_SIZE, Separator.VARIABLE_SIZE,
-          Bytes.SIZEOF_INT, Bytes.SIZEOF_LONG};
-      splits = separator.split(arr, sizes1);
-      assertEquals(4, splits.length);
-      assertEquals(str1, separator.decode(Bytes.toString(splits[0])));
-      assertEquals(str2, separator.decode(Bytes.toString(splits[1])));
-      assertEquals(Bytes.toInt(intVal1Arr), Bytes.toInt(splits[2]));
-      assertEquals(Bytes.toLong(longVal1Arr), Bytes.toLong(splits[3]));
-
-      try {
-        int[] sizes2 = {Separator.VARIABLE_SIZE, Separator.VARIABLE_SIZE,
-            Bytes.SIZEOF_INT, 7};
-        splits = separator.split(arr, sizes2);
-        fail("Exception should have been thrown.");
-      } catch (IllegalArgumentException e) {}
-
-      try {
-        int[] sizes2 = {Separator.VARIABLE_SIZE, Separator.VARIABLE_SIZE, 2,
-            Bytes.SIZEOF_LONG};
-        splits = separator.split(arr, sizes2);
-        fail("Exception should have been thrown.");
-      } catch (IllegalArgumentException e) {}
-    }
-  }
-
-  /**
-   * Simple test to encode and decode using the same separators and confirm that
-   * we end up with the same as what we started with.
-   *
-   * @param token
-   * @param separators
-   */
-  private static void testEncodeDecode(String token, Separator... separators) {
-    byte[] encoded = Separator.encode(token, separators);
-    String decoded = Separator.decode(encoded, separators);
-    assertEquals(token, decoded);
-  }
-
-  @Test
-  public void testJoinStripped() {
-    List<String> stringList = new ArrayList<String>(0);
-    stringList.add("nothing");
-
-    String joined = Separator.VALUES.joinEncoded(stringList);
-    Iterable<String> split = Separator.VALUES.splitEncoded(joined);
-    assertTrue(Iterables.elementsEqual(stringList, split));
-
-    stringList = new ArrayList<String>(3);
-    stringList.add("a");
-    stringList.add("b?");
-    stringList.add("c");
-
-    joined = Separator.VALUES.joinEncoded(stringList);
-    split = Separator.VALUES.splitEncoded(joined);
-    assertTrue(Iterables.elementsEqual(stringList, split));
-
-    String[] stringArray1 = {"else"};
-    joined = Separator.VALUES.joinEncoded(stringArray1);
-    split = Separator.VALUES.splitEncoded(joined);
-    assertTrue(Iterables.elementsEqual(Arrays.asList(stringArray1), split));
-
-    String[] stringArray2 = {"d", "e?", "f"};
-    joined = Separator.VALUES.joinEncoded(stringArray2);
-    split = Separator.VALUES.splitEncoded(joined);
-    assertTrue(Iterables.elementsEqual(Arrays.asList(stringArray2), split));
-
-    List<String> empty = new ArrayList<String>(0);
-    split = Separator.VALUES.splitEncoded(null);
-    assertTrue(Iterables.elementsEqual(empty, split));
-
-  }
-
-}




[47/50] [abbrv] hadoop git commit: YARN-7675. [UI2] Support loading pre-2.8 version /scheduler REST response for queue page. Contributed by Gergely Novák.

Posted by ha...@apache.org.
YARN-7675. [UI2] Support loading pre-2.8 version /scheduler REST response for queue page. Contributed by Gergely Novák.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/cc683952
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/cc683952
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/cc683952

Branch: refs/heads/HDFS-12996
Commit: cc683952d2c1730109497aa78dd53629e914d294
Parents: c36b4aa
Author: Sunil G <su...@apache.org>
Authored: Fri Feb 23 16:10:29 2018 +0530
Committer: Sunil G <su...@apache.org>
Committed: Fri Feb 23 16:10:29 2018 +0530

----------------------------------------------------------------------
 .../serializers/yarn-queue/capacity-queue.js    | 29 ++++++++++++++++----
 1 file changed, 24 insertions(+), 5 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/cc683952/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/serializers/yarn-queue/capacity-queue.js
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/serializers/yarn-queue/capacity-queue.js b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/serializers/yarn-queue/capacity-queue.js
index b171c6e..e838255 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/serializers/yarn-queue/capacity-queue.js
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/serializers/yarn-queue/capacity-queue.js
@@ -54,6 +54,28 @@ export default DS.JSONAPISerializer.extend({
         });
       }
 
+      var partitions = [];
+      var partitionMap = {};
+      if ("capacities" in payload) {
+        partitions = payload.capacities.queueCapacitiesByPartition.map(
+          cap => cap.partitionName || PARTITION_LABEL);
+        partitionMap = payload.capacities.queueCapacitiesByPartition.reduce((init, cap) => {
+          init[cap.partitionName || PARTITION_LABEL] = cap;
+          return init;
+        }, {});
+      } else {
+        partitions = [PARTITION_LABEL];
+        partitionMap[PARTITION_LABEL] = {
+          partitionName: "",
+          capacity: payload.capacity,
+          maxCapacity: payload.maxCapacity,
+          usedCapacity: payload.usedCapacity,
+          absoluteCapacity: 'absoluteCapacity' in payload ? payload.absoluteCapacity : payload.capacity,
+          absoluteMaxCapacity: 'absoluteMaxCapacity' in payload ? payload.absoluteMaxCapacity : payload.maxCapacity,
+          absoluteUsedCapacity: 'absoluteUsedCapacity' in payload ? payload.absoluteUsedCapacity : payload.usedCapacity,
+        };
+      }
+
       var fixedPayload = {
         id: id,
         type: primaryModelClass.modelName, // yarn-queue
@@ -74,11 +96,8 @@ export default DS.JSONAPISerializer.extend({
           numPendingApplications: payload.numPendingApplications,
           numActiveApplications: payload.numActiveApplications,
           resources: payload.resources,
-          partitions: payload.capacities.queueCapacitiesByPartition.map(cap => cap.partitionName || PARTITION_LABEL),
-          partitionMap: payload.capacities.queueCapacitiesByPartition.reduce((init, cap) => {
-            init[cap.partitionName || PARTITION_LABEL] = cap;
-            return init;
-          }, {}),
+          partitions: partitions,
+          partitionMap: partitionMap,
           type: "capacity",
         },
         // Relationships
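
Side note on the two payload shapes handled above: scheduler REST
responses from Hadoop 2.8 onwards carry a
capacities.queueCapacitiesByPartition array, while pre-2.8 responses only
expose the flat capacity/maxCapacity/usedCapacity fields, so the
serializer synthesizes a single default partition for them. Below is a
minimal Java sketch of the same fallback, assuming Jackson for JSON
access; the class name and partition label are hypothetical (the real
code is the Ember serializer above):

    import com.fasterxml.jackson.databind.JsonNode;
    import com.fasterxml.jackson.databind.ObjectMapper;
    import com.fasterxml.jackson.databind.node.ObjectNode;

    import java.util.HashMap;
    import java.util.Map;

    public class QueuePartitionFallback {
      // Hypothetical label used for the unnamed default partition.
      private static final String PARTITION_LABEL = "Default partition";

      /** Builds a partitionName -> capacities map from either payload shape. */
      static Map<String, JsonNode> partitionMap(JsonNode queue) {
        Map<String, JsonNode> map = new HashMap<>();
        if (queue.has("capacities")) {
          // 2.8+ shape: one entry per partition.
          for (JsonNode cap
              : queue.get("capacities").get("queueCapacitiesByPartition")) {
            String name = cap.path("partitionName").asText("");
            map.put(name.isEmpty() ? PARTITION_LABEL : name, cap);
          }
        } else {
          // Pre-2.8 shape: synthesize one default partition from the flat
          // fields, falling back to the plain values where the absolute
          // variants are missing, mirroring the serializer above.
          ObjectNode cap = new ObjectMapper().createObjectNode();
          cap.put("partitionName", "");
          cap.put("capacity", queue.path("capacity").asDouble());
          cap.put("maxCapacity", queue.path("maxCapacity").asDouble());
          cap.put("usedCapacity", queue.path("usedCapacity").asDouble());
          cap.put("absoluteCapacity", queue.path("absoluteCapacity")
              .asDouble(queue.path("capacity").asDouble()));
          cap.put("absoluteMaxCapacity", queue.path("absoluteMaxCapacity")
              .asDouble(queue.path("maxCapacity").asDouble()));
          cap.put("absoluteUsedCapacity", queue.path("absoluteUsedCapacity")
              .asDouble(queue.path("usedCapacity").asDouble()));
          map.put(PARTITION_LABEL, cap);
        }
        return map;
      }
    }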




[31/50] [abbrv] hadoop git commit: YARN-7732. Support Generic AM Simulator from SynthGenerator. (Contributed by Young Chen via curino)

Posted by ha...@apache.org.
http://git-wip-us.apache.org/repos/asf/hadoop/blob/84cea001/hadoop-tools/hadoop-sls/src/test/java/org/apache/hadoop/yarn/sls/TestSynthJobGeneration.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-sls/src/test/java/org/apache/hadoop/yarn/sls/TestSynthJobGeneration.java b/hadoop-tools/hadoop-sls/src/test/java/org/apache/hadoop/yarn/sls/TestSynthJobGeneration.java
index 2b1971a..794cd47 100644
--- a/hadoop-tools/hadoop-sls/src/test/java/org/apache/hadoop/yarn/sls/TestSynthJobGeneration.java
+++ b/hadoop-tools/hadoop-sls/src/test/java/org/apache/hadoop/yarn/sls/TestSynthJobGeneration.java
@@ -17,20 +17,25 @@
  */
 package org.apache.hadoop.yarn.sls;
 
+import org.apache.commons.math3.random.JDKRandomGenerator;
+import org.codehaus.jackson.map.JsonMappingException;
+import org.codehaus.jackson.map.ObjectMapper;
 import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.mapreduce.TaskType;
-import org.apache.hadoop.tools.rumen.TaskAttemptInfo;
 import org.apache.hadoop.yarn.sls.synthetic.SynthJob;
 import org.apache.hadoop.yarn.sls.synthetic.SynthTraceJobProducer;
-import org.apache.hadoop.mapreduce.MRJobConfig;
-import org.apache.log4j.Logger;
 import org.junit.Assert;
 import org.junit.Test;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 import java.io.IOException;
+import java.util.Arrays;
 
 import static org.junit.Assert.assertTrue;
 
+import static org.codehaus.jackson.JsonParser.Feature.INTERN_FIELD_NAMES;
+import static org.codehaus.jackson.map.DeserializationConfig.Feature.FAIL_ON_UNKNOWN_PROPERTIES;
+
 /**
  * Simple test class driving the {@code SynthTraceJobProducer}, and validating
  * jobs produced are within expected range.
@@ -38,10 +43,60 @@ import static org.junit.Assert.assertTrue;
 public class TestSynthJobGeneration {
 
   public final static Logger LOG =
-      Logger.getLogger(TestSynthJobGeneration.class);
+      LoggerFactory.getLogger(TestSynthJobGeneration.class);
 
   @Test
-  public void test() throws IllegalArgumentException, IOException {
+  public void testWorkloadGenerateTime()
+      throws IllegalArgumentException, IOException {
+
+    String workloadJson = "{\"job_classes\": [], \"time_distribution\":["
+        + "{\"time\": 0, \"weight\": 1}, " + "{\"time\": 30, \"weight\": 0},"
+        + "{\"time\": 60, \"weight\": 2}," + "{\"time\": 90, \"weight\": 1}"
+        + "]}";
+
+    ObjectMapper mapper = new ObjectMapper();
+    mapper.configure(INTERN_FIELD_NAMES, true);
+    mapper.configure(FAIL_ON_UNKNOWN_PROPERTIES, false);
+    SynthTraceJobProducer.Workload wl =
+        mapper.readValue(workloadJson, SynthTraceJobProducer.Workload.class);
+
+    JDKRandomGenerator rand = new JDKRandomGenerator();
+    rand.setSeed(0);
+
+    wl.init(rand);
+
+    int bucket0 = 0;
+    int bucket1 = 0;
+    int bucket2 = 0;
+    int bucket3 = 0;
+    for (int i = 0; i < 1000; ++i) {
+      long time = wl.generateSubmissionTime();
+      LOG.info("Generated time " + time);
+      if (time < 30) {
+        bucket0++;
+      } else if (time < 60) {
+        bucket1++;
+      } else if (time < 90) {
+        bucket2++;
+      } else {
+        bucket3++;
+      }
+    }
+
+    Assert.assertTrue(bucket0 > 0);
+    Assert.assertTrue(bucket1 == 0);
+    Assert.assertTrue(bucket2 > 0);
+    Assert.assertTrue(bucket3 > 0);
+    Assert.assertTrue(bucket2 > bucket0);
+    Assert.assertTrue(bucket2 > bucket3);
+
+    LOG.info("bucket0 {}, bucket1 {}, bucket2 {}, bucket3 {}", bucket0, bucket1,
+        bucket2, bucket3);
+
+  }
+
+  @Test
+  public void testMapReduce() throws IllegalArgumentException, IOException {
 
     Configuration conf = new Configuration();
 
@@ -50,47 +105,155 @@ public class TestSynthJobGeneration {
 
     SynthTraceJobProducer stjp = new SynthTraceJobProducer(conf);
 
+    LOG.info(stjp.toString());
+
     SynthJob js = (SynthJob) stjp.getNextJob();
 
     int jobCount = 0;
 
     while (js != null) {
-      LOG.info((jobCount++) + " " + js.getQueueName() + " -- "
-          + js.getJobClass().getClassName() + " (conf: "
-          + js.getJobConf().get(MRJobConfig.QUEUE_NAME) + ") " + " submission: "
-          + js.getSubmissionTime() + ", " + " duration: " + js.getDuration()
-          + " numMaps: " + js.getNumberMaps() + " numReduces: "
-          + js.getNumberReduces());
+      LOG.info(js.toString());
+      validateJob(js);
+      js = (SynthJob) stjp.getNextJob();
+      jobCount++;
+    }
 
+    Assert.assertEquals(stjp.getNumJobs(), jobCount);
+  }
+
+  @Test
+  public void testGeneric() throws IllegalArgumentException, IOException {
+    Configuration conf = new Configuration();
+
+    conf.set(SynthTraceJobProducer.SLS_SYNTHETIC_TRACE_FILE,
+        "src/test/resources/syn_generic.json");
+
+    SynthTraceJobProducer stjp = new SynthTraceJobProducer(conf);
+
+    LOG.info(stjp.toString());
+
+    SynthJob js = (SynthJob) stjp.getNextJob();
+
+    int jobCount = 0;
+
+    while (js != null) {
+      LOG.info(js.toString());
       validateJob(js);
       js = (SynthJob) stjp.getNextJob();
+      jobCount++;
     }
 
     Assert.assertEquals(stjp.getNumJobs(), jobCount);
   }
 
-  private void validateJob(SynthJob js) {
+  @Test
+  public void testStream() throws IllegalArgumentException, IOException {
+    Configuration conf = new Configuration();
 
-    assertTrue(js.getSubmissionTime() > 0);
-    assertTrue(js.getDuration() > 0);
-    assertTrue(js.getNumberMaps() >= 0);
-    assertTrue(js.getNumberReduces() >= 0);
-    assertTrue(js.getNumberMaps() + js.getNumberReduces() > 0);
-    assertTrue(js.getTotalSlotTime() >= 0);
+    conf.set(SynthTraceJobProducer.SLS_SYNTHETIC_TRACE_FILE,
+        "src/test/resources/syn_stream.json");
+
+    SynthTraceJobProducer stjp = new SynthTraceJobProducer(conf);
+
+    LOG.info(stjp.toString());
+
+    SynthJob js = (SynthJob) stjp.getNextJob();
+
+    int jobCount = 0;
+
+    while (js != null) {
+      LOG.info(js.toString());
+      validateJob(js);
+      js = (SynthJob) stjp.getNextJob();
+      jobCount++;
+    }
+
+    Assert.assertEquals(stjp.getNumJobs(), jobCount);
+  }
 
-    for (int i = 0; i < js.getNumberMaps(); i++) {
-      TaskAttemptInfo tai = js.getTaskAttemptInfo(TaskType.MAP, i, 0);
-      assertTrue(tai.getRuntime() > 0);
+  @Test
+  public void testSample() throws IOException {
+
+    ObjectMapper mapper = new ObjectMapper();
+    mapper.configure(INTERN_FIELD_NAMES, true);
+    mapper.configure(FAIL_ON_UNKNOWN_PROPERTIES, false);
+
+    JDKRandomGenerator rand = new JDKRandomGenerator();
+    rand.setSeed(0);
+
+    String valJson = "{\"val\" : 5 }";
+    SynthTraceJobProducer.Sample valSample =
+        mapper.readValue(valJson, SynthTraceJobProducer.Sample.class);
+    valSample.init(rand);
+    int val = valSample.getInt();
+    Assert.assertEquals(5, val);
+
+    String distJson = "{\"val\" : 5, \"std\" : 1 }";
+    SynthTraceJobProducer.Sample distSample =
+        mapper.readValue(distJson, SynthTraceJobProducer.Sample.class);
+    distSample.init(rand);
+    double dist = distSample.getDouble();
+    Assert.assertTrue(dist > 2 && dist < 8);
+
+    String normdistJson = "{\"val\" : 5, \"std\" : 1, \"dist\": \"NORM\" }";
+    SynthTraceJobProducer.Sample normdistSample =
+        mapper.readValue(normdistJson, SynthTraceJobProducer.Sample.class);
+    normdistSample.init(rand);
+    double normdist = normdistSample.getDouble();
+    Assert.assertTrue(normdist > 2 && normdist < 8);
+
+    String discreteJson = "{\"discrete\" : [2, 4, 6, 8]}";
+    SynthTraceJobProducer.Sample discreteSample =
+        mapper.readValue(discreteJson, SynthTraceJobProducer.Sample.class);
+    discreteSample.init(rand);
+    int discrete = discreteSample.getInt();
+    Assert.assertTrue(
+        Arrays.asList(new Integer[] {2, 4, 6, 8}).contains(discrete));
+
+    String discreteWeightsJson =
+        "{\"discrete\" : [2, 4, 6, 8], " + "\"weights\": [0, 0, 0, 1]}";
+    SynthTraceJobProducer.Sample discreteWeightsSample = mapper
+        .readValue(discreteWeightsJson, SynthTraceJobProducer.Sample.class);
+    discreteWeightsSample.init(rand);
+    int discreteWeights = discreteWeightsSample.getInt();
+    Assert.assertEquals(8, discreteWeights);
+
+    String invalidJson = "{\"val\" : 5, \"discrete\" : [2, 4, 6, 8], "
+        + "\"weights\": [0, 0, 0, 1]}";
+    try {
+      mapper.readValue(invalidJson, SynthTraceJobProducer.Sample.class);
+      Assert.fail();
+    } catch (JsonMappingException e) {
+      Assert.assertTrue(e.getMessage().startsWith("Instantiation of"));
     }
 
-    for (int i = 0; i < js.getNumberReduces(); i++) {
-      TaskAttemptInfo tai = js.getTaskAttemptInfo(TaskType.REDUCE, i, 0);
-      assertTrue(tai.getRuntime() > 0);
+    String invalidDistJson =
+        "{\"val\" : 5, \"std\" : 1, " + "\"dist\": \"INVALID\" }";
+    try {
+      mapper.readValue(invalidDistJson, SynthTraceJobProducer.Sample.class);
+      Assert.fail();
+    } catch (JsonMappingException e) {
+      Assert.assertTrue(e.getMessage().startsWith("Instantiation of"));
     }
+  }
+
+  private void validateJob(SynthJob js) {
+
+    assertTrue(js.getSubmissionTime() > 0);
+    assertTrue(js.getDuration() > 0);
+    assertTrue(js.getTotalSlotTime() >= 0);
 
     if (js.hasDeadline()) {
       assertTrue(js.getDeadline() > js.getSubmissionTime() + js.getDuration());
     }
 
+    assertTrue(js.getTasks().size() > 0);
+
+    for (SynthJob.SynthTask t : js.getTasks()) {
+      assertTrue(t.getType() != null);
+      assertTrue(t.getTime() > 0);
+      assertTrue(t.getMemory() > 0);
+      assertTrue(t.getVcores() > 0);
+    }
   }
 }
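
A note on the time_distribution format exercised by
testWorkloadGenerateTime above: each entry opens a bucket at its time
whose weight holds until the next entry's time, so a weight of 0 makes
that bucket unreachable (hence bucket1 == 0). Below is a minimal weighted
sampler consistent with those assertions, as a sketch only; it assumes
generateSubmissionTime picks a bucket proportionally to weight and then a
uniform time within it, and it guesses a width for the trailing bucket
(the SLS implementation may differ):

    import java.util.Random;

    public class TimeDistributionSketch {
      // Bucket start times and weights, matching the JSON in the test above.
      private final long[] times = {0, 30, 60, 90};
      private final double[] weights = {1, 0, 2, 1};
      private final Random rand = new Random(0);

      /** Picks a bucket proportionally to its weight, then a uniform time. */
      long generateSubmissionTime() {
        double total = 0;
        for (double w : weights) {
          total += w;
        }
        double r = rand.nextDouble() * total;
        for (int i = 0; i < weights.length; i++) {
          if (r < weights[i]) {
            // Assumed width of 30 for the last bucket.
            long end = (i + 1 < times.length) ? times[i + 1] : times[i] + 30;
            return times[i] + (long) (rand.nextDouble() * (end - times[i]));
          }
          r -= weights[i];
        }
        return times[times.length - 1];
      }
    }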

http://git-wip-us.apache.org/repos/asf/hadoop/blob/84cea001/hadoop-tools/hadoop-sls/src/test/java/org/apache/hadoop/yarn/sls/appmaster/TestAMSimulator.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-sls/src/test/java/org/apache/hadoop/yarn/sls/appmaster/TestAMSimulator.java b/hadoop-tools/hadoop-sls/src/test/java/org/apache/hadoop/yarn/sls/appmaster/TestAMSimulator.java
index a67845b..bfc7d0c 100644
--- a/hadoop-tools/hadoop-sls/src/test/java/org/apache/hadoop/yarn/sls/appmaster/TestAMSimulator.java
+++ b/hadoop-tools/hadoop-sls/src/test/java/org/apache/hadoop/yarn/sls/appmaster/TestAMSimulator.java
@@ -139,7 +139,7 @@ public class TestAMSimulator {
     String queue = "default";
     List<ContainerSimulator> containers = new ArrayList<>();
     app.init(1000, containers, rm, null, 0, 1000000L, "user1", queue, true,
-        appId, 0, SLSConfiguration.getAMContainerResource(conf));
+        appId, 0, SLSConfiguration.getAMContainerResource(conf), null);
     app.firstStep();
 
     verifySchedulerMetrics(appId);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/84cea001/hadoop-tools/hadoop-sls/src/test/resources/sls-runner.xml
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-sls/src/test/resources/sls-runner.xml b/hadoop-tools/hadoop-sls/src/test/resources/sls-runner.xml
index 2f076c2..344024a 100644
--- a/hadoop-tools/hadoop-sls/src/test/resources/sls-runner.xml
+++ b/hadoop-tools/hadoop-sls/src/test/resources/sls-runner.xml
@@ -45,6 +45,10 @@
     <name>yarn.sls.am.type.mapreduce</name>
     <value>org.apache.hadoop.yarn.sls.appmaster.MRAMSimulator</value>
   </property>
+  <property>
+    <name>yarn.sls.am.type.stream</name>
+    <value>org.apache.hadoop.yarn.sls.appmaster.StreamAMSimulator</value>
+  </property>
 
   <!-- Containers configuration -->
   <property>

http://git-wip-us.apache.org/repos/asf/hadoop/blob/84cea001/hadoop-tools/hadoop-sls/src/test/resources/syn.json
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-sls/src/test/resources/syn.json b/hadoop-tools/hadoop-sls/src/test/resources/syn.json
index 8479d23..c6e2c92 100644
--- a/hadoop-tools/hadoop-sls/src/test/resources/syn.json
+++ b/hadoop-tools/hadoop-sls/src/test/resources/syn.json
@@ -45,7 +45,7 @@
         },
         {
           "time": 60,
-          "jobs": 0
+          "weight": 0
         }
       ]
     }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/84cea001/hadoop-tools/hadoop-sls/src/test/resources/syn_generic.json
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-sls/src/test/resources/syn_generic.json b/hadoop-tools/hadoop-sls/src/test/resources/syn_generic.json
new file mode 100644
index 0000000..bde4cd0
--- /dev/null
+++ b/hadoop-tools/hadoop-sls/src/test/resources/syn_generic.json
@@ -0,0 +1,54 @@
+{
+  "description": "tiny jobs workload",
+  "num_nodes": 20,
+  "nodes_per_rack": 4,
+  "num_jobs": 10,
+  "rand_seed": 2,
+  "workloads": [
+    {
+      "workload_name": "tiny-test",
+      "workload_weight": 0.5,
+      "description": "Sort jobs",
+      "queue_name": "sls_queue_1",
+      "job_classes": [
+        {
+          "class_name": "class_1",
+          "user_name": "foobar",
+          "class_weight": 1.0,
+          "type": "mapreduce",
+          "deadline_factor": {"val": 10},
+          "duration": {"val": 60, "std": 5},
+          "reservation": {"val": 0.5},
+          "tasks":[
+            {
+              "type": "map",
+              "priority": 20,
+              "count": { "val": 5, "std": 1},
+              "time": {"val": 10, "std": 2},
+              "max_memory": {"val": 1024},
+              "max_vcores": {"val": 1}
+            },
+            {
+              "type": "reduce",
+              "priority": 10,
+              "count": { "val": 5, "std": 1},
+              "time": {"val": 20, "std": 4},
+              "max_memory": {"val": 2048},
+              "max_vcores": {"val": 2}
+            }
+          ]
+        }
+      ],
+      "time_distribution": [
+        {
+          "time": 1,
+          "weight": 100
+        },
+        {
+          "time": 60,
+          "weight": 0
+        }
+      ]
+    }
+  ]
+}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/84cea001/hadoop-tools/hadoop-sls/src/test/resources/syn_stream.json
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-sls/src/test/resources/syn_stream.json b/hadoop-tools/hadoop-sls/src/test/resources/syn_stream.json
new file mode 100644
index 0000000..a85065b
--- /dev/null
+++ b/hadoop-tools/hadoop-sls/src/test/resources/syn_stream.json
@@ -0,0 +1,46 @@
+{
+  "description": "stream workload",
+  "num_nodes": 20,
+  "nodes_per_rack": 4,
+  "num_jobs": 5,
+  "rand_seed": 2,
+  "workloads": [
+    {
+      "workload_name": "tiny-test",
+      "workload_weight": 1,
+      "description": "long lived streaming jobs",
+      "queue_name": "sls_queue_1",
+      "job_classes": [
+        {
+          "class_name": "class_1",
+          "user_name": "foobar",
+          "class_weight": 1.0,
+          "type": "stream",
+          "deadline_factor": {"val": 10},
+          "duration": {"val": 30, "std": 5},
+          "reservation": {"val": 0.5},
+          "tasks":[
+            {
+              "type": "stream",
+              "priority": 20,
+              "count": { "val": 2},
+              "time": {"val": 60000},
+              "max_memory": {"val": 4096},
+              "max_vcores": {"val": 4}
+            }
+          ]
+        }
+      ],
+      "time_distribution": [
+        {
+          "time": 1,
+          "weight": 100
+        },
+        {
+          "time": 2,
+          "weight": 0
+        }
+      ]
+    }
+  ]
+}




[24/50] [abbrv] hadoop git commit: YARN-7813. Capacity Scheduler Intra-queue Preemption should be configurable for each queue. Contributed by Eric Payne

Posted by ha...@apache.org.
YARN-7813. Capacity Scheduler Intra-queue Preemption should be configurable for each queue. Contributed by Eric Payne


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/94972150
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/94972150
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/94972150

Branch: refs/heads/HDFS-12996
Commit: 949721508467968d5f46170353716ad04349cd6f
Parents: b9a429b
Author: Jason Lowe <jl...@apache.org>
Authored: Mon Feb 19 14:06:28 2018 -0600
Committer: Jason Lowe <jl...@apache.org>
Committed: Mon Feb 19 14:06:28 2018 -0600

----------------------------------------------------------------------
 .../hadoop/yarn/api/records/QueueInfo.java      | 35 +++++++
 .../src/main/proto/yarn_protos.proto            |  1 +
 .../apache/hadoop/yarn/client/cli/QueueCLI.java |  6 ++
 .../hadoop/yarn/client/ProtocolHATestBase.java  |  3 +-
 .../hadoop/yarn/client/cli/TestYarnCLI.java     | 96 ++++++++++++++++++--
 .../api/records/impl/pb/QueueInfoPBImpl.java    | 13 +++
 .../hadoop/yarn/api/TestPBImplRecords.java      |  2 +-
 .../capacity/IntraQueueCandidatesSelector.java  |  4 +-
 .../scheduler/capacity/AbstractCSQueue.java     | 70 ++++++++++++--
 .../scheduler/capacity/CSQueue.java             | 16 +++-
 .../CapacitySchedulerConfiguration.java         | 15 +++
 .../webapp/CapacitySchedulerPage.java           |  5 +-
 .../dao/CapacitySchedulerLeafQueueInfo.java     |  6 ++
 .../TestConfigurationMutationACLPolicies.java   |  2 +-
 .../TestSchedulerApplicationAttempt.java        |  2 +-
 .../scheduler/capacity/TestLeafQueue.java       |  2 +-
 .../webapp/TestRMWebServicesCapacitySched.java  |  2 +-
 .../src/site/markdown/CapacityScheduler.md      |  3 +-
 18 files changed, 257 insertions(+), 26 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/94972150/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/QueueInfo.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/QueueInfo.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/QueueInfo.java
index 897b442..57ea9bf 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/QueueInfo.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/QueueInfo.java
@@ -94,6 +94,26 @@ public abstract class QueueInfo {
     return queueInfo;
   }
 
+  @Private
+  @Unstable
+  public static QueueInfo newInstance(String queueName, float capacity,
+      float maximumCapacity, float currentCapacity,
+      List<QueueInfo> childQueues, List<ApplicationReport> applications,
+      QueueState queueState, Set<String> accessibleNodeLabels,
+      String defaultNodeLabelExpression, QueueStatistics queueStatistics,
+      boolean preemptionDisabled,
+      Map<String, QueueConfigurations> queueConfigurations,
+      boolean intraQueuePreemptionDisabled) {
+    QueueInfo queueInfo = QueueInfo.newInstance(queueName, capacity,
+        maximumCapacity, currentCapacity,
+        childQueues, applications,
+        queueState, accessibleNodeLabels,
+        defaultNodeLabelExpression, queueStatistics,
+        preemptionDisabled, queueConfigurations);
+    queueInfo.setIntraQueuePreemptionDisabled(intraQueuePreemptionDisabled);
+    return queueInfo;
+  }
+
   /**
    * Get the <em>name</em> of the queue.
    * @return <em>name</em> of the queue
@@ -261,4 +281,19 @@ public abstract class QueueInfo {
   @Unstable
   public abstract void setQueueConfigurations(
       Map<String, QueueConfigurations> queueConfigurations);
+
+
+  /**
+   * Get the intra-queue preemption status of the queue.
+   * @return null if the property is not set in the proto;
+   *         otherwise, the intra-queue preemption status of the queue
+   */
+  @Public
+  @Stable
+  public abstract Boolean getIntraQueuePreemptionDisabled();
+
+  @Private
+  @Unstable
+  public abstract void setIntraQueuePreemptionDisabled(
+      boolean intraQueuePreemptionDisabled);
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/94972150/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/proto/yarn_protos.proto
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/proto/yarn_protos.proto b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/proto/yarn_protos.proto
index d573638..6ca800a 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/proto/yarn_protos.proto
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/proto/yarn_protos.proto
@@ -563,6 +563,7 @@ message QueueInfoProto {
   optional QueueStatisticsProto queueStatistics = 10;
   optional bool preemptionDisabled = 11;
   repeated QueueConfigurationsMapProto queueConfigurationsMap = 12;
+  optional bool intraQueuePreemptionDisabled = 13;
 }
 
 message QueueConfigurationsProto {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/94972150/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/cli/QueueCLI.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/cli/QueueCLI.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/cli/QueueCLI.java
index 330b081..2c3dfd0 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/cli/QueueCLI.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/cli/QueueCLI.java
@@ -158,5 +158,11 @@ public class QueueCLI extends YarnCLI {
       writer.print("\tPreemption : ");
       writer.println(preemptStatus ? "disabled" : "enabled");
     }
+
+    Boolean intraQueuePreemption = queueInfo.getIntraQueuePreemptionDisabled();
+    if (intraQueuePreemption != null) {
+      writer.print("\tIntra-queue Preemption : ");
+      writer.println(intraQueuePreemption ? "disabled" : "enabled");
+    }
   }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/94972150/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/ProtocolHATestBase.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/ProtocolHATestBase.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/ProtocolHATestBase.java
index 54537ce..7937b15 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/ProtocolHATestBase.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/ProtocolHATestBase.java
@@ -665,7 +665,8 @@ public abstract class ProtocolHATestBase extends ClientBaseWithFixes {
 
     public QueueInfo createFakeQueueInfo() {
       return QueueInfo.newInstance("root", 100f, 100f, 50f, null,
-          createFakeAppReports(), QueueState.RUNNING, null, null, null, false);
+          createFakeAppReports(), QueueState.RUNNING, null, null, null, false,
+          null, false);
     }
 
     public List<QueueUserACLInfo> createFakeQueueUserACLInfoList() {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/94972150/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/cli/TestYarnCLI.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/cli/TestYarnCLI.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/cli/TestYarnCLI.java
index fdd3fc8..1f6488d 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/cli/TestYarnCLI.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/cli/TestYarnCLI.java
@@ -1712,7 +1712,8 @@ public class TestYarnCLI {
     nodeLabels.add("GPU");
     nodeLabels.add("JDK_7");
     QueueInfo queueInfo = QueueInfo.newInstance("queueA", 0.4f, 0.8f, 0.5f,
-        null, null, QueueState.RUNNING, nodeLabels, "GPU", null, false, null);
+        null, null, QueueState.RUNNING, nodeLabels, "GPU", null, false, null,
+        false);
     when(client.getQueueInfo(any(String.class))).thenReturn(queueInfo);
     int result = cli.run(new String[] { "-status", "queueA" });
     assertEquals(0, result);
@@ -1728,13 +1729,14 @@ public class TestYarnCLI {
     pw.println("\tDefault Node Label expression : " + "GPU");
     pw.println("\tAccessible Node Labels : " + "JDK_7,GPU");
     pw.println("\tPreemption : " + "enabled");
+    pw.println("\tIntra-queue Preemption : " + "enabled");
     pw.close();
     String queueInfoStr = baos.toString("UTF-8");
     Assert.assertEquals(queueInfoStr, sysOutStream.toString());
   }
 
   @Test
-  public void testGetQueueInfoPreemptionEnabled() throws Exception {
+  public void testGetQueueInfoOverrideIntraQueuePreemption() throws Exception {
     CapacitySchedulerConfiguration conf = new CapacitySchedulerConfiguration();
     ReservationSystemTestUtil.setupQueueConfiguration(conf);
     conf.setClass(YarnConfiguration.RM_SCHEDULER, CapacityScheduler.class,
@@ -1743,9 +1745,80 @@ public class TestYarnCLI {
     conf.set(YarnConfiguration.RM_SCHEDULER_MONITOR_POLICIES,
         "org.apache.hadoop.yarn.server.resourcemanager.monitor.capacity."
         + "ProportionalCapacityPreemptionPolicy");
+    // Turn on cluster-wide intra-queue preemption
+    conf.setBoolean(
+        CapacitySchedulerConfiguration.INTRAQUEUE_PREEMPTION_ENABLED, true);
+    // Disable intra-queue preemption for all queues
+    conf.setBoolean(CapacitySchedulerConfiguration.PREFIX
+        + "root.intra-queue-preemption.disable_preemption", true);
+    // Enable intra-queue preemption for the a1 queue
+    conf.setBoolean(CapacitySchedulerConfiguration.PREFIX
+        + "root.a.a1.intra-queue-preemption.disable_preemption", false);
+    MiniYARNCluster cluster =
+        new MiniYARNCluster("testGetQueueInfoOverrideIntraQueuePreemption",
+            2, 1, 1);
+
+    YarnClient yarnClient = null;
+    try {
+      cluster.init(conf);
+      cluster.start();
+      final Configuration yarnConf = cluster.getConfig();
+      yarnClient = YarnClient.createYarnClient();
+      yarnClient.init(yarnConf);
+      yarnClient.start();
+
+      QueueCLI cli = new QueueCLI();
+      cli.setClient(yarnClient);
+      cli.setSysOutPrintStream(sysOut);
+      cli.setSysErrPrintStream(sysErr);
+      sysOutStream.reset();
+      // Get status for the root.a queue
+      int result = cli.run(new String[] { "-status", "a" });
+      assertEquals(0, result);
+      String queueStatusOut = sysOutStream.toString();
+      Assert.assertTrue(queueStatusOut
+          .contains("\tPreemption : enabled"));
+      // Intra-queue preemption is disabled at the "root.a" queue level
+      Assert.assertTrue(queueStatusOut
+          .contains("Intra-queue Preemption : disabled"));
+      cli = new QueueCLI();
+      cli.setClient(yarnClient);
+      cli.setSysOutPrintStream(sysOut);
+      cli.setSysErrPrintStream(sysErr);
+      sysOutStream.reset();
+      // Get status for the root.a.a1 queue
+      result = cli.run(new String[] { "-status", "a1" });
+      assertEquals(0, result);
+      queueStatusOut = sysOutStream.toString();
+      Assert.assertTrue(queueStatusOut
+          .contains("\tPreemption : enabled"));
+      // Intra-queue preemption is enabled at the "root.a.a1" queue level
+      Assert.assertTrue(queueStatusOut
+          .contains("Intra-queue Preemption : enabled"));
+    } finally {
+      // clean-up
+      if (yarnClient != null) {
+        yarnClient.stop();
+      }
+      cluster.stop();
+      cluster.close();
+    }
+  }
+
+  @Test
+  public void testGetQueueInfoPreemptionEnabled() throws Exception {
+    CapacitySchedulerConfiguration conf = new CapacitySchedulerConfiguration();
+    ReservationSystemTestUtil.setupQueueConfiguration(conf);
+    conf.setClass(YarnConfiguration.RM_SCHEDULER, CapacityScheduler.class,
+        ResourceScheduler.class);
     conf.setBoolean(YarnConfiguration.RM_SCHEDULER_ENABLE_MONITORS, true);
+    conf.set(YarnConfiguration.RM_SCHEDULER_MONITOR_POLICIES,
+        "org.apache.hadoop.yarn.server.resourcemanager.monitor.capacity."
+        + "ProportionalCapacityPreemptionPolicy");
+    conf.setBoolean(
+        CapacitySchedulerConfiguration.INTRAQUEUE_PREEMPTION_ENABLED, true);
     MiniYARNCluster cluster =
-        new MiniYARNCluster("testReservationAPIs", 2, 1, 1);
+        new MiniYARNCluster("testGetQueueInfoPreemptionEnabled", 2, 1, 1);
 
     YarnClient yarnClient = null;
     try {
@@ -1763,8 +1836,11 @@ public class TestYarnCLI {
       sysOutStream.reset();
       int result = cli.run(new String[] { "-status", "a1" });
       assertEquals(0, result);
-      Assert.assertTrue(sysOutStream.toString()
-          .contains("Preemption : enabled"));
+      String queueStatusOut = sysOutStream.toString();
+      Assert.assertTrue(queueStatusOut
+          .contains("\tPreemption : enabled"));
+      Assert.assertTrue(queueStatusOut
+          .contains("Intra-queue Preemption : enabled"));
     } finally {
       // clean-up
       if (yarnClient != null) {
@@ -1804,8 +1880,11 @@ public class TestYarnCLI {
       sysOutStream.reset();
       int result = cli.run(new String[] { "-status", "a1" });
       assertEquals(0, result);
-      Assert.assertTrue(sysOutStream.toString()
-          .contains("Preemption : disabled"));
+      String queueStatusOut = sysOutStream.toString();
+      Assert.assertTrue(queueStatusOut
+          .contains("\tPreemption : disabled"));
+      Assert.assertTrue(queueStatusOut
+          .contains("Intra-queue Preemption : disabled"));
     }
   }
   
@@ -1813,7 +1892,7 @@ public class TestYarnCLI {
   public void testGetQueueInfoWithEmptyNodeLabel() throws Exception {
     QueueCLI cli = createAndGetQueueCLI();
     QueueInfo queueInfo = QueueInfo.newInstance("queueA", 0.4f, 0.8f, 0.5f,
-        null, null, QueueState.RUNNING, null, null, null, true, null);
+        null, null, QueueState.RUNNING, null, null, null, true, null, true);
     when(client.getQueueInfo(any(String.class))).thenReturn(queueInfo);
     int result = cli.run(new String[] { "-status", "queueA" });
     assertEquals(0, result);
@@ -1830,6 +1909,7 @@ public class TestYarnCLI {
         + NodeLabel.DEFAULT_NODE_LABEL_PARTITION);
     pw.println("\tAccessible Node Labels : ");
     pw.println("\tPreemption : " + "disabled");
+    pw.println("\tIntra-queue Preemption : " + "disabled");
     pw.close();
     String queueInfoStr = baos.toString("UTF-8");
     Assert.assertEquals(queueInfoStr, sysOutStream.toString());

http://git-wip-us.apache.org/repos/asf/hadoop/blob/94972150/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/QueueInfoPBImpl.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/QueueInfoPBImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/QueueInfoPBImpl.java
index 1d2a6dd..f735139 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/QueueInfoPBImpl.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/QueueInfoPBImpl.java
@@ -500,4 +500,17 @@ public class QueueInfoPBImpl extends QueueInfo {
     this.queueConfigurations.putAll(queueConfigurations);
   }
 
+  @Override
+  public Boolean getIntraQueuePreemptionDisabled() {
+    QueueInfoProtoOrBuilder p = viaProto ? proto : builder;
+    return (p.hasIntraQueuePreemptionDisabled()) ? p
+        .getIntraQueuePreemptionDisabled() : null;
+  }
+
+  @Override
+  public void setIntraQueuePreemptionDisabled(
+      boolean intraQueuePreemptionDisabled) {
+    maybeInitBuilder();
+    builder.setIntraQueuePreemptionDisabled(intraQueuePreemptionDisabled);
+  }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/94972150/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/api/TestPBImplRecords.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/api/TestPBImplRecords.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/api/TestPBImplRecords.java
index c9a3b22..dfa0c88 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/api/TestPBImplRecords.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/api/TestPBImplRecords.java
@@ -410,7 +410,7 @@ public class TestPBImplRecords extends BasePBImplRecordsTest {
     // it is recursive(has sub queues)
     typeValueCache.put(QueueInfo.class, QueueInfo.newInstance("root", 1.0f,
         1.0f, 0.1f, null, null, QueueState.RUNNING, ImmutableSet.of("x", "y"),
-        "x && y", null, false));
+        "x && y", null, false, null, false));
     generateByNewInstance(QueueStatistics.class);
     generateByNewInstance(QueueUserACLInfo.class);
     generateByNewInstance(YarnClusterMetrics.class);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/94972150/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/IntraQueueCandidatesSelector.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/IntraQueueCandidatesSelector.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/IntraQueueCandidatesSelector.java
index 44fa736..5b6932e 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/IntraQueueCandidatesSelector.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/IntraQueueCandidatesSelector.java
@@ -114,8 +114,8 @@ public class IntraQueueCandidatesSelector extends PreemptionCandidatesSelector {
           continue;
         }
 
-        // Don't preempt if disabled for this queue.
-        if (leafQueue.getPreemptionDisabled()) {
+        // Don't preempt if intra-queue preemption is disabled for this queue.
+        if (leafQueue.getIntraQueuePreemptionDisabled()) {
           continue;
         }
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/94972150/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/AbstractCSQueue.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/AbstractCSQueue.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/AbstractCSQueue.java
index 9afbdd5..651d0e9 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/AbstractCSQueue.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/AbstractCSQueue.java
@@ -97,6 +97,9 @@ public abstract class AbstractCSQueue implements CSQueue {
       new HashMap<AccessType, AccessControlList>();
   volatile boolean reservationsContinueLooking;
   private volatile boolean preemptionDisabled;
+  // Indicates if the intra-queue preemption setting is ever disabled within the
+  // hierarchy of this queue.
+  private boolean intraQueuePreemptionDisabledInHierarchy;
 
   // Track resource usage-by-label like used-resource/pending-resource, etc.
   volatile ResourceUsage queueUsage;
@@ -405,6 +408,8 @@ public abstract class AbstractCSQueue implements CSQueue {
 
       this.preemptionDisabled = isQueueHierarchyPreemptionDisabled(this,
           configuration);
+      this.intraQueuePreemptionDisabledInHierarchy =
+          isIntraQueueHierarchyPreemptionDisabled(this, configuration);
 
       this.priority = configuration.getQueuePriority(
           getQueuePath());
@@ -613,6 +618,8 @@ public abstract class AbstractCSQueue implements CSQueue {
     queueInfo.setCurrentCapacity(getUsedCapacity());
     queueInfo.setQueueStatistics(getQueueStatistics());
     queueInfo.setPreemptionDisabled(preemptionDisabled);
+    queueInfo.setIntraQueuePreemptionDisabled(
+        getIntraQueuePreemptionDisabled());
     queueInfo.setQueueConfigurations(getQueueConfigurations());
     return queueInfo;
   }
@@ -735,6 +742,16 @@ public abstract class AbstractCSQueue implements CSQueue {
   public boolean getPreemptionDisabled() {
     return preemptionDisabled;
   }
+
+  @Private
+  public boolean getIntraQueuePreemptionDisabled() {
+    return intraQueuePreemptionDisabledInHierarchy || preemptionDisabled;
+  }
+
+  @Private
+  public boolean getIntraQueuePreemptionDisabledInHierarchy() {
+    return intraQueuePreemptionDisabledInHierarchy;
+  }
   
   @Private
   public QueueCapacities getQueueCapacities() {
@@ -757,12 +774,14 @@ public abstract class AbstractCSQueue implements CSQueue {
   }
 
   /**
-   * The specified queue is preemptable if system-wide preemption is turned on
-   * unless any queue in the <em>qPath</em> hierarchy has explicitly turned
-   * preemption off.
-   * NOTE: Preemptability is inherited from a queue's parent.
-   * 
-   * @return true if queue has preemption disabled, false otherwise
+   * The specified queue is cross-queue preemptable if system-wide cross-queue
+   * preemption is turned on unless any queue in the <em>qPath</em> hierarchy
+   * has explicitly turned cross-queue preemption off.
+   * NOTE: Cross-queue preemptability is inherited from a queue's parent.
+   *
+   * @param q queue to check preemption state
+   * @param configuration capacity scheduler config
+   * @return true if queue has cross-queue preemption disabled, false otherwise
    */
   private boolean isQueueHierarchyPreemptionDisabled(CSQueue q,
       CapacitySchedulerConfiguration configuration) {
@@ -790,7 +809,44 @@ public abstract class AbstractCSQueue implements CSQueue {
     return configuration.getPreemptionDisabled(q.getQueuePath(),
                                         parentQ.getPreemptionDisabled());
   }
-  
+
+  /**
+   * The specified queue is intra-queue preemptable if
+   * 1) system-wide intra-queue preemption is turned on
+   * 2) no queue in the <em>qPath</em> hierarchy has explicitly turned off intra
+   *    queue preemption.
+   * NOTE: Intra-queue preemptability is inherited from a queue's parent.
+   *
+   * @param q queue to check intra-queue preemption state
+   * @param configuration capacity scheduler config
+   * @return true if queue has intra-queue preemption disabled, false otherwise
+   */
+  private boolean isIntraQueueHierarchyPreemptionDisabled(CSQueue q,
+      CapacitySchedulerConfiguration configuration) {
+    boolean systemWideIntraQueuePreemption =
+        csContext.getConfiguration().getBoolean(
+            CapacitySchedulerConfiguration.INTRAQUEUE_PREEMPTION_ENABLED,
+            CapacitySchedulerConfiguration
+            .DEFAULT_INTRAQUEUE_PREEMPTION_ENABLED);
+    // Intra-queue preemption is disabled for this queue if the system-wide
+    // intra-queue preemption flag is false
+    if (!systemWideIntraQueuePreemption) return true;
+
+    // Check if this is the root queue and the root queue's intra-queue
+    // preemption disable switch is set
+    CSQueue parentQ = q.getParent();
+    if (parentQ == null) {
+      return configuration
+          .getIntraQueuePreemptionDisabled(q.getQueuePath(), false);
+    }
+
+    // At this point, the master preemption switch is enabled down to this
+    // queue's level. Determine whether or not intra-queue preemption is enabled
+    // down to this queue's level and return that value.
+    return configuration.getIntraQueuePreemptionDisabled(q.getQueuePath(),
+        parentQ.getIntraQueuePreemptionDisabledInHierarchy());
+  }
+
   private Resource getCurrentLimitResource(String nodePartition,
       Resource clusterResource, ResourceLimits currentResourceLimits,
       SchedulingMode schedulingMode) {
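
Putting the two hierarchy walks above together: intra-queue preemption is
effectively disabled for a queue when the cluster-wide switch is off, when
cross-queue preemption is disabled for it, or when any queue from the root
down to it sets the disable flag. A small sketch using the same
configuration keys the YARN-7813 tests set; the queue names are
illustrative:

    import org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.CapacitySchedulerConfiguration;

    public class IntraQueuePreemptionConfigSketch {
      public static void main(String[] args) {
        CapacitySchedulerConfiguration conf =
            new CapacitySchedulerConfiguration();
        // Cluster-wide master switch for intra-queue preemption.
        conf.setBoolean(
            CapacitySchedulerConfiguration.INTRAQUEUE_PREEMPTION_ENABLED, true);
        // Disable intra-queue preemption for the whole hierarchy under root...
        conf.setBoolean(CapacitySchedulerConfiguration.PREFIX
            + "root.intra-queue-preemption.disable_preemption", true);
        // ...then re-enable it for the leaf queue root.a.a1 only.
        conf.setBoolean(CapacitySchedulerConfiguration.PREFIX
            + "root.a.a1.intra-queue-preemption.disable_preemption", false);
        // Effective state: root.a inherits "disabled"; root.a.a1 overrides
        // back to "enabled" (provided cross-queue preemption stays enabled).
      }
    }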

http://git-wip-us.apache.org/repos/asf/hadoop/blob/94972150/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CSQueue.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CSQueue.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CSQueue.java
index 5dd307c..3963dc0 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CSQueue.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CSQueue.java
@@ -276,7 +276,21 @@ public interface CSQueue extends SchedulerQueue<CSQueue> {
    * @return true if <em>disable_preemption</em> is set, false if not
    */
   public boolean getPreemptionDisabled();
-  
+
+  /**
+   * Check whether intra-queue preemption is disabled for this queue.
+   * @return true if either intra-queue preemption or inter-queue preemption
+   * is disabled for this queue, false if neither is disabled.
+   */
+  public boolean getIntraQueuePreemptionDisabled();
+
+  /**
+   * Determines whether or not the intra-queue preemption disabled switch is set
+   * at any level in this queue's hierarchy.
+   * @return state of the intra-queue preemption switch at this queue level
+   */
+  public boolean getIntraQueuePreemptionDisabledInHierarchy();
+
   /**
    * Get QueueCapacities of this queue
    * @return queueCapacities

http://git-wip-us.apache.org/repos/asf/hadoop/blob/94972150/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacitySchedulerConfiguration.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacitySchedulerConfiguration.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacitySchedulerConfiguration.java
index e609be9..bdd30b9 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacitySchedulerConfiguration.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacitySchedulerConfiguration.java
@@ -1211,6 +1211,21 @@ public class CapacitySchedulerConfiguration extends ReservationSchedulerConfigur
   }
 
   /**
+   * Indicates whether intra-queue preemption is disabled on the specified queue.
+   *
+   * @param queue queue path to query
+   * @param defaultVal used as default if the property is not set in the
+   * configuration
+   * @return true if intra-queue preemption is disabled on the queue, false otherwise
+   */
+  public boolean getIntraQueuePreemptionDisabled(String queue,
+      boolean defaultVal) {
+    return
+        getBoolean(getQueuePrefix(queue) + INTRA_QUEUE_PREEMPTION_CONFIG_PREFIX
+            + QUEUE_PREEMPTION_DISABLED, defaultVal);
+  }
+
+  /**
    * Get configured node labels in a given queuePath
    */
   public Set<String> getConfiguredNodeLabels(String queuePath) {

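For illustration, the full property name this getter reads is assembled from the queue prefix plus the intra-queue prefix and the shared disable_preemption suffix. The literal values below are inferred from the CapacityScheduler.md hunk later in this commit, and root.a is a hypothetical queue path; this assumes hadoop-common on the classpath:

import org.apache.hadoop.conf.Configuration;

public class IntraQueueKeyDemo {
  public static void main(String[] args) {
    String key = "yarn.scheduler.capacity.root.a"
        + ".intra-queue-preemption.disable_preemption";
    Configuration conf = new Configuration(false);
    conf.setBoolean(key, true);
    // The second argument plays the role of defaultVal in the getter above.
    System.out.println(conf.getBoolean(key, false)); // prints true
  }
}
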
http://git-wip-us.apache.org/repos/asf/hadoop/blob/94972150/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/CapacitySchedulerPage.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/CapacitySchedulerPage.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/CapacitySchedulerPage.java
index 7f025a7..ed2f64e 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/CapacitySchedulerPage.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/CapacitySchedulerPage.java
@@ -200,7 +200,10 @@ class CapacitySchedulerPage extends RmView {
           __("Configured User Limit Factor:", lqinfo.getUserLimitFactor()).
           __("Accessible Node Labels:", StringUtils.join(",", lqinfo.getNodeLabels())).
           __("Ordering Policy: ", lqinfo.getOrderingPolicyInfo()).
-          __("Preemption:", lqinfo.getPreemptionDisabled() ? "disabled" : "enabled").
+          __("Preemption:",
+              lqinfo.getPreemptionDisabled() ? "disabled" : "enabled").
+          __("Intra-queue Preemption:", lqinfo.getIntraQueuePreemptionDisabled()
+                  ? "disabled" : "enabled").
           __("Default Node Label Expression:",
               lqinfo.getDefaultNodeLabelExpression() == null
                   ? NodeLabel.DEFAULT_NODE_LABEL_PARTITION

http://git-wip-us.apache.org/repos/asf/hadoop/blob/94972150/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/CapacitySchedulerLeafQueueInfo.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/CapacitySchedulerLeafQueueInfo.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/CapacitySchedulerLeafQueueInfo.java
index b5f4e79..a53e921 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/CapacitySchedulerLeafQueueInfo.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/CapacitySchedulerLeafQueueInfo.java
@@ -49,6 +49,7 @@ public class CapacitySchedulerLeafQueueInfo extends CapacitySchedulerQueueInfo {
   protected ResourceInfo usedAMResource;
   protected ResourceInfo userAMResourceLimit;
   protected boolean preemptionDisabled;
+  protected boolean intraQueuePreemptionDisabled;
   protected String defaultNodeLabelExpression;
   protected int defaultPriority;
   protected boolean isAutoCreatedLeafQueue;
@@ -72,6 +73,7 @@ public class CapacitySchedulerLeafQueueInfo extends CapacitySchedulerQueueInfo {
     AMResourceLimit = new ResourceInfo(q.getAMResourceLimit());
     usedAMResource = new ResourceInfo(q.getQueueResourceUsage().getAMUsed());
     preemptionDisabled = q.getPreemptionDisabled();
+    intraQueuePreemptionDisabled = q.getIntraQueuePreemptionDisabled();
     orderingPolicyInfo = q.getOrderingPolicy().getInfo();
     defaultNodeLabelExpression = q.getDefaultNodeLabelExpression();
     defaultPriority = q.getDefaultApplicationPriority().getPriority();
@@ -150,6 +152,10 @@ public class CapacitySchedulerLeafQueueInfo extends CapacitySchedulerQueueInfo {
   public boolean getPreemptionDisabled() {
     return preemptionDisabled;
   }
+
+  public boolean getIntraQueuePreemptionDisabled() {
+    return intraQueuePreemptionDisabled;
+  }
   
   public String getOrderingPolicyInfo() {
     return orderingPolicyInfo;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/94972150/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/TestConfigurationMutationACLPolicies.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/TestConfigurationMutationACLPolicies.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/TestConfigurationMutationACLPolicies.java
index 398e909..9a23c1d 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/TestConfigurationMutationACLPolicies.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/TestConfigurationMutationACLPolicies.java
@@ -67,7 +67,7 @@ public class TestConfigurationMutationACLPolicies {
   private void mockQueue(String queueName, MutableConfScheduler scheduler)
       throws IOException {
     QueueInfo queueInfo = QueueInfo.newInstance(queueName, 0, 0, 0, null, null,
-        null, null, null, null, false);
+        null, null, null, null, false, null, false);
     when(scheduler.getQueueInfo(eq(queueName), anyBoolean(), anyBoolean()))
         .thenReturn(queueInfo);
     Queue queue = mock(Queue.class);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/94972150/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/TestSchedulerApplicationAttempt.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/TestSchedulerApplicationAttempt.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/TestSchedulerApplicationAttempt.java
index fa16eff..17f9d23 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/TestSchedulerApplicationAttempt.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/TestSchedulerApplicationAttempt.java
@@ -165,7 +165,7 @@ public class TestSchedulerApplicationAttempt {
   private Queue createQueue(String name, Queue parent, float capacity) {
     QueueMetrics metrics = QueueMetrics.forQueue(name, parent, false, conf);
     QueueInfo queueInfo = QueueInfo.newInstance(name, capacity, 1.0f, 0, null,
-        null, QueueState.RUNNING, null, "", null, false);
+        null, QueueState.RUNNING, null, "", null, false, null, false);
     ActiveUsersManager activeUsersManager = new ActiveUsersManager(metrics);
     Queue queue = mock(Queue.class);
     when(queue.getMetrics()).thenReturn(metrics);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/94972150/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestLeafQueue.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestLeafQueue.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestLeafQueue.java
index c45bdb4..04bb791 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestLeafQueue.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestLeafQueue.java
@@ -4103,7 +4103,7 @@ public class TestLeafQueue {
       float absCap, Resource res) {
     CSQueueMetrics metrics = CSQueueMetrics.forQueue(name, parent, false, cs.getConf());
     QueueInfo queueInfo = QueueInfo.newInstance(name, capacity, 1.0f, 0, null,
-        null, QueueState.RUNNING, null, "", null, false);
+        null, QueueState.RUNNING, null, "", null, false, null, false);
     ActiveUsersManager activeUsersManager = new ActiveUsersManager(metrics);
     AbstractCSQueue queue = mock(AbstractCSQueue.class);
     when(queue.getMetrics()).thenReturn(metrics);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/94972150/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServicesCapacitySched.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServicesCapacitySched.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServicesCapacitySched.java
index edf0652..86a3943 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServicesCapacitySched.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServicesCapacitySched.java
@@ -357,7 +357,7 @@ public class TestRMWebServicesCapacitySched extends JerseyTestBase {
     int numExpectedElements = 20;
     boolean isParentQueue = true;
     if (!info.has("queues")) {
-      numExpectedElements = 34;
+      numExpectedElements = 35;
       isParentQueue = false;
     }
     assertEquals("incorrect number of elements", numExpectedElements, info.length());

http://git-wip-us.apache.org/repos/asf/hadoop/blob/94972150/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/CapacityScheduler.md
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/CapacityScheduler.md b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/CapacityScheduler.md
index 87cfd39..4ecc97a 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/CapacityScheduler.md
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/CapacityScheduler.md
@@ -236,6 +236,7 @@ The following configuration parameters can be configured in yarn-site.xml to con
 | Property | Description |
 |:---- |:---- |
 | `yarn.scheduler.capacity.<queue-path>.disable_preemption` | This configuration can be set to `true` to selectively disable preemption of application containers submitted to a given queue. This property applies only when system wide preemption is enabled by configuring `yarn.resourcemanager.scheduler.monitor.enable` to *true* and `yarn.resourcemanager.scheduler.monitor.policies` to *ProportionalCapacityPreemptionPolicy*. If this property is not set for a queue, then the property value is inherited from the queue's parent. Default value is false.
+| `yarn.scheduler.capacity.<queue-path>.intra-queue-preemption.disable_preemption` | This configuration can be set to *true* to selectively disable intra-queue preemption of application containers submitted to a given queue. This property applies only when system wide preemption is enabled by configuring `yarn.resourcemanager.scheduler.monitor.enable` to *true*, `yarn.resourcemanager.scheduler.monitor.policies` to *ProportionalCapacityPreemptionPolicy*, and `yarn.resourcemanager.monitor.capacity.preemption.intra-queue-preemption.enabled` to *true*. If this property is not set for a queue, then the property value is inherited from the queue's parent. Default value is *false*.
 
 ###Reservation Properties
 
@@ -477,4 +478,4 @@ Updating a Container (Experimental - API may change in the future)
   
   The **DECREASE_RESOURCE** and **DEMOTE_EXECUTION_TYPE** container updates are automatic - the AM does not explicitly have to ask the NM to decrease the resources of the container. The other update types require the AM to explicitly ask the NM to update the container.
   
-  If the **yarn.resourcemanager.auto-update.containers** configuration parameter is set to **true** (false by default), The RM will ensure that all container updates are automatic.  
\ No newline at end of file
+  If the **yarn.resourcemanager.auto-update.containers** configuration parameter is set to **true** (false by default), the RM will ensure that all container updates are automatic.

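For reference, the new switch documented above would be set per queue in capacity-scheduler.xml just like the existing disable_preemption switch; root.a is a hypothetical queue path:

<property>
  <name>yarn.scheduler.capacity.root.a.intra-queue-preemption.disable_preemption</name>
  <value>true</value>
</property>
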



[25/50] [abbrv] hadoop git commit: HADOOP-15070. add test to verify FileSystem and paths differentiate on user info. Contributed by Steve Loughran.

Posted by ha...@apache.org.
HADOOP-15070. add test to verify FileSystem and paths differentiate on user info.
Contributed by Steve Loughran.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/1d37cf67
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/1d37cf67
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/1d37cf67

Branch: refs/heads/HDFS-12996
Commit: 1d37cf675c42f59fab3c7d14d1bad384e4180cbd
Parents: 9497215
Author: Steve Loughran <st...@apache.org>
Authored: Mon Feb 19 20:43:40 2018 +0000
Committer: Steve Loughran <st...@apache.org>
Committed: Mon Feb 19 20:43:40 2018 +0000

----------------------------------------------------------------------
 .../apache/hadoop/fs/TestFileSystemCaching.java | 233 +++++++++----------
 1 file changed, 107 insertions(+), 126 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/1d37cf67/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestFileSystemCaching.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestFileSystemCaching.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestFileSystemCaching.java
index 69ef71e..b3c3847 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestFileSystemCaching.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestFileSystemCaching.java
@@ -18,22 +18,20 @@
 
 package org.apache.hadoop.fs;
 
-import static org.junit.Assert.assertSame;
-import static org.junit.Assert.assertNotSame;
-
 import java.io.IOException;
 import java.net.URI;
 import java.net.URISyntaxException;
 
 import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.security.token.Token;
 import org.apache.hadoop.security.token.TokenIdentifier;
+
 import org.junit.Test;
 import java.security.PrivilegedExceptionAction;
 import java.util.concurrent.Semaphore;
 
+import static org.apache.hadoop.test.LambdaTestUtils.intercept;
 import static org.junit.Assert.*;
 import static org.mockito.Mockito.*;
 
@@ -42,14 +40,13 @@ public class TestFileSystemCaching {
 
   @Test
   public void testCacheEnabled() throws Exception {
-    Configuration conf = new Configuration();
-    conf.set("fs.cachedfile.impl", FileSystem.getFileSystemClass("file", null).getName());
+    Configuration conf = newConf();
     FileSystem fs1 = FileSystem.get(new URI("cachedfile://a"), conf);
     FileSystem fs2 = FileSystem.get(new URI("cachedfile://a"), conf);
     assertSame(fs1, fs2);
   }
 
-  static class DefaultFs extends LocalFileSystem {
+  private static class DefaultFs extends LocalFileSystem {
     URI uri;
     @Override
     public void initialize(URI uri, Configuration conf) {
@@ -67,43 +64,30 @@ public class TestFileSystemCaching {
     conf.set("fs.defaultfs.impl", DefaultFs.class.getName());
     final URI defaultUri = URI.create("defaultfs://host");
     FileSystem.setDefaultUri(conf, defaultUri);
-    FileSystem fs = null;
-    
+
     // sanity check default fs
     final FileSystem defaultFs = FileSystem.get(conf);
     assertEquals(defaultUri, defaultFs.getUri());
     
     // has scheme, no auth
-    fs = FileSystem.get(URI.create("defaultfs:/"), conf);
-    assertSame(defaultFs, fs);
-    fs = FileSystem.get(URI.create("defaultfs:///"), conf);
-    assertSame(defaultFs, fs);
+    assertSame(defaultFs, FileSystem.get(URI.create("defaultfs:/"), conf));
+    assertSame(defaultFs, FileSystem.get(URI.create("defaultfs:///"), conf));
     
     // has scheme, same auth
-    fs = FileSystem.get(URI.create("defaultfs://host"), conf);
-    assertSame(defaultFs, fs);
+    assertSame(defaultFs, FileSystem.get(URI.create("defaultfs://host"), conf));
     
     // has scheme, different auth
-    fs = FileSystem.get(URI.create("defaultfs://host2"), conf);
-    assertNotSame(defaultFs, fs);
+    assertNotSame(defaultFs,
+        FileSystem.get(URI.create("defaultfs://host2"), conf));
     
     // no scheme, no auth
-    fs = FileSystem.get(URI.create("/"), conf);
-    assertSame(defaultFs, fs);
+    assertSame(defaultFs, FileSystem.get(URI.create("/"), conf));
     
     // no scheme, same auth
-    try {
-      fs = FileSystem.get(URI.create("//host"), conf);
-      fail("got fs with auth but no scheme");
-    } catch (UnsupportedFileSystemException e) {
-    }
-
-    // no scheme, different auth
-    try {
-      fs = FileSystem.get(URI.create("//host2"), conf);
-      fail("got fs with auth but no scheme");
-    } catch (UnsupportedFileSystemException e) {
-    }
+    intercept(UnsupportedFileSystemException.class,
+        () -> FileSystem.get(URI.create("//host"), conf));
+    intercept(UnsupportedFileSystemException.class,
+        () -> FileSystem.get(URI.create("//host2"), conf));
   }
   
   public static class InitializeForeverFileSystem extends LocalFileSystem {
@@ -132,9 +116,7 @@ public class TestFileSystemCaching {
          "TestFileSystemCaching$InitializeForeverFileSystem");
         try {
           FileSystem.get(new URI("localfs1://a"), conf);
-        } catch (IOException e) {
-          e.printStackTrace();
-        } catch (URISyntaxException e) {
+        } catch (IOException | URISyntaxException e) {
           e.printStackTrace();
         }
       }
@@ -162,31 +144,15 @@ public class TestFileSystemCaching {
   @SuppressWarnings("unchecked")
   @Test
   public <T extends TokenIdentifier> void testCacheForUgi() throws Exception {
-    final Configuration conf = new Configuration();
-    conf.set("fs.cachedfile.impl", FileSystem.getFileSystemClass("file", null).getName());
+    final Configuration conf = newConf();
     UserGroupInformation ugiA = UserGroupInformation.createRemoteUser("foo");
     UserGroupInformation ugiB = UserGroupInformation.createRemoteUser("bar");
-    FileSystem fsA = ugiA.doAs(new PrivilegedExceptionAction<FileSystem>() {
-      @Override
-      public FileSystem run() throws Exception {
-        return FileSystem.get(new URI("cachedfile://a"), conf);
-      }
-    });
-    FileSystem fsA1 = ugiA.doAs(new PrivilegedExceptionAction<FileSystem>() {
-      @Override
-      public FileSystem run() throws Exception {
-        return FileSystem.get(new URI("cachedfile://a"), conf);
-      }
-    });
+    FileSystem fsA = getCachedFS(ugiA, conf);
+    FileSystem fsA1 = getCachedFS(ugiA, conf);
     //Since the UGIs are the same, we should have the same filesystem for both
     assertSame(fsA, fsA1);
     
-    FileSystem fsB = ugiB.doAs(new PrivilegedExceptionAction<FileSystem>() {
-      @Override
-      public FileSystem run() throws Exception {
-        return FileSystem.get(new URI("cachedfile://a"), conf);
-      }
-    });
+    FileSystem fsB = getCachedFS(ugiB, conf);
     //Since the UGIs are different, we should end up with different filesystems
     //corresponding to the two UGIs
     assertNotSame(fsA, fsB);
@@ -194,47 +160,56 @@ public class TestFileSystemCaching {
     Token<T> t1 = mock(Token.class);
     UserGroupInformation ugiA2 = UserGroupInformation.createRemoteUser("foo");
     
-    fsA = ugiA2.doAs(new PrivilegedExceptionAction<FileSystem>() {
-      @Override
-      public FileSystem run() throws Exception {
-        return FileSystem.get(new URI("cachedfile://a"), conf);
-      }
-    });
+    fsA = getCachedFS(ugiA2, conf);
     // Although the users in the UGI are same, they have different subjects
     // and so are different.
     assertNotSame(fsA, fsA1);
     
     ugiA.addToken(t1);
     
-    fsA = ugiA.doAs(new PrivilegedExceptionAction<FileSystem>() {
-      @Override
-      public FileSystem run() throws Exception {
-        return FileSystem.get(new URI("cachedfile://a"), conf);
-      }
-    });
+    fsA = getCachedFS(ugiA, conf);
     // Make sure that different UGI's with the same subject lead to the same
     // file system.
     assertSame(fsA, fsA1);
   }
-  
+
+  /**
+   * Get the cached filesystem for "cachedfile://a" for the supplied user
+   * @param ugi user
+   * @param conf configuration
+   * @return the filesystem
+   * @throws IOException failure to get/init
+   * @throws InterruptedException part of the signature of UGI.doAs()
+   */
+  private FileSystem getCachedFS(UserGroupInformation ugi, Configuration conf)
+      throws IOException, InterruptedException {
+    return ugi.doAs((PrivilegedExceptionAction<FileSystem>)
+            () -> FileSystem.get(new URI("cachedfile://a"), conf));
+  }
+
   @Test
   public void testUserFS() throws Exception {
-    final Configuration conf = new Configuration();
-    conf.set("fs.cachedfile.impl", FileSystem.getFileSystemClass("file", null).getName());
+    final Configuration conf = newConf();
     FileSystem fsU1 = FileSystem.get(new URI("cachedfile://a"), conf, "bar");
     FileSystem fsU2 = FileSystem.get(new URI("cachedfile://a"), conf, "foo");
     
     assertNotSame(fsU1, fsU2);   
   }
-  
+
+  private Configuration newConf() throws IOException {
+    final Configuration conf = new Configuration();
+    conf.set("fs.cachedfile.impl",
+        FileSystem.getFileSystemClass("file", null).getName());
+    return conf;
+  }
+
   @Test
   public void testFsUniqueness() throws Exception {
-    final Configuration conf = new Configuration();
-    conf.set("fs.cachedfile.impl", FileSystem.getFileSystemClass("file", null).getName());
+    final Configuration conf = newConf();
     // multiple invocations of FileSystem.get return the same object.
     FileSystem fs1 = FileSystem.get(conf);
     FileSystem fs2 = FileSystem.get(conf);
-    assertTrue(fs1 == fs2);
+    assertSame(fs1, fs2);
 
     // multiple invocations of FileSystem.newInstance return different objects
     fs1 = FileSystem.newInstance(new URI("cachedfile://a"), conf, "bar");
@@ -246,33 +221,17 @@ public class TestFileSystemCaching {
   
   @Test
   public void testCloseAllForUGI() throws Exception {
-    final Configuration conf = new Configuration();
-    conf.set("fs.cachedfile.impl", FileSystem.getFileSystemClass("file", null).getName());
+    final Configuration conf = newConf();
     UserGroupInformation ugiA = UserGroupInformation.createRemoteUser("foo");
-    FileSystem fsA = ugiA.doAs(new PrivilegedExceptionAction<FileSystem>() {
-      @Override
-      public FileSystem run() throws Exception {
-        return FileSystem.get(new URI("cachedfile://a"), conf);
-      }
-    });
+    FileSystem fsA = getCachedFS(ugiA, conf);
     //Now we should get the cached filesystem
-    FileSystem fsA1 = ugiA.doAs(new PrivilegedExceptionAction<FileSystem>() {
-      @Override
-      public FileSystem run() throws Exception {
-        return FileSystem.get(new URI("cachedfile://a"), conf);
-      }
-    });
+    FileSystem fsA1 = getCachedFS(ugiA, conf);
     assertSame(fsA, fsA1);
     
     FileSystem.closeAllForUGI(ugiA);
     
     //Now we should get a different (newly created) filesystem
-    fsA1 = ugiA.doAs(new PrivilegedExceptionAction<FileSystem>() {
-      @Override
-      public FileSystem run() throws Exception {
-        return FileSystem.get(new URI("cachedfile://a"), conf);
-      }
-    });
+    fsA1 = getCachedFS(ugiA, conf);
     assertNotSame(fsA, fsA1);
   }
   
@@ -292,16 +251,17 @@ public class TestFileSystemCaching {
   @Test
   public void testDeleteOnExit() throws IOException {
     FileSystem mockFs = mock(FileSystem.class);
-    FileSystem fs = new FilterFileSystem(mockFs);
     Path path = new Path("/a");
+    try (FileSystem fs = new FilterFileSystem(mockFs)) {
 
-    // delete on close if path does exist
-    when(mockFs.getFileStatus(eq(path))).thenReturn(new FileStatus());
-    assertTrue(fs.deleteOnExit(path));
-    verify(mockFs).getFileStatus(eq(path));
-    reset(mockFs);
-    when(mockFs.getFileStatus(eq(path))).thenReturn(new FileStatus());
-    fs.close();
+      // delete on close if path does exist
+      when(mockFs.getFileStatus(eq(path))).thenReturn(new FileStatus());
+      assertTrue(fs.deleteOnExit(path));
+      verify(mockFs).getFileStatus(eq(path));
+      reset(mockFs);
+      when(mockFs.getFileStatus(eq(path))).thenReturn(new FileStatus());
+      fs.close();
+    }
     verify(mockFs).getFileStatus(eq(path));
     verify(mockFs).delete(eq(path), eq(true));
   }
@@ -309,14 +269,16 @@ public class TestFileSystemCaching {
   @Test
   public void testDeleteOnExitFNF() throws IOException {
     FileSystem mockFs = mock(FileSystem.class);
-    FileSystem fs = new FilterFileSystem(mockFs);
-    Path path = new Path("/a");
+    Path path;
+    try (FileSystem fs = new FilterFileSystem(mockFs)) {
+      path = new Path("/a");
 
-    // don't delete on close if path doesn't exist
-    assertFalse(fs.deleteOnExit(path));
-    verify(mockFs).getFileStatus(eq(path));
-    reset(mockFs);
-    fs.close();
+      // don't delete on close if path doesn't exist
+      assertFalse(fs.deleteOnExit(path));
+      verify(mockFs).getFileStatus(eq(path));
+      reset(mockFs);
+      fs.close();
+    }
     verify(mockFs, never()).getFileStatus(eq(path));
     verify(mockFs, never()).delete(any(Path.class), anyBoolean());
   }
@@ -325,15 +287,17 @@ public class TestFileSystemCaching {
   @Test
   public void testDeleteOnExitRemoved() throws IOException {
     FileSystem mockFs = mock(FileSystem.class);
-    FileSystem fs = new FilterFileSystem(mockFs);
-    Path path = new Path("/a");
+    Path path;
+    try (FileSystem fs = new FilterFileSystem(mockFs)) {
+      path = new Path("/a");
 
-    // don't delete on close if path existed, but later removed
-    when(mockFs.getFileStatus(eq(path))).thenReturn(new FileStatus());
-    assertTrue(fs.deleteOnExit(path));
-    verify(mockFs).getFileStatus(eq(path));
-    reset(mockFs);
-    fs.close();
+      // don't delete on close if path existed, but later removed
+      when(mockFs.getFileStatus(eq(path))).thenReturn(new FileStatus());
+      assertTrue(fs.deleteOnExit(path));
+      verify(mockFs).getFileStatus(eq(path));
+      reset(mockFs);
+      fs.close();
+    }
     verify(mockFs).getFileStatus(eq(path));
     verify(mockFs, never()).delete(any(Path.class), anyBoolean());
   }
@@ -341,18 +305,35 @@ public class TestFileSystemCaching {
   @Test
   public void testCancelDeleteOnExit() throws IOException {
     FileSystem mockFs = mock(FileSystem.class);
-    FileSystem fs = new FilterFileSystem(mockFs);
-    Path path = new Path("/a");
+    try (FileSystem fs = new FilterFileSystem(mockFs)) {
+      Path path = new Path("/a");
 
-    // don't delete on close if path existed, but later cancelled
-    when(mockFs.getFileStatus(eq(path))).thenReturn(new FileStatus());
-    assertTrue(fs.deleteOnExit(path));
-    verify(mockFs).getFileStatus(eq(path));
-    assertTrue(fs.cancelDeleteOnExit(path));
-    assertFalse(fs.cancelDeleteOnExit(path)); // false because not registered
-    reset(mockFs);
-    fs.close();
+      // don't delete on close if path existed, but later cancelled
+      when(mockFs.getFileStatus(eq(path))).thenReturn(new FileStatus());
+      assertTrue(fs.deleteOnExit(path));
+      verify(mockFs).getFileStatus(eq(path));
+      assertTrue(fs.cancelDeleteOnExit(path));
+      assertFalse(fs.cancelDeleteOnExit(path)); // false because not registered
+      reset(mockFs);
+      fs.close();
+    }
     verify(mockFs, never()).getFileStatus(any(Path.class));
     verify(mockFs, never()).delete(any(Path.class), anyBoolean());
   }
+
+  @Test
+  public void testCacheIncludesURIUserInfo() throws Throwable {
+    URI containerA = new URI("wasb://a@account.blob.core.windows.net");
+    URI containerB = new URI("wasb://b@account.blob.core.windows.net");
+    Configuration conf = new Configuration(false);
+    FileSystem.Cache.Key keyA = new FileSystem.Cache.Key(containerA, conf);
+    FileSystem.Cache.Key keyB = new FileSystem.Cache.Key(containerB, conf);
+    assertNotEquals(keyA, keyB);
+    assertNotEquals(keyA, new FileSystem.Cache.Key(
+        new URI("wasb://account.blob.core.windows.net"), conf));
+    assertEquals(keyA, new FileSystem.Cache.Key(
+        new URI("wasb://A@account.blob.core.windows.net"), conf));
+    assertNotEquals(keyA, new FileSystem.Cache.Key(
+        new URI("wasb://a:password@account.blob.core.windows.net"), conf));
+  }
 }

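The userinfo semantics asserted by testCacheIncludesURIUserInfo can be sketched standalone. DemoCacheKey below illustrates the asserted behavior (userinfo participates in the key and is compared case-insensitively); it is not the actual FileSystem.Cache.Key implementation:

import java.net.URI;
import java.util.Locale;
import java.util.Objects;

final class DemoCacheKey {
  private final String scheme;
  private final String authority; // includes userinfo, compared lower-cased

  DemoCacheKey(URI uri) {
    scheme = uri.getScheme() == null
        ? "" : uri.getScheme().toLowerCase(Locale.ROOT);
    authority = uri.getAuthority() == null
        ? "" : uri.getAuthority().toLowerCase(Locale.ROOT);
  }

  @Override
  public boolean equals(Object o) {
    return o instanceof DemoCacheKey
        && scheme.equals(((DemoCacheKey) o).scheme)
        && authority.equals(((DemoCacheKey) o).authority);
  }

  @Override
  public int hashCode() {
    return Objects.hash(scheme, authority);
  }

  public static void main(String[] args) throws Exception {
    URI a = new URI("wasb://a@account.blob.core.windows.net");
    System.out.println(new DemoCacheKey(a).equals(new DemoCacheKey(
        new URI("wasb://A@account.blob.core.windows.net")))); // true
    System.out.println(new DemoCacheKey(a).equals(new DemoCacheKey(
        new URI("wasb://a:password@account.blob.core.windows.net")))); // false
  }
}
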



[43/50] [abbrv] hadoop git commit: YARN-7836. Added error check for updating service components. (Contributed by Gour Saha)

Posted by ha...@apache.org.
YARN-7836.  Added error check for updating service components.
            (Contributed by Gour Saha)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/19096900
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/19096900
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/19096900

Branch: refs/heads/HDFS-12996
Commit: 190969006d4a7f9ef86d67bba472f7dc5642668a
Parents: 84a1321
Author: Eric Yang <ey...@apache.org>
Authored: Thu Feb 22 16:08:30 2018 -0500
Committer: Eric Yang <ey...@apache.org>
Committed: Thu Feb 22 16:08:30 2018 -0500

----------------------------------------------------------------------
 .../hadoop/yarn/service/webapp/ApiServer.java   | 23 +++++--
 .../hadoop/yarn/service/TestApiServer.java      | 69 ++++++++++++++++----
 2 files changed, 75 insertions(+), 17 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/19096900/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services-api/src/main/java/org/apache/hadoop/yarn/service/webapp/ApiServer.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services-api/src/main/java/org/apache/hadoop/yarn/service/webapp/ApiServer.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services-api/src/main/java/org/apache/hadoop/yarn/service/webapp/ApiServer.java
index e58938e..1528596 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services-api/src/main/java/org/apache/hadoop/yarn/service/webapp/ApiServer.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services-api/src/main/java/org/apache/hadoop/yarn/service/webapp/ApiServer.java
@@ -280,14 +280,25 @@ public class ApiServer {
       @PathParam(COMPONENT_NAME) String componentName, Component component) {
 
     try {
-      UserGroupInformation ugi = getProxyUser(request);
+      if (component == null) {
+        throw new YarnException("No component data provided");
+      }
+      if (component.getName() != null
+          && !component.getName().equals(componentName)) {
+        String msg = "Component name in the request object ("
+            + component.getName() + ") does not match that in the URI path ("
+            + componentName + ")";
+        throw new YarnException(msg);
+      }
+      if (component.getNumberOfContainers() == null) {
+        throw new YarnException("No container count provided");
+      }
       if (component.getNumberOfContainers() < 0) {
-        String message =
-            "Service = " + appName + ", Component = " + component.getName()
-                + ": Invalid number of containers specified " + component
-                .getNumberOfContainers();
+        String message = "Invalid number of containers specified "
+            + component.getNumberOfContainers();
         throw new YarnException(message);
       }
+      UserGroupInformation ugi = getProxyUser(request);
       Map<String, Long> original = ugi
           .doAs(new PrivilegedExceptionAction<Map<String, Long>>() {
             @Override
@@ -296,7 +307,7 @@ public class ApiServer {
               sc.init(YARN_CONFIG);
               sc.start();
               Map<String, Long> original = sc.flexByRestService(appName,
-                  Collections.singletonMap(component.getName(),
+                  Collections.singletonMap(componentName,
                       component.getNumberOfContainers()));
               sc.close();
               return original;

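For context, a hedged example of the flex request these checks validate, assuming the standard YARN services REST endpoint; the RM address, service name, and container count are illustrative:

curl -X PUT -H "Content-Type: application/json" \
  -d '{"name": "jenkins-master", "number_of_containers": 2}' \
  http://rm-host:8088/app/v1/services/jenkins/components/jenkins-master

A body whose name field disagrees with the path segment, or that omits number_of_containers, now fails fast with 400 instead of reaching ServiceClient.
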
http://git-wip-us.apache.org/repos/asf/hadoop/blob/19096900/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services-api/src/test/java/org/apache/hadoop/yarn/service/TestApiServer.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services-api/src/test/java/org/apache/hadoop/yarn/service/TestApiServer.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services-api/src/test/java/org/apache/hadoop/yarn/service/TestApiServer.java
index 52057db..4629d28 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services-api/src/test/java/org/apache/hadoop/yarn/service/TestApiServer.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services-api/src/test/java/org/apache/hadoop/yarn/service/TestApiServer.java
@@ -17,6 +17,16 @@
 
 package org.apache.hadoop.yarn.service;
 
+import static org.junit.Assert.*;
+
+import java.util.ArrayList;
+import java.util.List;
+
+import javax.servlet.http.HttpServletRequest;
+import javax.ws.rs.Path;
+import javax.ws.rs.core.Response;
+import javax.ws.rs.core.Response.Status;
+
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.yarn.service.api.records.Artifact;
 import org.apache.hadoop.yarn.service.api.records.Artifact.TypeEnum;
@@ -24,23 +34,13 @@ import org.apache.hadoop.yarn.service.api.records.Component;
 import org.apache.hadoop.yarn.service.api.records.Resource;
 import org.apache.hadoop.yarn.service.api.records.Service;
 import org.apache.hadoop.yarn.service.api.records.ServiceState;
+import org.apache.hadoop.yarn.service.api.records.ServiceStatus;
 import org.apache.hadoop.yarn.service.client.ServiceClient;
 import org.apache.hadoop.yarn.service.webapp.ApiServer;
-
-import javax.servlet.http.HttpServletRequest;
-import javax.ws.rs.Path;
-import javax.ws.rs.core.Response;
-import javax.ws.rs.core.Response.Status;
-
 import org.junit.Before;
 import org.junit.Test;
 import org.mockito.Mockito;
 
-import java.util.ArrayList;
-import java.util.List;
-
-import static org.junit.Assert.*;
-
 /**
  * Test case for ApiServer REST API.
  *
@@ -370,4 +370,51 @@ public class TestApiServer {
         Response.status(Status.BAD_REQUEST)
             .build().getStatus());
   }
+
+  @Test
+  public void testUpdateComponent() {
+    Response actual = apiServer.updateComponent(request, "jenkins",
+        "jenkins-master", null);
+    ServiceStatus serviceStatus = (ServiceStatus) actual.getEntity();
+    assertEquals("Update component should have failed with 400 bad request",
+        Response.status(Status.BAD_REQUEST).build().getStatus(),
+        actual.getStatus());
+    assertEquals("Update component should have failed with no data error",
+        "No component data provided", serviceStatus.getDiagnostics());
+
+    Component comp = new Component();
+    actual = apiServer.updateComponent(request, "jenkins", "jenkins-master",
+        comp);
+    serviceStatus = (ServiceStatus) actual.getEntity();
+    assertEquals("Update component should have failed with 400 bad request",
+        Response.status(Status.BAD_REQUEST).build().getStatus(),
+        actual.getStatus());
+    assertEquals("Update component should have failed with no count error",
+        "No container count provided", serviceStatus.getDiagnostics());
+
+    comp.setNumberOfContainers(-1L);
+    actual = apiServer.updateComponent(request, "jenkins", "jenkins-master",
+        comp);
+    serviceStatus = (ServiceStatus) actual.getEntity();
+    assertEquals("Update component should have failed with 400 bad request",
+        Response.status(Status.BAD_REQUEST).build().getStatus(),
+        actual.getStatus());
+    assertEquals("Update component should have failed with no count error",
+        "Invalid number of containers specified -1", serviceStatus.getDiagnostics());
+
+    comp.setName("jenkins-slave");
+    comp.setNumberOfContainers(1L);
+    actual = apiServer.updateComponent(request, "jenkins", "jenkins-master",
+        comp);
+    serviceStatus = (ServiceStatus) actual.getEntity();
+    assertEquals("Update component should have failed with 400 bad request",
+        Response.status(Status.BAD_REQUEST).build().getStatus(),
+        actual.getStatus());
+    assertEquals(
+        "Update component should have failed with component name mismatch "
+            + "error",
+        "Component name in the request object (jenkins-slave) does not match "
+            + "that in the URI path (jenkins-master)",
+        serviceStatus.getDiagnostics());
+  }
 }




[07/50] [abbrv] hadoop git commit: YARN-7919. Refactor timelineservice-hbase module into submodules. Contributed by Haibo Chen.

Posted by ha...@apache.org.
http://git-wip-us.apache.org/repos/asf/hadoop/blob/9af30d46/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/common/HBaseTimelineStorageUtils.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/common/HBaseTimelineStorageUtils.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/common/HBaseTimelineStorageUtils.java
deleted file mode 100644
index c115b18..0000000
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/common/HBaseTimelineStorageUtils.java
+++ /dev/null
@@ -1,354 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements. See the NOTICE file distributed with this
- * work for additional information regarding copyright ownership. The ASF
- * licenses this file to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations under
- * the License.
- */
-
-package org.apache.hadoop.yarn.server.timelineservice.storage.common;
-
-import java.io.IOException;
-import java.net.MalformedURLException;
-import java.net.URL;
-import java.util.Arrays;
-import java.util.List;
-import java.util.Map;
-
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hbase.Cell;
-import org.apache.hadoop.hbase.CellUtil;
-import org.apache.hadoop.hbase.HBaseConfiguration;
-import org.apache.hadoop.hbase.HConstants;
-import org.apache.hadoop.hbase.KeyValue;
-import org.apache.hadoop.hbase.Tag;
-import org.apache.hadoop.hbase.client.Query;
-import org.apache.hadoop.hbase.util.Bytes;
-import org.apache.hadoop.yarn.api.records.ApplicationId;
-import org.apache.hadoop.yarn.conf.YarnConfiguration;
-import org.apache.hadoop.yarn.server.timelineservice.storage.flow.AggregationCompactionDimension;
-import org.apache.hadoop.yarn.server.timelineservice.storage.flow.AggregationOperation;
-import org.apache.hadoop.yarn.server.timelineservice.storage.flow.Attribute;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import java.text.NumberFormat;
-
-/**
- * A bunch of utility functions used in HBase TimelineService backend.
- */
-public final class HBaseTimelineStorageUtils {
-  /** milliseconds in one day. */
-  public static final long MILLIS_ONE_DAY = 86400000L;
-  private static final Logger LOG =
-      LoggerFactory.getLogger(HBaseTimelineStorageUtils.class);
-
-  private HBaseTimelineStorageUtils() {
-  }
-
-
-  /**
-   * Combines the input array of attributes and the input aggregation operation
-   * into a new array of attributes.
-   *
-   * @param attributes Attributes to be combined.
-   * @param aggOp Aggregation operation.
-   * @return array of combined attributes.
-   */
-  public static Attribute[] combineAttributes(Attribute[] attributes,
-                                              AggregationOperation aggOp) {
-    int newLength = getNewLengthCombinedAttributes(attributes, aggOp);
-    Attribute[] combinedAttributes = new Attribute[newLength];
-
-    if (attributes != null) {
-      System.arraycopy(attributes, 0, combinedAttributes, 0, attributes.length);
-    }
-
-    if (aggOp != null) {
-      Attribute a2 = aggOp.getAttribute();
-      combinedAttributes[newLength - 1] = a2;
-    }
-    return combinedAttributes;
-  }
-
-  /**
-   * Returns a number for the new array size. The new array is the combination
-   * of input array of attributes and the input aggregation operation.
-   *
-   * @param attributes Attributes.
-   * @param aggOp Aggregation operation.
-   * @return the size for the new array
-   */
-  private static int getNewLengthCombinedAttributes(Attribute[] attributes,
-      AggregationOperation aggOp) {
-    int oldLength = getAttributesLength(attributes);
-    int aggLength = getAppOpLength(aggOp);
-    return oldLength + aggLength;
-  }
-
-  private static int getAppOpLength(AggregationOperation aggOp) {
-    if (aggOp != null) {
-      return 1;
-    }
-    return 0;
-  }
-
-  private static int getAttributesLength(Attribute[] attributes) {
-    if (attributes != null) {
-      return attributes.length;
-    }
-    return 0;
-  }
-
-  /**
-   * Returns the first seen aggregation operation as seen in the list of input
-   * tags or null otherwise.
-   *
-   * @param tags list of HBase tags.
-   * @return AggregationOperation
-   */
-  public static AggregationOperation getAggregationOperationFromTagsList(
-      List<Tag> tags) {
-    for (AggregationOperation aggOp : AggregationOperation.values()) {
-      for (Tag tag : tags) {
-        if (tag.getType() == aggOp.getTagType()) {
-          return aggOp;
-        }
-      }
-    }
-    return null;
-  }
-
-  /**
-   * Creates a {@link Tag} from the input attribute.
-   *
-   * @param attribute Attribute from which tag has to be fetched.
-   * @return a HBase Tag.
-   */
-  public static Tag getTagFromAttribute(Map.Entry<String, byte[]> attribute) {
-    // attribute could be either an Aggregation Operation or
-    // an Aggregation Dimension
-    // Get the Tag type from either
-    AggregationOperation aggOp = AggregationOperation
-        .getAggregationOperation(attribute.getKey());
-    if (aggOp != null) {
-      Tag t = new Tag(aggOp.getTagType(), attribute.getValue());
-      return t;
-    }
-
-    AggregationCompactionDimension aggCompactDim =
-        AggregationCompactionDimension.getAggregationCompactionDimension(
-            attribute.getKey());
-    if (aggCompactDim != null) {
-      Tag t = new Tag(aggCompactDim.getTagType(), attribute.getValue());
-      return t;
-    }
-    return null;
-  }
-
-  /**
-   * creates a new cell based on the input cell but with the new value.
-   *
-   * @param origCell Original cell
-   * @param newValue new cell value
-   * @return cell
-   * @throws IOException while creating new cell.
-   */
-  public static Cell createNewCell(Cell origCell, byte[] newValue)
-      throws IOException {
-    return CellUtil.createCell(CellUtil.cloneRow(origCell),
-        CellUtil.cloneFamily(origCell), CellUtil.cloneQualifier(origCell),
-        origCell.getTimestamp(), KeyValue.Type.Put.getCode(), newValue);
-  }
-
-  /**
-   * creates a cell with the given inputs.
-   *
-   * @param row row of the cell to be created
-   * @param family column family name of the new cell
-   * @param qualifier qualifier for the new cell
-   * @param ts timestamp of the new cell
-   * @param newValue value of the new cell
-   * @param tags tags in the new cell
-   * @return cell
-   * @throws IOException while creating the cell.
-   */
-  public static Cell createNewCell(byte[] row, byte[] family, byte[] qualifier,
-      long ts, byte[] newValue, byte[] tags) throws IOException {
-    return CellUtil.createCell(row, family, qualifier, ts, KeyValue.Type.Put,
-        newValue, tags);
-  }
-
-  /**
-   * returns app id from the list of tags.
-   *
-   * @param tags cell tags to be looked into
-   * @return App Id as the AggregationCompactionDimension
-   */
-  public static String getAggregationCompactionDimension(List<Tag> tags) {
-    String appId = null;
-    for (Tag t : tags) {
-      if (AggregationCompactionDimension.APPLICATION_ID.getTagType() == t
-          .getType()) {
-        appId = Bytes.toString(t.getValue());
-        return appId;
-      }
-    }
-    return appId;
-  }
-
-  /**
-   * Converts an int into it's inverse int to be used in (row) keys
-   * where we want to have the largest int value in the top of the table
-   * (scans start at the largest int first).
-   *
-   * @param key value to be inverted so that the latest version will be first in
-   *          a scan.
-   * @return inverted int
-   */
-  public static int invertInt(int key) {
-    return Integer.MAX_VALUE - key;
-  }
-
-  /**
-   * returns the timestamp of that day's start (which is midnight 00:00:00 AM)
-   * for a given input timestamp.
-   *
-   * @param ts Timestamp.
-   * @return timestamp of that day's beginning (midnight)
-   */
-  public static long getTopOfTheDayTimestamp(long ts) {
-    long dayTimestamp = ts - (ts % MILLIS_ONE_DAY);
-    return dayTimestamp;
-  }
-
-  private static final ThreadLocal<NumberFormat> APP_ID_FORMAT =
-      new ThreadLocal<NumberFormat>() {
-        @Override
-        public NumberFormat initialValue() {
-          NumberFormat fmt = NumberFormat.getInstance();
-          fmt.setGroupingUsed(false);
-          fmt.setMinimumIntegerDigits(4);
-          return fmt;
-        }
-      };
-
-  /**
-   * A utility method that converts ApplicationId to string without using
-   * FastNumberFormat in order to avoid the incompatibility issue caused
-   * by mixing hadoop-common 2.5.1 and hadoop-yarn-api 3.0 in this module.
-   * This is a work-around implementation as discussed in YARN-6905.
-   *
-   * @param appId application id
-   * @return the string representation of the given application id
-   *
-   */
-  public static String convertApplicationIdToString(ApplicationId appId) {
-    StringBuilder sb = new StringBuilder(64);
-    sb.append(ApplicationId.appIdStrPrefix);
-    sb.append("_");
-    sb.append(appId.getClusterTimestamp());
-    sb.append('_');
-    sb.append(APP_ID_FORMAT.get().format(appId.getId()));
-    return sb.toString();
-  }
-
-  /**
-   * @param conf YARN configuration. Used to see if there is an explicit config
-   *          pointing to the HBase config file to read. It should not be null
-   *          or a NullPointerException will be thrown.
-   * @return a configuration with the HBase configuration from the classpath,
-   *         optionally overwritten by the timeline service configuration URL if
-   *         specified.
-   * @throws MalformedURLException if a timeline service HBase configuration URL
-   *           is specified but is a malformed URL.
-   */
-  public static Configuration getTimelineServiceHBaseConf(Configuration conf)
-      throws MalformedURLException {
-    if (conf == null) {
-      throw new NullPointerException();
-    }
-
-    Configuration hbaseConf;
-    String timelineServiceHBaseConfFileURL =
-        conf.get(YarnConfiguration.TIMELINE_SERVICE_HBASE_CONFIGURATION_FILE);
-    if (timelineServiceHBaseConfFileURL != null
-        && timelineServiceHBaseConfFileURL.length() > 0) {
-      LOG.info("Using hbase configuration at " +
-          timelineServiceHBaseConfFileURL);
-      // create a clone so that we don't mess with out input one
-      hbaseConf = new Configuration(conf);
-      Configuration plainHBaseConf = new Configuration(false);
-      URL hbaseSiteXML = new URL(timelineServiceHBaseConfFileURL);
-      plainHBaseConf.addResource(hbaseSiteXML);
-      HBaseConfiguration.merge(hbaseConf, plainHBaseConf);
-    } else {
-      // default to what is on the classpath
-      hbaseConf = HBaseConfiguration.create(conf);
-    }
-    return hbaseConf;
-  }
-
-  /**
-   * Given a row key prefix stored in a byte array, return a byte array for its
-   * immediate next row key.
-   *
-   * @param rowKeyPrefix The provided row key prefix, represented in an array.
-   * @return the closest next row key of the provided row key.
-   */
-  public static byte[] calculateTheClosestNextRowKeyForPrefix(
-      byte[] rowKeyPrefix) {
-    // Essentially we are treating it like an 'unsigned very very long' and
-    // doing +1 manually.
-    // Search for the place where the trailing 0xFFs start
-    int offset = rowKeyPrefix.length;
-    while (offset > 0) {
-      if (rowKeyPrefix[offset - 1] != (byte) 0xFF) {
-        break;
-      }
-      offset--;
-    }
-
-    if (offset == 0) {
-      // We got an 0xFFFF... (only FFs) stopRow value which is
-      // the last possible prefix before the end of the table.
-      // So set it to stop at the 'end of the table'
-      return HConstants.EMPTY_END_ROW;
-    }
-
-    // Copy the right length of the original
-    byte[] newStopRow = Arrays.copyOfRange(rowKeyPrefix, 0, offset);
-    // And increment the last one
-    newStopRow[newStopRow.length - 1]++;
-    return newStopRow;
-  }
-
-  /**
-   * Checks if passed object is of integral type(Short/Integer/Long).
-   *
-   * @param obj Object to be checked.
-   * @return true if object passed is of type Short or Integer or Long, false
-   * otherwise.
-   */
-  public static boolean isIntegralValue(Object obj) {
-    return (obj instanceof Short) || (obj instanceof Integer) ||
-        (obj instanceof Long);
-  }
-
-  public static void setMetricsTimeRange(Query query, byte[] metricsCf,
-      long tsBegin, long tsEnd) {
-    if (tsBegin != 0 || tsEnd != Long.MAX_VALUE) {
-      query.setColumnFamilyTimeRange(metricsCf,
-          tsBegin, ((tsEnd == Long.MAX_VALUE) ? Long.MAX_VALUE : (tsEnd + 1)));
-    }
-  }
-}

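The prefix-increment trick in calculateTheClosestNextRowKeyForPrefix above (the file is deleted here as part of the move into submodules, not dropped) can be shown standalone; DemoRowKeys is a hypothetical illustration:

import java.util.Arrays;

public class DemoRowKeys {
  // Same idea: treat the prefix as an unsigned big-endian number and add
  // one, dropping trailing 0xFF bytes that would otherwise carry forever.
  static byte[] nextAfterPrefix(byte[] prefix) {
    int offset = prefix.length;
    while (offset > 0 && prefix[offset - 1] == (byte) 0xFF) {
      offset--;
    }
    if (offset == 0) {
      return new byte[0]; // analogue of HConstants.EMPTY_END_ROW
    }
    byte[] next = Arrays.copyOfRange(prefix, 0, offset);
    next[next.length - 1]++;
    return next;
  }

  public static void main(String[] args) {
    // {1, 2, 0xFF} -> {1, 3}: scanning [prefix, next) covers every row
    // key that starts with the prefix.
    System.out.println(Arrays.toString(
        nextAfterPrefix(new byte[] {1, 2, (byte) 0xFF})));
  }
}
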
http://git-wip-us.apache.org/repos/asf/hadoop/blob/9af30d46/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/common/KeyConverter.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/common/KeyConverter.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/common/KeyConverter.java
deleted file mode 100644
index 4229e81..0000000
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/common/KeyConverter.java
+++ /dev/null
@@ -1,41 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.yarn.server.timelineservice.storage.common;
-
-/**
- * Interface which has to be implemented for encoding and decoding row keys and
- * columns.
- */
-public interface KeyConverter<T> {
-  /**
-   * Encodes a key as a byte array.
-   *
-   * @param key key to be encoded.
-   * @return a byte array.
-   */
-  byte[] encode(T key);
-
-  /**
-   * Decodes a byte array and returns a key of type T.
-   *
-   * @param bytes byte representation
-   * @return an object(key) of type T which has been constructed after decoding
-   * the bytes.
-   */
-  T decode(byte[] bytes);
-}

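As a rough illustration of the KeyConverter contract deleted above, a
hypothetical converter for Integer keys might look as follows (IntKeyConverter
is an invented name; the sketch assumes KeyConverter and HBase's Bytes
utility are still available on the classpath):

    import org.apache.hadoop.hbase.util.Bytes;

    /** Hypothetical example: encodes Integer keys as 4-byte big-endian arrays. */
    public final class IntKeyConverter implements KeyConverter<Integer> {
      @Override
      public byte[] encode(Integer key) {
        // 4 bytes; preserves numeric order in the row key for non-negative ints
        return Bytes.toBytes(key.intValue());
      }

      @Override
      public Integer decode(byte[] bytes) {
        return Bytes.toInt(bytes);
      }
    }
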
http://git-wip-us.apache.org/repos/asf/hadoop/blob/9af30d46/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/common/KeyConverterToString.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/common/KeyConverterToString.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/common/KeyConverterToString.java
deleted file mode 100644
index 1f52a7b..0000000
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/common/KeyConverterToString.java
+++ /dev/null
@@ -1,38 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.yarn.server.timelineservice.storage.common;
-
-/**
- * Interface which has to be implemented for encoding and decoding row keys or
- * column qualifiers as string.
- */
-public interface KeyConverterToString<T> {
-  /**
-   * Encode key as string.
-   * @param key of type T to be encoded as string.
-   * @return encoded value as string.
-   */
-  String encodeAsString(T key);
-
-  /**
-   * Decode row key from string to a key of type T.
-   * @param encodedKey string representation of row key
-   * @return type T which has been constructed after decoding string.
-   */
-  T decodeFromString(String encodedKey);
-}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/9af30d46/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/common/LongConverter.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/common/LongConverter.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/common/LongConverter.java
deleted file mode 100644
index 6ab69f7..0000000
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/common/LongConverter.java
+++ /dev/null
@@ -1,94 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.yarn.server.timelineservice.storage.common;
-
-import java.io.IOException;
-import java.io.Serializable;
-
-import org.apache.hadoop.hbase.util.Bytes;
-
-/**
- * Encodes a value by interpreting it as a Long and converting it to bytes and
- * decodes a set of bytes as a Long.
- */
-public final class LongConverter implements NumericValueConverter,
-    Serializable {
-
-  /**
-   * Added because we implement Comparator<Number>.
-   */
-  private static final long serialVersionUID = 1L;
-
-  public LongConverter() {
-  }
-
-  @Override
-  public byte[] encodeValue(Object value) throws IOException {
-    if (!HBaseTimelineStorageUtils.isIntegralValue(value)) {
-      throw new IOException("Expected integral value");
-    }
-    return Bytes.toBytes(((Number)value).longValue());
-  }
-
-  @Override
-  public Object decodeValue(byte[] bytes) throws IOException {
-    if (bytes == null) {
-      return null;
-    }
-    return Bytes.toLong(bytes);
-  }
-
-  /**
-   * Compares two numbers as longs. If either number is null, it will be taken
-   * as 0.
-   *
-   * @param num1 the first {@code Long} to compare.
-   * @param num2 the second {@code Long} to compare.
-   * @return -1 if num1 is less than num2, 0 if num1 is equal to num2 and 1 if
-   * num1 is greater than num2.
-   */
-  @Override
-  public int compare(Number num1, Number num2) {
-    return Long.compare((num1 == null) ? 0L : num1.longValue(),
-        (num2 == null) ? 0L : num2.longValue());
-  }
-
-  @Override
-  public Number add(Number num1, Number num2, Number...numbers) {
-    long sum = ((num1 == null) ? 0L : num1.longValue()) +
-        ((num2 == null) ? 0L : num2.longValue());
-    for (Number num : numbers) {
-      sum = sum + ((num == null) ? 0L : num.longValue());
-    }
-    return sum;
-  }
-
-  /**
-   * Converts a timestamp into its inverse timestamp to be used in (row) keys
-   * where we want to have the most recent timestamp at the top of the table
-   * (scans start at the most recent timestamp first).
-   *
-   * @param key value to be inverted so that the latest version will be first in
-   *          a scan.
-   * @return inverted long
-   */
-  public static long invertLong(long key) {
-    return Long.MAX_VALUE - key;
-  }
-}

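The invertLong trick above is what gives the timeline tables their
newest-first ordering: HBase sorts row keys ascending, so storing
Long.MAX_VALUE - timestamp makes more recent timestamps produce smaller
keys. A small self-contained check (plain Java, no Hadoop dependencies):

    public final class InvertLongDemo {
      static long invert(long key) {
        return Long.MAX_VALUE - key;  // same formula as LongConverter#invertLong
      }

      public static void main(String[] args) {
        long older = 1_000L;
        long newer = 2_000L;
        // After inversion the newer timestamp sorts first (smaller value).
        System.out.println(invert(newer) < invert(older));   // true
        // Applying the formula twice recovers the original value.
        System.out.println(invert(invert(newer)) == newer);  // true
      }
    }
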
http://git-wip-us.apache.org/repos/asf/hadoop/blob/9af30d46/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/common/LongKeyConverter.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/common/LongKeyConverter.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/common/LongKeyConverter.java
deleted file mode 100644
index 4a724d6..0000000
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/common/LongKeyConverter.java
+++ /dev/null
@@ -1,68 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.yarn.server.timelineservice.storage.common;
-
-import java.io.IOException;
-/**
- * Encodes and decodes column names / row keys that are longs.
- */
-public final class LongKeyConverter implements KeyConverter<Long> {
-
-  /**
-   * To delegate the actual work to.
-   */
-  private final LongConverter longConverter = new LongConverter();
-
-  public LongKeyConverter() {
-  }
-
-  /*
-   * (non-Javadoc)
-   *
-   * @see
-   * org.apache.hadoop.yarn.server.timelineservice.storage.common.KeyConverter
-   * #encode(java.lang.Object)
-   */
-  @Override
-  public byte[] encode(Long key) {
-    try {
-      // IOException will not be thrown here as we are explicitly passing
-      // Long.
-      return longConverter.encodeValue(key);
-    } catch (IOException e) {
-      return null;
-    }
-  }
-
-  /*
-   * (non-Javadoc)
-   *
-   * @see
-   * org.apache.hadoop.yarn.server.timelineservice.storage.common.KeyConverter
-   * #decode(byte[])
-   */
-  @Override
-  public Long decode(byte[] bytes) {
-    try {
-      return (Long) longConverter.decodeValue(bytes);
-    } catch (IOException e) {
-      return null;
-    }
-  }
-}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/9af30d46/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/common/NumericValueConverter.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/common/NumericValueConverter.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/common/NumericValueConverter.java
deleted file mode 100644
index 8fb6536..0000000
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/common/NumericValueConverter.java
+++ /dev/null
@@ -1,39 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.yarn.server.timelineservice.storage.common;
-
-import java.util.Comparator;
-
-/**
- * Extends ValueConverter interface for numeric converters to support numerical
- * operations such as comparison, addition, etc.
- */
-public interface NumericValueConverter extends ValueConverter,
-    Comparator<Number> {
-  /**
-   * Adds two or more numbers. If any of the numbers is null, it is treated
-   * as 0.
-   *
-   * @param num1 the first number to add.
-   * @param num2 the second number to add.
-   * @param numbers Rest of the numbers to be added.
-   * @return result after adding up the numbers.
-   */
-  Number add(Number num1, Number num2, Number...numbers);
-}

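A quick illustration of the null-as-zero semantics specified above, using the
LongConverter implementation from earlier in this diff (the sketch assumes
both classes remain available on the classpath after this change):

    import org.apache.hadoop.yarn.server.timelineservice.storage.common.LongConverter;
    import org.apache.hadoop.yarn.server.timelineservice.storage.common.NumericValueConverter;

    public final class NumericConverterDemo {
      public static void main(String[] args) {
        NumericValueConverter converter = new LongConverter();
        // Nulls are treated as 0L, so 5 + null + 3 sums to 8.
        System.out.println(converter.add(5L, null, 3L));      // 8
        // compare also substitutes 0L for null, so null < 1L.
        System.out.println(converter.compare(null, 1L) < 0);  // true
      }
    }
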
http://git-wip-us.apache.org/repos/asf/hadoop/blob/9af30d46/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/common/Range.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/common/Range.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/common/Range.java
deleted file mode 100644
index 8a2e01a..0000000
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/common/Range.java
+++ /dev/null
@@ -1,62 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.yarn.server.timelineservice.storage.common;
-
-import org.apache.hadoop.classification.InterfaceAudience;
-import org.apache.hadoop.classification.InterfaceStability;
-
-/**
- * Encapsulates a range with start and end indices.
- */
-@InterfaceAudience.Private
-@InterfaceStability.Unstable
-public class Range {
-  private final int startIdx;
-  private final int endIdx;
-
-  /**
-   * Defines a range from start index (inclusive) to end index (exclusive).
-   *
-   * @param start
-   *          Starting index position
-   * @param end
-   *          Ending index position (exclusive)
-   */
-  public Range(int start, int end) {
-    if (start < 0 || end < start) {
-      throw new IllegalArgumentException(
-          "Invalid range, required that: 0 <= start <= end; start=" + start
-              + ", end=" + end);
-    }
-
-    this.startIdx = start;
-    this.endIdx = end;
-  }
-
-  public int start() {
-    return startIdx;
-  }
-
-  public int end() {
-    return endIdx;
-  }
-
-  public int length() {
-    return endIdx - startIdx;
-  }
-}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hadoop/blob/9af30d46/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/common/RowKeyPrefix.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/common/RowKeyPrefix.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/common/RowKeyPrefix.java
deleted file mode 100644
index 6159dc7..0000000
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/common/RowKeyPrefix.java
+++ /dev/null
@@ -1,42 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.yarn.server.timelineservice.storage.common;
-
-/**
- * In queries where a single result is needed, an exact rowkey can be used
- * through the corresponding rowkey#getRowKey() method. For queries that need to
- * scan over a range of rowkeys, a partial rowkey (its initial part) is
- * used. Classes implementing RowKeyPrefix indicate that they represent the
- * initial part of a rowkey; constructors taking fewer arguments form a
- * partial rowkey, that is, a prefix.
- *
- * @param <R> indicating the type of rowkey that a particular implementation is
- *          a prefix for.
- */
-public interface RowKeyPrefix<R> {
-
-  /**
-   * Create a row key prefix, meaning a partial rowkey that can be used in range
-   * scans. Which fields are included in the prefix depends on which
-   * constructor of the specific instance was used.
-   * @return a prefix of the following form {@code first!second!...!last!}
-   */
-  byte[] getRowKeyPrefix();
-
-}
\ No newline at end of file

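For orientation, a RowKeyPrefix is typically consumed by seeding an HBase
Scan so that only rows sharing the prefix are read. A hedged sketch
(PrefixScanDemo is an invented name; Scan#setRowPrefixFilter is standard
HBase client API and internally derives the exclusive stop row with the same
increment-last-byte computation shown earlier):

    import org.apache.hadoop.hbase.client.Scan;
    import org.apache.hadoop.yarn.server.timelineservice.storage.common.RowKeyPrefix;

    final class PrefixScanDemo {
      /** Builds a Scan restricted to rows that share the given prefix. */
      static Scan scanForPrefix(RowKeyPrefix<?> rowKeyPrefix) {
        Scan scan = new Scan();
        scan.setRowPrefixFilter(rowKeyPrefix.getRowKeyPrefix());
        return scan;
      }
    }
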
http://git-wip-us.apache.org/repos/asf/hadoop/blob/9af30d46/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/common/Separator.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/common/Separator.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/common/Separator.java
deleted file mode 100644
index 5090b4d..0000000
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/common/Separator.java
+++ /dev/null
@@ -1,575 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements. See the NOTICE file distributed with this
- * work for additional information regarding copyright ownership. The ASF
- * licenses this file to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations under
- * the License.
- */
-package org.apache.hadoop.yarn.server.timelineservice.storage.common;
-
-import java.util.ArrayList;
-import java.util.Collection;
-import java.util.Iterator;
-import java.util.List;
-import java.util.regex.Matcher;
-import java.util.regex.Pattern;
-
-import org.apache.hadoop.hbase.util.Bytes;
-
-/**
- * Used to separate row qualifiers, column qualifiers and compound fields.
- */
-public enum Separator {
-
-  /**
-   * separator in key or column qualifier fields.
-   */
-  QUALIFIERS("!", "%0$"),
-
-  /**
-   * separator in values, and/or compound key/column qualifier fields.
-   */
-  VALUES("=", "%1$"),
-
-  /**
-   * separator in values, often used to avoid having these in qualifiers and
-   * names. Note that if we use HTML form encoding through URLEncoder, we end up
-   * getting a + for a space, which may already occur in strings, so we don't
-   * want that.
-   */
-  SPACE(" ", "%2$"),
-
-  /**
-   * separator in values, often used to avoid having these in qualifiers and
-   * names.
-   */
-  TAB("\t", "%3$");
-
-  // a reserved character that starts each of the encoded values and is encoded
-  // first in order to escape naturally occurring instances of encoded values
-  // although it can be expressed as an enum instance, we define them as private
-  // variables to hide it from callers
-  private static final String PERCENT = "%";
-  private static final String PERCENT_ENCODED = "%9$";
-
-  private static final Pattern PERCENT_PATTERN =
-      Pattern.compile(PERCENT, Pattern.LITERAL);
-  private static final String PERCENT_REPLACEMENT =
-      Matcher.quoteReplacement(PERCENT);
-
-  private static final Pattern PERCENT_ENCODED_PATTERN =
-      Pattern.compile(PERCENT_ENCODED, Pattern.LITERAL);
-  private static final String PERCENT_ENCODED_REPLACEMENT =
-      Matcher.quoteReplacement(PERCENT_ENCODED);
-
-  /**
-   * The string value of this separator.
-   */
-  private final String value;
-
-  /**
-   * The byte representation of the value.
-   */
-  private final byte[] bytes;
-
-  // pre-compiled patterns and quoted replacements for optimization
-  private final Pattern valuePattern;
-  private final String valueReplacement;
-
-  private final Pattern encodedValuePattern;
-  private final String encodedValueReplacement;
-
-  /**
-   * Indicator for the variable size of an individual segment in a split. The
-   * segment ends wherever the separator is encountered.
-   * Typically used for strings.
-   * Also used to indicate that there is no fixed number of splits which need to
-   * be returned. If the split limit is specified as this, all possible splits
-   * are returned.
-   */
-  public static final int VARIABLE_SIZE = 0;
-
-
-  /** empty string. */
-  public static final String EMPTY_STRING = "";
-
-  /** empty bytes. */
-  public static final byte[] EMPTY_BYTES = new byte[0];
-
-  /**
-   * @param value of the separator to use. Cannot be null or empty string.
-   * @param encodedValue choose something that isn't likely to occur in the data
-   *          itself. Cannot be null or empty string.
-   */
-  private Separator(String value, String encodedValue) {
-    this.value = value;
-
-    // validation
-    if (value == null || value.length() == 0 || encodedValue == null
-        || encodedValue.length() == 0) {
-      throw new IllegalArgumentException(
-          "Cannot create separator from null or empty string.");
-    }
-
-    this.bytes = Bytes.toBytes(value);
-    this.valuePattern = Pattern.compile(value, Pattern.LITERAL);
-    this.valueReplacement = Matcher.quoteReplacement(value);
-
-    this.encodedValuePattern = Pattern.compile(encodedValue, Pattern.LITERAL);
-    this.encodedValueReplacement = Matcher.quoteReplacement(encodedValue);
-  }
-
-  /**
-   * @return the original value of the separator
-   */
-  public String getValue() {
-    return value;
-  }
-
-  /**
-   * Used to make token safe to be used with this separator without collisions.
-   * It <em>must</em> be paired with {@link #decode(String)} for it to be
-   * decoded correctly.
-   * <p>
-   * If you need to encode a given string for multiple separators,
-   * {@link #encode(String, Separator...)} should be used over successive
-   * invocations of this method. It will result in a more compact version of the
-   * encoded value.
-   *
-   * @param token Token to be encoded.
-   * @return the token with any occurrences of this separator URLEncoded.
-   */
-  public String encode(String token) {
-    if (token == null || token.length() == 0) {
-      // Nothing to replace
-      return token;
-    }
-    // first encode the percent to escape naturally occurring encoded values
-    String escaped = encodePercent(token);
-    return encodeSingle(escaped, this);
-  }
-
-  private static String replace(String token, Pattern pattern,
-      String replacement) {
-    return pattern.matcher(token).replaceAll(replacement);
-  }
-
-  private static String encodeSingle(String token, Separator separator) {
-    return replace(token, separator.valuePattern,
-        separator.encodedValueReplacement);
-  }
-
-  private static String encodePercent(String token) {
-    return replace(token, PERCENT_PATTERN, PERCENT_ENCODED_REPLACEMENT);
-  }
-
-  /**
-   * Decode the token encoded using {@link #encode(String)}. It <em>must</em> be
-   * used for the result encoded with {@link #encode(String)} to be able to
-   * recover the original.
-   *
-   * @param token Token to be decoded.
-   * @return the token with any occurrences of the encoded separator replaced by
-   *         the separator itself.
-   */
-  public String decode(String token) {
-    if (token == null || token.length() == 0) {
-      // Nothing to replace
-      return token;
-    }
-    String escaped = decodeSingle(token, this);
-    // decode percent to de-escape
-    return decodePercent(escaped);
-  }
-
-  private static String decodeSingle(String token, Separator separator) {
-    return replace(token, separator.encodedValuePattern,
-        separator.valueReplacement);
-  }
-
-  private static String decodePercent(String token) {
-    return replace(token, PERCENT_ENCODED_PATTERN, PERCENT_REPLACEMENT);
-  }
-
-  /**
-   * Encode the given separators in the token with their encoding equivalents.
-   * It <em>must</em> be paired with {@link #decode(byte[], Separator...)} or
-   * {@link #decode(String, Separator...)} with the same separators for it to be
-   * decoded correctly.
-   * <p>
-   * If you need to encode a given string for multiple separators, this form of
-   * encoding should be used over successive invocations of
-   * {@link #encode(String)}. It will result in a more compact version of the
-   * encoded value.
-   *
-   * @param token containing possible separators that need to be encoded.
-   * @param separators to be encoded in the token with their URLEncoding
-   *          equivalent.
-   * @return non-null byte representation of the token with occurrences of the
-   *         separators encoded.
-   */
-  public static byte[] encode(String token, Separator... separators) {
-    if (token == null || token.length() == 0) {
-      return EMPTY_BYTES;
-    }
-    // first encode the percent to escape naturally occurring encoded values
-    String result = encodePercent(token);
-    for (Separator separator : separators) {
-      if (separator != null) {
-        result = encodeSingle(result, separator);
-      }
-    }
-    return Bytes.toBytes(result);
-  }
-
-  /**
-   * Decode the given separators in the token with their decoding equivalents.
-   * It <em>must</em> be used for the result encoded with
-   * {@link #encode(String, Separator...)} with the same separators to be able
-   * to recover the original.
-   *
-   * @param token containing possible separators that need to be decoded.
-   * @param separators to be decoded in the token from their URLEncoding
-   *          equivalent.
-   * @return String representation of the token with occurrences of the URL
-   *         encoded separators decoded.
-   */
-  public static String decode(byte[] token, Separator... separators) {
-    if (token == null) {
-      return null;
-    }
-    return decode(Bytes.toString(token), separators);
-  }
-
-  /**
-   * Decode the given separators in the token with their decoding equivalents.
-   * It <em>must</em> be used for the result encoded with
-   * {@link #encode(String, Separator...)} with the same separators to be able
-   * to recover the original.
-   *
-   * @param token containing possible separators that need to be decoded.
-   * @param separators to be decoded in the token from their URLEncoding
-   *          equivalent.
-   * @return String representation of the token with occurrences of the URL
-   *         encoded separators decoded.
-   */
-  public static String decode(String token, Separator... separators) {
-    if (token == null) {
-      return null;
-    }
-    String result = token;
-    for (Separator separator : separators) {
-      if (separator != null) {
-        result = decodeSingle(result, separator);
-      }
-    }
-    // decode percent to de-escape
-    return decodePercent(result);
-  }
-
-  /**
-   * Returns a single byte array containing all of the individual array
-   * components separated by this separator.
-   *
-   * @param components Byte array components to be joined together.
-   * @return byte array after joining the components
-   */
-  public byte[] join(byte[]... components) {
-    if (components == null || components.length == 0) {
-      return EMPTY_BYTES;
-    }
-
-    int finalSize = this.value.length() * (components.length - 1);
-    for (byte[] comp : components) {
-      if (comp != null) {
-        finalSize += comp.length;
-      }
-    }
-
-    byte[] buf = new byte[finalSize];
-    int offset = 0;
-    for (int i = 0; i < components.length; i++) {
-      if (components[i] != null) {
-        System.arraycopy(components[i], 0, buf, offset, components[i].length);
-        offset += components[i].length;
-      }
-      if (i < (components.length - 1)) {
-        System.arraycopy(this.bytes, 0, buf, offset, this.value.length());
-        offset += this.value.length();
-      }
-    }
-    return buf;
-  }
-
-  /**
-   * Concatenates items (as String), using this separator.
-   *
-   * @param items Items to join; {@code toString()} will be called on each
-   *          item. Any occurrence of the separator in the individual strings
-   *          will be encoded first. Cannot be null.
-   * @return non-null joined result. Note that when the separator is
-   *         {@literal null} the result is simply all items concatenated and
-   *         the process is not reversible through {@link #splitEncoded(String)}
-   */
-  public String joinEncoded(String... items) {
-    if (items == null || items.length == 0) {
-      return "";
-    }
-
-    StringBuilder sb = new StringBuilder(encode(items[0].toString()));
-    // Start at 1, we've already grabbed the first value at index 0
-    for (int i = 1; i < items.length; i++) {
-      sb.append(this.value);
-      sb.append(encode(items[i].toString()));
-    }
-
-    return sb.toString();
-  }
-
-  /**
-   * Concatenates items (as String), using this separator.
-   *
-   * @param items Items to join; {@code toString()} will be called on each
-   *          item. Any occurrence of the separator in the individual strings
-   *          will be encoded first. Cannot be null.
-   * @return non-null joined result. Note that when the separator is
-   *         {@literal null} the result is simply all items concatenated and
-   *         the process is not reversible through {@link #splitEncoded(String)}
-   */
-  public String joinEncoded(Iterable<?> items) {
-    if (items == null) {
-      return "";
-    }
-    Iterator<?> i = items.iterator();
-    if (!i.hasNext()) {
-      return "";
-    }
-
-    StringBuilder sb = new StringBuilder(encode(i.next().toString()));
-    while (i.hasNext()) {
-      sb.append(this.value);
-      sb.append(encode(i.next().toString()));
-    }
-
-    return sb.toString();
-  }
-
-  /**
-   * @param compoundValue containing individual values separated by this
-   *          separator, which have that separator encoded.
-   * @return non-null set of values from the compoundValue with the separator
-   *         decoded.
-   */
-  public Collection<String> splitEncoded(String compoundValue) {
-    List<String> result = new ArrayList<String>();
-    if (compoundValue != null) {
-      for (String val : valuePattern.split(compoundValue)) {
-        result.add(decode(val));
-      }
-    }
-    return result;
-  }
-
-  /**
-   * Splits the source array into multiple array segments using this separator,
-   * up to a maximum of count items. This will naturally produce copied byte
-   * arrays for each of the split segments.
-   *
-   * @param source to be split
-   * @param limit on how many segments are supposed to be returned. A
-   *          non-positive value indicates no limit on number of segments.
-   * @return source split by this separator.
-   */
-  public byte[][] split(byte[] source, int limit) {
-    return split(source, this.bytes, limit);
-  }
-
-  /**
-   * Splits the source array into multiple array segments using this separator.
-   * The sizes indicate the sizes of the relative components/segments.
-   * In case one of the segments contains this separator before the specified
-   * size is reached, the separator will be considered part of that segment and
-   * we will continue till size is reached.
-   * Variable length strings cannot contain this separator and are indicated
-   * with a size of {@value #VARIABLE_SIZE}. Such strings are encoded for this
-   * separator and decoded after the result of the split is returned.
-   *
-   * @param source byte array to be split.
-   * @param sizes sizes of relative components/segments.
-   * @return source split by this separator as per the sizes specified.
-   */
-  public byte[][] split(byte[] source, int[] sizes) {
-    return split(source, this.bytes, sizes);
-  }
-
-  /**
-   * Splits the source array into multiple array segments using this separator,
-   * as many times as splits are found. This will naturally produce copied byte
-   * arrays for each of the split segments.
-   *
-   * @param source byte array to be split
-   * @return source split by this separator.
-   */
-  public byte[][] split(byte[] source) {
-    return split(source, this.bytes);
-  }
-
-  /**
-   * Returns a list of ranges identifying [start, end) -- closed, open --
-   * positions within the source byte array that would be split using the
-   * separator byte array.
-   * The sizes indicate the sizes of the relative components/segments.
-   * In case one of the segments contains this separator before the specified
-   * size is reached, the separator will be considered part of that segment and
-   * we will continue till size is reached.
-   * Variable length strings cannot contain this separator and are indicated
-   * with a size of {@value #VARIABLE_SIZE}. Such strings are encoded for this
-   * separator and decoded after the result of the split is returned.
-   *
-   * @param source the source data
-   * @param separator the separator pattern to look for
-   * @param sizes indicate the sizes of the relative components/segments.
-   * @return a list of ranges.
-   */
-  private static List<Range> splitRanges(byte[] source, byte[] separator,
-      int[] sizes) {
-    List<Range> segments = new ArrayList<Range>();
-    if (source == null || separator == null) {
-      return segments;
-    }
-    // VARIABLE_SIZE here indicates that there is no limit to number of segments
-    // to return.
-    int limit = VARIABLE_SIZE;
-    if (sizes != null && sizes.length > 0) {
-      limit = sizes.length;
-    }
-    int start = 0;
-    int currentSegment = 0;
-    itersource: for (int i = 0; i < source.length; i++) {
-      for (int j = 0; j < separator.length; j++) {
-        if (i + j >= source.length || source[i + j] != separator[j]) {
-          continue itersource;
-        }
-      }
-      // all separator elements matched
-      if (limit > VARIABLE_SIZE) {
-        if (segments.size() >= (limit - 1)) {
-          // everything else goes in one final segment
-          break;
-        }
-        if (sizes != null) {
-          int currentSegExpectedSize = sizes[currentSegment];
-          if (currentSegExpectedSize > VARIABLE_SIZE) {
-            int currentSegSize = i - start;
-            if (currentSegSize < currentSegExpectedSize) {
-              // Segment not yet complete. More bytes to parse.
-              continue itersource;
-            } else if (currentSegSize > currentSegExpectedSize) {
-              // Segment is not as per size.
-              throw new IllegalArgumentException(
-                  "Segments not separated as per expected sizes");
-            }
-          }
-        }
-      }
-      segments.add(new Range(start, i));
-      start = i + separator.length;
-      // i will be incremented again in outer for loop
-      i += separator.length - 1;
-      currentSegment++;
-    }
-    // add in remaining to a final range
-    if (start <= source.length) {
-      if (sizes != null) {
-        // Check if final segment is as per size specified.
-        if (sizes[currentSegment] > VARIABLE_SIZE &&
-            source.length - start > sizes[currentSegment]) {
-          // Segment is not as per size.
-          throw new IllegalArgumentException(
-              "Segments not separated as per expected sizes");
-        }
-      }
-      segments.add(new Range(start, source.length));
-    }
-    return segments;
-  }
-
-  /**
-   * Splits the source into the segments calculated from the limit/sizes
-   * specified for the separator.
-   *
-   * @param source byte array to be split.
-   * @param segments specifies the range for each segment.
-   * @return a byte[][] split as per the segment ranges.
-   */
-  private static byte[][] split(byte[] source, List<Range> segments) {
-    byte[][] splits = new byte[segments.size()][];
-    for (int i = 0; i < segments.size(); i++) {
-      Range r = segments.get(i);
-      byte[] tmp = new byte[r.length()];
-      if (tmp.length > 0) {
-        System.arraycopy(source, r.start(), tmp, 0, r.length());
-      }
-      splits[i] = tmp;
-    }
-    return splits;
-  }
-
-  /**
-   * Splits the source array into multiple array segments using the given
-   * separator based on the sizes. This will naturally produce copied byte
-   * arrays for each of the split segments.
-   *
-   * @param source source array.
-   * @param separator separator represented as a byte array.
-   * @param sizes sizes of relative components/segments.
-   * @return byte[][] after splitting the source.
-   */
-  private static byte[][] split(byte[] source, byte[] separator, int[] sizes) {
-    List<Range> segments = splitRanges(source, separator, sizes);
-    return split(source, segments);
-  }
-
-  /**
-   * Splits the source array into multiple array segments using the given
-   * separator. This will naturally produce copied byte arrays for each of the
-   * split segments.
-   *
-   * @param source Source array.
-   * @param separator Separator represented as a byte array.
-   * @return byte[][] after splitting the source.
-   */
-  private static byte[][] split(byte[] source, byte[] separator) {
-    return split(source, separator, (int[]) null);
-  }
-
-  /**
-   * Splits the source array into multiple array segments using the given
-   * separator, up to a maximum of count items. This will naturally produce
-   * copied byte arrays for each of the split segments.
-   *
-   * @param source Source array.
-   * @param separator Separator represented as a byte array.
-   * @param limit a non-positive value indicates no limit on number of segments.
-   * @return byte[][] after splitting the input source.
-   */
-  private static byte[][] split(byte[] source, byte[] separator, int limit) {
-    int[] sizes = null;
-    if (limit > VARIABLE_SIZE) {
-      sizes = new int[limit];
-    }
-    List<Range> segments = splitRanges(source, separator, sizes);
-    return split(source, segments);
-  }
-}

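To make the escaping scheme above concrete: encode first replaces "%" with
"%9$" so that pre-existing text can never be mistaken for an encoded
separator, and then replaces the separator itself (for QUALIFIERS, "!"
becomes "%0$"). A small round trip against the enum removed above (assuming
it stays on the classpath):

    import org.apache.hadoop.yarn.server.timelineservice.storage.common.Separator;

    public final class SeparatorDemo {
      public static void main(String[] args) {
        // The "!" inside the first value is escaped so it cannot collide
        // with the "!" used as the joiner.
        String joined = Separator.QUALIFIERS.joinEncoded("cluster!a", "user1");
        System.out.println(joined);  // cluster%0$a!user1
        // splitEncoded splits on the raw "!" and then decodes each segment.
        System.out.println(Separator.QUALIFIERS.splitEncoded(joined));
        // [cluster!a, user1]
      }
    }
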
http://git-wip-us.apache.org/repos/asf/hadoop/blob/9af30d46/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/common/StringKeyConverter.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/common/StringKeyConverter.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/common/StringKeyConverter.java
deleted file mode 100644
index 282848e..0000000
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/common/StringKeyConverter.java
+++ /dev/null
@@ -1,54 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.yarn.server.timelineservice.storage.common;
-
-/**
- * Encodes and decodes column names / row keys which are merely strings.
- * Column prefixes are not part of the column name passed for encoding. They
- * are added later, if required, in the associated ColumnPrefix
- * implementations.
- */
-public final class StringKeyConverter implements KeyConverter<String> {
-
-  public StringKeyConverter() {
-  }
-
-  /*
-   * (non-Javadoc)
-   *
-   * @see
-   * org.apache.hadoop.yarn.server.timelineservice.storage.common.KeyConverter
-   * #encode(java.lang.Object)
-   */
-  @Override
-  public byte[] encode(String key) {
-    return Separator.encode(key, Separator.SPACE, Separator.TAB);
-  }
-
-  /*
-   * (non-Javadoc)
-   *
-   * @see
-   * org.apache.hadoop.yarn.server.timelineservice.storage.common.KeyConverter
-   * #decode(byte[])
-   */
-  @Override
-  public String decode(byte[] bytes) {
-    return Separator.decode(bytes, Separator.TAB, Separator.SPACE);
-  }
-}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/9af30d46/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/common/TimelineHBaseSchemaConstants.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/common/TimelineHBaseSchemaConstants.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/common/TimelineHBaseSchemaConstants.java
deleted file mode 100644
index 8e6c259..0000000
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/common/TimelineHBaseSchemaConstants.java
+++ /dev/null
@@ -1,71 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.yarn.server.timelineservice.storage.common;
-
-import org.apache.hadoop.classification.InterfaceAudience;
-import org.apache.hadoop.classification.InterfaceStability;
-import org.apache.hadoop.hbase.util.Bytes;
-
-/**
- * Contains the constants used in the context of schema accesses for
- * {@link org.apache.hadoop.yarn.api.records.timelineservice.TimelineEntity}
- * information.
- */
-@InterfaceAudience.Private
-@InterfaceStability.Unstable
-public final class TimelineHBaseSchemaConstants {
-  private TimelineHBaseSchemaConstants() {
-  }
-
-  /**
-   * Used to create a pre-split for tables starting with a username in the
-   * prefix. TODO: this may have to become a config variable (string with
-   * separators) so that different installations can presplit based on their own
-   * commonly occurring names.
-   */
-  private final static byte[][] USERNAME_SPLITS = {
-      Bytes.toBytes("a"), Bytes.toBytes("ad"), Bytes.toBytes("an"),
-      Bytes.toBytes("b"), Bytes.toBytes("ca"), Bytes.toBytes("cl"),
-      Bytes.toBytes("d"), Bytes.toBytes("e"), Bytes.toBytes("f"),
-      Bytes.toBytes("g"), Bytes.toBytes("h"), Bytes.toBytes("i"),
-      Bytes.toBytes("j"), Bytes.toBytes("k"), Bytes.toBytes("l"),
-      Bytes.toBytes("m"), Bytes.toBytes("n"), Bytes.toBytes("o"),
-      Bytes.toBytes("q"), Bytes.toBytes("r"), Bytes.toBytes("s"),
-      Bytes.toBytes("se"), Bytes.toBytes("t"), Bytes.toBytes("u"),
-      Bytes.toBytes("v"), Bytes.toBytes("w"), Bytes.toBytes("x"),
-      Bytes.toBytes("y"), Bytes.toBytes("z")
-  };
-
-  /**
-   * The length at which keys auto-split.
-   */
-  public static final String USERNAME_SPLIT_KEY_PREFIX_LENGTH = "4";
-
-  /**
-   * @return splits for tables where a username is the prefix.
-   */
-  public static byte[][] getUsernameSplits() {
-    byte[][] kloon = USERNAME_SPLITS.clone();
-    // Deep copy.
-    for (int row = 0; row < USERNAME_SPLITS.length; row++) {
-      kloon[row] = Bytes.copy(USERNAME_SPLITS[row]);
-    }
-    return kloon;
-  }
-
-}
\ No newline at end of file

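For context, the username splits above are meant to be handed to HBase at
table-creation time so that regions are pre-split along common username
prefixes instead of all initial traffic landing in one region. A hedged
sketch of that call (PresplitDemo is an invented name; createTable with
explicit split keys is standard HBase Admin API, shown here with the
HBase 2.x TableDescriptor naming -- older clients use HTableDescriptor):

    import java.io.IOException;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.yarn.server.timelineservice.storage.common.TimelineHBaseSchemaConstants;

    final class PresplitDemo {
      /** Creates the table pre-split along the common username prefixes. */
      static void createPresplitTable(Admin admin, TableDescriptor descriptor)
          throws IOException {
        admin.createTable(descriptor,
            TimelineHBaseSchemaConstants.getUsernameSplits());
      }
    }
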
http://git-wip-us.apache.org/repos/asf/hadoop/blob/9af30d46/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/common/TimestampGenerator.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/common/TimestampGenerator.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/common/TimestampGenerator.java
deleted file mode 100644
index d03b37d..0000000
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/common/TimestampGenerator.java
+++ /dev/null
@@ -1,116 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.yarn.server.timelineservice.storage.common;
-
-import java.util.concurrent.atomic.AtomicLong;
-
-import org.apache.hadoop.yarn.api.records.ApplicationId;
-
-/**
- * Utility class that allows HBase coprocessors to interact with unique
- * timestamps.
- */
-public class TimestampGenerator {
-
-  /*
-   * if this is changed, then reading cell timestamps written with an older
-   * multiplier value will not work
-   */
-  public static final long TS_MULTIPLIER = 1000000L;
-
-  private final AtomicLong lastTimestamp = new AtomicLong();
-
-  /**
-   * Returns the current wall clock time in milliseconds, multiplied by the
-   * required precision.
-   *
-   * @return current timestamp.
-   */
-  public long currentTime() {
-    // We want to align cell timestamps with the current time.
-    // Cell timestamps must not be less than
-    // System.currentTimeMillis() * TS_MULTIPLIER.
-    return System.currentTimeMillis() * TS_MULTIPLIER;
-  }
-
-  /**
-   * Returns a timestamp value unique within the scope of this
-   * {@code TimestampGenerator} instance. For usage by HBase
-   * {@code RegionObserver} coprocessors, this normally means unique within a
-   * given region.
-   *
-   * Unlikely scenario of generating a non-unique timestamp: if there is a
-   * sustained rate of more than 1M HBase writes per second AND the region
-   * fails over within that range of generated timestamps, then there may be
-   * collisions writing to a cell version of the same column.
-   *
-   * @return unique timestamp.
-   */
-  public long getUniqueTimestamp() {
-    long lastTs;
-    long nextTs;
-    do {
-      lastTs = lastTimestamp.get();
-      nextTs = Math.max(lastTs + 1, currentTime());
-    } while (!lastTimestamp.compareAndSet(lastTs, nextTs));
-    return nextTs;
-  }
-
-  /**
-   * Returns a timestamp multiplied by TS_MULTIPLIER plus the last few digits
-   * of the application id.
-   *
-   * Unlikely scenario of generating a timestamp that is a duplicate: if more
-   * than 1M concurrent apps are running in one flow run AND write to the same
-   * column at the same time, then an appId of 1,000,001 will yield the same
-   * suffix as an appId of 1 and there may be collisions for that flow run's
-   * specific column.
-   *
-   * @param incomingTS Timestamp to be converted.
-   * @param appId Application Id.
-   * @return a timestamp multiplied by TS_MULTIPLIER plus the last few digits
-   *         of the application id
-   */
-  public static long getSupplementedTimestamp(long incomingTS, String appId) {
-    long suffix = getAppIdSuffix(appId);
-    return incomingTS * TS_MULTIPLIER + suffix;
-  }
-
-  private static long getAppIdSuffix(String appIdStr) {
-    if (appIdStr == null) {
-      return 0L;
-    }
-    ApplicationId appId = ApplicationId.fromString(appIdStr);
-    long id = appId.getId() % TS_MULTIPLIER;
-    return id;
-  }
-
-  /**
-   * Truncates the last few digits of the timestamp which were supplemented by
-   * the TimestampGenerator#getSupplementedTimestamp function.
-   *
-   * @param incomingTS Timestamp to be truncated.
-   * @return a truncated timestamp value
-   */
-  public static long getTruncatedTimestamp(long incomingTS) {
-    return incomingTS / TS_MULTIPLIER;
-  }
-}
\ No newline at end of file

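A quick numeric check of the supplement/truncate pair above (the app id
suffix 42 is an arbitrary example value):

    public final class SupplementDemo {
      private static final long TS_MULTIPLIER = 1_000_000L;

      public static void main(String[] args) {
        long incoming = 1_518_900_000_000L;  // wall-clock millis
        long suffix = 42L % TS_MULTIPLIER;   // last digits of the app id
        long supplemented = incoming * TS_MULTIPLIER + suffix;
        // 1518900000000 * 1000000 + 42 = 1518900000000000042,
        // still comfortably within the range of a signed long.
        System.out.println(supplemented);
        // Integer division drops the suffix and recovers the original millis.
        System.out.println(supplemented / TS_MULTIPLIER == incoming);  // true
      }
    }
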
http://git-wip-us.apache.org/repos/asf/hadoop/blob/9af30d46/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/common/TypedBufferedMutator.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/common/TypedBufferedMutator.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/common/TypedBufferedMutator.java
deleted file mode 100644
index 64a11f8..0000000
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/common/TypedBufferedMutator.java
+++ /dev/null
@@ -1,28 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.yarn.server.timelineservice.storage.common;
-
-import org.apache.hadoop.hbase.client.BufferedMutator;
-
-/**
- * Just a typed wrapper around {@link BufferedMutator} used to ensure that
- * columns can write only to the table mutator for the right table.
- */
-public interface TypedBufferedMutator<T> extends BufferedMutator {
-  // This class is intentionally left (almost) blank
-}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/9af30d46/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/common/ValueConverter.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/common/ValueConverter.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/common/ValueConverter.java
deleted file mode 100644
index 757a6d3..0000000
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/common/ValueConverter.java
+++ /dev/null
@@ -1,47 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.yarn.server.timelineservice.storage.common;
-
-import java.io.IOException;
-
-/**
- * Converter used to encode/decode value associated with a column prefix or a
- * column.
- */
-public interface ValueConverter {
-
-  /**
-   * Encode an object as a byte array depending on the converter implementation.
-   *
-   * @param value Value to be encoded.
-   * @return a byte array
-   * @throws IOException if any problem is encountered while encoding.
-   */
-  byte[] encodeValue(Object value) throws IOException;
-
-  /**
-   * Decode a byte array and convert it into an object depending on the
-   * converter implementation.
-   *
-   * @param bytes Byte array to be decoded.
-   * @return an object
-   * @throws IOException if any problem is encountered while decoding.
-   */
-  Object decodeValue(byte[] bytes) throws IOException;
-}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/9af30d46/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/common/package-info.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/common/package-info.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/common/package-info.java
deleted file mode 100644
index 0df5b8a..0000000
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/common/package-info.java
+++ /dev/null
@@ -1,28 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-/**
- * Package org.apache.hadoop.yarn.server.timelineservice.storage.common contains
- * a set of utility classes used across backend storage reader and writer.
- */
-@InterfaceAudience.Private
-@InterfaceStability.Unstable
-package org.apache.hadoop.yarn.server.timelineservice.storage.common;
-
-import org.apache.hadoop.classification.InterfaceAudience;
-import org.apache.hadoop.classification.InterfaceStability;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/9af30d46/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/entity/EntityColumn.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/entity/EntityColumn.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/entity/EntityColumn.java
deleted file mode 100644
index b228d84..0000000
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/entity/EntityColumn.java
+++ /dev/null
@@ -1,112 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.yarn.server.timelineservice.storage.entity;
-
-import java.io.IOException;
-
-import org.apache.hadoop.hbase.client.Result;
-import org.apache.hadoop.hbase.util.Bytes;
-import org.apache.hadoop.yarn.server.timelineservice.storage.common.Column;
-import org.apache.hadoop.yarn.server.timelineservice.storage.common.ColumnFamily;
-import org.apache.hadoop.yarn.server.timelineservice.storage.common.ColumnHelper;
-import org.apache.hadoop.yarn.server.timelineservice.storage.common.GenericConverter;
-import org.apache.hadoop.yarn.server.timelineservice.storage.common.LongConverter;
-import org.apache.hadoop.yarn.server.timelineservice.storage.common.Separator;
-import org.apache.hadoop.yarn.server.timelineservice.storage.common.TypedBufferedMutator;
-import org.apache.hadoop.yarn.server.timelineservice.storage.common.ValueConverter;
-import org.apache.hadoop.yarn.server.timelineservice.storage.flow.Attribute;
-
-/**
- * Identifies fully qualified columns for the {@link EntityTable}.
- */
-public enum EntityColumn implements Column<EntityTable> {
-
-  /**
-   * Identifier for the entity.
-   */
-  ID(EntityColumnFamily.INFO, "id"),
-
-  /**
-   * The type of entity.
-   */
-  TYPE(EntityColumnFamily.INFO, "type"),
-
-  /**
-   * When the entity was created.
-   */
-  CREATED_TIME(EntityColumnFamily.INFO, "created_time", new LongConverter()),
-
-  /**
-   * The version of the flow that this entity belongs to.
-   */
-  FLOW_VERSION(EntityColumnFamily.INFO, "flow_version");
-
-  private final ColumnHelper<EntityTable> column;
-  private final ColumnFamily<EntityTable> columnFamily;
-  private final String columnQualifier;
-  private final byte[] columnQualifierBytes;
-
-  EntityColumn(ColumnFamily<EntityTable> columnFamily,
-      String columnQualifier) {
-    this(columnFamily, columnQualifier, GenericConverter.getInstance());
-  }
-
-  EntityColumn(ColumnFamily<EntityTable> columnFamily,
-      String columnQualifier, ValueConverter converter) {
-    this.columnFamily = columnFamily;
-    this.columnQualifier = columnQualifier;
-    // Future-proof by ensuring the right column prefix hygiene.
-    this.columnQualifierBytes =
-        Bytes.toBytes(Separator.SPACE.encode(columnQualifier));
-    this.column = new ColumnHelper<EntityTable>(columnFamily, converter);
-  }
-
-  /**
-   * @return the column name value
-   */
-  private String getColumnQualifier() {
-    return columnQualifier;
-  }
-
-  public void store(byte[] rowKey,
-      TypedBufferedMutator<EntityTable> tableMutator, Long timestamp,
-      Object inputValue, Attribute... attributes) throws IOException {
-    column.store(rowKey, tableMutator, columnQualifierBytes, timestamp,
-        inputValue, attributes);
-  }
-
-  public Object readResult(Result result) throws IOException {
-    return column.readResult(result, columnQualifierBytes);
-  }
-
-  @Override
-  public byte[] getColumnQualifierBytes() {
-    return columnQualifierBytes.clone();
-  }
-
-  @Override
-  public byte[] getColumnFamilyBytes() {
-    return columnFamily.getBytes();
-  }
-
-  @Override
-  public ValueConverter getValueConverter() {
-    return column.getValueConverter();
-  }
-
-}

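The deleted enum above is the recurring column pattern in this storage
layer: each constant fixes its column family, qualifier, and value converter
at construction time, and hands out only defensive copies of its qualifier
bytes. A stripped-down, self-contained sketch of the same pattern (names are
stand-ins):

    import java.nio.charset.StandardCharsets;

    enum DemoColumn {
      ID("id"),
      CREATED_TIME("created_time");

      private final byte[] qualifierBytes;

      DemoColumn(String qualifier) {
        // Encode once at construction, as EntityColumn does.
        this.qualifierBytes = qualifier.getBytes(StandardCharsets.UTF_8);
      }

      byte[] getQualifierBytes() {
        // Defensive copy, mirroring getColumnQualifierBytes() above.
        return qualifierBytes.clone();
      }
    }
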
http://git-wip-us.apache.org/repos/asf/hadoop/blob/9af30d46/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/entity/EntityColumnFamily.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/entity/EntityColumnFamily.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/entity/EntityColumnFamily.java
deleted file mode 100644
index 7c63727..0000000
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/entity/EntityColumnFamily.java
+++ /dev/null
@@ -1,65 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.yarn.server.timelineservice.storage.entity;
-
-import org.apache.hadoop.hbase.util.Bytes;
-import org.apache.hadoop.yarn.server.timelineservice.storage.common.ColumnFamily;
-import org.apache.hadoop.yarn.server.timelineservice.storage.common.Separator;
-
-/**
- * Represents the entity table column families.
- */
-public enum EntityColumnFamily implements ColumnFamily<EntityTable> {
-
-  /**
-   * Info column family houses known columns, specifically ones included in
-   * columnfamily filters.
-   */
-  INFO("i"),
-
-  /**
-   * Configurations are in a separate column family for two reasons: a) the size
-   * of the config values can be very large and b) we expect that config values
-   * are often separately accessed from other metrics and info columns.
-   */
-  CONFIGS("c"),
-
-  /**
-   * Metrics have a separate column family, because they have a separate TTL.
-   */
-  METRICS("m");
-
-  /**
-   * Byte representation of this column family.
-   */
-  private final byte[] bytes;
-
-  /**
-   * @param value create a column family with this name. Must be lower case and
-   *          without spaces.
-   */
-  EntityColumnFamily(String value) {
-    // column families should be lower case and not contain any spaces.
-    this.bytes = Bytes.toBytes(Separator.SPACE.encode(value));
-  }
-
-  public byte[] getBytes() {
-    return Bytes.copy(bytes);
-  }
-
-}




[37/50] [abbrv] hadoop git commit: YARN-7947. Capacity Scheduler intra-queue preemption can NPE for non-schedulable apps. Contributed by Eric Payne.

Posted by ha...@apache.org.
YARN-7947. Capacity Scheduler intra-queue preemption can NPE for non-schedulable apps. Contributed by Eric Payne.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/bdd2a184
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/bdd2a184
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/bdd2a184

Branch: refs/heads/HDFS-12996
Commit: bdd2a184d78379d99c802a43ebec7d2cef0bbaf7
Parents: 86b227a
Author: Sunil G <su...@apache.org>
Authored: Wed Feb 21 14:35:57 2018 +0530
Committer: Sunil G <su...@apache.org>
Committed: Wed Feb 21 14:35:57 2018 +0530

----------------------------------------------------------------------
 .../monitor/capacity/FifoIntraQueuePreemptionPlugin.java           | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/bdd2a184/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/FifoIntraQueuePreemptionPlugin.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/FifoIntraQueuePreemptionPlugin.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/FifoIntraQueuePreemptionPlugin.java
index 3332f2a..1776bd4 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/FifoIntraQueuePreemptionPlugin.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/FifoIntraQueuePreemptionPlugin.java
@@ -412,7 +412,7 @@ public class FifoIntraQueuePreemptionPlugin
         TempUserPerPartition tmpUser = new TempUserPerPartition(
             tq.leafQueue.getUser(userName), tq.queueName,
             Resources.clone(userResourceUsage.getUsed(partition)),
-            Resources.clone(userSpecificAmUsed),
+            Resources.clone(amUsed),
             Resources.clone(userResourceUsage.getReserved(partition)),
             Resources.none());
 


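The one-line change above swaps in amUsed, a value computed earlier in the
method, in place of userSpecificAmUsed, which can still be null for an app
that never became schedulable. A hedged sketch of the null guard this relies
on (its exact form lies outside the hunk, so the shape below is an
assumption):

    import org.apache.hadoop.yarn.api.records.Resource;
    import org.apache.hadoop.yarn.util.resource.Resources;

    final class AmUsedGuard {
      // Assumed normalization: a non-schedulable app has no AM container,
      // so its per-user AM usage falls back to the empty resource.
      static Resource nullSafeAmUsed(Resource userSpecificAmUsed) {
        return (userSpecificAmUsed == null)
            ? Resources.none()
            : userSpecificAmUsed;
      }
    }
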


[18/50] [abbrv] hadoop git commit: YARN-7919. Refactor timelineservice-hbase module into submodules. Contributed by Haibo Chen.

Posted by ha...@apache.org.
http://git-wip-us.apache.org/repos/asf/hadoop/blob/9af30d46/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-client/src/main/java/org/apache/hadoop/yarn/server/timelineservice/reader/filter/TimelineFilterUtils.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-client/src/main/java/org/apache/hadoop/yarn/server/timelineservice/reader/filter/TimelineFilterUtils.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-client/src/main/java/org/apache/hadoop/yarn/server/timelineservice/reader/filter/TimelineFilterUtils.java
new file mode 100644
index 0000000..2b98eec
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-client/src/main/java/org/apache/hadoop/yarn/server/timelineservice/reader/filter/TimelineFilterUtils.java
@@ -0,0 +1,313 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.server.timelineservice.reader.filter;
+
+import java.io.IOException;
+import java.util.HashSet;
+import java.util.Set;
+
+import org.apache.hadoop.hbase.filter.BinaryComparator;
+import org.apache.hadoop.hbase.filter.BinaryPrefixComparator;
+import org.apache.hadoop.hbase.filter.FamilyFilter;
+import org.apache.hadoop.hbase.filter.CompareFilter.CompareOp;
+import org.apache.hadoop.hbase.filter.Filter;
+import org.apache.hadoop.hbase.filter.FilterList;
+import org.apache.hadoop.hbase.filter.FilterList.Operator;
+import org.apache.hadoop.yarn.server.timelineservice.storage.common.BaseTable;
+import org.apache.hadoop.yarn.server.timelineservice.storage.common.Column;
+import org.apache.hadoop.yarn.server.timelineservice.storage.common.ColumnFamily;
+import org.apache.hadoop.yarn.server.timelineservice.storage.common.ColumnPrefix;
+import org.apache.hadoop.hbase.filter.QualifierFilter;
+import org.apache.hadoop.hbase.filter.SingleColumnValueFilter;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+/**
+ * Set of utility methods used by timeline filter classes.
+ */
+public final class TimelineFilterUtils {
+
+  private static final Logger LOG =
+      LoggerFactory.getLogger(TimelineFilterUtils.class);
+
+  private TimelineFilterUtils() {
+  }
+
+  /**
+   * Returns the equivalent HBase filter list's {@link Operator}.
+   *
+   * @param op timeline filter list operator.
+   * @return HBase filter list's Operator.
+   */
+  private static Operator getHBaseOperator(TimelineFilterList.Operator op) {
+    switch (op) {
+    case AND:
+      return Operator.MUST_PASS_ALL;
+    case OR:
+      return Operator.MUST_PASS_ONE;
+    default:
+      throw new IllegalArgumentException("Invalid operator");
+    }
+  }
+
+  /**
+   * Returns the equivalent HBase compare filter's {@link CompareOp}.
+   *
+   * @param op timeline compare op.
+   * @return HBase compare filter's CompareOp.
+   */
+  private static CompareOp getHBaseCompareOp(
+      TimelineCompareOp op) {
+    switch (op) {
+    case LESS_THAN:
+      return CompareOp.LESS;
+    case LESS_OR_EQUAL:
+      return CompareOp.LESS_OR_EQUAL;
+    case EQUAL:
+      return CompareOp.EQUAL;
+    case NOT_EQUAL:
+      return CompareOp.NOT_EQUAL;
+    case GREATER_OR_EQUAL:
+      return CompareOp.GREATER_OR_EQUAL;
+    case GREATER_THAN:
+      return CompareOp.GREATER;
+    default:
+      throw new IllegalArgumentException("Invalid compare operator");
+    }
+  }
+
+  /**
+   * Converts a {@link TimelinePrefixFilter} to an equivalent HBase
+   * {@link QualifierFilter}.
+   * @param colPrefix column prefix used to derive the qualifier prefix bytes.
+   * @param filter timeline prefix filter to convert.
+   * @return a {@link QualifierFilter} object
+   */
+  private static <T extends BaseTable<T>> Filter createHBaseColQualPrefixFilter(
+      ColumnPrefix<T> colPrefix, TimelinePrefixFilter filter) {
+    return new QualifierFilter(getHBaseCompareOp(filter.getCompareOp()),
+        new BinaryPrefixComparator(
+            colPrefix.getColumnPrefixBytes(filter.getPrefix())));
+  }
+
+  /**
+   * Create an HBase {@link QualifierFilter} for the passed column prefix and
+   * compare op.
+   *
+   * @param <T> Describes the type of column prefix.
+   * @param compareOp compare op.
+   * @param columnPrefix column prefix.
+   * @return a column qualifier filter.
+   */
+  public static <T extends BaseTable<T>> Filter createHBaseQualifierFilter(
+      CompareOp compareOp, ColumnPrefix<T> columnPrefix) {
+    return new QualifierFilter(compareOp,
+        new BinaryPrefixComparator(
+            columnPrefix.getColumnPrefixBytes("")));
+  }
+
+  /**
+   * Create filters for confs or metrics to retrieve. This list includes a
+   * configs/metrics family filter and relevant filters for confs/metrics to
+   * retrieve, if present.
+   *
+   * @param <T> Describes the type of column prefix.
+   * @param confsOrMetricToRetrieve configs/metrics to retrieve.
+   * @param columnFamily config or metric column family.
+   * @param columnPrefix config or metric column prefix.
+   * @return a filter list.
+   * @throws IOException if any problem occurs while creating the filters.
+   */
+  public static <T extends BaseTable<T>> Filter
+      createFilterForConfsOrMetricsToRetrieve(
+      TimelineFilterList confsOrMetricToRetrieve, ColumnFamily<T> columnFamily,
+      ColumnPrefix<T> columnPrefix) throws IOException {
+    Filter familyFilter = new FamilyFilter(CompareOp.EQUAL,
+        new BinaryComparator(columnFamily.getBytes()));
+    if (confsOrMetricToRetrieve != null &&
+        !confsOrMetricToRetrieve.getFilterList().isEmpty()) {
+      // If confsOrMetricToRetrieve is specified, create a filter list based
+      // on it and the family filter.
+      FilterList filter = new FilterList(familyFilter);
+      filter.addFilter(
+          createHBaseFilterList(columnPrefix, confsOrMetricToRetrieve));
+      return filter;
+    } else {
+      // Only the family filter needs to be added.
+      return familyFilter;
+    }
+  }
+
+  /**
+   * Creates two HBase {@link SingleColumnValueFilter} filters for the
+   * specified value range, represented by a start and an end value, and wraps
+   * them inside a filter list. Start and end values must not be null.
+   *
+   * @param <T> Describes the type of column prefix.
+   * @param column Column for which single column value filter is to be created.
+   * @param startValue Start value.
+   * @param endValue End value.
+   * @return two single column value filters wrapped in a filter list.
+   * @throws IOException if any problem is encountered while encoding value.
+   */
+  public static <T extends BaseTable<T>> FilterList
+      createSingleColValueFiltersByRange(Column<T> column,
+          Object startValue, Object endValue) throws IOException {
+    FilterList list = new FilterList();
+    Filter singleColValFilterStart = createHBaseSingleColValueFilter(
+        column.getColumnFamilyBytes(), column.getColumnQualifierBytes(),
+        column.getValueConverter().encodeValue(startValue),
+        CompareOp.GREATER_OR_EQUAL, true);
+    list.addFilter(singleColValFilterStart);
+
+    Filter singleColValFilterEnd = createHBaseSingleColValueFilter(
+        column.getColumnFamilyBytes(), column.getColumnQualifierBytes(),
+        column.getValueConverter().encodeValue(endValue),
+        CompareOp.LESS_OR_EQUAL, true);
+    list.addFilter(singleColValFilterEnd);
+    return list;
+  }
+
+  /**
+   * Creates an HBase {@link SingleColumnValueFilter} for the specified column.
+   * @param <T> Describes the type of column prefix.
+   * @param column Column whose value is to be filtered.
+   * @param value Value to filter on.
+   * @param op Compare operator.
+   * @return a {@link SingleColumnValueFilter} object.
+   * @throws IOException if any problem occurs while encoding the value.
+   */
+  public static <T extends BaseTable<T>> Filter
+      createHBaseSingleColValueFilter(Column<T> column,
+          Object value, CompareOp op) throws IOException {
+    Filter singleColValFilter = createHBaseSingleColValueFilter(
+        column.getColumnFamilyBytes(), column.getColumnQualifierBytes(),
+        column.getValueConverter().encodeValue(value), op, true);
+    return singleColValFilter;
+  }
+
+  /**
+   * Creates an HBase {@link SingleColumnValueFilter}.
+   *
+   * @param columnFamily Column Family represented as bytes.
+   * @param columnQualifier Column Qualifier represented as bytes.
+   * @param value Value.
+   * @param compareOp Compare operator.
+   * @param filterIfMissing This flag decides if we should filter the row if the
+   *     specified column is missing. This is based on the filter's keyMustExist
+   *     field.
+   * @return a {@link SingleColumnValueFilter} object
+   * @throws IOException if any problem occurs while creating the filter.
+   */
+  private static SingleColumnValueFilter createHBaseSingleColValueFilter(
+      byte[] columnFamily, byte[] columnQualifier, byte[] value,
+      CompareOp compareOp, boolean filterIfMissing) throws IOException {
+    SingleColumnValueFilter singleColValFilter =
+        new SingleColumnValueFilter(columnFamily, columnQualifier, compareOp,
+        new BinaryComparator(value));
+    singleColValFilter.setLatestVersionOnly(true);
+    singleColValFilter.setFilterIfMissing(filterIfMissing);
+    return singleColValFilter;
+  }
+
+  /**
+   * Fetch columns from filter list containing exists and multivalue equality
+   * filters. This is done to fetch only required columns from back-end and
+   * then match event filters or relationships in reader.
+   *
+   * @param filterList filter list.
+   * @return set of columns.
+   */
+  public static Set<String> fetchColumnsFromFilterList(
+      TimelineFilterList filterList) {
+    Set<String> strSet = new HashSet<String>();
+    for (TimelineFilter filter : filterList.getFilterList()) {
+      switch(filter.getFilterType()) {
+      case LIST:
+        strSet.addAll(fetchColumnsFromFilterList((TimelineFilterList)filter));
+        break;
+      case KEY_VALUES:
+        strSet.add(((TimelineKeyValuesFilter)filter).getKey());
+        break;
+      case EXISTS:
+        strSet.add(((TimelineExistsFilter)filter).getValue());
+        break;
+      default:
+        LOG.info("Unexpected filter type " + filter.getFilterType());
+        break;
+      }
+    }
+    return strSet;
+  }
+
+  /**
+   * Creates equivalent HBase {@link FilterList} from {@link TimelineFilterList}
+   * while converting different timeline filters(of type {@link TimelineFilter})
+   * into their equivalent HBase filters.
+   *
+   * @param <T> Describes the type of column prefix.
+   * @param colPrefix column prefix which will be used for conversion.
+   * @param filterList timeline filter list which has to be converted.
+   * @return A {@link FilterList} object.
+   * @throws IOException if any problem occurs while creating the filter list.
+   */
+  public static <T extends BaseTable<T>> FilterList createHBaseFilterList(
+      ColumnPrefix<T> colPrefix,
+      TimelineFilterList filterList) throws IOException {
+    FilterList list =
+        new FilterList(getHBaseOperator(filterList.getOperator()));
+    for (TimelineFilter filter : filterList.getFilterList()) {
+      switch(filter.getFilterType()) {
+      case LIST:
+        list.addFilter(createHBaseFilterList(colPrefix,
+            (TimelineFilterList)filter));
+        break;
+      case PREFIX:
+        list.addFilter(createHBaseColQualPrefixFilter(colPrefix,
+            (TimelinePrefixFilter)filter));
+        break;
+      case COMPARE:
+        TimelineCompareFilter compareFilter = (TimelineCompareFilter)filter;
+        list.addFilter(
+            createHBaseSingleColValueFilter(
+                colPrefix.getColumnFamilyBytes(),
+                colPrefix.getColumnPrefixBytes(compareFilter.getKey()),
+                colPrefix.getValueConverter().
+                    encodeValue(compareFilter.getValue()),
+                getHBaseCompareOp(compareFilter.getCompareOp()),
+                compareFilter.getKeyMustExist()));
+        break;
+      case KEY_VALUE:
+        TimelineKeyValueFilter kvFilter = (TimelineKeyValueFilter)filter;
+        list.addFilter(
+            createHBaseSingleColValueFilter(
+                colPrefix.getColumnFamilyBytes(),
+                colPrefix.getColumnPrefixBytes(kvFilter.getKey()),
+                colPrefix.getValueConverter().encodeValue(kvFilter.getValue()),
+                getHBaseCompareOp(kvFilter.getCompareOp()),
+                kvFilter.getKeyMustExist()));
+        break;
+      default:
+        LOG.info("Unexpected filter type " + filter.getFilterType());
+        break;
+      }
+    }
+    return list;
+  }
+}

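The helpers above bottom out in plain HBase 1.x client filters. This
self-contained sketch shows the shape of the tree that
createFilterForConfsOrMetricsToRetrieve returns for a configs query; the
"c" family byte matches the CONFIGS family shown earlier in this patch,
while the "yarn." qualifier prefix is an illustrative assumption:

    import org.apache.hadoop.hbase.filter.BinaryComparator;
    import org.apache.hadoop.hbase.filter.BinaryPrefixComparator;
    import org.apache.hadoop.hbase.filter.CompareFilter.CompareOp;
    import org.apache.hadoop.hbase.filter.FamilyFilter;
    import org.apache.hadoop.hbase.filter.FilterList;
    import org.apache.hadoop.hbase.filter.QualifierFilter;
    import org.apache.hadoop.hbase.util.Bytes;

    public final class ConfigFilterSketch {
      private ConfigFilterSketch() {
      }

      public static FilterList configsWithPrefix() {
        // The family filter pins the scan to the configs column family;
        // the qualifier filter then narrows it to one key prefix.
        FilterList list = new FilterList(new FamilyFilter(
            CompareOp.EQUAL, new BinaryComparator(Bytes.toBytes("c"))));
        list.addFilter(new QualifierFilter(CompareOp.EQUAL,
            new BinaryPrefixComparator(Bytes.toBytes("yarn."))));
        return list;
      }
    }
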
http://git-wip-us.apache.org/repos/asf/hadoop/blob/9af30d46/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-client/src/main/java/org/apache/hadoop/yarn/server/timelineservice/reader/filter/package-info.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-client/src/main/java/org/apache/hadoop/yarn/server/timelineservice/reader/filter/package-info.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-client/src/main/java/org/apache/hadoop/yarn/server/timelineservice/reader/filter/package-info.java
new file mode 100644
index 0000000..f7c0705
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-client/src/main/java/org/apache/hadoop/yarn/server/timelineservice/reader/filter/package-info.java
@@ -0,0 +1,28 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/**
+ * Package org.apache.hadoop.yarn.server.timelineservice.reader.filter stores
+ * timeline filter implementations.
+ */
+@InterfaceAudience.Private
+@InterfaceStability.Unstable
+package org.apache.hadoop.yarn.server.timelineservice.reader.filter;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hadoop/blob/9af30d46/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-client/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/HBaseTimelineReaderImpl.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-client/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/HBaseTimelineReaderImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-client/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/HBaseTimelineReaderImpl.java
new file mode 100644
index 0000000..1ebfab2
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-client/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/HBaseTimelineReaderImpl.java
@@ -0,0 +1,96 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.yarn.server.timelineservice.storage;
+
+
+import java.io.IOException;
+import java.util.Set;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hbase.client.Connection;
+import org.apache.hadoop.hbase.client.ConnectionFactory;
+import org.apache.hadoop.service.AbstractService;
+import org.apache.hadoop.yarn.api.records.timelineservice.TimelineEntity;
+import org.apache.hadoop.yarn.server.timelineservice.reader.TimelineDataToRetrieve;
+import org.apache.hadoop.yarn.server.timelineservice.reader.TimelineEntityFilters;
+import org.apache.hadoop.yarn.server.timelineservice.reader.TimelineReaderContext;
+import org.apache.hadoop.yarn.server.timelineservice.storage.common.HBaseTimelineStorageUtils;
+import org.apache.hadoop.yarn.server.timelineservice.storage.reader.EntityTypeReader;
+import org.apache.hadoop.yarn.server.timelineservice.storage.reader.TimelineEntityReader;
+import org.apache.hadoop.yarn.server.timelineservice.storage.reader.TimelineEntityReaderFactory;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+/**
+ * HBase based implementation for {@link TimelineReader}.
+ */
+public class HBaseTimelineReaderImpl
+    extends AbstractService implements TimelineReader {
+
+  private static final Logger LOG = LoggerFactory
+      .getLogger(HBaseTimelineReaderImpl.class);
+
+  private Configuration hbaseConf = null;
+  private Connection conn;
+
+  public HBaseTimelineReaderImpl() {
+    super(HBaseTimelineReaderImpl.class.getName());
+  }
+
+  @Override
+  public void serviceInit(Configuration conf) throws Exception {
+    super.serviceInit(conf);
+    hbaseConf = HBaseTimelineStorageUtils.getTimelineServiceHBaseConf(conf);
+    conn = ConnectionFactory.createConnection(hbaseConf);
+  }
+
+  @Override
+  protected void serviceStop() throws Exception {
+    if (conn != null) {
+      LOG.info("closing the hbase Connection");
+      conn.close();
+    }
+    super.serviceStop();
+  }
+
+  @Override
+  public TimelineEntity getEntity(TimelineReaderContext context,
+      TimelineDataToRetrieve dataToRetrieve) throws IOException {
+    TimelineEntityReader reader =
+        TimelineEntityReaderFactory.createSingleEntityReader(context,
+            dataToRetrieve);
+    return reader.readEntity(hbaseConf, conn);
+  }
+
+  @Override
+  public Set<TimelineEntity> getEntities(TimelineReaderContext context,
+      TimelineEntityFilters filters, TimelineDataToRetrieve dataToRetrieve)
+      throws IOException {
+    TimelineEntityReader reader =
+        TimelineEntityReaderFactory.createMultipleEntitiesReader(context,
+            filters, dataToRetrieve);
+    return reader.readEntities(hbaseConf, conn);
+  }
+
+  @Override
+  public Set<String> getEntityTypes(TimelineReaderContext context)
+      throws IOException {
+    EntityTypeReader reader = new EntityTypeReader(context);
+    return reader.readEntityTypes(hbaseConf, conn);
+  }
+}

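Being an AbstractService, the reader above is driven through the standard
Hadoop service lifecycle: init creates the HBase Connection, stop closes it.
A short usage sketch (building a real TimelineReaderContext takes many
positional fields, so the actual read call is left as a comment):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.yarn.conf.YarnConfiguration;
    import org.apache.hadoop.yarn.server.timelineservice.storage.HBaseTimelineReaderImpl;

    public final class ReaderLifecycleSketch {
      public static void main(String[] args) {
        Configuration conf = new YarnConfiguration();
        HBaseTimelineReaderImpl reader = new HBaseTimelineReaderImpl();
        reader.init(conf);   // serviceInit creates the HBase Connection
        reader.start();
        try {
          // Set<String> types = reader.getEntityTypes(context);
        } finally {
          reader.stop();     // serviceStop closes the Connection
        }
      }
    }
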
http://git-wip-us.apache.org/repos/asf/hadoop/blob/9af30d46/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-client/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/HBaseTimelineWriterImpl.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-client/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/HBaseTimelineWriterImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-client/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/HBaseTimelineWriterImpl.java
new file mode 100644
index 0000000..027505b
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-client/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/HBaseTimelineWriterImpl.java
@@ -0,0 +1,611 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.yarn.server.timelineservice.storage;
+
+import java.io.IOException;
+import java.util.Map;
+import java.util.Set;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hbase.client.Connection;
+import org.apache.hadoop.hbase.client.ConnectionFactory;
+import org.apache.hadoop.security.UserGroupInformation;
+import org.apache.hadoop.service.AbstractService;
+import org.apache.hadoop.yarn.api.records.timelineservice.ApplicationEntity;
+import org.apache.hadoop.yarn.api.records.timelineservice.TimelineEntities;
+import org.apache.hadoop.yarn.api.records.timelineservice.TimelineEntity;
+import org.apache.hadoop.yarn.api.records.timelineservice.TimelineEvent;
+import org.apache.hadoop.yarn.api.records.timelineservice.TimelineMetric;
+import org.apache.hadoop.yarn.api.records.timelineservice.TimelineWriteResponse;
+import org.apache.hadoop.yarn.server.metrics.ApplicationMetricsConstants;
+import org.apache.hadoop.yarn.server.timelineservice.collector.TimelineCollectorContext;
+import org.apache.hadoop.yarn.server.timelineservice.storage.application.ApplicationColumn;
+import org.apache.hadoop.yarn.server.timelineservice.storage.application.ApplicationColumnPrefix;
+import org.apache.hadoop.yarn.server.timelineservice.storage.application.ApplicationRowKey;
+import org.apache.hadoop.yarn.server.timelineservice.storage.application.ApplicationTable;
+import org.apache.hadoop.yarn.server.timelineservice.storage.application.ApplicationTableRW;
+import org.apache.hadoop.yarn.server.timelineservice.storage.apptoflow.AppToFlowColumnPrefix;
+import org.apache.hadoop.yarn.server.timelineservice.storage.apptoflow.AppToFlowRowKey;
+import org.apache.hadoop.yarn.server.timelineservice.storage.apptoflow.AppToFlowTable;
+import org.apache.hadoop.yarn.server.timelineservice.storage.apptoflow.AppToFlowTableRW;
+import org.apache.hadoop.yarn.server.timelineservice.storage.common.BaseTable;
+import org.apache.hadoop.yarn.server.timelineservice.storage.common.ColumnPrefix;
+import org.apache.hadoop.yarn.server.timelineservice.storage.common.ColumnRWHelper;
+import org.apache.hadoop.yarn.server.timelineservice.storage.common.EventColumnName;
+import org.apache.hadoop.yarn.server.timelineservice.storage.common.HBaseTimelineStorageUtils;
+import org.apache.hadoop.yarn.server.timelineservice.storage.common.KeyConverter;
+import org.apache.hadoop.yarn.server.timelineservice.storage.common.LongKeyConverter;
+import org.apache.hadoop.yarn.server.timelineservice.storage.common.Separator;
+import org.apache.hadoop.yarn.server.timelineservice.storage.common.StringKeyConverter;
+import org.apache.hadoop.yarn.server.timelineservice.storage.common.TypedBufferedMutator;
+import org.apache.hadoop.yarn.server.timelineservice.storage.entity.EntityColumn;
+import org.apache.hadoop.yarn.server.timelineservice.storage.entity.EntityColumnPrefix;
+import org.apache.hadoop.yarn.server.timelineservice.storage.entity.EntityRowKey;
+import org.apache.hadoop.yarn.server.timelineservice.storage.entity.EntityTable;
+import org.apache.hadoop.yarn.server.timelineservice.storage.entity.EntityTableRW;
+import org.apache.hadoop.yarn.server.timelineservice.storage.flow.AggregationCompactionDimension;
+import org.apache.hadoop.yarn.server.timelineservice.storage.flow.AggregationOperation;
+import org.apache.hadoop.yarn.server.timelineservice.storage.flow.Attribute;
+import org.apache.hadoop.yarn.server.timelineservice.storage.flow.FlowActivityColumnPrefix;
+import org.apache.hadoop.yarn.server.timelineservice.storage.flow.FlowActivityRowKey;
+import org.apache.hadoop.yarn.server.timelineservice.storage.flow.FlowActivityTable;
+import org.apache.hadoop.yarn.server.timelineservice.storage.flow.FlowActivityTableRW;
+import org.apache.hadoop.yarn.server.timelineservice.storage.flow.FlowRunColumn;
+import org.apache.hadoop.yarn.server.timelineservice.storage.flow.FlowRunColumnPrefix;
+import org.apache.hadoop.yarn.server.timelineservice.storage.flow.FlowRunRowKey;
+import org.apache.hadoop.yarn.server.timelineservice.storage.flow.FlowRunTable;
+import org.apache.hadoop.yarn.server.timelineservice.storage.flow.FlowRunTableRW;
+import org.apache.hadoop.yarn.server.timelineservice.storage.subapplication.SubApplicationColumn;
+import org.apache.hadoop.yarn.server.timelineservice.storage.subapplication.SubApplicationColumnPrefix;
+import org.apache.hadoop.yarn.server.timelineservice.storage.subapplication.SubApplicationRowKey;
+import org.apache.hadoop.yarn.server.timelineservice.storage.subapplication.SubApplicationTable;
+import org.apache.hadoop.yarn.server.timelineservice.storage.subapplication.SubApplicationTableRW;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+/**
+ * This implements a hbase based backend for storing the timeline entity
+ * information.
+ * It writes to multiple tables at the backend
+ */
+@InterfaceAudience.Private
+@InterfaceStability.Unstable
+public class HBaseTimelineWriterImpl extends AbstractService implements
+    TimelineWriter {
+
+  private static final Logger LOG = LoggerFactory
+      .getLogger(HBaseTimelineWriterImpl.class);
+
+  private Connection conn;
+  private TypedBufferedMutator<EntityTable> entityTable;
+  private TypedBufferedMutator<AppToFlowTable> appToFlowTable;
+  private TypedBufferedMutator<ApplicationTable> applicationTable;
+  private TypedBufferedMutator<FlowActivityTable> flowActivityTable;
+  private TypedBufferedMutator<FlowRunTable> flowRunTable;
+  private TypedBufferedMutator<SubApplicationTable> subApplicationTable;
+
+  /**
+   * Used to convert strings key components to and from storage format.
+   */
+  private final KeyConverter<String> stringKeyConverter =
+      new StringKeyConverter();
+
+  /**
+   * Used to convert Long key components to and from storage format.
+   */
+  private final KeyConverter<Long> longKeyConverter = new LongKeyConverter();
+
+  private enum Tables {
+    APPLICATION_TABLE, ENTITY_TABLE, SUBAPPLICATION_TABLE
+  }
+
+  public HBaseTimelineWriterImpl() {
+    super(HBaseTimelineWriterImpl.class.getName());
+  }
+
+  /**
+   * Initializes the HBase connection and table mutators for writing.
+   */
+  @Override
+  protected void serviceInit(Configuration conf) throws Exception {
+    super.serviceInit(conf);
+    Configuration hbaseConf =
+        HBaseTimelineStorageUtils.getTimelineServiceHBaseConf(conf);
+    conn = ConnectionFactory.createConnection(hbaseConf);
+    entityTable = new EntityTableRW().getTableMutator(hbaseConf, conn);
+    appToFlowTable = new AppToFlowTableRW().getTableMutator(hbaseConf, conn);
+    applicationTable =
+        new ApplicationTableRW().getTableMutator(hbaseConf, conn);
+    flowRunTable = new FlowRunTableRW().getTableMutator(hbaseConf, conn);
+    flowActivityTable =
+        new FlowActivityTableRW().getTableMutator(hbaseConf, conn);
+    subApplicationTable =
+        new SubApplicationTableRW().getTableMutator(hbaseConf, conn);
+
+    UserGroupInformation ugi = UserGroupInformation.isSecurityEnabled() ?
+        UserGroupInformation.getLoginUser() :
+        UserGroupInformation.getCurrentUser();
+    LOG.info("Initialized HBaseTimelineWriterImpl UGI to " + ugi);
+  }
+
+  /**
+   * Stores the entire information in TimelineEntities to the timeline store.
+   */
+  @Override
+  public TimelineWriteResponse write(TimelineCollectorContext context,
+      TimelineEntities data, UserGroupInformation callerUgi)
+      throws IOException {
+
+    TimelineWriteResponse putStatus = new TimelineWriteResponse();
+
+    String clusterId = context.getClusterId();
+    String userId = context.getUserId();
+    String flowName = context.getFlowName();
+    String flowVersion = context.getFlowVersion();
+    long flowRunId = context.getFlowRunId();
+    String appId = context.getAppId();
+    String subApplicationUser = callerUgi.getShortUserName();
+
+    // defensive coding to avoid NPE during row key construction
+    if ((flowName == null) || (appId == null) || (clusterId == null)
+        || (userId == null)) {
+      LOG.warn("Found null for one of: flowName=" + flowName + " appId=" + appId
+          + " userId=" + userId + " clusterId=" + clusterId
+          + " . Not proceeding with writing to hbase");
+      return putStatus;
+    }
+
+    for (TimelineEntity te : data.getEntities()) {
+
+      // a set can have at most 1 null
+      if (te == null) {
+        continue;
+      }
+
+      // if the entity is the application, the destination is the application
+      // table
+      boolean isApplication = ApplicationEntity.isApplicationEntity(te);
+      byte[] rowKey;
+      if (isApplication) {
+        ApplicationRowKey applicationRowKey =
+            new ApplicationRowKey(clusterId, userId, flowName, flowRunId,
+                appId);
+        rowKey = applicationRowKey.getRowKey();
+        store(rowKey, te, flowVersion, Tables.APPLICATION_TABLE);
+      } else {
+        EntityRowKey entityRowKey =
+            new EntityRowKey(clusterId, userId, flowName, flowRunId, appId,
+                te.getType(), te.getIdPrefix(), te.getId());
+        rowKey = entityRowKey.getRowKey();
+        store(rowKey, te, flowVersion, Tables.ENTITY_TABLE);
+      }
+
+      if (!isApplication && !userId.equals(subApplicationUser)) {
+        SubApplicationRowKey subApplicationRowKey =
+            new SubApplicationRowKey(subApplicationUser, clusterId,
+                te.getType(), te.getIdPrefix(), te.getId(), userId);
+        rowKey = subApplicationRowKey.getRowKey();
+        store(rowKey, te, flowVersion, Tables.SUBAPPLICATION_TABLE);
+      }
+
+      if (isApplication) {
+        TimelineEvent event =
+            ApplicationEntity.getApplicationEvent(te,
+                ApplicationMetricsConstants.CREATED_EVENT_TYPE);
+        FlowRunRowKey flowRunRowKey =
+            new FlowRunRowKey(clusterId, userId, flowName, flowRunId);
+        if (event != null) {
+          onApplicationCreated(flowRunRowKey, clusterId, appId, userId,
+              flowVersion, te, event.getTimestamp());
+        }
+        // if it's an application entity, store metrics
+        storeFlowMetricsAppRunning(flowRunRowKey, appId, te);
+        // if application has finished, store its finish time and write the
+        // final values of all metrics
+        event = ApplicationEntity.getApplicationEvent(te,
+            ApplicationMetricsConstants.FINISHED_EVENT_TYPE);
+        if (event != null) {
+          onApplicationFinished(flowRunRowKey, flowVersion, appId, te,
+              event.getTimestamp());
+        }
+      }
+    }
+    return putStatus;
+  }
+
+  private void onApplicationCreated(FlowRunRowKey flowRunRowKey,
+      String clusterId, String appId, String userId, String flowVersion,
+      TimelineEntity te, long appCreatedTimeStamp)
+      throws IOException {
+
+    String flowName = flowRunRowKey.getFlowName();
+    Long flowRunId = flowRunRowKey.getFlowRunId();
+
+    // store in App to flow table
+    AppToFlowRowKey appToFlowRowKey = new AppToFlowRowKey(appId);
+    byte[] rowKey = appToFlowRowKey.getRowKey();
+    ColumnRWHelper.store(rowKey, appToFlowTable,
+        AppToFlowColumnPrefix.FLOW_NAME, clusterId, null, flowName);
+    ColumnRWHelper.store(rowKey, appToFlowTable,
+        AppToFlowColumnPrefix.FLOW_RUN_ID, clusterId, null, flowRunId);
+    ColumnRWHelper.store(rowKey, appToFlowTable, AppToFlowColumnPrefix.USER_ID,
+        clusterId, null, userId);
+
+    // store in flow run table
+    storeAppCreatedInFlowRunTable(flowRunRowKey, appId, te);
+
+    // store in flow activity table
+    byte[] flowActivityRowKeyBytes =
+        new FlowActivityRowKey(flowRunRowKey.getClusterId(),
+            appCreatedTimeStamp, flowRunRowKey.getUserId(), flowName)
+            .getRowKey();
+    byte[] qualifier = longKeyConverter.encode(flowRunRowKey.getFlowRunId());
+    ColumnRWHelper.store(flowActivityRowKeyBytes, flowActivityTable,
+        FlowActivityColumnPrefix.RUN_ID, qualifier, null, flowVersion,
+        AggregationCompactionDimension.APPLICATION_ID.getAttribute(appId));
+  }
+
+  /*
+   * updates the {@link FlowRunTable} with Application Created information
+   */
+  private void storeAppCreatedInFlowRunTable(FlowRunRowKey flowRunRowKey,
+      String appId, TimelineEntity te) throws IOException {
+    byte[] rowKey = flowRunRowKey.getRowKey();
+    ColumnRWHelper.store(rowKey, flowRunTable, FlowRunColumn.MIN_START_TIME,
+        null, te.getCreatedTime(),
+        AggregationCompactionDimension.APPLICATION_ID.getAttribute(appId));
+  }
+
+
+  /*
+   * updates the {@link FlowRunTable} and {@link FlowActivityTable} when an
+   * application has finished
+   */
+  private void onApplicationFinished(FlowRunRowKey flowRunRowKey,
+      String flowVersion, String appId, TimelineEntity te,
+      long appFinishedTimeStamp) throws IOException {
+    // store in flow run table
+    storeAppFinishedInFlowRunTable(flowRunRowKey, appId, te,
+        appFinishedTimeStamp);
+
+    // indicate in the flow activity table that the app has finished
+    byte[] rowKey =
+        new FlowActivityRowKey(flowRunRowKey.getClusterId(),
+            appFinishedTimeStamp, flowRunRowKey.getUserId(),
+            flowRunRowKey.getFlowName()).getRowKey();
+    byte[] qualifier = longKeyConverter.encode(flowRunRowKey.getFlowRunId());
+    ColumnRWHelper.store(rowKey, flowActivityTable,
+        FlowActivityColumnPrefix.RUN_ID, qualifier, null, flowVersion,
+        AggregationCompactionDimension.APPLICATION_ID.getAttribute(appId));
+  }
+
+  /*
+   * Update the {@link FlowRunTable} with Application Finished information
+   */
+  private void storeAppFinishedInFlowRunTable(FlowRunRowKey flowRunRowKey,
+      String appId, TimelineEntity te, long appFinishedTimeStamp)
+      throws IOException {
+    byte[] rowKey = flowRunRowKey.getRowKey();
+    Attribute attributeAppId =
+        AggregationCompactionDimension.APPLICATION_ID.getAttribute(appId);
+    ColumnRWHelper.store(rowKey, flowRunTable, FlowRunColumn.MAX_END_TIME,
+        null, appFinishedTimeStamp, attributeAppId);
+
+    // store the final value of metrics since application has finished
+    Set<TimelineMetric> metrics = te.getMetrics();
+    if (metrics != null) {
+      storeFlowMetrics(rowKey, metrics, attributeAppId,
+          AggregationOperation.SUM_FINAL.getAttribute());
+    }
+  }
+
+  /*
+   * Updates the {@link FlowRunTable} with Application Metrics
+   */
+  private void storeFlowMetricsAppRunning(FlowRunRowKey flowRunRowKey,
+      String appId, TimelineEntity te) throws IOException {
+    Set<TimelineMetric> metrics = te.getMetrics();
+    if (metrics != null) {
+      byte[] rowKey = flowRunRowKey.getRowKey();
+      storeFlowMetrics(rowKey, metrics,
+          AggregationCompactionDimension.APPLICATION_ID.getAttribute(appId),
+          AggregationOperation.SUM.getAttribute());
+    }
+  }
+
+  private void storeFlowMetrics(byte[] rowKey, Set<TimelineMetric> metrics,
+      Attribute... attributes) throws IOException {
+    for (TimelineMetric metric : metrics) {
+      byte[] metricColumnQualifier = stringKeyConverter.encode(metric.getId());
+      Map<Long, Number> timeseries = metric.getValues();
+      for (Map.Entry<Long, Number> timeseriesEntry : timeseries.entrySet()) {
+        Long timestamp = timeseriesEntry.getKey();
+        ColumnRWHelper.store(rowKey, flowRunTable, FlowRunColumnPrefix.METRIC,
+            metricColumnQualifier, timestamp, timeseriesEntry.getValue(),
+            attributes);
+      }
+    }
+  }
+
+  /**
+   * Stores the relations from the {@linkplain TimelineEntity} object.
+   */
+  private <T extends BaseTable<T>> void storeRelations(byte[] rowKey,
+      Map<String, Set<String>> connectedEntities, ColumnPrefix<T> columnPrefix,
+      TypedBufferedMutator<T> table) throws IOException {
+    if (connectedEntities != null) {
+      for (Map.Entry<String, Set<String>> connectedEntity : connectedEntities
+          .entrySet()) {
+        // id3?id4?id5
+        String compoundValue =
+            Separator.VALUES.joinEncoded(connectedEntity.getValue());
+        ColumnRWHelper.store(rowKey, table, columnPrefix,
+            stringKeyConverter.encode(connectedEntity.getKey()),
+            null, compoundValue);
+      }
+    }
+  }
+
+  /**
+   * Stores information from the {@linkplain TimelineEntity} object.
+   */
+  private void store(byte[] rowKey, TimelineEntity te,
+      String flowVersion,
+      Tables table) throws IOException {
+    switch (table) {
+    case APPLICATION_TABLE:
+      ColumnRWHelper.store(rowKey, applicationTable,
+          ApplicationColumn.ID, null, te.getId());
+      ColumnRWHelper.store(rowKey, applicationTable,
+          ApplicationColumn.CREATED_TIME, null, te.getCreatedTime());
+      ColumnRWHelper.store(rowKey, applicationTable,
+          ApplicationColumn.FLOW_VERSION, null, flowVersion);
+      storeInfo(rowKey, te.getInfo(), flowVersion, ApplicationColumnPrefix.INFO,
+          applicationTable);
+      storeMetrics(rowKey, te.getMetrics(), ApplicationColumnPrefix.METRIC,
+          applicationTable);
+      storeEvents(rowKey, te.getEvents(), ApplicationColumnPrefix.EVENT,
+          applicationTable);
+      storeConfig(rowKey, te.getConfigs(), ApplicationColumnPrefix.CONFIG,
+          applicationTable);
+      storeRelations(rowKey, te.getIsRelatedToEntities(),
+          ApplicationColumnPrefix.IS_RELATED_TO, applicationTable);
+      storeRelations(rowKey, te.getRelatesToEntities(),
+          ApplicationColumnPrefix.RELATES_TO, applicationTable);
+      break;
+    case ENTITY_TABLE:
+      ColumnRWHelper.store(rowKey, entityTable,
+          EntityColumn.ID, null, te.getId());
+      ColumnRWHelper.store(rowKey, entityTable,
+          EntityColumn.TYPE, null, te.getType());
+      ColumnRWHelper.store(rowKey, entityTable,
+          EntityColumn.CREATED_TIME, null, te.getCreatedTime());
+      ColumnRWHelper.store(rowKey, entityTable,
+          EntityColumn.FLOW_VERSION, null, flowVersion);
+      storeInfo(rowKey, te.getInfo(), flowVersion, EntityColumnPrefix.INFO,
+          entityTable);
+      storeMetrics(rowKey, te.getMetrics(), EntityColumnPrefix.METRIC,
+          entityTable);
+      storeEvents(rowKey, te.getEvents(), EntityColumnPrefix.EVENT,
+          entityTable);
+      storeConfig(rowKey, te.getConfigs(), EntityColumnPrefix.CONFIG,
+          entityTable);
+      storeRelations(rowKey, te.getIsRelatedToEntities(),
+          EntityColumnPrefix.IS_RELATED_TO, entityTable);
+      storeRelations(rowKey, te.getRelatesToEntities(),
+          EntityColumnPrefix.RELATES_TO, entityTable);
+      break;
+    case SUBAPPLICATION_TABLE:
+      ColumnRWHelper.store(rowKey, subApplicationTable, SubApplicationColumn.ID,
+          null, te.getId());
+      ColumnRWHelper.store(rowKey, subApplicationTable,
+          SubApplicationColumn.TYPE, null, te.getType());
+      ColumnRWHelper.store(rowKey, subApplicationTable,
+          SubApplicationColumn.CREATED_TIME, null, te.getCreatedTime());
+      ColumnRWHelper.store(rowKey, subApplicationTable,
+          SubApplicationColumn.FLOW_VERSION, null, flowVersion);
+      storeInfo(rowKey, te.getInfo(), flowVersion,
+          SubApplicationColumnPrefix.INFO, subApplicationTable);
+      storeMetrics(rowKey, te.getMetrics(), SubApplicationColumnPrefix.METRIC,
+          subApplicationTable);
+      storeEvents(rowKey, te.getEvents(), SubApplicationColumnPrefix.EVENT,
+          subApplicationTable);
+      storeConfig(rowKey, te.getConfigs(), SubApplicationColumnPrefix.CONFIG,
+          subApplicationTable);
+      storeRelations(rowKey, te.getIsRelatedToEntities(),
+          SubApplicationColumnPrefix.IS_RELATED_TO, subApplicationTable);
+      storeRelations(rowKey, te.getRelatesToEntities(),
+          SubApplicationColumnPrefix.RELATES_TO, subApplicationTable);
+      break;
+    default:
+      LOG.info("Invalid table name provided.");
+      break;
+    }
+  }
+
+  /**
+   * Stores the info map from the {@linkplain TimelineEntity} object.
+   */
+  private <T extends BaseTable<T>> void storeInfo(byte[] rowKey,
+      Map<String, Object> info, String flowVersion,
+      ColumnPrefix<T> columnPrefix, TypedBufferedMutator<T> table)
+      throws IOException {
+    if (info != null) {
+      for (Map.Entry<String, Object> entry : info.entrySet()) {
+        ColumnRWHelper.store(rowKey, table, columnPrefix,
+            stringKeyConverter.encode(entry.getKey()), null, entry.getValue());
+      }
+    }
+  }
+
+  /**
+   * Stores the config key/value pairs from the {@linkplain TimelineEntity}.
+   */
+  private <T extends BaseTable<T>> void storeConfig(
+      byte[] rowKey, Map<String, String> config,
+      ColumnPrefix<T> columnPrefix, TypedBufferedMutator<T> table)
+      throws IOException {
+    if (config != null) {
+      for (Map.Entry<String, String> entry : config.entrySet()) {
+        byte[] configKey = stringKeyConverter.encode(entry.getKey());
+        ColumnRWHelper.store(rowKey, table, columnPrefix, configKey,
+            null, entry.getValue());
+      }
+    }
+  }
+
+  /**
+   * Stores the {@linkplain TimelineMetric} information from the
+   * {@linkplain TimelineEntity} object.
+   */
+  private <T extends BaseTable<T>> void storeMetrics(
+      byte[] rowKey, Set<TimelineMetric> metrics,
+      ColumnPrefix<T> columnPrefix, TypedBufferedMutator<T> table)
+      throws IOException {
+    if (metrics != null) {
+      for (TimelineMetric metric : metrics) {
+        byte[] metricColumnQualifier =
+            stringKeyConverter.encode(metric.getId());
+        Map<Long, Number> timeseries = metric.getValues();
+        for (Map.Entry<Long, Number> timeseriesEntry : timeseries.entrySet()) {
+          Long timestamp = timeseriesEntry.getKey();
+          ColumnRWHelper.store(rowKey, table, columnPrefix,
+              metricColumnQualifier, timestamp, timeseriesEntry.getValue());
+        }
+      }
+    }
+  }
+
+  /**
+   * Stores the events from the {@linkplain TimelineEntity} object.
+   */
+  private <T extends BaseTable<T>> void storeEvents(
+      byte[] rowKey, Set<TimelineEvent> events,
+      ColumnPrefix<T> columnPrefix, TypedBufferedMutator<T> table)
+      throws IOException {
+    if (events != null) {
+      for (TimelineEvent event : events) {
+        if (event != null) {
+          String eventId = event.getId();
+          if (eventId != null) {
+            long eventTimestamp = event.getTimestamp();
+            // if the timestamp is not set, use the current timestamp
+            if (eventTimestamp == TimelineEvent.INVALID_TIMESTAMP) {
+              LOG.warn("timestamp is not set for event " + eventId +
+                  "! Using the current timestamp");
+              eventTimestamp = System.currentTimeMillis();
+            }
+            Map<String, Object> eventInfo = event.getInfo();
+            if ((eventInfo == null) || (eventInfo.size() == 0)) {
+              byte[] columnQualifierBytes =
+                  new EventColumnName(eventId, eventTimestamp, null)
+                      .getColumnQualifier();
+              ColumnRWHelper.store(rowKey, table, columnPrefix,
+                  columnQualifierBytes, null, Separator.EMPTY_BYTES);
+            } else {
+              for (Map.Entry<String, Object> info : eventInfo.entrySet()) {
+                // eventId=infoKey
+                byte[] columnQualifierBytes =
+                    new EventColumnName(eventId, eventTimestamp, info.getKey())
+                        .getColumnQualifier();
+                ColumnRWHelper.store(rowKey, table, columnPrefix,
+                    columnQualifierBytes, null, info.getValue());
+              } // for info: eventInfo
+            }
+          }
+        }
+      } // event : events
+    }
+  }
+
+  /*
+   * (non-Javadoc)
+   *
+   * @see
+   * org.apache.hadoop.yarn.server.timelineservice.storage
+   * .TimelineWriter#aggregate
+   * (org.apache.hadoop.yarn.api.records.timelineservice.TimelineEntity,
+   * org.apache
+   * .hadoop.yarn.server.timelineservice.storage.TimelineAggregationTrack)
+   */
+  @Override
+  public TimelineWriteResponse aggregate(TimelineEntity data,
+      TimelineAggregationTrack track) throws IOException {
+    return null;
+  }
+
+  /*
+   * (non-Javadoc)
+   *
+   * @see
+   * org.apache.hadoop.yarn.server.timelineservice.storage.TimelineWriter#flush
+   * ()
+   */
+  @Override
+  public void flush() throws IOException {
+    // flush all buffered mutators
+    entityTable.flush();
+    appToFlowTable.flush();
+    applicationTable.flush();
+    flowRunTable.flush();
+    flowActivityTable.flush();
+    subApplicationTable.flush();
+  }
+
+  /**
+   * Close the HBase connections. The close APIs perform flushing and release
+   * any resources held.
+   */
+  @Override
+  protected void serviceStop() throws Exception {
+    if (entityTable != null) {
+      LOG.info("closing the entity table");
+      // The close API performs flushing and releases any resources held
+      entityTable.close();
+    }
+    if (appToFlowTable != null) {
+      LOG.info("closing the app_flow table");
+      // The close API performs flushing and releases any resources held
+      appToFlowTable.close();
+    }
+    if (applicationTable != null) {
+      LOG.info("closing the application table");
+      applicationTable.close();
+    }
+    if (flowRunTable != null) {
+      LOG.info("closing the flow run table");
+      // The close API performs flushing and releases any resources held
+      flowRunTable.close();
+    }
+    if (flowActivityTable != null) {
+      LOG.info("closing the flowActivityTable table");
+      // The close API performs flushing and releases any resources held
+      flowActivityTable.close();
+    }
+    if (subApplicationTable != null) {
+      subApplicationTable.close();
+    }
+    if (conn != null) {
+      LOG.info("closing the hbase Connection");
+      conn.close();
+    }
+    super.serviceStop();
+  }
+}
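
For context, a minimal sketch of the kind of record the writer above persists (not part of this patch; it assumes the public TimelineEntity/TimelineEvent classes from org.apache.hadoop.yarn.api.records.timelineservice, and the ids and timestamps are illustrative):

    TimelineEntity entity = new TimelineEntity();
    entity.setId("container_1518800000000_0001_01_000001");
    entity.setType("YARN_CONTAINER");
    entity.setCreatedTime(1518800000000L);

    TimelineEvent event = new TimelineEvent();
    event.setId("CONTAINER_FINISHED");
    // storeEvents() falls back to the current time if this is left unset
    event.setTimestamp(1518800060000L);
    // one cell per info entry; an empty info map instead yields a single
    // empty-value cell keyed by eventId and timestamp
    event.addInfo("exitStatus", 0);
    entity.addEvent(event);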

http://git-wip-us.apache.org/repos/asf/hadoop/blob/9af30d46/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-client/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/TimelineSchemaCreator.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-client/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/TimelineSchemaCreator.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-client/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/TimelineSchemaCreator.java
new file mode 100644
index 0000000..e9e4770
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-client/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/TimelineSchemaCreator.java
@@ -0,0 +1,368 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.yarn.server.timelineservice.storage;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.List;
+
+import org.apache.commons.cli.CommandLine;
+import org.apache.commons.cli.CommandLineParser;
+import org.apache.commons.cli.HelpFormatter;
+import org.apache.commons.cli.Option;
+import org.apache.commons.cli.Options;
+import org.apache.commons.cli.ParseException;
+import org.apache.commons.cli.PosixParser;
+import org.apache.commons.lang.StringUtils;
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hbase.client.Admin;
+import org.apache.hadoop.hbase.client.Connection;
+import org.apache.hadoop.hbase.client.ConnectionFactory;
+import org.apache.hadoop.util.GenericOptionsParser;
+import org.apache.hadoop.yarn.conf.YarnConfiguration;
+import org.apache.hadoop.yarn.server.timelineservice.storage.application.ApplicationTableRW;
+import org.apache.hadoop.yarn.server.timelineservice.storage.apptoflow.AppToFlowTableRW;
+import org.apache.hadoop.yarn.server.timelineservice.storage.common.HBaseTimelineStorageUtils;
+import org.apache.hadoop.yarn.server.timelineservice.storage.entity.EntityTableRW;
+import org.apache.hadoop.yarn.server.timelineservice.storage.flow.FlowActivityTableRW;
+import org.apache.hadoop.yarn.server.timelineservice.storage.flow.FlowRunTableRW;
+import org.apache.hadoop.yarn.server.timelineservice.storage.subapplication.SubApplicationTableRW;
+
+import com.google.common.annotations.VisibleForTesting;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+/**
+ * This creates the schema for an HBase-based backend for storing application
+ * timeline information.
+ */
+@InterfaceAudience.Private
+@InterfaceStability.Unstable
+public final class TimelineSchemaCreator {
+  private TimelineSchemaCreator() {
+  }
+
+  final static String NAME = TimelineSchemaCreator.class.getSimpleName();
+  private static final Logger LOG =
+      LoggerFactory.getLogger(TimelineSchemaCreator.class);
+  private static final String SKIP_EXISTING_TABLE_OPTION_SHORT = "s";
+  private static final String APP_METRICS_TTL_OPTION_SHORT = "ma";
+  private static final String SUB_APP_METRICS_TTL_OPTION_SHORT = "msa";
+  private static final String APP_TABLE_NAME_SHORT = "a";
+  private static final String SUB_APP_TABLE_NAME_SHORT = "sa";
+  private static final String APP_TO_FLOW_TABLE_NAME_SHORT = "a2f";
+  private static final String ENTITY_METRICS_TTL_OPTION_SHORT = "me";
+  private static final String ENTITY_TABLE_NAME_SHORT = "e";
+  private static final String HELP_SHORT = "h";
+  private static final String CREATE_TABLES_SHORT = "c";
+
+  public static void main(String[] args) throws Exception {
+
+    LOG.info("Starting the schema creation");
+    Configuration hbaseConf =
+        HBaseTimelineStorageUtils.getTimelineServiceHBaseConf(
+            new YarnConfiguration());
+    // Grab input args and allow for -Dxyz style arguments
+    String[] otherArgs = new GenericOptionsParser(hbaseConf, args)
+        .getRemainingArgs();
+
+    // Grab the arguments we're looking for.
+    CommandLine commandLine = parseArgs(otherArgs);
+
+    if (commandLine.hasOption(HELP_SHORT)) {
+      // -help option has the highest precedence
+      printUsage();
+    } else if (commandLine.hasOption(CREATE_TABLES_SHORT)) {
+      // Grab the entityTableName argument
+      String entityTableName = commandLine.getOptionValue(
+          ENTITY_TABLE_NAME_SHORT);
+      if (StringUtils.isNotBlank(entityTableName)) {
+        hbaseConf.set(EntityTableRW.TABLE_NAME_CONF_NAME, entityTableName);
+      }
+      // Grab the entity metrics TTL
+      String entityTableMetricsTTL = commandLine.getOptionValue(
+          ENTITY_METRICS_TTL_OPTION_SHORT);
+      if (StringUtils.isNotBlank(entityTableMetricsTTL)) {
+        int entityMetricsTTL = Integer.parseInt(entityTableMetricsTTL);
+        new EntityTableRW().setMetricsTTL(entityMetricsTTL, hbaseConf);
+      }
+      // Grab the appToflowTableName argument
+      String appToflowTableName = commandLine.getOptionValue(
+          APP_TO_FLOW_TABLE_NAME_SHORT);
+      if (StringUtils.isNotBlank(appToflowTableName)) {
+        hbaseConf.set(
+            AppToFlowTableRW.TABLE_NAME_CONF_NAME, appToflowTableName);
+      }
+      // Grab the applicationTableName argument
+      String applicationTableName = commandLine.getOptionValue(
+          APP_TABLE_NAME_SHORT);
+      if (StringUtils.isNotBlank(applicationTableName)) {
+        hbaseConf.set(ApplicationTableRW.TABLE_NAME_CONF_NAME,
+            applicationTableName);
+      }
+      // Grab the application metrics TTL
+      String applicationTableMetricsTTL = commandLine.getOptionValue(
+          APP_METRICS_TTL_OPTION_SHORT);
+      if (StringUtils.isNotBlank(applicationTableMetricsTTL)) {
+        int appMetricsTTL = Integer.parseInt(applicationTableMetricsTTL);
+        new ApplicationTableRW().setMetricsTTL(appMetricsTTL, hbaseConf);
+      }
+
+      // Grab the subApplicationTableName argument
+      String subApplicationTableName = commandLine.getOptionValue(
+          SUB_APP_TABLE_NAME_SHORT);
+      if (StringUtils.isNotBlank(subApplicationTableName)) {
+        hbaseConf.set(SubApplicationTableRW.TABLE_NAME_CONF_NAME,
+            subApplicationTableName);
+      }
+      // Grab the subApplication metrics TTL
+      String subApplicationTableMetricsTTL = commandLine
+          .getOptionValue(SUB_APP_METRICS_TTL_OPTION_SHORT);
+      if (StringUtils.isNotBlank(subApplicationTableMetricsTTL)) {
+        int subAppMetricsTTL = Integer.parseInt(subApplicationTableMetricsTTL);
+        new SubApplicationTableRW().setMetricsTTL(subAppMetricsTTL, hbaseConf);
+      }
+
+      // create all table schemas in hbase
+      final boolean skipExisting = commandLine.hasOption(
+          SKIP_EXISTING_TABLE_OPTION_SHORT);
+      createAllSchemas(hbaseConf, skipExisting);
+    } else {
+      // print usage information if -create is not specified
+      printUsage();
+    }
+  }
+
+  /**
+   * Parse command-line arguments.
+   *
+   * @param args
+   *          command line arguments passed to the program.
+   * @return parsed command line.
+   * @throws ParseException if the arguments could not be parsed.
+   */
+  private static CommandLine parseArgs(String[] args) throws ParseException {
+    Options options = new Options();
+
+    // Input
+    Option o = new Option(HELP_SHORT, "help", false, "print help information");
+    o.setRequired(false);
+    options.addOption(o);
+
+    o = new Option(CREATE_TABLES_SHORT, "create", false,
+        "a mandatory option to create hbase tables");
+    o.setRequired(false);
+    options.addOption(o);
+
+    o = new Option(ENTITY_TABLE_NAME_SHORT, "entityTableName", true,
+        "entity table name");
+    o.setArgName("entityTableName");
+    o.setRequired(false);
+    options.addOption(o);
+
+    o = new Option(ENTITY_METRICS_TTL_OPTION_SHORT, "entityMetricsTTL", true,
+        "TTL for metrics column family");
+    o.setArgName("entityMetricsTTL");
+    o.setRequired(false);
+    options.addOption(o);
+
+    o = new Option(APP_TO_FLOW_TABLE_NAME_SHORT, "appToflowTableName", true,
+        "app to flow table name");
+    o.setArgName("appToflowTableName");
+    o.setRequired(false);
+    options.addOption(o);
+
+    o = new Option(APP_TABLE_NAME_SHORT, "applicationTableName", true,
+        "application table name");
+    o.setArgName("applicationTableName");
+    o.setRequired(false);
+    options.addOption(o);
+
+    o = new Option(APP_METRICS_TTL_OPTION_SHORT, "applicationMetricsTTL", true,
+        "TTL for metrics column family");
+    o.setArgName("applicationMetricsTTL");
+    o.setRequired(false);
+    options.addOption(o);
+
+    o = new Option(SUB_APP_TABLE_NAME_SHORT, "subApplicationTableName", true,
+        "subApplication table name");
+    o.setArgName("subApplicationTableName");
+    o.setRequired(false);
+    options.addOption(o);
+
+    o = new Option(SUB_APP_METRICS_TTL_OPTION_SHORT, "subApplicationMetricsTTL",
+        true, "TTL for metrics column family");
+    o.setArgName("subApplicationMetricsTTL");
+    o.setRequired(false);
+    options.addOption(o);
+
+    // Options without an argument
+    // No need to set arg name since we do not need an argument here
+    o = new Option(SKIP_EXISTING_TABLE_OPTION_SHORT, "skipExistingTable",
+        false, "skip existing Hbase tables and continue to create new tables");
+    o.setRequired(false);
+    options.addOption(o);
+
+    CommandLineParser parser = new PosixParser();
+    CommandLine commandLine = null;
+    try {
+      commandLine = parser.parse(options, args);
+    } catch (Exception e) {
+      LOG.error("ERROR: " + e.getMessage() + "\n");
+      HelpFormatter formatter = new HelpFormatter();
+      formatter.printHelp(NAME + " ", options, true);
+      System.exit(-1);
+    }
+
+    return commandLine;
+  }
+
+  private static void printUsage() {
+    StringBuilder usage = new StringBuilder("Command Usage: \n");
+    usage.append("TimelineSchemaCreator [-help] Display help info" +
+        " for all commands. Or\n");
+    usage.append("TimelineSchemaCreator -create [OPTIONAL_OPTIONS]" +
+        " Create hbase tables.\n\n");
+    usage.append("The Optional options for creating tables include: \n");
+    usage.append("[-entityTableName <Entity Table Name>] " +
+        "The name of the Entity table\n");
+    usage.append("[-entityMetricsTTL <Entity Table Metrics TTL>]" +
+        " TTL for metrics in the Entity table\n");
+    usage.append("[-appToflowTableName <AppToflow Table Name>]" +
+        " The name of the AppToFlow table\n");
+    usage.append("[-applicationTableName <Application Table Name>]" +
+        " The name of the Application table\n");
+    usage.append("[-applicationMetricsTTL <Application Table Metrics TTL>]" +
+        " TTL for metrics in the Application table\n");
+    usage.append("[-subApplicationTableName <SubApplication Table Name>]" +
+        " The name of the SubApplication table\n");
+    usage.append("[-subApplicationMetricsTTL " +
+        " <SubApplication Table Metrics TTL>]" +
+        " TTL for metrics in the SubApplication table\n");
+    usage.append("[-skipExistingTable] Whether to skip existing" +
+        " hbase tables\n");
+    System.out.println(usage.toString());
+  }
+
+  /**
+   * Create all table schemas and log success or exception if failed.
+   * @param hbaseConf the hbase configuration to create tables with
+   * @param skipExisting whether to skip existing hbase tables
+   */
+  private static void createAllSchemas(Configuration hbaseConf,
+      boolean skipExisting) {
+    List<Exception> exceptions = new ArrayList<>();
+    try {
+      if (skipExisting) {
+        LOG.info("Will skip existing tables and continue on htable creation "
+            + "exceptions!");
+      }
+      createAllTables(hbaseConf, skipExisting);
+      LOG.info("Successfully created HBase schema. ");
+    } catch (IOException e) {
+      LOG.error("Error in creating hbase tables: ", e);
+      exceptions.add(e);
+    }
+
+    if (exceptions.size() > 0) {
+      LOG.warn("Schema creation finished with the following exceptions");
+      for (Exception e : exceptions) {
+        LOG.warn(e.getMessage());
+      }
+      System.exit(-1);
+    } else {
+      LOG.info("Schema creation finished successfully");
+    }
+  }
+
+  @VisibleForTesting
+  public static void createAllTables(Configuration hbaseConf,
+      boolean skipExisting) throws IOException {
+
+    Connection conn = null;
+    try {
+      conn = ConnectionFactory.createConnection(hbaseConf);
+      Admin admin = conn.getAdmin();
+      if (admin == null) {
+        throw new IOException("Cannot create table since admin is null");
+      }
+      try {
+        new EntityTableRW().createTable(admin, hbaseConf);
+      } catch (IOException e) {
+        if (skipExisting) {
+          LOG.warn("Skip and continue on: " + e.getMessage());
+        } else {
+          throw e;
+        }
+      }
+      try {
+        new AppToFlowTableRW().createTable(admin, hbaseConf);
+      } catch (IOException e) {
+        if (skipExisting) {
+          LOG.warn("Skip and continue on: " + e.getMessage());
+        } else {
+          throw e;
+        }
+      }
+      try {
+        new ApplicationTableRW().createTable(admin, hbaseConf);
+      } catch (IOException e) {
+        if (skipExisting) {
+          LOG.warn("Skip and continue on: " + e.getMessage());
+        } else {
+          throw e;
+        }
+      }
+      try {
+        new FlowRunTableRW().createTable(admin, hbaseConf);
+      } catch (IOException e) {
+        if (skipExisting) {
+          LOG.warn("Skip and continue on: " + e.getMessage());
+        } else {
+          throw e;
+        }
+      }
+      try {
+        new FlowActivityTableRW().createTable(admin, hbaseConf);
+      } catch (IOException e) {
+        if (skipExisting) {
+          LOG.warn("Skip and continue on: " + e.getMessage());
+        } else {
+          throw e;
+        }
+      }
+      try {
+        new SubApplicationTableRW().createTable(admin, hbaseConf);
+      } catch (IOException e) {
+        if (skipExisting) {
+          LOG.warn("Skip and continue on: " + e.getMessage());
+        } else {
+          throw e;
+        }
+      }
+    } finally {
+      if (conn != null) {
+        conn.close();
+      }
+    }
+  }
+
+
+}
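
As a usage sketch (option values are illustrative), the creator is run from the Hadoop classpath; -create is mandatory for table creation, and -help or a missing -create prints the usage text above:

    bin/hadoop org.apache.hadoop.yarn.server.timelineservice.storage.TimelineSchemaCreator \
        -create -skipExistingTable -entityMetricsTTL 604800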

http://git-wip-us.apache.org/repos/asf/hadoop/blob/9af30d46/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-client/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/application/ApplicationTableRW.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-client/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/application/ApplicationTableRW.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-client/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/application/ApplicationTableRW.java
new file mode 100644
index 0000000..808994e
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-client/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/application/ApplicationTableRW.java
@@ -0,0 +1,137 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.yarn.server.timelineservice.storage.application;
+
+import java.io.IOException;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hbase.HColumnDescriptor;
+import org.apache.hadoop.hbase.HTableDescriptor;
+import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.client.Admin;
+import org.apache.hadoop.hbase.regionserver.BloomType;
+import org.apache.hadoop.yarn.conf.YarnConfiguration;
+import org.apache.hadoop.yarn.server.timelineservice.storage.common.BaseTableRW;
+import org.apache.hadoop.yarn.server.timelineservice.storage.common.TimelineHBaseSchemaConstants;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+/**
+ * Create, read and write to the Application Table.
+ */
+public class ApplicationTableRW extends BaseTableRW<ApplicationTable> {
+  /** application prefix. */
+  private static final String PREFIX =
+      YarnConfiguration.TIMELINE_SERVICE_PREFIX + "application";
+
+  /** config param name that specifies the application table name. */
+  public static final String TABLE_NAME_CONF_NAME = PREFIX + ".table.name";
+
+  /**
+   * config param name that specifies the TTL for metrics column family in
+   * application table.
+   */
+  private static final String METRICS_TTL_CONF_NAME = PREFIX
+      + ".table.metrics.ttl";
+
+  /**
+   * config param name that specifies max-versions for metrics column family in
+   * the application table.
+   */
+  private static final String METRICS_MAX_VERSIONS =
+      PREFIX + ".table.metrics.max-versions";
+
+  /** default value for application table name. */
+  private static final String DEFAULT_TABLE_NAME =
+      "timelineservice.application";
+
+  /** default TTL is 30 days for metrics timeseries. */
+  private static final int DEFAULT_METRICS_TTL = 2592000;
+
+  /** default max number of versions. */
+  private static final int DEFAULT_METRICS_MAX_VERSIONS = 10000;
+
+  private static final Logger LOG =
+      LoggerFactory.getLogger(ApplicationTableRW.class);
+
+  public ApplicationTableRW() {
+    super(TABLE_NAME_CONF_NAME, DEFAULT_TABLE_NAME);
+  }
+
+  /*
+   * (non-Javadoc)
+   *
+   * @see
+   * org.apache.hadoop.yarn.server.timelineservice.storage.BaseTableRW#
+   * createTable(org.apache.hadoop.hbase.client.Admin,
+   * org.apache.hadoop.conf.Configuration)
+   */
+  public void createTable(Admin admin, Configuration hbaseConf)
+      throws IOException {
+
+    TableName table = getTableName(hbaseConf);
+    if (admin.tableExists(table)) {
+      // do not disable / delete existing table
+      // similar to the approach taken by map-reduce jobs when
+      // output directory exists
+      throw new IOException("Table " + table.getNameAsString()
+          + " already exists.");
+    }
+
+    HTableDescriptor applicationTableDescp = new HTableDescriptor(table);
+    HColumnDescriptor infoCF =
+        new HColumnDescriptor(ApplicationColumnFamily.INFO.getBytes());
+    infoCF.setBloomFilterType(BloomType.ROWCOL);
+    applicationTableDescp.addFamily(infoCF);
+
+    HColumnDescriptor configCF =
+        new HColumnDescriptor(ApplicationColumnFamily.CONFIGS.getBytes());
+    configCF.setBloomFilterType(BloomType.ROWCOL);
+    configCF.setBlockCacheEnabled(true);
+    applicationTableDescp.addFamily(configCF);
+
+    HColumnDescriptor metricsCF =
+        new HColumnDescriptor(ApplicationColumnFamily.METRICS.getBytes());
+    applicationTableDescp.addFamily(metricsCF);
+    metricsCF.setBlockCacheEnabled(true);
+    // always keep 1 version (the latest)
+    metricsCF.setMinVersions(1);
+    metricsCF.setMaxVersions(
+        hbaseConf.getInt(METRICS_MAX_VERSIONS, DEFAULT_METRICS_MAX_VERSIONS));
+    metricsCF.setTimeToLive(hbaseConf.getInt(METRICS_TTL_CONF_NAME,
+        DEFAULT_METRICS_TTL));
+    applicationTableDescp.setRegionSplitPolicyClassName(
+        "org.apache.hadoop.hbase.regionserver.KeyPrefixRegionSplitPolicy");
+    applicationTableDescp.setValue("KeyPrefixRegionSplitPolicy.prefix_length",
+        TimelineHBaseSchemaConstants.USERNAME_SPLIT_KEY_PREFIX_LENGTH);
+    admin.createTable(applicationTableDescp,
+        TimelineHBaseSchemaConstants.getUsernameSplits());
+    LOG.info("Status of table creation for " + table.getNameAsString() + "="
+        + admin.tableExists(table));
+  }
+
+  /**
+   * @param metricsTTL time to live parameter for the metrics in this table.
+   * @param hbaseConf configuration in which to set the metrics TTL config
+   *          variable.
+   */
+  public void setMetricsTTL(int metricsTTL, Configuration hbaseConf) {
+    hbaseConf.setInt(METRICS_TTL_CONF_NAME, metricsTTL);
+  }
+
+}
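
A short sketch of exercising the knobs above programmatically (values are illustrative; in practice schema creation is driven by TimelineSchemaCreator):

    Configuration hbaseConf = HBaseConfiguration.create();
    // override the default table name "timelineservice.application"
    hbaseConf.set(ApplicationTableRW.TABLE_NAME_CONF_NAME,
        "dev.timelineservice.application");
    // shorten the metrics TTL from the 30-day default to 7 days
    new ApplicationTableRW().setMetricsTTL(7 * 24 * 60 * 60, hbaseConf);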

http://git-wip-us.apache.org/repos/asf/hadoop/blob/9af30d46/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-client/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/application/package-info.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-client/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/application/package-info.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-client/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/application/package-info.java
new file mode 100644
index 0000000..03f508f
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-client/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/application/package-info.java
@@ -0,0 +1,28 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/**
+ * Package org.apache.hadoop.yarn.server.timelineservice.storage.application
+ * contains classes related to implementation for application table.
+ */
+@InterfaceAudience.Private
+@InterfaceStability.Unstable
+package org.apache.hadoop.yarn.server.timelineservice.storage.application;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/9af30d46/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-client/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/apptoflow/AppToFlowTableRW.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-client/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/apptoflow/AppToFlowTableRW.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-client/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/apptoflow/AppToFlowTableRW.java
new file mode 100644
index 0000000..6460203
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-client/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/apptoflow/AppToFlowTableRW.java
@@ -0,0 +1,92 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.yarn.server.timelineservice.storage.apptoflow;
+
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hbase.HColumnDescriptor;
+import org.apache.hadoop.hbase.HTableDescriptor;
+import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.client.Admin;
+import org.apache.hadoop.hbase.regionserver.BloomType;
+import org.apache.hadoop.yarn.conf.YarnConfiguration;
+import org.apache.hadoop.yarn.server.timelineservice.storage.common.BaseTableRW;
+import org.apache.hadoop.yarn.server.timelineservice.storage.common.TimelineHBaseSchemaConstants;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.io.IOException;
+
+/**
+ * Create, read and write to the AppToFlow Table.
+ */
+public class AppToFlowTableRW extends BaseTableRW<AppToFlowTable> {
+  /** app_flow prefix. */
+  private static final String PREFIX =
+      YarnConfiguration.TIMELINE_SERVICE_PREFIX + "app-flow";
+
+  /** config param name that specifies the app_flow table name. */
+  public static final String TABLE_NAME_CONF_NAME = PREFIX + ".table.name";
+
+  /** default value for app_flow table name. */
+  private static final String DEFAULT_TABLE_NAME = "timelineservice.app_flow";
+
+  private static final Logger LOG =
+      LoggerFactory.getLogger(AppToFlowTableRW.class);
+
+  public AppToFlowTableRW() {
+    super(TABLE_NAME_CONF_NAME, DEFAULT_TABLE_NAME);
+  }
+
+  /*
+   * (non-Javadoc)
+   *
+   * @see
+   * org.apache.hadoop.yarn.server.timelineservice.storage.BaseTableRW#
+   * createTable(org.apache.hadoop.hbase.client.Admin,
+   * org.apache.hadoop.conf.Configuration)
+   */
+  public void createTable(Admin admin, Configuration hbaseConf)
+      throws IOException {
+
+    TableName table = getTableName(hbaseConf);
+    if (admin.tableExists(table)) {
+      // do not disable / delete existing table
+      // similar to the approach taken by map-reduce jobs when
+      // output directory exists
+      throw new IOException("Table " + table.getNameAsString()
+          + " already exists.");
+    }
+
+    HTableDescriptor appToFlowTableDescp = new HTableDescriptor(table);
+    HColumnDescriptor mappCF =
+        new HColumnDescriptor(AppToFlowColumnFamily.MAPPING.getBytes());
+    mappCF.setBloomFilterType(BloomType.ROWCOL);
+    appToFlowTableDescp.addFamily(mappCF);
+
+    appToFlowTableDescp
+        .setRegionSplitPolicyClassName(
+            "org.apache.hadoop.hbase.regionserver.KeyPrefixRegionSplitPolicy");
+    appToFlowTableDescp.setValue("KeyPrefixRegionSplitPolicy.prefix_length",
+        TimelineHBaseSchemaConstants.USERNAME_SPLIT_KEY_PREFIX_LENGTH);
+    admin.createTable(appToFlowTableDescp,
+        TimelineHBaseSchemaConstants.getUsernameSplits());
+    LOG.info("Status of table creation for " + table.getNameAsString() + "="
+        + admin.tableExists(table));
+  }
+}
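
The same naming convention applies to this table; a sketch of resolving the final name (the cluster-wide schema prefix is applied by BaseTableRW, shown further below):

    Configuration conf = HBaseConfiguration.create();
    conf.set(AppToFlowTableRW.TABLE_NAME_CONF_NAME,
        "timelineservice.app_flow_v2");
    // yields <schema prefix> + "timelineservice.app_flow_v2"
    TableName resolved = new AppToFlowTableRW().getTableName(conf);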

http://git-wip-us.apache.org/repos/asf/hadoop/blob/9af30d46/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-client/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/apptoflow/package-info.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-client/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/apptoflow/package-info.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-client/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/apptoflow/package-info.java
new file mode 100644
index 0000000..f01d982
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-client/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/apptoflow/package-info.java
@@ -0,0 +1,28 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/**
+ * Package org.apache.hadoop.yarn.server.timelineservice.storage.apptoflow
+ * contains classes related to implementation for app to flow table.
+ */
+@InterfaceAudience.Private
+@InterfaceStability.Unstable
+package org.apache.hadoop.yarn.server.timelineservice.storage.apptoflow;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hadoop/blob/9af30d46/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-client/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/common/BaseTableRW.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-client/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/common/BaseTableRW.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-client/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/common/BaseTableRW.java
new file mode 100644
index 0000000..12ebce4
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-client/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/common/BaseTableRW.java
@@ -0,0 +1,167 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.yarn.server.timelineservice.storage.common;
+
+import java.io.IOException;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.client.Admin;
+import org.apache.hadoop.hbase.client.BufferedMutator;
+import org.apache.hadoop.hbase.client.Connection;
+import org.apache.hadoop.hbase.client.Get;
+import org.apache.hadoop.hbase.client.Result;
+import org.apache.hadoop.hbase.client.ResultScanner;
+import org.apache.hadoop.hbase.client.Scan;
+import org.apache.hadoop.hbase.client.Table;
+import org.apache.hadoop.yarn.conf.YarnConfiguration;
+
+/**
+ * Implements behavior common to tables used in the timeline service storage. It
+ * is thread-safe, and can be used by multiple threads concurrently.
+ *
+ * @param <T> reference to the table instance class itself for type safety.
+ */
+public abstract class BaseTableRW<T extends BaseTable<T>> {
+
+  /**
+   * Name of config variable that is used to point to this table.
+   */
+  private final String tableNameConfName;
+
+  /**
+   * Unless the configuration overrides, this will be the default name for the
+   * table when it is created.
+   */
+  private final String defaultTableName;
+
+  /**
+   * @param tableNameConfName name of config variable that is used to point to
+   *          this table.
+   * @param defaultTableName Default table name if table from config is not
+   *          found.
+   */
+  protected BaseTableRW(String tableNameConfName, String defaultTableName) {
+    this.tableNameConfName = tableNameConfName;
+    this.defaultTableName = defaultTableName;
+  }
+
+  /**
+   * Used to create a type-safe mutator for this table.
+   *
+   * @param hbaseConf used to read table name.
+   * @param conn used to create a table from.
+   * @return a type safe {@link BufferedMutator} for this table.
+   * @throws IOException if any exception occurs while creating mutator for the
+   *     table.
+   */
+  public TypedBufferedMutator<T> getTableMutator(Configuration hbaseConf,
+      Connection conn) throws IOException {
+
+    TableName tableName = this.getTableName(hbaseConf);
+
+    // Plain buffered mutator
+    BufferedMutator bufferedMutator = conn.getBufferedMutator(tableName);
+
+    // Wrap the plain mutator to make it type-safe.
+    // This is the reference that service initialization should hang on to,
+    // with the proper type.
+    TypedBufferedMutator<T> table =
+        new TypedBufferedMutator<T>(bufferedMutator);
+
+    return table;
+  }
+
+  /**
+   * @param hbaseConf used to read settings that override defaults
+   * @param conn used to create table from
+   * @param scan that specifies what you want to read from this table.
+   * @return scanner for the table.
+   * @throws IOException if any exception occurs while getting the scanner.
+   */
+  public ResultScanner getResultScanner(Configuration hbaseConf,
+      Connection conn, Scan scan) throws IOException {
+    Table table = conn.getTable(getTableName(hbaseConf));
+    return table.getScanner(scan);
+  }
+
+  /**
+   *
+   * @param hbaseConf used to read settings that override defaults
+   * @param conn used to create table from
+   * @param get that specifies what single row you want to get from this table
+   * @return result of get operation
+   * @throws IOException if any exception occurs while getting the result.
+   */
+  public Result getResult(Configuration hbaseConf, Connection conn, Get get)
+      throws IOException {
+    Table table = conn.getTable(getTableName(hbaseConf));
+    return table.get(get);
+  }
+
+  /**
+   * Get the table name for the input table.
+   *
+   * @param conf HBase configuration from which table name will be fetched.
+   * @param tableName name of the table to be fetched
+   * @return A {@link TableName} object.
+   */
+  public static TableName getTableName(Configuration conf, String tableName) {
+    String tableSchemaPrefix = conf.get(
+        YarnConfiguration.TIMELINE_SERVICE_HBASE_SCHEMA_PREFIX_NAME,
+        YarnConfiguration.DEFAULT_TIMELINE_SERVICE_HBASE_SCHEMA_PREFIX);
+    return TableName.valueOf(tableSchemaPrefix + tableName);
+  }
+
+  /**
+   * Get the table name for this table.
+   *
+   * @param conf HBase configuration from which table name will be fetched.
+   * @return A {@link TableName} object.
+   */
+  public TableName getTableName(Configuration conf) {
+    String tableName = conf.get(tableNameConfName, defaultTableName);
+    return getTableName(conf, tableName);
+  }
+
+  /**
+   * Get the table name based on the input config parameters.
+   *
+   * @param conf HBase configuration from which table name will be fetched.
+   * @param tableNameInConf the table name parameter in conf.
+   * @param defaultTableName the default table name.
+   * @return A {@link TableName} object.
+   */
+  public static TableName getTableName(Configuration conf,
+      String tableNameInConf, String defaultTableName) {
+    String tableName = conf.get(tableNameInConf, defaultTableName);
+    return getTableName(conf, tableName);
+  }
+
+  /**
+   * Used to create the table in HBase. Should be called only once (per HBase
+   * instance).
+   *
+   * @param admin Used for doing HBase table operations.
+   * @param hbaseConf Hbase configuration.
+   * @throws IOException if any exception occurs while creating the table.
+   */
+  public abstract void createTable(Admin admin, Configuration hbaseConf)
+      throws IOException;
+
+}
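
A hedged sketch of a single-row read through the helpers above (hbaseConf is an HBase Configuration, and rowKeyBytes is assumed to come from one of the *RowKey classes elsewhere in this patch):

    Connection conn = ConnectionFactory.createConnection(hbaseConf);
    try {
      BaseTableRW<ApplicationTable> table = new ApplicationTableRW();
      Get get = new Get(rowKeyBytes);
      Result result = table.getResult(hbaseConf, conn, get);
      // individual cells are then decoded via ColumnRWHelper
    } finally {
      conn.close();
    }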




[19/50] [abbrv] hadoop git commit: YARN-7919. Refactor timelineservice-hbase module into submodules. Contributed by Haibo Chen.

Posted by ha...@apache.org.
YARN-7919. Refactor timelineservice-hbase module into submodules. Contributed by Haibo Chen.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/9af30d46
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/9af30d46
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/9af30d46

Branch: refs/heads/HDFS-12996
Commit: 9af30d46c6e82332a8eda20cb3eb5f987e25e7a2
Parents: a1e56a6
Author: Rohith Sharma K S <ro...@apache.org>
Authored: Sat Feb 17 20:30:28 2018 +0530
Committer: Rohith Sharma K S <ro...@apache.org>
Committed: Sat Feb 17 20:30:28 2018 +0530

----------------------------------------------------------------------
 .../resources/assemblies/hadoop-yarn-dist.xml   |  22 +-
 hadoop-project/pom.xml                          |  14 +-
 .../pom.xml                                     |  26 +-
 ...stTimelineReaderWebServicesHBaseStorage.java |   8 +-
 .../storage/TestHBaseTimelineStorageApps.java   |  50 +-
 .../TestHBaseTimelineStorageEntities.java       |  89 +--
 .../storage/TestHBaseTimelineStorageSchema.java |  61 +-
 .../flow/TestHBaseStorageFlowActivity.java      |  24 +-
 .../storage/flow/TestHBaseStorageFlowRun.java   |  42 +-
 .../flow/TestHBaseStorageFlowRunCompaction.java |  58 +-
 .../pom.xml                                     | 219 ++++++
 .../reader/filter/TimelineFilterUtils.java      | 313 ++++++++
 .../reader/filter/package-info.java             |  28 +
 .../storage/HBaseTimelineReaderImpl.java        |  96 +++
 .../storage/HBaseTimelineWriterImpl.java        | 611 ++++++++++++++++
 .../storage/TimelineSchemaCreator.java          | 368 ++++++++++
 .../storage/application/ApplicationTableRW.java | 137 ++++
 .../storage/application/package-info.java       |  28 +
 .../storage/apptoflow/AppToFlowTableRW.java     |  92 +++
 .../storage/apptoflow/package-info.java         |  28 +
 .../storage/common/BaseTableRW.java             | 167 +++++
 .../storage/common/ColumnRWHelper.java          | 487 +++++++++++++
 .../common/HBaseTimelineStorageUtils.java       | 121 +++
 .../common/TimelineHBaseSchemaConstants.java    |  71 ++
 .../storage/common/TypedBufferedMutator.java    |  73 ++
 .../storage/common/package-info.java            |  28 +
 .../storage/entity/EntityTableRW.java           | 136 ++++
 .../storage/entity/package-info.java            |  28 +
 .../storage/flow/FlowActivityTableRW.java       |  91 +++
 .../storage/flow/FlowRunTableRW.java            | 102 +++
 .../storage/flow/package-info.java              |  29 +
 .../timelineservice/storage/package-info.java   |  28 +
 .../reader/AbstractTimelineStorageReader.java   | 159 ++++
 .../storage/reader/ApplicationEntityReader.java | 523 +++++++++++++
 .../storage/reader/EntityTypeReader.java        | 175 +++++
 .../reader/FlowActivityEntityReader.java        | 186 +++++
 .../storage/reader/FlowRunEntityReader.java     | 298 ++++++++
 .../storage/reader/GenericEntityReader.java     | 655 +++++++++++++++++
 .../reader/SubApplicationEntityReader.java      | 489 +++++++++++++
 .../storage/reader/TimelineEntityReader.java    | 464 ++++++++++++
 .../reader/TimelineEntityReaderFactory.java     | 105 +++
 .../storage/reader/package-info.java            |  28 +
 .../subapplication/SubApplicationTableRW.java   | 137 ++++
 .../storage/subapplication/package-info.java    |  28 +
 .../common/TestHBaseTimelineStorageUtils.java   |  33 +
 .../pom.xml                                     | 132 ++++
 .../storage/application/ApplicationColumn.java  | 101 +++
 .../application/ApplicationColumnFamily.java    |  65 ++
 .../application/ApplicationColumnPrefix.java    | 150 ++++
 .../storage/application/ApplicationRowKey.java  | 251 +++++++
 .../application/ApplicationRowKeyPrefix.java    |  69 ++
 .../storage/application/ApplicationTable.java   |  60 ++
 .../storage/application/package-info.java       |  28 +
 .../storage/apptoflow/AppToFlowColumn.java      |  95 +++
 .../apptoflow/AppToFlowColumnFamily.java        |  51 ++
 .../apptoflow/AppToFlowColumnPrefix.java        | 105 +++
 .../storage/apptoflow/AppToFlowRowKey.java      |  58 ++
 .../storage/apptoflow/AppToFlowTable.java       |  60 ++
 .../storage/apptoflow/package-info.java         |  28 +
 .../storage/common/AppIdKeyConverter.java       |  97 +++
 .../storage/common/BaseTable.java               |  27 +
 .../timelineservice/storage/common/Column.java  |  56 ++
 .../storage/common/ColumnFamily.java            |  34 +
 .../storage/common/ColumnHelper.java            | 101 +++
 .../storage/common/ColumnPrefix.java            |  71 ++
 .../storage/common/EventColumnName.java         |  63 ++
 .../common/EventColumnNameConverter.java        |  99 +++
 .../storage/common/GenericConverter.java        |  48 ++
 .../common/HBaseTimelineSchemaUtils.java        | 156 ++++
 .../storage/common/KeyConverter.java            |  41 ++
 .../storage/common/KeyConverterToString.java    |  38 +
 .../storage/common/LongConverter.java           |  94 +++
 .../storage/common/LongKeyConverter.java        |  68 ++
 .../storage/common/NumericValueConverter.java   |  39 +
 .../timelineservice/storage/common/Range.java   |  62 ++
 .../storage/common/RowKeyPrefix.java            |  42 ++
 .../storage/common/Separator.java               | 575 +++++++++++++++
 .../storage/common/StringKeyConverter.java      |  54 ++
 .../storage/common/TimestampGenerator.java      | 116 +++
 .../storage/common/ValueConverter.java          |  47 ++
 .../storage/common/package-info.java            |  28 +
 .../storage/entity/EntityColumn.java            | 105 +++
 .../storage/entity/EntityColumnFamily.java      |  65 ++
 .../storage/entity/EntityColumnPrefix.java      | 162 +++++
 .../storage/entity/EntityRowKey.java            | 299 ++++++++
 .../storage/entity/EntityRowKeyPrefix.java      |  77 ++
 .../storage/entity/EntityTable.java             |  61 ++
 .../storage/entity/package-info.java            |  28 +
 .../flow/AggregationCompactionDimension.java    |  63 ++
 .../storage/flow/AggregationOperation.java      |  94 +++
 .../timelineservice/storage/flow/Attribute.java |  39 +
 .../storage/flow/FlowActivityColumnFamily.java  |  55 ++
 .../storage/flow/FlowActivityColumnPrefix.java  | 133 ++++
 .../storage/flow/FlowActivityRowKey.java        | 247 +++++++
 .../storage/flow/FlowActivityRowKeyPrefix.java  |  60 ++
 .../storage/flow/FlowActivityTable.java         |  45 ++
 .../storage/flow/FlowRunColumn.java             | 112 +++
 .../storage/flow/FlowRunColumnFamily.java       |  54 ++
 .../storage/flow/FlowRunColumnPrefix.java       | 129 ++++
 .../storage/flow/FlowRunRowKey.java             | 233 ++++++
 .../storage/flow/FlowRunRowKeyPrefix.java       |  54 ++
 .../storage/flow/FlowRunTable.java              |  77 ++
 .../storage/flow/package-info.java              |  29 +
 .../timelineservice/storage/package-info.java   |  28 +
 .../subapplication/SubApplicationColumn.java    |  99 +++
 .../SubApplicationColumnFamily.java             |  68 ++
 .../SubApplicationColumnPrefix.java             | 163 +++++
 .../subapplication/SubApplicationRowKey.java    | 290 ++++++++
 .../SubApplicationRowKeyPrefix.java             |  69 ++
 .../subapplication/SubApplicationTable.java     |  64 ++
 .../storage/subapplication/package-info.java    |  28 +
 .../TestCustomApplicationIdConversion.java      |  39 +
 .../storage/common/TestKeyConverters.java       | 134 ++++
 .../storage/common/TestRowKeys.java             | 276 +++++++
 .../storage/common/TestRowKeysAsString.java     | 144 ++++
 .../storage/common/TestSeparator.java           | 215 ++++++
 .../pom.xml                                     | 161 ++++
 .../src/assembly/coprocessor.xml                |  37 +
 .../common/HBaseTimelineServerUtils.java        | 135 ++++
 .../storage/common/package-info.java            |  28 +
 .../storage/flow/FlowRunCoprocessor.java        | 277 +++++++
 .../storage/flow/FlowScanner.java               | 723 ++++++++++++++++++
 .../storage/flow/FlowScannerOperation.java      |  46 ++
 .../storage/flow/package-info.java              |  29 +
 .../timelineservice/storage/package-info.java   |  28 +
 .../pom.xml                                     | 193 +----
 .../reader/filter/TimelineFilterUtils.java      | 308 --------
 .../reader/filter/package-info.java             |  28 -
 .../storage/HBaseTimelineReaderImpl.java        |  96 ---
 .../storage/HBaseTimelineWriterImpl.java        | 593 ---------------
 .../storage/TimelineSchemaCreator.java          | 367 ----------
 .../storage/application/ApplicationColumn.java  | 108 ---
 .../application/ApplicationColumnFamily.java    |  65 --
 .../application/ApplicationColumnPrefix.java    | 236 ------
 .../storage/application/ApplicationRowKey.java  | 251 -------
 .../application/ApplicationRowKeyPrefix.java    |  69 --
 .../storage/application/ApplicationTable.java   | 170 -----
 .../storage/application/package-info.java       |  28 -
 .../storage/apptoflow/AppToFlowColumn.java      | 101 ---
 .../apptoflow/AppToFlowColumnFamily.java        |  51 --
 .../apptoflow/AppToFlowColumnPrefix.java        | 206 ------
 .../storage/apptoflow/AppToFlowRowKey.java      |  58 --
 .../storage/apptoflow/AppToFlowTable.java       | 125 ----
 .../storage/apptoflow/package-info.java         |  28 -
 .../storage/common/AppIdKeyConverter.java       |  97 ---
 .../storage/common/BaseTable.java               | 167 -----
 .../common/BufferedMutatorDelegator.java        |  73 --
 .../timelineservice/storage/common/Column.java  |  80 --
 .../storage/common/ColumnFamily.java            |  34 -
 .../storage/common/ColumnHelper.java            | 414 -----------
 .../storage/common/ColumnPrefix.java            | 145 ----
 .../storage/common/EventColumnName.java         |  63 --
 .../common/EventColumnNameConverter.java        |  99 ---
 .../storage/common/GenericConverter.java        |  48 --
 .../common/HBaseTimelineStorageUtils.java       | 354 ---------
 .../storage/common/KeyConverter.java            |  41 --
 .../storage/common/KeyConverterToString.java    |  38 -
 .../storage/common/LongConverter.java           |  94 ---
 .../storage/common/LongKeyConverter.java        |  68 --
 .../storage/common/NumericValueConverter.java   |  39 -
 .../timelineservice/storage/common/Range.java   |  62 --
 .../storage/common/RowKeyPrefix.java            |  42 --
 .../storage/common/Separator.java               | 575 ---------------
 .../storage/common/StringKeyConverter.java      |  54 --
 .../common/TimelineHBaseSchemaConstants.java    |  71 --
 .../storage/common/TimestampGenerator.java      | 116 ---
 .../storage/common/TypedBufferedMutator.java    |  28 -
 .../storage/common/ValueConverter.java          |  47 --
 .../storage/common/package-info.java            |  28 -
 .../storage/entity/EntityColumn.java            | 112 ---
 .../storage/entity/EntityColumnFamily.java      |  65 --
 .../storage/entity/EntityColumnPrefix.java      | 249 -------
 .../storage/entity/EntityRowKey.java            | 299 --------
 .../storage/entity/EntityRowKeyPrefix.java      |  77 --
 .../storage/entity/EntityTable.java             | 170 -----
 .../storage/entity/package-info.java            |  28 -
 .../flow/AggregationCompactionDimension.java    |  63 --
 .../storage/flow/AggregationOperation.java      |  94 ---
 .../timelineservice/storage/flow/Attribute.java |  39 -
 .../storage/flow/FlowActivityColumnFamily.java  |  55 --
 .../storage/flow/FlowActivityColumnPrefix.java  | 221 ------
 .../storage/flow/FlowActivityRowKey.java        | 247 -------
 .../storage/flow/FlowActivityRowKeyPrefix.java  |  60 --
 .../storage/flow/FlowActivityTable.java         | 109 ---
 .../storage/flow/FlowRunColumn.java             | 131 ----
 .../storage/flow/FlowRunColumnFamily.java       |  54 --
 .../storage/flow/FlowRunColumnPrefix.java       | 217 ------
 .../storage/flow/FlowRunCoprocessor.java        | 277 -------
 .../storage/flow/FlowRunRowKey.java             | 233 ------
 .../storage/flow/FlowRunRowKeyPrefix.java       |  54 --
 .../storage/flow/FlowRunTable.java              | 151 ----
 .../storage/flow/FlowScanner.java               | 729 -------------------
 .../storage/flow/FlowScannerOperation.java      |  46 --
 .../storage/flow/package-info.java              |  29 -
 .../timelineservice/storage/package-info.java   |  28 -
 .../reader/AbstractTimelineStorageReader.java   | 158 ----
 .../storage/reader/ApplicationEntityReader.java | 520 -------------
 .../storage/reader/EntityTypeReader.java        | 175 -----
 .../reader/FlowActivityEntityReader.java        | 185 -----
 .../storage/reader/FlowRunEntityReader.java     | 294 --------
 .../storage/reader/GenericEntityReader.java     | 651 -----------------
 .../reader/SubApplicationEntityReader.java      | 488 -------------
 .../storage/reader/TimelineEntityReader.java    | 459 ------------
 .../reader/TimelineEntityReaderFactory.java     | 105 ---
 .../storage/reader/package-info.java            |  28 -
 .../subapplication/SubApplicationColumn.java    | 108 ---
 .../SubApplicationColumnFamily.java             |  68 --
 .../SubApplicationColumnPrefix.java             | 250 -------
 .../subapplication/SubApplicationRowKey.java    | 290 --------
 .../SubApplicationRowKeyPrefix.java             |  69 --
 .../subapplication/SubApplicationTable.java     | 174 -----
 .../storage/subapplication/package-info.java    |  28 -
 .../TestCustomApplicationIdConversion.java      |  39 -
 .../common/TestHBaseTimelineStorageUtils.java   |  33 -
 .../storage/common/TestKeyConverters.java       | 134 ----
 .../storage/common/TestRowKeys.java             | 276 -------
 .../storage/common/TestRowKeysAsString.java     | 144 ----
 .../storage/common/TestSeparator.java           | 215 ------
 218 files changed, 15320 insertions(+), 14832 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/9af30d46/hadoop-assemblies/src/main/resources/assemblies/hadoop-yarn-dist.xml
----------------------------------------------------------------------
diff --git a/hadoop-assemblies/src/main/resources/assemblies/hadoop-yarn-dist.xml b/hadoop-assemblies/src/main/resources/assemblies/hadoop-yarn-dist.xml
index a77dd20..2c266b6 100644
--- a/hadoop-assemblies/src/main/resources/assemblies/hadoop-yarn-dist.xml
+++ b/hadoop-assemblies/src/main/resources/assemblies/hadoop-yarn-dist.xml
@@ -213,7 +213,11 @@
     </fileSet>
     <!-- Copy dependencies from hadoop-yarn-server-timelineservice as well -->
     <fileSet>
-      <directory>hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/target/lib</directory>
+      <directory>hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-client/target/lib</directory>
+      <outputDirectory>share/hadoop/${hadoop.component}/timelineservice/lib</outputDirectory>
+    </fileSet>
+    <fileSet>
+      <directory>hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-common/target/lib</directory>
       <outputDirectory>share/hadoop/${hadoop.component}/timelineservice/lib</outputDirectory>
     </fileSet>
   </fileSets>
@@ -221,12 +225,26 @@
     <moduleSet>
       <includes>
         <include>org.apache.hadoop:hadoop-yarn-server-timelineservice</include>
-        <include>org.apache.hadoop:hadoop-yarn-server-timelineservice-hbase</include>
+        <include>org.apache.hadoop:hadoop-yarn-server-timelineservice-hbase-client</include>
+        <include>org.apache.hadoop:hadoop-yarn-server-timelineservice-hbase-common</include>
+      </includes>
+      <binaries>
+        <outputDirectory>share/hadoop/${hadoop.component}/timelineservice</outputDirectory>
+        <includeDependencies>false</includeDependencies>
+        <unpack>false</unpack>
+      </binaries>
+    </moduleSet>
+    <moduleSet>
+      <includes>
+        <include>org.apache.hadoop:hadoop-yarn-server-timelineservice-hbase-server</include>
       </includes>
       <binaries>
         <outputDirectory>share/hadoop/${hadoop.component}/timelineservice</outputDirectory>
         <includeDependencies>false</includeDependencies>
+        <!-- This is the id of the timelineservice-hbase-coprocessor assembly descriptor -->
+        <attachmentClassifier>coprocessor</attachmentClassifier>
         <unpack>false</unpack>
+        <outputFileNameMapping>hadoop-yarn-server-timelineservice-hbase-coprocessor-${module.version}.${module.extension}</outputFileNameMapping>
       </binaries>
     </moduleSet>
     <moduleSet>

http://git-wip-us.apache.org/repos/asf/hadoop/blob/9af30d46/hadoop-project/pom.xml
----------------------------------------------------------------------
diff --git a/hadoop-project/pom.xml b/hadoop-project/pom.xml
index c27596c..ce51c99 100644
--- a/hadoop-project/pom.xml
+++ b/hadoop-project/pom.xml
@@ -397,7 +397,19 @@
 
       <dependency>
         <groupId>org.apache.hadoop</groupId>
-        <artifactId>hadoop-yarn-server-timelineservice-hbase</artifactId>
+        <artifactId>hadoop-yarn-server-timelineservice-hbase-client</artifactId>
+        <version>${project.version}</version>
+      </dependency>
+
+      <dependency>
+        <groupId>org.apache.hadoop</groupId>
+        <artifactId>hadoop-yarn-server-timelineservice-hbase-common</artifactId>
+        <version>${project.version}</version>
+      </dependency>
+
+      <dependency>
+        <groupId>org.apache.hadoop</groupId>
+        <artifactId>hadoop-yarn-server-timelineservice-hbase-server</artifactId>
         <version>${project.version}</version>
       </dependency>
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/9af30d46/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase-tests/pom.xml
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase-tests/pom.xml b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase-tests/pom.xml
index f36897b..d9f992d 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase-tests/pom.xml
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase-tests/pom.xml
@@ -60,7 +60,31 @@
 
     <dependency>
       <groupId>org.apache.hadoop</groupId>
-      <artifactId>hadoop-yarn-server-timelineservice-hbase</artifactId>
+      <artifactId>hadoop-yarn-server-timelineservice-hbase-client</artifactId>
+      <scope>test</scope>
+      <exclusions>
+        <exclusion>
+          <groupId>org.apache.hadoop</groupId>
+          <artifactId>hadoop-common</artifactId>
+        </exclusion>
+      </exclusions>
+    </dependency>
+
+    <dependency>
+      <groupId>org.apache.hadoop</groupId>
+      <artifactId>hadoop-yarn-server-timelineservice-hbase-common</artifactId>
+      <scope>test</scope>
+      <exclusions>
+        <exclusion>
+          <groupId>org.apache.hadoop</groupId>
+          <artifactId>hadoop-common</artifactId>
+        </exclusion>
+      </exclusions>
+    </dependency>
+
+    <dependency>
+      <groupId>org.apache.hadoop</groupId>
+      <artifactId>hadoop-yarn-server-timelineservice-hbase-server</artifactId>
       <scope>test</scope>
       <exclusions>
         <exclusion>

http://git-wip-us.apache.org/repos/asf/hadoop/blob/9af30d46/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase-tests/src/test/java/org/apache/hadoop/yarn/server/timelineservice/reader/TestTimelineReaderWebServicesHBaseStorage.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase-tests/src/test/java/org/apache/hadoop/yarn/server/timelineservice/reader/TestTimelineReaderWebServicesHBaseStorage.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase-tests/src/test/java/org/apache/hadoop/yarn/server/timelineservice/reader/TestTimelineReaderWebServicesHBaseStorage.java
index b2029ca..33d8dcd 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase-tests/src/test/java/org/apache/hadoop/yarn/server/timelineservice/reader/TestTimelineReaderWebServicesHBaseStorage.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase-tests/src/test/java/org/apache/hadoop/yarn/server/timelineservice/reader/TestTimelineReaderWebServicesHBaseStorage.java
@@ -50,7 +50,7 @@ import org.apache.hadoop.yarn.api.records.timelineservice.TimelineMetric.Type;
 import org.apache.hadoop.yarn.server.metrics.ApplicationMetricsConstants;
 import org.apache.hadoop.yarn.server.timelineservice.collector.TimelineCollectorContext;
 import org.apache.hadoop.yarn.server.timelineservice.storage.HBaseTimelineWriterImpl;
-import org.apache.hadoop.yarn.server.timelineservice.storage.common.HBaseTimelineStorageUtils;
+import org.apache.hadoop.yarn.server.timelineservice.storage.common.HBaseTimelineSchemaUtils;
 import org.apache.hadoop.yarn.server.utils.BuilderUtils;
 import org.junit.AfterClass;
 import org.junit.BeforeClass;
@@ -70,7 +70,7 @@ public class TestTimelineReaderWebServicesHBaseStorage
     extends AbstractTimelineReaderHBaseTestBase {
   private static long ts = System.currentTimeMillis();
   private static long dayTs =
-      HBaseTimelineStorageUtils.getTopOfTheDayTimestamp(ts);
+      HBaseTimelineSchemaUtils.getTopOfTheDayTimestamp(ts);
   private static String doAsUser = "remoteuser";
 
   @BeforeClass
@@ -371,7 +371,7 @@ public class TestTimelineReaderWebServicesHBaseStorage
             BuilderUtils.newApplicationId(timestamp, count++);
         ApplicationEntity appEntity = new ApplicationEntity();
         appEntity.setId(
-            HBaseTimelineStorageUtils.convertApplicationIdToString(appId));
+            HBaseTimelineSchemaUtils.convertApplicationIdToString(appId));
         appEntity.setCreatedTime(timestamp);
 
         TimelineEvent created = new TimelineEvent();
@@ -929,7 +929,7 @@ public class TestTimelineReaderWebServicesHBaseStorage
           new String[] {"flow1"});
 
       long firstFlowActivity =
-          HBaseTimelineStorageUtils.getTopOfTheDayTimestamp(1425016501000L);
+          HBaseTimelineSchemaUtils.getTopOfTheDayTimestamp(1425016501000L);
 
       DateFormat fmt = TimelineReaderWebServices.DATE_FORMAT.get();
       uri = URI.create("http://localhost:" + getServerPort() + "/ws/v2/" +

http://git-wip-us.apache.org/repos/asf/hadoop/blob/9af30d46/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase-tests/src/test/java/org/apache/hadoop/yarn/server/timelineservice/storage/TestHBaseTimelineStorageApps.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase-tests/src/test/java/org/apache/hadoop/yarn/server/timelineservice/storage/TestHBaseTimelineStorageApps.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase-tests/src/test/java/org/apache/hadoop/yarn/server/timelineservice/storage/TestHBaseTimelineStorageApps.java
index 111008a..bc33427 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase-tests/src/test/java/org/apache/hadoop/yarn/server/timelineservice/storage/TestHBaseTimelineStorageApps.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase-tests/src/test/java/org/apache/hadoop/yarn/server/timelineservice/storage/TestHBaseTimelineStorageApps.java
@@ -68,10 +68,11 @@ import org.apache.hadoop.yarn.server.timelineservice.storage.TimelineReader.Fiel
 import org.apache.hadoop.yarn.server.timelineservice.storage.application.ApplicationColumn;
 import org.apache.hadoop.yarn.server.timelineservice.storage.application.ApplicationColumnPrefix;
 import org.apache.hadoop.yarn.server.timelineservice.storage.application.ApplicationRowKey;
-import org.apache.hadoop.yarn.server.timelineservice.storage.application.ApplicationTable;
+import org.apache.hadoop.yarn.server.timelineservice.storage.application.ApplicationTableRW;
+import org.apache.hadoop.yarn.server.timelineservice.storage.common.ColumnRWHelper;
 import org.apache.hadoop.yarn.server.timelineservice.storage.common.EventColumnName;
 import org.apache.hadoop.yarn.server.timelineservice.storage.common.EventColumnNameConverter;
-import org.apache.hadoop.yarn.server.timelineservice.storage.common.HBaseTimelineStorageUtils;
+import org.apache.hadoop.yarn.server.timelineservice.storage.common.HBaseTimelineSchemaUtils;
 import org.apache.hadoop.yarn.server.timelineservice.storage.common.KeyConverter;
 import org.apache.hadoop.yarn.server.timelineservice.storage.common.Separator;
 import org.apache.hadoop.yarn.server.timelineservice.storage.common.StringKeyConverter;
@@ -173,7 +174,7 @@ public class TestHBaseTimelineStorageApps {
       scan.setStartRow(Bytes.toBytes(cluster));
       scan.setStopRow(Bytes.toBytes(cluster + "1"));
       Connection conn = ConnectionFactory.createConnection(c1);
-      ResultScanner resultScanner = new ApplicationTable()
+      ResultScanner resultScanner = new ApplicationTableRW()
           .getResultScanner(c1, conn, scan);
 
       assertTrue(resultScanner != null);
@@ -308,7 +309,7 @@ public class TestHBaseTimelineStorageApps {
       Get get = new Get(rowKey);
       get.setMaxVersions(Integer.MAX_VALUE);
       Connection conn = ConnectionFactory.createConnection(c1);
-      Result result = new ApplicationTable().getResult(c1, conn, get);
+      Result result = new ApplicationTableRW().getResult(c1, conn, get);
 
       assertTrue(result != null);
       assertEquals(17, result.size());
@@ -319,24 +320,24 @@ public class TestHBaseTimelineStorageApps {
           appId));
 
       // check info column family
-      String id1 = ApplicationColumn.ID.readResult(result).toString();
+      String id1 =
+          ColumnRWHelper.readResult(result, ApplicationColumn.ID).toString();
       assertEquals(appId, id1);
 
-      Long cTime1 =
-          (Long) ApplicationColumn.CREATED_TIME.readResult(result);
+      Long cTime1 = (Long)
+          ColumnRWHelper.readResult(result, ApplicationColumn.CREATED_TIME);
       assertEquals(cTime, cTime1);
 
-      Map<String, Object> infoColumns =
-          ApplicationColumnPrefix.INFO.readResults(result,
-              new StringKeyConverter());
+      Map<String, Object> infoColumns = ColumnRWHelper.readResults(
+          result, ApplicationColumnPrefix.INFO, new StringKeyConverter());
       assertEquals(infoMap, infoColumns);
 
       // Remember isRelatedTo is of type Map<String, Set<String>>
       for (Map.Entry<String, Set<String>> isRelatedToEntry : isRelatedTo
           .entrySet()) {
-        Object isRelatedToValue =
-            ApplicationColumnPrefix.IS_RELATED_TO.readResult(result,
-                isRelatedToEntry.getKey());
+        Object isRelatedToValue = ColumnRWHelper.readResult(
+            result, ApplicationColumnPrefix.IS_RELATED_TO,
+            isRelatedToEntry.getKey());
         String compoundValue = isRelatedToValue.toString();
         // id7?id9?id6
         Set<String> isRelatedToValues =
@@ -351,9 +352,9 @@ public class TestHBaseTimelineStorageApps {
       // RelatesTo
       for (Map.Entry<String, Set<String>> relatesToEntry : relatesTo
           .entrySet()) {
-        String compoundValue =
-            ApplicationColumnPrefix.RELATES_TO.readResult(result,
-                relatesToEntry.getKey()).toString();
+        String compoundValue = ColumnRWHelper.readResult(result,
+            ApplicationColumnPrefix.RELATES_TO, relatesToEntry.getKey())
+            .toString();
         // id3?id4?id5
         Set<String> relatesToValues =
             new HashSet<String>(Separator.VALUES.splitEncoded(compoundValue));
@@ -366,14 +367,13 @@ public class TestHBaseTimelineStorageApps {
 
       KeyConverter<String> stringKeyConverter = new StringKeyConverter();
       // Configuration
-      Map<String, Object> configColumns =
-          ApplicationColumnPrefix.CONFIG
-              .readResults(result, stringKeyConverter);
+      Map<String, Object> configColumns = ColumnRWHelper.readResults(
+          result, ApplicationColumnPrefix.CONFIG, stringKeyConverter);
       assertEquals(conf, configColumns);
 
       NavigableMap<String, NavigableMap<Long, Number>> metricsResult =
-          ApplicationColumnPrefix.METRIC.readResultsWithTimestamps(result,
-              stringKeyConverter);
+          ColumnRWHelper.readResultsWithTimestamps(
+              result, ApplicationColumnPrefix.METRIC, stringKeyConverter);
 
       NavigableMap<Long, Number> metricMap = metricsResult.get(m1.getId());
       matchMetrics(metricValues, metricMap);
@@ -500,7 +500,7 @@ public class TestHBaseTimelineStorageApps {
     event.addInfo(expKey, expVal);
 
     final TimelineEntity entity = new ApplicationEntity();
-    entity.setId(HBaseTimelineStorageUtils.convertApplicationIdToString(
+    entity.setId(HBaseTimelineSchemaUtils.convertApplicationIdToString(
         ApplicationId.newInstance(0, 1)));
     entity.addEvent(event);
 
@@ -531,7 +531,7 @@ public class TestHBaseTimelineStorageApps {
       Get get = new Get(rowKey);
       get.setMaxVersions(Integer.MAX_VALUE);
       Connection conn = ConnectionFactory.createConnection(c1);
-      Result result = new ApplicationTable().getResult(c1, conn, get);
+      Result result = new ApplicationTableRW().getResult(c1, conn, get);
 
       assertTrue(result != null);
 
@@ -541,8 +541,8 @@ public class TestHBaseTimelineStorageApps {
           appName));
 
       Map<EventColumnName, Object> eventsResult =
-          ApplicationColumnPrefix.EVENT.readResults(result,
-              new EventColumnNameConverter());
+          ColumnRWHelper.readResults(result,
+              ApplicationColumnPrefix.EVENT, new EventColumnNameConverter());
       // there should be only one event
       assertEquals(1, eventsResult.size());
       for (Map.Entry<EventColumnName, Object> e : eventsResult.entrySet()) {
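
The pattern repeated through these test hunks: reads move off the
Column/ColumnPrefix enums onto the new ColumnRWHelper, so the column
definitions can stay in the hbase-common module while the HBase I/O lives
with the client code. A call-shape sketch, using only names that appear in
the hunks above (the imports and the throws clause are assumptions):

    import java.io.IOException;
    import java.util.Map;
    import org.apache.hadoop.hbase.client.Result;
    import org.apache.hadoop.yarn.server.timelineservice.storage.application.ApplicationColumn;
    import org.apache.hadoop.yarn.server.timelineservice.storage.application.ApplicationColumnPrefix;
    import org.apache.hadoop.yarn.server.timelineservice.storage.common.ColumnRWHelper;
    import org.apache.hadoop.yarn.server.timelineservice.storage.common.StringKeyConverter;

    final class ReadApiSketch {
      // Old: ApplicationColumn.ID.readResult(result)
      // New: one helper takes the Result plus the column descriptor.
      static void read(Result result) throws IOException {
        String id =
            ColumnRWHelper.readResult(result, ApplicationColumn.ID).toString();
        Map<String, Object> info = ColumnRWHelper.readResults(
            result, ApplicationColumnPrefix.INFO, new StringKeyConverter());
      }
    }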

http://git-wip-us.apache.org/repos/asf/hadoop/blob/9af30d46/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase-tests/src/test/java/org/apache/hadoop/yarn/server/timelineservice/storage/TestHBaseTimelineStorageEntities.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase-tests/src/test/java/org/apache/hadoop/yarn/server/timelineservice/storage/TestHBaseTimelineStorageEntities.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase-tests/src/test/java/org/apache/hadoop/yarn/server/timelineservice/storage/TestHBaseTimelineStorageEntities.java
index 5e08999..90a6959 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase-tests/src/test/java/org/apache/hadoop/yarn/server/timelineservice/storage/TestHBaseTimelineStorageEntities.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase-tests/src/test/java/org/apache/hadoop/yarn/server/timelineservice/storage/TestHBaseTimelineStorageEntities.java
@@ -62,9 +62,10 @@ import org.apache.hadoop.yarn.server.timelineservice.reader.filter.TimelineKeyVa
 import org.apache.hadoop.yarn.server.timelineservice.reader.filter.TimelineKeyValuesFilter;
 import org.apache.hadoop.yarn.server.timelineservice.reader.filter.TimelinePrefixFilter;
 import org.apache.hadoop.yarn.server.timelineservice.storage.TimelineReader.Field;
+import org.apache.hadoop.yarn.server.timelineservice.storage.common.ColumnRWHelper;
 import org.apache.hadoop.yarn.server.timelineservice.storage.common.EventColumnName;
 import org.apache.hadoop.yarn.server.timelineservice.storage.common.EventColumnNameConverter;
-import org.apache.hadoop.yarn.server.timelineservice.storage.common.HBaseTimelineStorageUtils;
+import org.apache.hadoop.yarn.server.timelineservice.storage.common.HBaseTimelineSchemaUtils;
 import org.apache.hadoop.yarn.server.timelineservice.storage.common.KeyConverter;
 import org.apache.hadoop.yarn.server.timelineservice.storage.common.Separator;
 import org.apache.hadoop.yarn.server.timelineservice.storage.common.StringKeyConverter;
@@ -73,12 +74,12 @@ import org.apache.hadoop.yarn.server.timelineservice.storage.entity.EntityColumn
 import org.apache.hadoop.yarn.server.timelineservice.storage.entity.EntityColumnPrefix;
 import org.apache.hadoop.yarn.server.timelineservice.storage.entity.EntityRowKey;
 import org.apache.hadoop.yarn.server.timelineservice.storage.entity.EntityRowKeyPrefix;
-import org.apache.hadoop.yarn.server.timelineservice.storage.entity.EntityTable;
+import org.apache.hadoop.yarn.server.timelineservice.storage.entity.EntityTableRW;
 import org.apache.hadoop.yarn.server.timelineservice.storage.subapplication.SubApplicationColumn;
 import org.apache.hadoop.yarn.server.timelineservice.storage.subapplication.SubApplicationColumnPrefix;
 import org.apache.hadoop.yarn.server.timelineservice.storage.subapplication.SubApplicationRowKey;
 import org.apache.hadoop.yarn.server.timelineservice.storage.subapplication.SubApplicationRowKeyPrefix;
-import org.apache.hadoop.yarn.server.timelineservice.storage.subapplication.SubApplicationTable;
+import org.apache.hadoop.yarn.server.timelineservice.storage.subapplication.SubApplicationTableRW;
 import org.junit.After;
 import org.junit.AfterClass;
 import org.junit.Assert;
@@ -208,7 +209,7 @@ public class TestHBaseTimelineStorageEntities {
       String flow = "some_flow_name";
       String flowVersion = "AB7822C10F1111";
       long runid = 1002345678919L;
-      String appName = HBaseTimelineStorageUtils.convertApplicationIdToString(
+      String appName = HBaseTimelineSchemaUtils.convertApplicationIdToString(
           ApplicationId.newInstance(System.currentTimeMillis() + 9000000L, 1)
       );
       hbi.write(new TimelineCollectorContext(cluster, user, flow, flowVersion,
@@ -224,7 +225,7 @@ public class TestHBaseTimelineStorageEntities {
       s.setStartRow(startRow);
       s.setMaxVersions(Integer.MAX_VALUE);
       Connection conn = ConnectionFactory.createConnection(c1);
-      ResultScanner scanner = new EntityTable().getResultScanner(c1, conn, s);
+      ResultScanner scanner = new EntityTableRW().getResultScanner(c1, conn, s);
 
       int rowCount = 0;
       int colCount = 0;
@@ -238,26 +239,27 @@ public class TestHBaseTimelineStorageEntities {
               entity));
 
           // check info column family
-          String id1 = EntityColumn.ID.readResult(result).toString();
+          String id1 =
+              ColumnRWHelper.readResult(result, EntityColumn.ID).toString();
           assertEquals(id, id1);
 
-          String type1 = EntityColumn.TYPE.readResult(result).toString();
+          String type1 =
+              ColumnRWHelper.readResult(result, EntityColumn.TYPE).toString();
           assertEquals(type, type1);
 
-          Long cTime1 = (Long) EntityColumn.CREATED_TIME.readResult(result);
+          Long cTime1 = (Long)
+              ColumnRWHelper.readResult(result, EntityColumn.CREATED_TIME);
           assertEquals(cTime1, cTime);
 
-          Map<String, Object> infoColumns =
-              EntityColumnPrefix.INFO.readResults(result,
-                  new StringKeyConverter());
+          Map<String, Object> infoColumns = ColumnRWHelper.readResults(
+              result, EntityColumnPrefix.INFO, new StringKeyConverter());
           assertEquals(infoMap, infoColumns);
 
           // Remember isRelatedTo is of type Map<String, Set<String>>
           for (Map.Entry<String, Set<String>> isRelatedToEntry : isRelatedTo
               .entrySet()) {
-            Object isRelatedToValue =
-                EntityColumnPrefix.IS_RELATED_TO.readResult(result,
-                    isRelatedToEntry.getKey());
+            Object isRelatedToValue = ColumnRWHelper.readResult(result,
+                EntityColumnPrefix.IS_RELATED_TO, isRelatedToEntry.getKey());
             String compoundValue = isRelatedToValue.toString();
             // id7?id9?id6
             Set<String> isRelatedToValues =
@@ -273,8 +275,9 @@ public class TestHBaseTimelineStorageEntities {
           // RelatesTo
           for (Map.Entry<String, Set<String>> relatesToEntry : relatesTo
               .entrySet()) {
-            String compoundValue = EntityColumnPrefix.RELATES_TO
-                .readResult(result, relatesToEntry.getKey()).toString();
+            String compoundValue = ColumnRWHelper.readResult(result,
+                EntityColumnPrefix.RELATES_TO, relatesToEntry.getKey())
+                .toString();
             // id3?id4?id5
             Set<String> relatesToValues =
                 new HashSet<String>(
@@ -287,13 +290,13 @@ public class TestHBaseTimelineStorageEntities {
           }
 
           // Configuration
-          Map<String, Object> configColumns =
-              EntityColumnPrefix.CONFIG.readResults(result, stringKeyConverter);
+          Map<String, Object> configColumns = ColumnRWHelper.readResults(
+              result, EntityColumnPrefix.CONFIG, stringKeyConverter);
           assertEquals(conf, configColumns);
 
           NavigableMap<String, NavigableMap<Long, Number>> metricsResult =
-              EntityColumnPrefix.METRIC.readResultsWithTimestamps(result,
-                  stringKeyConverter);
+              ColumnRWHelper.readResultsWithTimestamps(
+                  result, EntityColumnPrefix.METRIC, stringKeyConverter);
 
           NavigableMap<Long, Number> metricMap = metricsResult.get(m1.getId());
           matchMetrics(metricValues, metricMap);
@@ -386,14 +389,14 @@ public class TestHBaseTimelineStorageEntities {
       Set<TimelineMetric> metrics, Long cTime, TimelineMetric m1)
       throws IOException {
     Scan s = new Scan();
-    // read from SubApplicationTable
+    // read from SubApplicationTableRW
     byte[] startRow = new SubApplicationRowKeyPrefix(cluster, subAppUser, null,
         null, null, null).getRowKeyPrefix();
     s.setStartRow(startRow);
     s.setMaxVersions(Integer.MAX_VALUE);
     Connection conn = ConnectionFactory.createConnection(c1);
     ResultScanner scanner =
-        new SubApplicationTable().getResultScanner(c1, conn, s);
+        new SubApplicationTableRW().getResultScanner(c1, conn, s);
 
     int rowCount = 0;
     int colCount = 0;
@@ -407,25 +410,28 @@ public class TestHBaseTimelineStorageEntities {
             user, entity));
 
         // check info column family
-        String id1 = SubApplicationColumn.ID.readResult(result).toString();
+        String id1 = ColumnRWHelper.readResult(result, SubApplicationColumn.ID)
+            .toString();
         assertEquals(id, id1);
 
-        String type1 = SubApplicationColumn.TYPE.readResult(result).toString();
+        String type1 = ColumnRWHelper.readResult(result,
+            SubApplicationColumn.TYPE).toString();
         assertEquals(type, type1);
 
-        Long cTime1 =
-            (Long) SubApplicationColumn.CREATED_TIME.readResult(result);
+        Long cTime1 = (Long) ColumnRWHelper.readResult(result,
+            SubApplicationColumn.CREATED_TIME);
         assertEquals(cTime1, cTime);
 
-        Map<String, Object> infoColumns = SubApplicationColumnPrefix.INFO
-            .readResults(result, new StringKeyConverter());
+        Map<String, Object> infoColumns = ColumnRWHelper.readResults(
+            result, SubApplicationColumnPrefix.INFO, new StringKeyConverter());
         assertEquals(infoMap, infoColumns);
 
         // Remember isRelatedTo is of type Map<String, Set<String>>
         for (Map.Entry<String, Set<String>> isRelatedToEntry : isRelatedTo
             .entrySet()) {
-          Object isRelatedToValue = SubApplicationColumnPrefix.IS_RELATED_TO
-              .readResult(result, isRelatedToEntry.getKey());
+          Object isRelatedToValue = ColumnRWHelper.readResult(
+              result, SubApplicationColumnPrefix.IS_RELATED_TO,
+              isRelatedToEntry.getKey());
           String compoundValue = isRelatedToValue.toString();
           // id7?id9?id6
           Set<String> isRelatedToValues =
@@ -440,8 +446,9 @@ public class TestHBaseTimelineStorageEntities {
         // RelatesTo
         for (Map.Entry<String, Set<String>> relatesToEntry : relatesTo
             .entrySet()) {
-          String compoundValue = SubApplicationColumnPrefix.RELATES_TO
-              .readResult(result, relatesToEntry.getKey()).toString();
+          String compoundValue = ColumnRWHelper.readResult(result,
+              SubApplicationColumnPrefix.RELATES_TO, relatesToEntry.getKey())
+              .toString();
           // id3?id4?id5
           Set<String> relatesToValues =
               new HashSet<String>(Separator.VALUES.splitEncoded(compoundValue));
@@ -453,13 +460,13 @@ public class TestHBaseTimelineStorageEntities {
         }
 
         // Configuration
-        Map<String, Object> configColumns = SubApplicationColumnPrefix.CONFIG
-            .readResults(result, stringKeyConverter);
+        Map<String, Object> configColumns = ColumnRWHelper.readResults(
+            result, SubApplicationColumnPrefix.CONFIG, stringKeyConverter);
         assertEquals(conf, configColumns);
 
         NavigableMap<String, NavigableMap<Long, Number>> metricsResult =
-            SubApplicationColumnPrefix.METRIC.readResultsWithTimestamps(result,
-                stringKeyConverter);
+            ColumnRWHelper.readResultsWithTimestamps(
+                result, SubApplicationColumnPrefix.METRIC, stringKeyConverter);
 
         NavigableMap<Long, Number> metricMap = metricsResult.get(m1.getId());
         matchMetrics(metricValues, metricMap);
@@ -511,7 +518,7 @@ public class TestHBaseTimelineStorageEntities {
       String flow = "other_flow_name";
       String flowVersion = "1111F01C2287BA";
       long runid = 1009876543218L;
-      String appName = HBaseTimelineStorageUtils.convertApplicationIdToString(
+      String appName = HBaseTimelineSchemaUtils.convertApplicationIdToString(
           ApplicationId.newInstance(System.currentTimeMillis() + 9000000L, 1));
       byte[] startRow =
           new EntityRowKeyPrefix(cluster, user, flow, runid, appName)
@@ -525,7 +532,7 @@ public class TestHBaseTimelineStorageEntities {
       s.setStartRow(startRow);
       s.addFamily(EntityColumnFamily.INFO.getBytes());
       Connection conn = ConnectionFactory.createConnection(c1);
-      ResultScanner scanner = new EntityTable().getResultScanner(c1, conn, s);
+      ResultScanner scanner = new EntityTableRW().getResultScanner(c1, conn, s);
 
       int rowCount = 0;
       for (Result result : scanner) {
@@ -538,8 +545,8 @@ public class TestHBaseTimelineStorageEntities {
               entity));
 
           Map<EventColumnName, Object> eventsResult =
-              EntityColumnPrefix.EVENT.readResults(result,
-                  new EventColumnNameConverter());
+              ColumnRWHelper.readResults(result,
+                  EntityColumnPrefix.EVENT, new EventColumnNameConverter());
           // there should be only one event
           assertEquals(1, eventsResult.size());
           for (Map.Entry<EventColumnName, Object> e : eventsResult.entrySet()) {
@@ -604,7 +611,7 @@ public class TestHBaseTimelineStorageEntities {
 
     final TimelineEntity entity = new ApplicationEntity();
     entity.setId(
-        HBaseTimelineStorageUtils.convertApplicationIdToString(
+        HBaseTimelineSchemaUtils.convertApplicationIdToString(
             ApplicationId.newInstance(0, 1)));
     entity.addEvent(event);
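
convertApplicationIdToString also moved to HBaseTimelineSchemaUtils; it
renders an ApplicationId in the standard application_<clusterTs>_<seq>
layout for use as an entity id. A rough equivalent (a sketch only; the
zero-padding width is an assumption, not taken from this diff):

    import org.apache.hadoop.yarn.api.records.ApplicationId;

    final class AppIdSketch {
      // Sketch: format an ApplicationId as application_<clusterTs>_<seq>,
      // assuming the sequence number is zero-padded to four digits.
      static String convertApplicationIdToString(ApplicationId appId) {
        return "application_" + appId.getClusterTimestamp() + "_"
            + String.format("%04d", appId.getId());
      }

      public static void main(String[] args) {
        // Matches the usage above; prints application_0_0001.
        System.out.println(
            convertApplicationIdToString(ApplicationId.newInstance(0, 1)));
      }
    }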
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/9af30d46/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase-tests/src/test/java/org/apache/hadoop/yarn/server/timelineservice/storage/TestHBaseTimelineStorageSchema.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase-tests/src/test/java/org/apache/hadoop/yarn/server/timelineservice/storage/TestHBaseTimelineStorageSchema.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase-tests/src/test/java/org/apache/hadoop/yarn/server/timelineservice/storage/TestHBaseTimelineStorageSchema.java
index 0dcd171..f838178 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase-tests/src/test/java/org/apache/hadoop/yarn/server/timelineservice/storage/TestHBaseTimelineStorageSchema.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase-tests/src/test/java/org/apache/hadoop/yarn/server/timelineservice/storage/TestHBaseTimelineStorageSchema.java
@@ -21,6 +21,9 @@ package org.apache.hadoop.yarn.server.timelineservice.storage;
 import static org.junit.Assert.assertNotNull;
 import static org.junit.Assert.assertTrue;
 
+import org.apache.hadoop.yarn.server.timelineservice.storage.common.BaseTableRW;
+import org.apache.hadoop.yarn.server.timelineservice.storage.entity.EntityTableRW;
+import org.apache.hadoop.yarn.server.timelineservice.storage.flow.FlowRunTableRW;
 import org.junit.BeforeClass;
 import org.junit.Test;
 
@@ -35,10 +38,6 @@ import org.apache.hadoop.hbase.client.Connection;
 import org.apache.hadoop.hbase.client.ConnectionFactory;
 import org.apache.hadoop.hbase.client.Table;
 
-import org.apache.hadoop.yarn.server.timelineservice.storage.common.BaseTable;
-import org.apache.hadoop.yarn.server.timelineservice.storage.entity.EntityTable;
-import org.apache.hadoop.yarn.server.timelineservice.storage.flow.FlowRunTable;
-
 /**
  * Unit tests for checking different schema prefixes.
  */
@@ -61,22 +60,24 @@ public class TestHBaseTimelineStorageSchema {
     conn = ConnectionFactory.createConnection(hbaseConf);
     Admin admin = conn.getAdmin();
 
-    TableName entityTableName = BaseTable.getTableName(hbaseConf,
-        EntityTable.TABLE_NAME_CONF_NAME, EntityTable.DEFAULT_TABLE_NAME);
+    TableName entityTableName = BaseTableRW.getTableName(hbaseConf,
+        EntityTableRW.TABLE_NAME_CONF_NAME, EntityTableRW.DEFAULT_TABLE_NAME);
     assertTrue(admin.tableExists(entityTableName));
     assertTrue(entityTableName.getNameAsString().startsWith(
         YarnConfiguration.DEFAULT_TIMELINE_SERVICE_HBASE_SCHEMA_PREFIX));
-    Table entityTable = conn.getTable(BaseTable.getTableName(hbaseConf,
-        EntityTable.TABLE_NAME_CONF_NAME, EntityTable.DEFAULT_TABLE_NAME));
+    Table entityTable = conn.getTable(BaseTableRW.getTableName(hbaseConf,
+        EntityTableRW.TABLE_NAME_CONF_NAME, EntityTableRW.DEFAULT_TABLE_NAME));
     assertNotNull(entityTable);
 
-    TableName flowRunTableName = BaseTable.getTableName(hbaseConf,
-        FlowRunTable.TABLE_NAME_CONF_NAME, FlowRunTable.DEFAULT_TABLE_NAME);
+    TableName flowRunTableName = BaseTableRW.getTableName(hbaseConf,
+        FlowRunTableRW.TABLE_NAME_CONF_NAME, FlowRunTableRW.DEFAULT_TABLE_NAME);
     assertTrue(admin.tableExists(flowRunTableName));
     assertTrue(flowRunTableName.getNameAsString().startsWith(
         YarnConfiguration.DEFAULT_TIMELINE_SERVICE_HBASE_SCHEMA_PREFIX));
-    Table flowRunTable = conn.getTable(BaseTable.getTableName(hbaseConf,
-        FlowRunTable.TABLE_NAME_CONF_NAME, FlowRunTable.DEFAULT_TABLE_NAME));
+    Table flowRunTable = conn.getTable(
+        BaseTableRW.getTableName(hbaseConf,
+            FlowRunTableRW.TABLE_NAME_CONF_NAME,
+            FlowRunTableRW.DEFAULT_TABLE_NAME));
     assertNotNull(flowRunTable);
   }
 
@@ -91,20 +92,22 @@ public class TestHBaseTimelineStorageSchema {
     conn = ConnectionFactory.createConnection(hbaseConf);
     Admin admin = conn.getAdmin();
 
-    TableName entityTableName = BaseTable.getTableName(hbaseConf,
-        EntityTable.TABLE_NAME_CONF_NAME, EntityTable.DEFAULT_TABLE_NAME);
+    TableName entityTableName = BaseTableRW.getTableName(hbaseConf,
+        EntityTableRW.TABLE_NAME_CONF_NAME, EntityTableRW.DEFAULT_TABLE_NAME);
     assertTrue(admin.tableExists(entityTableName));
     assertTrue(entityTableName.getNameAsString().startsWith(prefix));
-    Table entityTable = conn.getTable(BaseTable.getTableName(hbaseConf,
-        EntityTable.TABLE_NAME_CONF_NAME, EntityTable.DEFAULT_TABLE_NAME));
+    Table entityTable = conn.getTable(BaseTableRW.getTableName(hbaseConf,
+        EntityTableRW.TABLE_NAME_CONF_NAME, EntityTableRW.DEFAULT_TABLE_NAME));
     assertNotNull(entityTable);
 
-    TableName flowRunTableName = BaseTable.getTableName(hbaseConf,
-        FlowRunTable.TABLE_NAME_CONF_NAME, FlowRunTable.DEFAULT_TABLE_NAME);
+    TableName flowRunTableName = BaseTableRW.getTableName(hbaseConf,
+        FlowRunTableRW.TABLE_NAME_CONF_NAME, FlowRunTableRW.DEFAULT_TABLE_NAME);
     assertTrue(admin.tableExists(flowRunTableName));
     assertTrue(flowRunTableName.getNameAsString().startsWith(prefix));
-    Table flowRunTable = conn.getTable(BaseTable.getTableName(hbaseConf,
-        FlowRunTable.TABLE_NAME_CONF_NAME, FlowRunTable.DEFAULT_TABLE_NAME));
+    Table flowRunTable = conn.getTable(
+        BaseTableRW.getTableName(hbaseConf,
+            FlowRunTableRW.TABLE_NAME_CONF_NAME,
+            FlowRunTableRW.DEFAULT_TABLE_NAME));
     assertNotNull(flowRunTable);
 
     // create another set with a diff prefix
@@ -114,20 +117,22 @@ public class TestHBaseTimelineStorageSchema {
     hbaseConf.set(YarnConfiguration.TIMELINE_SERVICE_HBASE_SCHEMA_PREFIX_NAME,
         prefix);
     DataGeneratorForTest.createSchema(hbaseConf);
-    entityTableName = BaseTable.getTableName(hbaseConf,
-        EntityTable.TABLE_NAME_CONF_NAME, EntityTable.DEFAULT_TABLE_NAME);
+    entityTableName = BaseTableRW.getTableName(hbaseConf,
+        EntityTableRW.TABLE_NAME_CONF_NAME, EntityTableRW.DEFAULT_TABLE_NAME);
     assertTrue(admin.tableExists(entityTableName));
     assertTrue(entityTableName.getNameAsString().startsWith(prefix));
-    entityTable = conn.getTable(BaseTable.getTableName(hbaseConf,
-        EntityTable.TABLE_NAME_CONF_NAME, EntityTable.DEFAULT_TABLE_NAME));
+    entityTable = conn.getTable(BaseTableRW.getTableName(hbaseConf,
+        EntityTableRW.TABLE_NAME_CONF_NAME, EntityTableRW.DEFAULT_TABLE_NAME));
     assertNotNull(entityTable);
 
-    flowRunTableName = BaseTable.getTableName(hbaseConf,
-        FlowRunTable.TABLE_NAME_CONF_NAME, FlowRunTable.DEFAULT_TABLE_NAME);
+    flowRunTableName = BaseTableRW.getTableName(hbaseConf,
+        FlowRunTableRW.TABLE_NAME_CONF_NAME, FlowRunTableRW.DEFAULT_TABLE_NAME);
     assertTrue(admin.tableExists(flowRunTableName));
     assertTrue(flowRunTableName.getNameAsString().startsWith(prefix));
-    flowRunTable = conn.getTable(BaseTable.getTableName(hbaseConf,
-        FlowRunTable.TABLE_NAME_CONF_NAME, FlowRunTable.DEFAULT_TABLE_NAME));
+    flowRunTable = conn.getTable(
+        BaseTableRW.getTableName(hbaseConf,
+            FlowRunTableRW.TABLE_NAME_CONF_NAME,
+            FlowRunTableRW.DEFAULT_TABLE_NAME));
     assertNotNull(flowRunTable);
     hbaseConf
     .unset(YarnConfiguration.TIMELINE_SERVICE_HBASE_SCHEMA_PREFIX_NAME);
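
The schema-prefix assertions above exercise the table-name resolution that
rode along with the BaseTable to BaseTableRW rename. A minimal sketch of
that resolution; the conf key string and the "prod." default are
assumptions based on the YarnConfiguration constants referenced in the
test, while the call shape mirrors the hunks:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.TableName;

    final class TableNameSketch {
      // Sketch: resolve "<prefix><bare name>", both read from the conf.
      static TableName getTableName(Configuration conf, String nameConfKey,
          String defaultName) {
        String name = conf.get(nameConfKey, defaultName);
        // Assumed key and default for the schema prefix.
        String prefix =
            conf.get("yarn.timeline-service.hbase-schema.prefix", "prod.");
        return TableName.valueOf(prefix + name);
      }
    }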

http://git-wip-us.apache.org/repos/asf/hadoop/blob/9af30d46/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase-tests/src/test/java/org/apache/hadoop/yarn/server/timelineservice/storage/flow/TestHBaseStorageFlowActivity.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase-tests/src/test/java/org/apache/hadoop/yarn/server/timelineservice/storage/flow/TestHBaseStorageFlowActivity.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase-tests/src/test/java/org/apache/hadoop/yarn/server/timelineservice/storage/flow/TestHBaseStorageFlowActivity.java
index 4bf221e..645b7d5 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase-tests/src/test/java/org/apache/hadoop/yarn/server/timelineservice/storage/flow/TestHBaseStorageFlowActivity.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase-tests/src/test/java/org/apache/hadoop/yarn/server/timelineservice/storage/flow/TestHBaseStorageFlowActivity.java
@@ -52,9 +52,9 @@ import org.apache.hadoop.yarn.server.timelineservice.reader.TimelineReaderContex
 import org.apache.hadoop.yarn.server.timelineservice.storage.DataGeneratorForTest;
 import org.apache.hadoop.yarn.server.timelineservice.storage.HBaseTimelineReaderImpl;
 import org.apache.hadoop.yarn.server.timelineservice.storage.HBaseTimelineWriterImpl;
-import org.apache.hadoop.yarn.server.timelineservice.storage.common.BaseTable;
+import org.apache.hadoop.yarn.server.timelineservice.storage.common.BaseTableRW;
 import org.apache.hadoop.yarn.server.timelineservice.storage.common.ColumnHelper;
-import org.apache.hadoop.yarn.server.timelineservice.storage.common.HBaseTimelineStorageUtils;
+import org.apache.hadoop.yarn.server.timelineservice.storage.common.HBaseTimelineSchemaUtils;
 import org.junit.AfterClass;
 import org.junit.BeforeClass;
 import org.junit.Test;
@@ -161,8 +161,8 @@ public class TestHBaseStorageFlowActivity {
     Connection conn = ConnectionFactory.createConnection(c1);
     // check in flow activity table
     Table table1 = conn.getTable(
-        BaseTable.getTableName(c1, FlowActivityTable.TABLE_NAME_CONF_NAME,
-            FlowActivityTable.DEFAULT_TABLE_NAME));
+        BaseTableRW.getTableName(c1, FlowActivityTableRW.TABLE_NAME_CONF_NAME,
+            FlowActivityTableRW.DEFAULT_TABLE_NAME));
     byte[] startRow =
         new FlowActivityRowKey(cluster, minStartTs, user, flow).getRowKey();
     Get g = new Get(startRow);
@@ -178,7 +178,7 @@ public class TestHBaseStorageFlowActivity {
     assertEquals(cluster, flowActivityRowKey.getClusterId());
     assertEquals(user, flowActivityRowKey.getUserId());
     assertEquals(flow, flowActivityRowKey.getFlowName());
-    Long dayTs = HBaseTimelineStorageUtils.getTopOfTheDayTimestamp(minStartTs);
+    Long dayTs = HBaseTimelineSchemaUtils.getTopOfTheDayTimestamp(minStartTs);
     assertEquals(dayTs, flowActivityRowKey.getDayTimestamp());
     assertEquals(1, values.size());
     checkFlowActivityRunId(runid, flowVersion, values);
@@ -292,8 +292,8 @@ public class TestHBaseStorageFlowActivity {
     s.setStopRow(stopRow);
     Connection conn = ConnectionFactory.createConnection(c1);
     Table table1 = conn.getTable(
-        BaseTable.getTableName(c1, FlowActivityTable.TABLE_NAME_CONF_NAME,
-            FlowActivityTable.DEFAULT_TABLE_NAME));
+        BaseTableRW.getTableName(c1, FlowActivityTableRW.TABLE_NAME_CONF_NAME,
+            FlowActivityTableRW.DEFAULT_TABLE_NAME));
     ResultScanner scanner = table1.getScanner(s);
     int rowCount = 0;
     for (Result result : scanner) {
@@ -309,7 +309,7 @@ public class TestHBaseStorageFlowActivity {
       assertEquals(cluster, flowActivityRowKey.getClusterId());
       assertEquals(user, flowActivityRowKey.getUserId());
       assertEquals(flow, flowActivityRowKey.getFlowName());
-      Long dayTs = HBaseTimelineStorageUtils.getTopOfTheDayTimestamp(
+      Long dayTs = HBaseTimelineSchemaUtils.getTopOfTheDayTimestamp(
           appCreatedTime);
       assertEquals(dayTs, flowActivityRowKey.getDayTimestamp());
       assertEquals(1, values.size());
@@ -401,7 +401,7 @@ public class TestHBaseStorageFlowActivity {
         assertEquals(user, flowActivity.getUser());
         assertEquals(flow, flowActivity.getFlowName());
         long dayTs =
-            HBaseTimelineStorageUtils.getTopOfTheDayTimestamp(appCreatedTime);
+            HBaseTimelineSchemaUtils.getTopOfTheDayTimestamp(appCreatedTime);
         assertEquals(dayTs, flowActivity.getDate().getTime());
         Set<FlowRunEntity> flowRuns = flowActivity.getFlowRuns();
         assertEquals(3, flowRuns.size());
@@ -442,8 +442,8 @@ public class TestHBaseStorageFlowActivity {
     s.setStopRow(stopRow);
     Connection conn = ConnectionFactory.createConnection(c1);
     Table table1 = conn.getTable(
-        BaseTable.getTableName(c1, FlowActivityTable.TABLE_NAME_CONF_NAME,
-            FlowActivityTable.DEFAULT_TABLE_NAME));
+        BaseTableRW.getTableName(c1, FlowActivityTableRW.TABLE_NAME_CONF_NAME,
+            FlowActivityTableRW.DEFAULT_TABLE_NAME));
     ResultScanner scanner = table1.getScanner(s);
     int rowCount = 0;
     for (Result result : scanner) {
@@ -456,7 +456,7 @@ public class TestHBaseStorageFlowActivity {
       assertEquals(cluster, flowActivityRowKey.getClusterId());
       assertEquals(user, flowActivityRowKey.getUserId());
       assertEquals(flow, flowActivityRowKey.getFlowName());
-      Long dayTs = HBaseTimelineStorageUtils.getTopOfTheDayTimestamp(
+      Long dayTs = HBaseTimelineSchemaUtils.getTopOfTheDayTimestamp(
           appCreatedTime);
       assertEquals(dayTs, flowActivityRowKey.getDayTimestamp());
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/9af30d46/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase-tests/src/test/java/org/apache/hadoop/yarn/server/timelineservice/storage/flow/TestHBaseStorageFlowRun.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase-tests/src/test/java/org/apache/hadoop/yarn/server/timelineservice/storage/flow/TestHBaseStorageFlowRun.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase-tests/src/test/java/org/apache/hadoop/yarn/server/timelineservice/storage/flow/TestHBaseStorageFlowRun.java
index 1ad02e1..622b0eb 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase-tests/src/test/java/org/apache/hadoop/yarn/server/timelineservice/storage/flow/TestHBaseStorageFlowRun.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase-tests/src/test/java/org/apache/hadoop/yarn/server/timelineservice/storage/flow/TestHBaseStorageFlowRun.java
@@ -62,9 +62,9 @@ import org.apache.hadoop.yarn.server.timelineservice.storage.DataGeneratorForTes
 import org.apache.hadoop.yarn.server.timelineservice.storage.HBaseTimelineReaderImpl;
 import org.apache.hadoop.yarn.server.timelineservice.storage.HBaseTimelineWriterImpl;
 import org.apache.hadoop.yarn.server.timelineservice.storage.TimelineReader.Field;
-import org.apache.hadoop.yarn.server.timelineservice.storage.common.BaseTable;
+import org.apache.hadoop.yarn.server.timelineservice.storage.common.BaseTableRW;
 import org.apache.hadoop.yarn.server.timelineservice.storage.common.ColumnHelper;
-import org.apache.hadoop.yarn.server.timelineservice.storage.entity.EntityTable;
+import org.apache.hadoop.yarn.server.timelineservice.storage.entity.EntityTableRW;
 import org.junit.AfterClass;
 import org.junit.BeforeClass;
 import org.junit.Test;
@@ -91,8 +91,8 @@ public class TestHBaseStorageFlowRun {
   @Test
   public void checkCoProcessorOff() throws IOException, InterruptedException {
     Configuration hbaseConf = util.getConfiguration();
-    TableName table = BaseTable.getTableName(hbaseConf,
-        FlowRunTable.TABLE_NAME_CONF_NAME, FlowRunTable.DEFAULT_TABLE_NAME);
+    TableName table = BaseTableRW.getTableName(hbaseConf,
+        FlowRunTableRW.TABLE_NAME_CONF_NAME, FlowRunTableRW.DEFAULT_TABLE_NAME);
     Connection conn = null;
     conn = ConnectionFactory.createConnection(hbaseConf);
     Admin admin = conn.getAdmin();
@@ -106,9 +106,9 @@ public class TestHBaseStorageFlowRun {
       checkCoprocessorExists(table, true);
     }
 
-    table = BaseTable.getTableName(hbaseConf,
-        FlowActivityTable.TABLE_NAME_CONF_NAME,
-        FlowActivityTable.DEFAULT_TABLE_NAME);
+    table = BaseTableRW.getTableName(hbaseConf,
+        FlowActivityTableRW.TABLE_NAME_CONF_NAME,
+        FlowActivityTableRW.DEFAULT_TABLE_NAME);
     if (admin.tableExists(table)) {
       // check the regions.
       // check in flow activity table
@@ -116,8 +116,8 @@ public class TestHBaseStorageFlowRun {
       checkCoprocessorExists(table, false);
     }
 
-    table = BaseTable.getTableName(hbaseConf, EntityTable.TABLE_NAME_CONF_NAME,
-        EntityTable.DEFAULT_TABLE_NAME);
+    table = BaseTableRW.getTableName(hbaseConf,
+        EntityTableRW.TABLE_NAME_CONF_NAME, EntityTableRW.DEFAULT_TABLE_NAME);
     if (admin.tableExists(table)) {
       // check the regions.
       // check in entity table
@@ -224,8 +224,10 @@ public class TestHBaseStorageFlowRun {
 
     Connection conn = ConnectionFactory.createConnection(c1);
     // check in flow run table
-    Table table1 = conn.getTable(BaseTable.getTableName(c1,
-        FlowRunTable.TABLE_NAME_CONF_NAME, FlowRunTable.DEFAULT_TABLE_NAME));
+    Table table1 = conn.getTable(
+        BaseTableRW.getTableName(c1,
+            FlowRunTableRW.TABLE_NAME_CONF_NAME,
+            FlowRunTableRW.DEFAULT_TABLE_NAME));
     // scan the table and see that we get back the right min and max
     // timestamps
     byte[] startRow = new FlowRunRowKey(cluster, user, flow, runid).getRowKey();
@@ -380,8 +382,10 @@ public class TestHBaseStorageFlowRun {
         .getRowKey();
     s.setStopRow(stopRow);
     Connection conn = ConnectionFactory.createConnection(c1);
-    Table table1 = conn.getTable(BaseTable.getTableName(c1,
-        FlowRunTable.TABLE_NAME_CONF_NAME, FlowRunTable.DEFAULT_TABLE_NAME));
+    Table table1 = conn.getTable(
+        BaseTableRW.getTableName(c1,
+            FlowRunTableRW.TABLE_NAME_CONF_NAME,
+            FlowRunTableRW.DEFAULT_TABLE_NAME));
     ResultScanner scanner = table1.getScanner(s);
 
     int loopCount = 0;
@@ -525,8 +529,10 @@ public class TestHBaseStorageFlowRun {
         new FlowRunRowKey(clusterStop, user, flow, runid).getRowKey();
     s.setStopRow(stopRow);
     Connection conn = ConnectionFactory.createConnection(c1);
-    Table table1 = conn.getTable(BaseTable.getTableName(c1,
-        FlowRunTable.TABLE_NAME_CONF_NAME, FlowRunTable.DEFAULT_TABLE_NAME));
+    Table table1 = conn.getTable(
+        BaseTableRW.getTableName(c1,
+            FlowRunTableRW.TABLE_NAME_CONF_NAME,
+            FlowRunTableRW.DEFAULT_TABLE_NAME));
     ResultScanner scanner = table1.getScanner(s);
 
     int rowCount = 0;
@@ -810,8 +816,10 @@ public class TestHBaseStorageFlowRun {
       boolean checkMax) throws IOException {
     Connection conn = ConnectionFactory.createConnection(c1);
     // check in flow run table
-    Table table1 = conn.getTable(BaseTable.getTableName(c1,
-        FlowRunTable.TABLE_NAME_CONF_NAME, FlowRunTable.DEFAULT_TABLE_NAME));
+    Table table1 = conn.getTable(
+        BaseTableRW.getTableName(c1,
+            FlowRunTableRW.TABLE_NAME_CONF_NAME,
+            FlowRunTableRW.DEFAULT_TABLE_NAME));
     // scan the table and see that we get back the right min and max
     // timestamps
     byte[] startRow = new FlowRunRowKey(cluster, user, flow, runid).getRowKey();
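
All of these helpers use the same scan idiom against the renamed
FlowRunTableRW: build start and stop keys from FlowRunRowKey, then walk a
ResultScanner. Condensed from the hunks above into one method (test
classpath and imports as in TestHBaseStorageFlowRun; nothing new beyond
the method wrapper):

    // Count the rows between two flow run row keys.
    static int countFlowRunRows(Configuration c1, Connection conn,
        String cluster, String clusterStop, String user, String flow,
        long runid) throws IOException {
      Scan s = new Scan();
      s.setStartRow(new FlowRunRowKey(cluster, user, flow, runid).getRowKey());
      s.setStopRow(
          new FlowRunRowKey(clusterStop, user, flow, runid).getRowKey());
      Table table1 = conn.getTable(
          BaseTableRW.getTableName(c1,
              FlowRunTableRW.TABLE_NAME_CONF_NAME,
              FlowRunTableRW.DEFAULT_TABLE_NAME));
      int rowCount = 0;
      for (Result result : table1.getScanner(s)) {
        rowCount++;
      }
      return rowCount;
    }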

http://git-wip-us.apache.org/repos/asf/hadoop/blob/9af30d46/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase-tests/src/test/java/org/apache/hadoop/yarn/server/timelineservice/storage/flow/TestHBaseStorageFlowRunCompaction.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase-tests/src/test/java/org/apache/hadoop/yarn/server/timelineservice/storage/flow/TestHBaseStorageFlowRunCompaction.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase-tests/src/test/java/org/apache/hadoop/yarn/server/timelineservice/storage/flow/TestHBaseStorageFlowRunCompaction.java
index 0ef8260..31be285 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase-tests/src/test/java/org/apache/hadoop/yarn/server/timelineservice/storage/flow/TestHBaseStorageFlowRunCompaction.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase-tests/src/test/java/org/apache/hadoop/yarn/server/timelineservice/storage/flow/TestHBaseStorageFlowRunCompaction.java
@@ -54,9 +54,9 @@ import org.apache.hadoop.yarn.api.records.timelineservice.TimelineEntity;
 import org.apache.hadoop.yarn.server.timelineservice.collector.TimelineCollectorContext;
 import org.apache.hadoop.yarn.server.timelineservice.storage.DataGeneratorForTest;
 import org.apache.hadoop.yarn.server.timelineservice.storage.HBaseTimelineWriterImpl;
-import org.apache.hadoop.yarn.server.timelineservice.storage.common.BaseTable;
+import org.apache.hadoop.yarn.server.timelineservice.storage.common.BaseTableRW;
 import org.apache.hadoop.yarn.server.timelineservice.storage.common.ColumnHelper;
-import org.apache.hadoop.yarn.server.timelineservice.storage.common.HBaseTimelineStorageUtils;
+import org.apache.hadoop.yarn.server.timelineservice.storage.common.HBaseTimelineServerUtils;
 import org.apache.hadoop.yarn.server.timelineservice.storage.common.LongConverter;
 import org.apache.hadoop.yarn.server.timelineservice.storage.common.TimestampGenerator;
 import org.junit.AfterClass;
@@ -107,8 +107,10 @@ public class TestHBaseStorageFlowRunCompaction {
     Configuration hbaseConf = util.getConfiguration();
     Connection conn = null;
     conn = ConnectionFactory.createConnection(hbaseConf);
-    Table flowRunTable = conn.getTable(BaseTable.getTableName(hbaseConf,
-        FlowRunTable.TABLE_NAME_CONF_NAME, FlowRunTable.DEFAULT_TABLE_NAME));
+    Table flowRunTable = conn.getTable(
+        BaseTableRW.getTableName(hbaseConf,
+            FlowRunTableRW.TABLE_NAME_CONF_NAME,
+            FlowRunTableRW.DEFAULT_TABLE_NAME));
     flowRunTable.put(p);
 
     Get g = new Get(rowKeyBytes);
@@ -156,8 +158,10 @@ public class TestHBaseStorageFlowRunCompaction {
     Configuration hbaseConf = util.getConfiguration();
     Connection conn = null;
     conn = ConnectionFactory.createConnection(hbaseConf);
-    Table flowRunTable = conn.getTable(BaseTable.getTableName(hbaseConf,
-        FlowRunTable.TABLE_NAME_CONF_NAME, FlowRunTable.DEFAULT_TABLE_NAME));
+    Table flowRunTable = conn.getTable(
+        BaseTableRW.getTableName(hbaseConf,
+            FlowRunTableRW.TABLE_NAME_CONF_NAME,
+            FlowRunTableRW.DEFAULT_TABLE_NAME));
     flowRunTable.put(p);
 
     String rowKey2 = "nonNumericRowKey2";
@@ -324,10 +328,12 @@ public class TestHBaseStorageFlowRunCompaction {
 
     // check in flow run table
     HRegionServer server = util.getRSForFirstRegionInTable(
-        BaseTable.getTableName(c1, FlowRunTable.TABLE_NAME_CONF_NAME,
-            FlowRunTable.DEFAULT_TABLE_NAME));
-    List<Region> regions = server.getOnlineRegions(BaseTable.getTableName(c1,
-        FlowRunTable.TABLE_NAME_CONF_NAME, FlowRunTable.DEFAULT_TABLE_NAME));
+        BaseTableRW.getTableName(c1, FlowRunTableRW.TABLE_NAME_CONF_NAME,
+            FlowRunTableRW.DEFAULT_TABLE_NAME));
+    List<Region> regions = server.getOnlineRegions(
+        BaseTableRW.getTableName(c1,
+            FlowRunTableRW.TABLE_NAME_CONF_NAME,
+            FlowRunTableRW.DEFAULT_TABLE_NAME));
     assertTrue("Didn't find any regions for primary table!",
         regions.size() > 0);
     // flush and compact all the regions of the primary table
@@ -352,8 +358,10 @@ public class TestHBaseStorageFlowRunCompaction {
         new FlowRunRowKey(clusterStop, user, flow, runid).getRowKey();
     s.setStopRow(stopRow);
     Connection conn = ConnectionFactory.createConnection(c1);
-    Table table1 = conn.getTable(BaseTable.getTableName(c1,
-        FlowRunTable.TABLE_NAME_CONF_NAME, FlowRunTable.DEFAULT_TABLE_NAME));
+    Table table1 = conn.getTable(
+        BaseTableRW.getTableName(c1,
+            FlowRunTableRW.TABLE_NAME_CONF_NAME,
+            FlowRunTableRW.DEFAULT_TABLE_NAME));
     ResultScanner scanner = table1.getScanner(s);
 
     int rowCount = 0;
@@ -420,7 +428,7 @@ public class TestHBaseStorageFlowRunCompaction {
     tags.add(t);
     byte[] tagByteArray = Tag.fromList(tags);
     // create a cell with a VERY old timestamp and attribute SUM_FINAL
-    Cell c1 = HBaseTimelineStorageUtils.createNewCell(aRowKey, aFamily,
+    Cell c1 = HBaseTimelineServerUtils.createNewCell(aRowKey, aFamily,
         aQualifier, cell1Ts, Bytes.toBytes(cellValue1), tagByteArray);
     currentColumnCells.add(c1);
 
@@ -430,7 +438,7 @@ public class TestHBaseStorageFlowRunCompaction {
     tags.add(t);
     tagByteArray = Tag.fromList(tags);
     // create a cell with a recent timestamp and attribute SUM_FINAL
-    Cell c2 = HBaseTimelineStorageUtils.createNewCell(aRowKey, aFamily,
+    Cell c2 = HBaseTimelineServerUtils.createNewCell(aRowKey, aFamily,
         aQualifier, cell2Ts, Bytes.toBytes(cellValue2), tagByteArray);
     currentColumnCells.add(c2);
 
@@ -440,7 +448,7 @@ public class TestHBaseStorageFlowRunCompaction {
     tags.add(t);
     tagByteArray = Tag.fromList(tags);
     // create a cell with a VERY old timestamp but has attribute SUM
-    Cell c3 = HBaseTimelineStorageUtils.createNewCell(aRowKey, aFamily,
+    Cell c3 = HBaseTimelineServerUtils.createNewCell(aRowKey, aFamily,
         aQualifier, cell3Ts, Bytes.toBytes(cellValue3), tagByteArray);
     currentColumnCells.add(c3);
 
@@ -450,7 +458,7 @@ public class TestHBaseStorageFlowRunCompaction {
     tags.add(t);
     tagByteArray = Tag.fromList(tags);
     // create a cell with a VERY old timestamp but has attribute SUM
-    Cell c4 = HBaseTimelineStorageUtils.createNewCell(aRowKey, aFamily,
+    Cell c4 = HBaseTimelineServerUtils.createNewCell(aRowKey, aFamily,
         aQualifier, cell4Ts, Bytes.toBytes(cellValue4), tagByteArray);
     currentColumnCells.add(c4);
 
@@ -520,7 +528,7 @@ public class TestHBaseStorageFlowRunCompaction {
       tags.add(t);
       byte[] tagByteArray = Tag.fromList(tags);
       // create a cell with a VERY old timestamp and attribute SUM_FINAL
-      c1 = HBaseTimelineStorageUtils.createNewCell(aRowKey, aFamily, aQualifier,
+      c1 = HBaseTimelineServerUtils.createNewCell(aRowKey, aFamily, aQualifier,
           cellTsFinal, Bytes.toBytes(cellValueFinal), tagByteArray);
       currentColumnCells.add(c1);
       cellTsFinal++;
@@ -534,7 +542,7 @@ public class TestHBaseStorageFlowRunCompaction {
       tags.add(t);
       byte[] tagByteArray = Tag.fromList(tags);
       // create a cell with attribute SUM
-      c1 = HBaseTimelineStorageUtils.createNewCell(aRowKey, aFamily, aQualifier,
+      c1 = HBaseTimelineServerUtils.createNewCell(aRowKey, aFamily, aQualifier,
           cellTsNotFinal, Bytes.toBytes(cellValueNotFinal), tagByteArray);
       currentColumnCells.add(c1);
       cellTsNotFinal++;
@@ -611,7 +619,7 @@ public class TestHBaseStorageFlowRunCompaction {
       tags.add(t);
       byte[] tagByteArray = Tag.fromList(tags);
       // create a cell with a VERY old timestamp and attribute SUM_FINAL
-      c1 = HBaseTimelineStorageUtils.createNewCell(aRowKey, aFamily, aQualifier,
+      c1 = HBaseTimelineServerUtils.createNewCell(aRowKey, aFamily, aQualifier,
           cellTsFinal, Bytes.toBytes(cellValueFinal), tagByteArray);
       currentColumnCells.add(c1);
       cellTsFinal++;
@@ -625,7 +633,7 @@ public class TestHBaseStorageFlowRunCompaction {
       tags.add(t);
       byte[] tagByteArray = Tag.fromList(tags);
       // create a cell with a VERY old timestamp and attribute SUM_FINAL
-      c1 = HBaseTimelineStorageUtils.createNewCell(aRowKey, aFamily, aQualifier,
+      c1 = HBaseTimelineServerUtils.createNewCell(aRowKey, aFamily, aQualifier,
           cellTsFinalNotExpire, Bytes.toBytes(cellValueFinal), tagByteArray);
       currentColumnCells.add(c1);
       cellTsFinalNotExpire++;
@@ -639,7 +647,7 @@ public class TestHBaseStorageFlowRunCompaction {
       tags.add(t);
       byte[] tagByteArray = Tag.fromList(tags);
       // create a cell with attribute SUM
-      c1 = HBaseTimelineStorageUtils.createNewCell(aRowKey, aFamily, aQualifier,
+      c1 = HBaseTimelineServerUtils.createNewCell(aRowKey, aFamily, aQualifier,
           cellTsNotFinal, Bytes.toBytes(cellValueNotFinal), tagByteArray);
       currentColumnCells.add(c1);
       cellTsNotFinal++;
@@ -696,7 +704,7 @@ public class TestHBaseStorageFlowRunCompaction {
     SortedSet<Cell> currentColumnCells = new TreeSet<Cell>(KeyValue.COMPARATOR);
 
     // create a cell with a VERY old timestamp and attribute SUM_FINAL
-    Cell c1 = HBaseTimelineStorageUtils.createNewCell(aRowKey, aFamily,
+    Cell c1 = HBaseTimelineServerUtils.createNewCell(aRowKey, aFamily,
         aQualifier, 120L, Bytes.toBytes(cellValue1), tagByteArray);
     currentColumnCells.add(c1);
 
@@ -707,7 +715,7 @@ public class TestHBaseStorageFlowRunCompaction {
     tagByteArray = Tag.fromList(tags);
 
     // create a cell with a VERY old timestamp but has attribute SUM
-    Cell c2 = HBaseTimelineStorageUtils.createNewCell(aRowKey, aFamily,
+    Cell c2 = HBaseTimelineServerUtils.createNewCell(aRowKey, aFamily,
         aQualifier, 130L, Bytes.toBytes(cellValue2), tagByteArray);
     currentColumnCells.add(c2);
     List<Cell> cells = fs.processSummationMajorCompaction(currentColumnCells,
@@ -754,7 +762,7 @@ public class TestHBaseStorageFlowRunCompaction {
     SortedSet<Cell> currentColumnCells = new TreeSet<Cell>(KeyValue.COMPARATOR);
 
     // create a cell with a VERY old timestamp
-    Cell c1 = HBaseTimelineStorageUtils.createNewCell(aRowKey, aFamily,
+    Cell c1 = HBaseTimelineServerUtils.createNewCell(aRowKey, aFamily,
         aQualifier, 120L, Bytes.toBytes(1110L), tagByteArray);
     currentColumnCells.add(c1);
 
@@ -792,7 +800,7 @@ public class TestHBaseStorageFlowRunCompaction {
 
     SortedSet<Cell> currentColumnCells = new TreeSet<Cell>(KeyValue.COMPARATOR);
 
-    Cell c1 = HBaseTimelineStorageUtils.createNewCell(aRowKey, aFamily,
+    Cell c1 = HBaseTimelineServerUtils.createNewCell(aRowKey, aFamily,
         aQualifier, currentTimestamp, Bytes.toBytes(1110L), tagByteArray);
     currentColumnCells.add(c1);
     List<Cell> cells = fs.processSummationMajorCompaction(currentColumnCells,

http://git-wip-us.apache.org/repos/asf/hadoop/blob/9af30d46/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-client/pom.xml
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-client/pom.xml b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-client/pom.xml
new file mode 100644
index 0000000..a1db497
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-client/pom.xml
@@ -0,0 +1,219 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!--
+   Licensed to the Apache Software Foundation (ASF) under one or more
+   contributor license agreements.  See the NOTICE file distributed with
+   this work for additional information regarding copyright ownership.
+   The ASF licenses this file to You under the Apache License, Version 2.0
+   (the "License"); you may not use this file except in compliance with
+   the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+-->
+<project xmlns="http://maven.apache.org/POM/4.0.0"
+         xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
+         xsi:schemaLocation="http://maven.apache.org/POM/4.0.0
+                             http://maven.apache.org/xsd/maven-4.0.0.xsd">
+  <parent>
+    <artifactId>hadoop-yarn-server-timelineservice-hbase</artifactId>
+    <groupId>org.apache.hadoop</groupId>
+    <version>3.2.0-SNAPSHOT</version>
+  </parent>
+  <modelVersion>4.0.0</modelVersion>
+  <artifactId>hadoop-yarn-server-timelineservice-hbase-client</artifactId>
+  <name>Apache Hadoop YARN TimelineService HBase Client</name>
+
+  <properties>
+    <!-- Needed for generating FindBugs warnings using parent pom -->
+    <yarn.basedir>${project.parent.parent.parent.basedir}</yarn.basedir>
+  </properties>
+
+  <dependencies>
+    <dependency>
+      <groupId>org.apache.hadoop</groupId>
+      <artifactId>hadoop-yarn-server-timelineservice-hbase-common</artifactId>
+    </dependency>
+
+    <dependency>
+      <groupId>commons-logging</groupId>
+      <artifactId>commons-logging</artifactId>
+    </dependency>
+
+    <dependency>
+      <groupId>commons-lang</groupId>
+      <artifactId>commons-lang</artifactId>
+    </dependency>
+
+    <dependency>
+      <groupId>commons-cli</groupId>
+      <artifactId>commons-cli</artifactId>
+    </dependency>
+
+    <dependency>
+      <groupId>com.google.guava</groupId>
+      <artifactId>guava</artifactId>
+    </dependency>
+
+    <dependency>
+      <groupId>org.apache.hadoop</groupId>
+      <artifactId>hadoop-annotations</artifactId>
+      <scope>provided</scope>
+    </dependency>
+
+    <dependency>
+      <groupId>org.apache.hadoop</groupId>
+      <artifactId>hadoop-common</artifactId>
+      <scope>provided</scope>
+    </dependency>
+
+    <!-- 'mvn dependency:analyze' fails to detect use of this dependency -->
+    <dependency>
+      <groupId>org.apache.hadoop</groupId>
+      <artifactId>hadoop-common</artifactId>
+      <type>test-jar</type>
+      <scope>test</scope>
+    </dependency>
+
+    <dependency>
+      <groupId>org.apache.hadoop</groupId>
+      <artifactId>hadoop-yarn-api</artifactId>
+      <scope>provided</scope>
+    </dependency>
+
+    <dependency>
+      <groupId>org.apache.hadoop</groupId>
+      <artifactId>hadoop-yarn-common</artifactId>
+      <scope>provided</scope>
+    </dependency>
+
+    <dependency>
+      <groupId>org.apache.hadoop</groupId>
+      <artifactId>hadoop-yarn-server-applicationhistoryservice</artifactId>
+      <scope>provided</scope>
+    </dependency>
+
+    <dependency>
+      <groupId>org.apache.hadoop</groupId>
+      <artifactId>hadoop-yarn-server-timelineservice</artifactId>
+    </dependency>
+
+    <dependency>
+      <groupId>org.apache.hbase</groupId>
+      <artifactId>hbase-common</artifactId>
+      <exclusions>
+        <exclusion>
+          <groupId>org.apache.hadoop</groupId>
+          <artifactId>hadoop-mapreduce-client-core</artifactId>
+        </exclusion>
+        <exclusion>
+          <groupId>org.mortbay.jetty</groupId>
+          <artifactId>jetty-util</artifactId>
+        </exclusion>
+      </exclusions>
+    </dependency>
+
+    <dependency>
+      <groupId>org.apache.hbase</groupId>
+      <artifactId>hbase-client</artifactId>
+      <exclusions>
+        <exclusion>
+          <groupId>org.apache.hadoop</groupId>
+          <artifactId>hadoop-mapreduce-client-core</artifactId>
+        </exclusion>
+      </exclusions>
+    </dependency>
+
+    <dependency>
+      <groupId>org.apache.hbase</groupId>
+      <artifactId>hbase-server</artifactId>
+      <scope>provided</scope>
+      <exclusions>
+        <exclusion>
+          <groupId>org.apache.hadoop</groupId>
+          <artifactId>hadoop-hdfs</artifactId>
+        </exclusion>
+        <exclusion>
+          <groupId>org.apache.hadoop</groupId>
+          <artifactId>hadoop-hdfs-client</artifactId>
+        </exclusion>
+        <exclusion>
+          <groupId>org.apache.hadoop</groupId>
+          <artifactId>hadoop-client</artifactId>
+        </exclusion>
+        <exclusion>
+          <groupId>org.apache.hadoop</groupId>
+          <artifactId>hadoop-mapreduce-client-core</artifactId>
+        </exclusion>
+        <exclusion>
+          <groupId>org.mortbay.jetty</groupId>
+          <artifactId>jetty</artifactId>
+        </exclusion>
+        <exclusion>
+          <groupId>org.mortbay.jetty</groupId>
+          <artifactId>jetty-util</artifactId>
+        </exclusion>
+        <exclusion>
+          <groupId>org.mortbay.jetty</groupId>
+          <artifactId>jetty-sslengine</artifactId>
+        </exclusion>
+      </exclusions>
+    </dependency>
+
+    <dependency>
+      <groupId>junit</groupId>
+      <artifactId>junit</artifactId>
+      <scope>test</scope>
+    </dependency>
+  </dependencies>
+
+  <build>
+    <plugins>
+      <plugin>
+        <artifactId>maven-jar-plugin</artifactId>
+        <executions>
+          <execution>
+            <goals>
+              <goal>test-jar</goal>
+            </goals>
+            <phase>test-compile</phase>
+          </execution>
+        </executions>
+      </plugin>
+      <plugin>
+        <groupId>org.apache.maven.plugins</groupId>
+        <artifactId>maven-javadoc-plugin</artifactId>
+        <configuration>
+          <additionalDependencies>
+            <additionnalDependency>
+              <groupId>junit</groupId>
+              <artifactId>junit</artifactId>
+              <version>4.11</version>
+            </additionnalDependency>
+          </additionalDependencies>
+        </configuration>
+      </plugin>
+      <plugin>
+        <groupId>org.apache.maven.plugins</groupId>
+        <artifactId>maven-dependency-plugin</artifactId>
+        <executions>
+          <execution>
+            <phase>package</phase>
+            <goals>
+              <goal>copy-dependencies</goal>
+            </goals>
+            <configuration>
+              <includeScope>runtime</includeScope>
+              <excludeGroupIds>org.slf4j,org.apache.hadoop,com.github.stephenc.findbugs</excludeGroupIds>
+              <outputDirectory>${project.build.directory}/lib</outputDirectory>
+            </configuration>
+          </execution>
+        </executions>
+      </plugin>
+    </plugins>
+  </build>
+</project>




[26/50] [abbrv] hadoop git commit: HDFS-13119. RBF: Manage unavailable clusters. Contributed by Yiqun Lin.

Posted by ha...@apache.org.
HDFS-13119. RBF: Manage unavailable clusters. Contributed by Yiqun Lin.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/8896d20b
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/8896d20b
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/8896d20b

Branch: refs/heads/HDFS-12996
Commit: 8896d20b91520053a6bbfb680adb345cd24f4142
Parents: 1d37cf6
Author: Yiqun Lin <yq...@apache.org>
Authored: Tue Feb 20 09:37:08 2018 +0800
Committer: Yiqun Lin <yq...@apache.org>
Committed: Tue Feb 20 09:37:08 2018 +0800

----------------------------------------------------------------------
 .../org/apache/hadoop/hdfs/DFSConfigKeys.java   |   8 +
 .../federation/metrics/FederationRPCMBean.java  |   2 +
 .../metrics/FederationRPCMetrics.java           |  11 ++
 .../FederationRPCPerformanceMonitor.java        |  10 ++
 .../resolver/NamenodeStatusReport.java          |   8 +
 .../federation/router/RouterRpcClient.java      |  71 +++++++--
 .../federation/router/RouterRpcMonitor.java     |  13 ++
 .../federation/router/RouterRpcServer.java      |   9 ++
 .../src/main/resources/hdfs-default.xml         |  17 +++
 .../router/TestRouterRPCClientRetries.java      | 151 +++++++++++++++++++
 10 files changed, 289 insertions(+), 11 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/8896d20b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
index 0828957..bea38d2 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
@@ -1246,6 +1246,14 @@ public class DFSConfigKeys extends CommonConfigurationKeys {
   public static final long DFS_ROUTER_NAMENODE_CONNECTION_CLEAN_MS_DEFAULT =
       TimeUnit.SECONDS.toMillis(10);
 
+  // HDFS Router RPC client
+  public static final String DFS_ROUTER_CLIENT_THREADS_SIZE =
+      FEDERATION_ROUTER_PREFIX + "client.thread-size";
+  public static final int DFS_ROUTER_CLIENT_THREADS_SIZE_DEFAULT = 32;
+  public static final String DFS_ROUTER_CLIENT_MAX_ATTEMPTS =
+      FEDERATION_ROUTER_PREFIX + "client.retry.max.attempts";
+  public static final int DFS_ROUTER_CLIENT_MAX_ATTEMPTS_DEFAULT = 3;
+
   // HDFS Router State Store connection
   public static final String FEDERATION_FILE_RESOLVER_CLIENT_CLASS =
       FEDERATION_ROUTER_PREFIX + "file.resolver.client.class";

http://git-wip-us.apache.org/repos/asf/hadoop/blob/8896d20b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/metrics/FederationRPCMBean.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/metrics/FederationRPCMBean.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/metrics/FederationRPCMBean.java
index 00209e9..3e031fe 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/metrics/FederationRPCMBean.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/metrics/FederationRPCMBean.java
@@ -42,6 +42,8 @@ public interface FederationRPCMBean {
 
   long getProxyOpNotImplemented();
 
+  long getProxyOpRetries();
+
   long getRouterFailureStateStoreOps();
 
   long getRouterFailureReadOnlyOps();

http://git-wip-us.apache.org/repos/asf/hadoop/blob/8896d20b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/metrics/FederationRPCMetrics.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/metrics/FederationRPCMetrics.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/metrics/FederationRPCMetrics.java
index 8995689..94d3383 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/metrics/FederationRPCMetrics.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/metrics/FederationRPCMetrics.java
@@ -56,6 +56,8 @@ public class FederationRPCMetrics implements FederationRPCMBean {
   private MutableCounterLong proxyOpFailureCommunicate;
   @Metric("Number of operations not implemented")
   private MutableCounterLong proxyOpNotImplemented;
+  @Metric("Number of operation retries")
+  private MutableCounterLong proxyOpRetries;
 
   @Metric("Failed requests due to State Store unavailable")
   private MutableCounterLong routerFailureStateStore;
@@ -126,6 +128,15 @@ public class FederationRPCMetrics implements FederationRPCMBean {
     return proxyOpNotImplemented.value();
   }
 
+  public void incrProxyOpRetries() {
+    proxyOpRetries.incr();
+  }
+
+  @Override
+  public long getProxyOpRetries() {
+    return proxyOpRetries.value();
+  }
+
   public void incrRouterFailureStateStore() {
     routerFailureStateStore.incr();
   }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/8896d20b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/metrics/FederationRPCPerformanceMonitor.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/metrics/FederationRPCPerformanceMonitor.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/metrics/FederationRPCPerformanceMonitor.java
index e3a16b5..547ebb5 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/metrics/FederationRPCPerformanceMonitor.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/metrics/FederationRPCPerformanceMonitor.java
@@ -159,6 +159,11 @@ public class FederationRPCPerformanceMonitor implements RouterRpcMonitor {
   }
 
   @Override
+  public void proxyOpRetries() {
+    metrics.incrProxyOpRetries();
+  }
+
+  @Override
   public void routerFailureStateStore() {
     metrics.incrRouterFailureStateStore();
   }
@@ -208,4 +213,9 @@ public class FederationRPCPerformanceMonitor implements RouterRpcMonitor {
     }
     return -1;
   }
+
+  @Override
+  public FederationRPCMetrics getRPCMetrics() {
+    return this.metrics;
+  }
 }
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hadoop/blob/8896d20b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/resolver/NamenodeStatusReport.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/resolver/NamenodeStatusReport.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/resolver/NamenodeStatusReport.java
index d3c6d87..c3c6fa8 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/resolver/NamenodeStatusReport.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/resolver/NamenodeStatusReport.java
@@ -390,6 +390,14 @@ public class NamenodeStatusReport {
     return this.numOfBlocksPendingDeletion;
   }
 
+  /**
+   * Set the validity of registration.
+   * @param isValid Whether the registration is valid.
+   */
+  public void setRegistrationValid(boolean isValid) {
+    this.registrationValid = isValid;
+  }
+
   @Override
   public String toString() {
     return String.format("%s-%s:%s",

http://git-wip-us.apache.org/repos/asf/hadoop/blob/8896d20b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterRpcClient.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterRpcClient.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterRpcClient.java
index 4209a49..d3b7947 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterRpcClient.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterRpcClient.java
@@ -46,12 +46,14 @@ import java.util.regex.Matcher;
 import java.util.regex.Pattern;
 
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.NameNodeProxiesClient.ProxyAndInfo;
 import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys;
 import org.apache.hadoop.hdfs.protocol.ClientProtocol;
 import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
 import org.apache.hadoop.hdfs.server.federation.resolver.ActiveNamenodeResolver;
 import org.apache.hadoop.hdfs.server.federation.resolver.FederationNamenodeContext;
+import org.apache.hadoop.hdfs.server.federation.resolver.FederationNamenodeServiceState;
 import org.apache.hadoop.hdfs.server.federation.resolver.RemoteLocation;
 import org.apache.hadoop.io.retry.RetryPolicies;
 import org.apache.hadoop.io.retry.RetryPolicy;
@@ -123,10 +125,14 @@ public class RouterRpcClient {
     this.connectionManager = new ConnectionManager(conf);
     this.connectionManager.start();
 
+    int numThreads = conf.getInt(
+        DFSConfigKeys.DFS_ROUTER_CLIENT_THREADS_SIZE,
+        DFSConfigKeys.DFS_ROUTER_CLIENT_THREADS_SIZE_DEFAULT);
     ThreadFactory threadFactory = new ThreadFactoryBuilder()
         .setNameFormat("RPC Router Client-%d")
         .build();
-    this.executorService = Executors.newCachedThreadPool(threadFactory);
+    this.executorService = Executors.newFixedThreadPool(
+        numThreads, threadFactory);
 
     this.rpcMonitor = monitor;
 
@@ -134,8 +140,8 @@ public class RouterRpcClient {
         HdfsClientConfigKeys.Failover.MAX_ATTEMPTS_KEY,
         HdfsClientConfigKeys.Failover.MAX_ATTEMPTS_DEFAULT);
     int maxRetryAttempts = conf.getInt(
-        HdfsClientConfigKeys.Retry.MAX_ATTEMPTS_KEY,
-        HdfsClientConfigKeys.Retry.MAX_ATTEMPTS_DEFAULT);
+        DFSConfigKeys.DFS_ROUTER_CLIENT_MAX_ATTEMPTS,
+        DFSConfigKeys.DFS_ROUTER_CLIENT_MAX_ATTEMPTS_DEFAULT);
     int failoverSleepBaseMillis = conf.getInt(
         HdfsClientConfigKeys.Failover.SLEEPTIME_BASE_KEY,
         HdfsClientConfigKeys.Failover.SLEEPTIME_BASE_DEFAULT);
@@ -274,11 +280,24 @@ public class RouterRpcClient {
    *
    * @param ioe IOException reported.
    * @param retryCount Number of retries.
+   * @param nsId Nameservice ID.
    * @return Retry decision.
-   * @throws IOException Original exception if the retry policy generates one.
+   * @throws IOException Original exception if the retry policy generates one,
+   *                     or an IOException when no namenode in the nameservice
+   *                     is available.
    */
-  private RetryDecision shouldRetry(final IOException ioe, final int retryCount)
-      throws IOException {
+  private RetryDecision shouldRetry(final IOException ioe, final int retryCount,
+      final String nsId) throws IOException {
+    // Check for the case where the whole cluster is unavailable.
+    if (isClusterUnAvailable(nsId)) {
+      // Allow a single retry while the cluster recovers.
+      if (retryCount == 0) {
+        return RetryDecision.RETRY;
+      } else {
+        throw new IOException("No namenode available under nameservice " + nsId,
+            ioe);
+      }
+    }
+
     try {
       final RetryPolicy.RetryAction a =
           this.retryPolicy.shouldRetry(ioe, retryCount, 0, true);
@@ -329,7 +348,7 @@ public class RouterRpcClient {
         connection = this.getConnection(ugi, nsId, rpcAddress);
         ProxyAndInfo<ClientProtocol> client = connection.getClient();
         ClientProtocol proxy = client.getProxy();
-        ret = invoke(0, method, proxy, params);
+        ret = invoke(nsId, 0, method, proxy, params);
         if (failover) {
           // Success on alternate server, update
           InetSocketAddress address = client.getAddress();
@@ -400,6 +419,8 @@ public class RouterRpcClient {
    * Re-throws exceptions generated by the remote RPC call as either
    * RemoteException or IOException.
    *
+   * @param nsId Identifier for the nameservice
+   * @param retryCount Current retry count
    * @param method Method to invoke
    * @param obj Target object for the method
    * @param params Variable parameters
@@ -407,8 +428,8 @@ public class RouterRpcClient {
    * @throws IOException
    * @throws InterruptedException
    */
-  private Object invoke(int retryCount, final Method method, final Object obj,
-      final Object... params) throws IOException {
+  private Object invoke(String nsId, int retryCount, final Method method,
+      final Object obj, final Object... params) throws IOException {
     try {
       return method.invoke(obj, params);
     } catch (IllegalAccessException e) {
@@ -421,11 +442,16 @@ public class RouterRpcClient {
       Throwable cause = e.getCause();
       if (cause instanceof IOException) {
         IOException ioe = (IOException) cause;
+
         // Check if we should retry.
-        RetryDecision decision = shouldRetry(ioe, retryCount);
+        RetryDecision decision = shouldRetry(ioe, retryCount, nsId);
         if (decision == RetryDecision.RETRY) {
+          if (this.rpcMonitor != null) {
+            this.rpcMonitor.proxyOpRetries();
+          }
+
           // retry
-          return invoke(++retryCount, method, obj, params);
+          return invoke(nsId, ++retryCount, method, obj, params);
         } else if (decision == RetryDecision.FAILOVER_AND_RETRY) {
           // failover, invoker looks for standby exceptions for failover.
           if (ioe instanceof StandbyException) {
@@ -448,6 +474,29 @@ public class RouterRpcClient {
   }
 
   /**
+   * Check whether the cluster identified by the given nameservice ID is
+   * unavailable, i.e., has no namenode in active state.
+   * @param nsId Nameservice ID.
+   * @return True if no namenode of the nameservice is in active state.
+   * @throws IOException If the namenode list cannot be retrieved.
+   */
+  private boolean isClusterUnAvailable(String nsId) throws IOException {
+    List<? extends FederationNamenodeContext> nnState = this.namenodeResolver
+        .getNamenodesForNameserviceId(nsId);
+
+    if (nnState != null) {
+      for (FederationNamenodeContext nnContext : nnState) {
+        // Once we find one NN in active state, we consider the
+        // cluster available.
+        if (nnContext.getState() == FederationNamenodeServiceState.ACTIVE) {
+          return false;
+        }
+      }
+    }
+
+    return true;
+  }
+
+  /**
    * Get a clean copy of the exception. Sometimes the exceptions returned by the
    * server contain the full stack trace in the message.
    *

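In short, the new shouldRetry() grants a nameservice with no ACTIVE namenode exactly one retry before failing the call with the original exception as the cause. A minimal, self-contained sketch of that decision in plain Java follows; the AvailabilityProbe interface and the class names are illustrative stand-ins, not the actual Hadoop types.

import java.io.IOException;

/** Sketch of the retry decision above; names are illustrative only. */
public class RetryDecisionSketch {

  enum RetryDecision { FAIL, RETRY, FAILOVER_AND_RETRY }

  /** Stand-in for the namenode resolver lookup (an assumption). */
  interface AvailabilityProbe {
    boolean hasActiveNamenode(String nsId);
  }

  private final AvailabilityProbe probe;

  RetryDecisionSketch(AvailabilityProbe probe) {
    this.probe = probe;
  }

  RetryDecision shouldRetry(IOException ioe, int retryCount, String nsId)
      throws IOException {
    if (!probe.hasActiveNamenode(nsId)) {
      // Mirror the patch: grant one retry while the cluster recovers,
      // then surface the failure with the original exception as cause.
      if (retryCount == 0) {
        return RetryDecision.RETRY;
      }
      throw new IOException(
          "No namenode available under nameservice " + nsId, ioe);
    }
    // Otherwise a configured RetryPolicy decides; failover-and-retry is
    // the common outcome for standby exceptions.
    return RetryDecision.FAILOVER_AND_RETRY;
  }
}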
http://git-wip-us.apache.org/repos/asf/hadoop/blob/8896d20b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterRpcMonitor.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterRpcMonitor.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterRpcMonitor.java
index d889a56..df9aa11 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterRpcMonitor.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterRpcMonitor.java
@@ -18,6 +18,7 @@
 package org.apache.hadoop.hdfs.server.federation.router;
 
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hdfs.server.federation.metrics.FederationRPCMetrics;
 import org.apache.hadoop.hdfs.server.federation.store.StateStoreService;
 
 /**
@@ -36,6 +37,12 @@ public interface RouterRpcMonitor {
       Configuration conf, RouterRpcServer server, StateStoreService store);
 
   /**
+   * Get Router RPC metrics info.
+   * @return The instance of FederationRPCMetrics.
+   */
+  FederationRPCMetrics getRPCMetrics();
+
+  /**
    * Close the monitor.
    */
   void close();
@@ -74,6 +81,12 @@ public interface RouterRpcMonitor {
   void proxyOpNotImplemented();
 
   /**
+   * Mark that an operation proxied to a Namenode was retried because of an
+   * unexpected exception.
+   */
+  void proxyOpRetries();
+
+  /**
    * If the Router cannot contact the State Store in an operation.
    */
   void routerFailureStateStore();

http://git-wip-us.apache.org/repos/asf/hadoop/blob/8896d20b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterRpcServer.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterRpcServer.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterRpcServer.java
index 57125ca..e0dfeb4 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterRpcServer.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterRpcServer.java
@@ -105,6 +105,7 @@ import org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolPB;
 import org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB;
 import org.apache.hadoop.hdfs.security.token.block.DataEncryptionKey;
 import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier;
+import org.apache.hadoop.hdfs.server.federation.metrics.FederationRPCMetrics;
 import org.apache.hadoop.hdfs.server.federation.resolver.ActiveNamenodeResolver;
 import org.apache.hadoop.hdfs.server.federation.resolver.FederationNamespaceInfo;
 import org.apache.hadoop.hdfs.server.federation.resolver.FileSubclusterResolver;
@@ -2177,4 +2178,12 @@ public class RouterRpcServer extends AbstractService implements ClientProtocol {
   public Quota getQuotaModule() {
     return this.quotaCall;
   }
+
+  /**
+   * Get RPC metrics info.
+   * @return The instance of FederationRPCMetrics.
+   */
+  public FederationRPCMetrics getRPCMetrics() {
+    return this.rpcMonitor.getRPCMetrics();
+  }
 }

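With getRPCMetrics() exposed on RouterRpcServer, any caller holding a Router can read the new retry counter directly, exactly as the test below does via routerContext.getRouter().getRpcServer(). A short sketch; the helper class itself is hypothetical and not part of the patch:

import org.apache.hadoop.hdfs.server.federation.metrics.FederationRPCMetrics;
import org.apache.hadoop.hdfs.server.federation.router.Router;

/** Hypothetical helper; not part of the patch. */
public final class RetryMetricsProbe {

  private RetryMetricsProbe() {
  }

  /** Returns how many proxy operations the router has retried so far. */
  public static long proxyRetries(Router router) {
    FederationRPCMetrics metrics = router.getRpcServer().getRPCMetrics();
    return metrics.getProxyOpRetries();
  }
}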
http://git-wip-us.apache.org/repos/asf/hadoop/blob/8896d20b/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml b/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml
index b61c418..d037b2a 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml
@@ -5234,4 +5234,21 @@
       is assumed.
     </description>
   </property>
+
+  <property>
+    <name>dfs.federation.router.client.thread-size</name>
+    <value>32</value>
+    <description>
+      Maximum number of threads the Router RPC client uses to execute
+      concurrent requests.
+    </description>
+  </property>
+
+  <property>
+    <name>dfs.federation.router.client.retry.max.attempts</name>
+    <value>3</value>
+    <description>
+      Maximum retry attempts for the Router RPC client when proxying
+      operations to the namenodes.
+    </description>
+  </property>
 </configuration>

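Both settings can also be overridden programmatically through the DFSConfigKeys constants added above. A brief sketch with illustrative values; the defaults stay at 32 threads and 3 attempts:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.DFSConfigKeys;

public class RouterClientTuning {
  public static void main(String[] args) {
    Configuration conf = new Configuration();
    // Widen the client thread pool and allow one extra retry attempt.
    conf.setInt(DFSConfigKeys.DFS_ROUTER_CLIENT_THREADS_SIZE, 64);
    conf.setInt(DFSConfigKeys.DFS_ROUTER_CLIENT_MAX_ATTEMPTS, 4);
    System.out.println("threads = " + conf.getInt(
        DFSConfigKeys.DFS_ROUTER_CLIENT_THREADS_SIZE,
        DFSConfigKeys.DFS_ROUTER_CLIENT_THREADS_SIZE_DEFAULT));
  }
}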
http://git-wip-us.apache.org/repos/asf/hadoop/blob/8896d20b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterRPCClientRetries.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterRPCClientRetries.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterRPCClientRetries.java
new file mode 100644
index 0000000..dddcb5a
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterRPCClientRetries.java
@@ -0,0 +1,151 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs.server.federation.router;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertTrue;
+import static org.junit.Assert.fail;
+
+import java.io.IOException;
+import java.util.List;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.permission.FsPermission;
+import org.apache.hadoop.hdfs.DFSClient;
+import org.apache.hadoop.hdfs.MiniDFSCluster;
+import org.apache.hadoop.hdfs.protocol.ClientProtocol;
+import org.apache.hadoop.hdfs.server.federation.RouterConfigBuilder;
+import org.apache.hadoop.hdfs.server.federation.RouterDFSCluster.NamenodeContext;
+import org.apache.hadoop.hdfs.server.federation.RouterDFSCluster.RouterContext;
+import org.apache.hadoop.hdfs.server.federation.StateStoreDFSCluster;
+import org.apache.hadoop.hdfs.server.federation.metrics.FederationRPCMetrics;
+import org.apache.hadoop.hdfs.server.federation.resolver.FederationNamenodeContext;
+import org.apache.hadoop.hdfs.server.federation.resolver.MembershipNamenodeResolver;
+import org.apache.hadoop.hdfs.server.federation.resolver.NamenodeStatusReport;
+import org.apache.hadoop.ipc.RemoteException;
+import org.apache.hadoop.test.GenericTestUtils;
+import org.junit.After;
+import org.junit.Before;
+import org.junit.Test;
+
+/**
+ * Test retry behavior of the Router RPC Client.
+ */
+public class TestRouterRPCClientRetries {
+
+  private static StateStoreDFSCluster cluster;
+  private static NamenodeContext nnContext1;
+  private static RouterContext routerContext;
+  private static MembershipNamenodeResolver resolver;
+  private static ClientProtocol routerProtocol;
+
+  @Before
+  public void setUp() throws Exception {
+    // Build and start a federated cluster
+    cluster = new StateStoreDFSCluster(false, 2);
+    Configuration routerConf = new RouterConfigBuilder()
+        .stateStore()
+        .admin()
+        .rpc()
+        .build();
+
+    cluster.addRouterOverrides(routerConf);
+    cluster.startCluster();
+    cluster.startRouters();
+    cluster.waitClusterUp();
+
+    nnContext1 = cluster.getNamenode(cluster.getNameservices().get(0), null);
+    routerContext = cluster.getRandomRouter();
+    resolver = (MembershipNamenodeResolver) routerContext.getRouter()
+        .getNamenodeResolver();
+    routerProtocol = routerContext.getClient().getNamenode();
+  }
+
+  @After
+  public void tearDown() {
+    if (cluster != null) {
+      cluster.stopRouter(routerContext);
+      cluster.shutdown();
+      cluster = null;
+    }
+  }
+
+  @Test
+  public void testRetryWhenAllNameServiceDown() throws Exception {
+    // shutdown the dfs cluster
+    MiniDFSCluster dfsCluster = cluster.getCluster();
+    dfsCluster.shutdown();
+
+    // register an invalid namenode report
+    registerInvalidNameReport();
+
+    // Create a directory via the router
+    String dirPath = "/testRetryWhenClusterisDown";
+    FsPermission permission = new FsPermission("705");
+    try {
+      routerProtocol.mkdirs(dirPath, permission, false);
+      fail("Should have thrown RemoteException error.");
+    } catch (RemoteException e) {
+      String ns0 = cluster.getNameservices().get(0);
+      GenericTestUtils.assertExceptionContains(
+          "No namenode available under nameservice " + ns0, e);
+    }
+
+    // Verify the retry count; the client should retry only once.
+    FederationRPCMetrics rpcMetrics = routerContext.getRouter()
+        .getRpcServer().getRPCMetrics();
+    assertEquals(1, rpcMetrics.getProxyOpRetries());
+  }
+
+  @Test
+  public void testRetryWhenOneNameServiceDown() throws Exception {
+    // shutdown the dfs cluster
+    MiniDFSCluster dfsCluster = cluster.getCluster();
+    dfsCluster.shutdownNameNode(0);
+
+    // register an invalid namenode report
+    registerInvalidNameReport();
+
+    DFSClient client = nnContext1.getClient();
+    // Renew the lease for the DFS client; this should succeed.
+    routerProtocol.renewLease(client.getClientName());
+
+    // Verify the retry count; the client retries once for ns0.
+    FederationRPCMetrics rpcMetrics = routerContext.getRouter()
+        .getRpcServer().getRPCMetrics();
+    assertEquals(1, rpcMetrics.getProxyOpRetries());
+  }
+
+  /**
+   * Register an invalid namenode report.
+   * @throws IOException If the namenode report cannot be registered.
+   */
+  private void registerInvalidNameReport() throws IOException {
+    String ns0 = cluster.getNameservices().get(0);
+    List<? extends FederationNamenodeContext> origin = resolver
+        .getNamenodesForNameserviceId(ns0);
+    FederationNamenodeContext nnInfo = origin.get(0);
+    NamenodeStatusReport report = new NamenodeStatusReport(ns0,
+        nnInfo.getNamenodeId(), nnInfo.getRpcAddress(),
+        nnInfo.getServiceAddress(), nnInfo.getLifelineAddress(),
+        nnInfo.getWebAddress());
+    report.setRegistrationValid(false);
+    assertTrue(resolver.registerNamenode(report));
+    resolver.loadCache(true);
+  }
+}




[21/50] [abbrv] hadoop git commit: HADOOP-15223. Replace Collections.EMPTY* with empty* when available

Posted by ha...@apache.org.
HADOOP-15223. Replace Collections.EMPTY* with empty* when available

Signed-off-by: Akira Ajisaka <aa...@apache.org>


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/4d4dde51
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/4d4dde51
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/4d4dde51

Branch: refs/heads/HDFS-12996
Commit: 4d4dde5112e9ee6b37cbdea17104c5a4c6870bd5
Parents: 87bdde6
Author: fang zhenyi <fa...@zte.com.cn>
Authored: Sun Feb 18 22:19:23 2018 +0900
Committer: Akira Ajisaka <aa...@apache.org>
Committed: Sun Feb 18 22:19:39 2018 +0900

----------------------------------------------------------------------
 .../org/apache/hadoop/crypto/key/KeyProvider.java     |  6 ++----
 .../hdfs/protocol/TestHdfsFileStatusMethods.java      |  2 +-
 .../lib/service/security/DummyGroupMapping.java       |  3 +--
 .../main/java/org/apache/hadoop/fs/s3a/Listing.java   |  2 +-
 .../java/org/apache/hadoop/fs/s3a/S3AFileSystem.java  |  2 +-
 .../apache/hadoop/tools/mapred/TestCopyCommitter.java |  2 +-
 .../hadoop/yarn/sls/scheduler/RMNodeWrapper.java      |  6 ++----
 .../yarn/api/protocolrecords/AllocateRequest.java     |  2 +-
 .../yarn/api/protocolrecords/AllocateResponse.java    |  2 +-
 .../org/apache/hadoop/yarn/api/records/Container.java |  2 +-
 .../yarn/security/ContainerTokenIdentifier.java       |  2 +-
 .../server/api/protocolrecords/NMContainerStatus.java |  2 +-
 .../resourceplugin/ResourcePluginManager.java         |  3 ++-
 .../server/resourcemanager/DefaultAMSProcessor.java   |  4 ++--
 .../ProportionalCapacityPreemptionPolicy.java         | 14 ++++++--------
 .../rmapp/attempt/RMAppAttemptImpl.java               |  2 +-
 .../server/resourcemanager/rmnode/RMNodeImpl.java     |  2 +-
 .../resourcemanager/rmnode/RMNodeStatusEvent.java     |  3 +--
 .../constraint/processor/BatchedRequests.java         |  2 +-
 .../constraint/processor/PlacementDispatcher.java     |  4 ++--
 .../SingleConstraintAppPlacementAllocator.java        |  3 +--
 .../resourcemanager/TestResourceTrackerService.java   |  5 ++---
 ...ortionalCapacityPreemptionPolicyMockFramework.java |  2 +-
 23 files changed, 34 insertions(+), 43 deletions(-)
----------------------------------------------------------------------

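For context on the change itself: the raw Collections.EMPTY_* constants are untyped, so returning one from a generic method needs an unchecked conversion (often plus a @SuppressWarnings, which several hunks below delete), whereas the emptyList(), emptySet() and emptyMap() factories infer the element type. A small standalone illustration:

import java.util.Collections;
import java.util.List;

public class EmptyCollectionsDemo {

  @SuppressWarnings("unchecked")
  static List<String> legacy() {
    // Raw constant: compiles only with an unchecked conversion.
    return Collections.EMPTY_LIST;
  }

  static List<String> preferred() {
    // Generic factory: type-safe, no warning needed.
    return Collections.emptyList();
  }

  public static void main(String[] args) {
    // Both typically return the same immutable singleton in the JDK.
    System.out.println(legacy() == preferred());
  }
}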

http://git-wip-us.apache.org/repos/asf/hadoop/blob/4d4dde51/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/KeyProvider.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/KeyProvider.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/KeyProvider.java
index c1c371b..62cc381 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/KeyProvider.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/KeyProvider.java
@@ -199,9 +199,8 @@ public abstract class KeyProvider {
       return cipher;
     }
 
-    @SuppressWarnings("unchecked")
     public Map<String, String> getAttributes() {
-      return (attributes == null) ? Collections.EMPTY_MAP : attributes;
+      return (attributes == null) ? Collections.emptyMap() : attributes;
     }
 
     /**
@@ -370,9 +369,8 @@ public abstract class KeyProvider {
       return description;
     }
 
-    @SuppressWarnings("unchecked")
     public Map<String, String> getAttributes() {
-      return (attributes == null) ? Collections.EMPTY_MAP : attributes;
+      return (attributes == null) ? Collections.emptyMap() : attributes;
     }
 
     @Override

http://git-wip-us.apache.org/repos/asf/hadoop/blob/4d4dde51/hadoop-hdfs-project/hadoop-hdfs-client/src/test/java/org/apache/hadoop/hdfs/protocol/TestHdfsFileStatusMethods.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/test/java/org/apache/hadoop/hdfs/protocol/TestHdfsFileStatusMethods.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/test/java/org/apache/hadoop/hdfs/protocol/TestHdfsFileStatusMethods.java
index 3cc4190..683a1ba 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/test/java/org/apache/hadoop/hdfs/protocol/TestHdfsFileStatusMethods.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/test/java/org/apache/hadoop/hdfs/protocol/TestHdfsFileStatusMethods.java
@@ -54,7 +54,7 @@ public class TestHdfsFileStatusMethods {
     assertEquals(fsM.stream()
             .map(MethodSignature::toString)
             .collect(joining("\n")),
-        Collections.EMPTY_SET, fsM);
+        Collections.emptySet(), fsM);
   }
 
   /** Map non-static, declared methods for this class to signatures. */

http://git-wip-us.apache.org/repos/asf/hadoop/blob/4d4dde51/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/lib/service/security/DummyGroupMapping.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/lib/service/security/DummyGroupMapping.java b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/lib/service/security/DummyGroupMapping.java
index 1676909..9ef786d 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/lib/service/security/DummyGroupMapping.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/lib/service/security/DummyGroupMapping.java
@@ -28,7 +28,6 @@ import org.apache.hadoop.test.HadoopUsersConfTestHelper;
 public class DummyGroupMapping implements GroupMappingServiceProvider {
 
   @Override
-  @SuppressWarnings("unchecked")
   public List<String> getGroups(String user) throws IOException {
     if (user.equals("root")) {
       return Arrays.asList("admin");
@@ -37,7 +36,7 @@ public class DummyGroupMapping implements GroupMappingServiceProvider {
       return Arrays.asList("nobody");
     } else {
       String[] groups = HadoopUsersConfTestHelper.getHadoopUserGroups(user);
-      return (groups != null) ? Arrays.asList(groups) : Collections.EMPTY_LIST;
+      return (groups != null) ? Arrays.asList(groups) : Collections.emptyList();
     }
   }
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/4d4dde51/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/Listing.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/Listing.java b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/Listing.java
index 11b2e47..b016ead 100644
--- a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/Listing.java
+++ b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/Listing.java
@@ -723,7 +723,7 @@ public class Listing {
       if (tombstones != null) {
         this.tombstones = tombstones;
       } else {
-        this.tombstones = Collections.EMPTY_SET;
+        this.tombstones = Collections.emptySet();
       }
     }
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/4d4dde51/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AFileSystem.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AFileSystem.java b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AFileSystem.java
index 53875ba..99901ba 100644
--- a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AFileSystem.java
+++ b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AFileSystem.java
@@ -2099,7 +2099,7 @@ public class S3AFileSystem extends FileSystem implements StreamCapabilities {
 
     // Check MetadataStore, if any.
     PathMetadata pm = metadataStore.get(path, needEmptyDirectoryFlag);
-    Set<Path> tombstones = Collections.EMPTY_SET;
+    Set<Path> tombstones = Collections.emptySet();
     if (pm != null) {
       if (pm.isDeleted()) {
         throw new FileNotFoundException("Path " + f + " is recorded as " +

http://git-wip-us.apache.org/repos/asf/hadoop/blob/4d4dde51/hadoop-tools/hadoop-distcp/src/test/java/org/apache/hadoop/tools/mapred/TestCopyCommitter.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-distcp/src/test/java/org/apache/hadoop/tools/mapred/TestCopyCommitter.java b/hadoop-tools/hadoop-distcp/src/test/java/org/apache/hadoop/tools/mapred/TestCopyCommitter.java
index 6ee37cc..bf151cd 100644
--- a/hadoop-tools/hadoop-distcp/src/test/java/org/apache/hadoop/tools/mapred/TestCopyCommitter.java
+++ b/hadoop-tools/hadoop-distcp/src/test/java/org/apache/hadoop/tools/mapred/TestCopyCommitter.java
@@ -415,7 +415,7 @@ public class TestCopyCommitter {
     @Override
     public List getSplits(JobContext context)
         throws IOException, InterruptedException {
-      return Collections.EMPTY_LIST;
+      return Collections.emptyList();
     }
 
     @Override

http://git-wip-us.apache.org/repos/asf/hadoop/blob/4d4dde51/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/scheduler/RMNodeWrapper.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/scheduler/RMNodeWrapper.java b/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/scheduler/RMNodeWrapper.java
index 92f9b0f..78645e9 100644
--- a/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/scheduler/RMNodeWrapper.java
+++ b/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/scheduler/RMNodeWrapper.java
@@ -144,9 +144,8 @@ public class RMNodeWrapper implements RMNode {
   }
 
   @Override
-  @SuppressWarnings("unchecked")
   public List<UpdatedContainerInfo> pullContainerUpdates() {
-    List<UpdatedContainerInfo> list = Collections.EMPTY_LIST;
+    List<UpdatedContainerInfo> list = Collections.emptyList();
     if (! pulled) {
       list = updates;
       pulled = true;
@@ -168,11 +167,10 @@ public class RMNodeWrapper implements RMNode {
     return RMNodeLabelsManager.EMPTY_STRING_SET;
   }
 
-  @SuppressWarnings("unchecked")
   @Override
   public List<Container> pullNewlyIncreasedContainers() {
     // TODO Auto-generated method stub
-    return Collections.EMPTY_LIST;
+    return Collections.emptyList();
   }
 
   public OpportunisticContainersStatus getOpportunisticContainersStatus() {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/4d4dde51/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/AllocateRequest.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/AllocateRequest.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/AllocateRequest.java
index 876957e..eee50e3 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/AllocateRequest.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/AllocateRequest.java
@@ -223,7 +223,7 @@ public abstract class AllocateRequest {
   @Public
   @Unstable
   public List<SchedulingRequest> getSchedulingRequests() {
-    return Collections.EMPTY_LIST;
+    return Collections.emptyList();
   }
 
   /**

http://git-wip-us.apache.org/repos/asf/hadoop/blob/4d4dde51/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/AllocateResponse.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/AllocateResponse.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/AllocateResponse.java
index 52c30e2..3b9c30f 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/AllocateResponse.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/AllocateResponse.java
@@ -420,7 +420,7 @@ public abstract class AllocateResponse {
   @Public
   @Unstable
   public List<RejectedSchedulingRequest> getRejectedSchedulingRequests() {
-    return Collections.EMPTY_LIST;
+    return Collections.emptyList();
   }
 
   /**

http://git-wip-us.apache.org/repos/asf/hadoop/blob/4d4dde51/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/Container.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/Container.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/Container.java
index b9ca3f9..2dc6e1f 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/Container.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/Container.java
@@ -263,7 +263,7 @@ public abstract class Container implements Comparable<Container> {
   @Private
   @Unstable
   public Set<String> getAllocationTags() {
-    return Collections.EMPTY_SET;
+    return Collections.emptySet();
   }
 
   @Private

http://git-wip-us.apache.org/repos/asf/hadoop/blob/4d4dde51/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/security/ContainerTokenIdentifier.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/security/ContainerTokenIdentifier.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/security/ContainerTokenIdentifier.java
index 70935cb..c5a649d 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/security/ContainerTokenIdentifier.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/security/ContainerTokenIdentifier.java
@@ -368,7 +368,7 @@ public class ContainerTokenIdentifier extends TokenIdentifier {
     if (proto.getAllocationTagsList() != null) {
       return new HashSet<>(proto.getAllocationTagsList());
     }
-    return Collections.EMPTY_SET;
+    return Collections.emptySet();
   }
 
   // TODO: Needed?

http://git-wip-us.apache.org/repos/asf/hadoop/blob/4d4dde51/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/api/protocolrecords/NMContainerStatus.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/api/protocolrecords/NMContainerStatus.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/api/protocolrecords/NMContainerStatus.java
index 77b3df6..065918d 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/api/protocolrecords/NMContainerStatus.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/api/protocolrecords/NMContainerStatus.java
@@ -169,7 +169,7 @@ public abstract class NMContainerStatus {
    * Get and set the Allocation tags associated with the container.
    */
   public Set<String> getAllocationTags() {
-    return Collections.EMPTY_SET;
+    return Collections.emptySet();
   }
 
   public void setAllocationTags(Set<String> allocationTags) {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/4d4dde51/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/resourceplugin/ResourcePluginManager.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/resourceplugin/ResourcePluginManager.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/resourceplugin/ResourcePluginManager.java
index 12d679b..f28aad2 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/resourceplugin/ResourcePluginManager.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/resourceplugin/ResourcePluginManager.java
@@ -46,7 +46,8 @@ public class ResourcePluginManager {
   private static final Set<String> SUPPORTED_RESOURCE_PLUGINS = ImmutableSet.of(
       GPU_URI, FPGA_URI);
 
-  private Map<String, ResourcePlugin> configuredPlugins = Collections.EMPTY_MAP;
+  private Map<String, ResourcePlugin> configuredPlugins =
+          Collections.emptyMap();
 
   public synchronized void initialize(Context context)
       throws YarnException {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/4d4dde51/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/DefaultAMSProcessor.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/DefaultAMSProcessor.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/DefaultAMSProcessor.java
index 18ab473..71558a7 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/DefaultAMSProcessor.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/DefaultAMSProcessor.java
@@ -207,10 +207,10 @@ final class DefaultAMSProcessor implements ApplicationMasterServiceProcessor {
         request.getResourceBlacklistRequest();
     List<String> blacklistAdditions =
         (blacklistRequest != null) ?
-            blacklistRequest.getBlacklistAdditions() : Collections.EMPTY_LIST;
+            blacklistRequest.getBlacklistAdditions() : Collections.emptyList();
     List<String> blacklistRemovals =
         (blacklistRequest != null) ?
-            blacklistRequest.getBlacklistRemovals() : Collections.EMPTY_LIST;
+            blacklistRequest.getBlacklistRemovals() : Collections.emptyList();
     RMApp app =
         getRmContext().getRMApps().get(appAttemptId.getApplicationId());
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/4d4dde51/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/ProportionalCapacityPreemptionPolicy.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/ProportionalCapacityPreemptionPolicy.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/ProportionalCapacityPreemptionPolicy.java
index 304d204..3f9fd17 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/ProportionalCapacityPreemptionPolicy.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/ProportionalCapacityPreemptionPolicy.java
@@ -136,23 +136,21 @@ public class ProportionalCapacityPreemptionPolicy
   private Map<String, PreemptableQueue> preemptableQueues;
   private Set<ContainerId> killableContainers;
 
-  @SuppressWarnings("unchecked")
   public ProportionalCapacityPreemptionPolicy() {
     clock = SystemClock.getInstance();
-    allPartitions = Collections.EMPTY_SET;
-    leafQueueNames = Collections.EMPTY_SET;
-    preemptableQueues = Collections.EMPTY_MAP;
+    allPartitions = Collections.emptySet();
+    leafQueueNames = Collections.emptySet();
+    preemptableQueues = Collections.emptyMap();
   }
 
-  @SuppressWarnings("unchecked")
   @VisibleForTesting
   public ProportionalCapacityPreemptionPolicy(RMContext context,
       CapacityScheduler scheduler, Clock clock) {
     init(context.getYarnConfiguration(), context, scheduler);
     this.clock = clock;
-    allPartitions = Collections.EMPTY_SET;
-    leafQueueNames = Collections.EMPTY_SET;
-    preemptableQueues = Collections.EMPTY_MAP;
+    allPartitions = Collections.emptySet();
+    leafQueueNames = Collections.emptySet();
+    preemptableQueues = Collections.emptyMap();
   }
 
   public void init(Configuration config, RMContext context,

http://git-wip-us.apache.org/repos/asf/hadoop/blob/4d4dde51/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/attempt/RMAppAttemptImpl.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/attempt/RMAppAttemptImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/attempt/RMAppAttemptImpl.java
index 8c2f4e4..c23b135 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/attempt/RMAppAttemptImpl.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/attempt/RMAppAttemptImpl.java
@@ -2237,7 +2237,7 @@ public class RMAppAttemptImpl implements RMAppAttempt, Recoverable {
         return attempt.getBlacklistedNodes();
       }
     }
-    return Collections.EMPTY_SET;
+    return Collections.emptySet();
   }
 
   protected void onInvalidTranstion(RMAppAttemptEventType rmAppAttemptEventType,

http://git-wip-us.apache.org/repos/asf/hadoop/blob/4d4dde51/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmnode/RMNodeImpl.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmnode/RMNodeImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmnode/RMNodeImpl.java
index 4fc2d8a..3cbde01 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmnode/RMNodeImpl.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmnode/RMNodeImpl.java
@@ -1477,7 +1477,7 @@ public class RMNodeImpl implements RMNode, EventHandler<RMNodeEvent> {
       writeLock.lock();
 
       if (nmReportedIncreasedContainers.isEmpty()) {
-        return Collections.EMPTY_LIST;
+        return Collections.emptyList();
       } else {
         List<Container> container =
             new ArrayList<Container>(nmReportedIncreasedContainers.values());

http://git-wip-us.apache.org/repos/asf/hadoop/blob/4d4dde51/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmnode/RMNodeStatusEvent.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmnode/RMNodeStatusEvent.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmnode/RMNodeStatusEvent.java
index c79f270..5f5fe24 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmnode/RMNodeStatusEvent.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmnode/RMNodeStatusEvent.java
@@ -80,10 +80,9 @@ public class RMNodeStatusEvent extends RMNodeEvent {
     this.logAggregationReportsForApps = logAggregationReportsForApps;
   }
   
-  @SuppressWarnings("unchecked")
   public List<Container> getNMReportedIncreasedContainers() {
     return this.nodeStatus.getIncreasedContainers() == null ?
-        Collections.EMPTY_LIST : this.nodeStatus.getIncreasedContainers();
+        Collections.emptyList() : this.nodeStatus.getIncreasedContainers();
   }
 
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/4d4dde51/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/constraint/processor/BatchedRequests.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/constraint/processor/BatchedRequests.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/constraint/processor/BatchedRequests.java
index 6badfee..9f8bf3d 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/constraint/processor/BatchedRequests.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/constraint/processor/BatchedRequests.java
@@ -131,7 +131,7 @@ public class BatchedRequests
    * @return Set of blacklisted Nodes.
    */
   public Set<NodeId> getBlacklist(String tag) {
-    return blacklist.getOrDefault(tag, Collections.EMPTY_SET);
+    return blacklist.getOrDefault(tag, Collections.emptySet());
   }
 
   /**

http://git-wip-us.apache.org/repos/asf/hadoop/blob/4d4dde51/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/constraint/processor/PlacementDispatcher.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/constraint/processor/PlacementDispatcher.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/constraint/processor/PlacementDispatcher.java
index 849eb21..2259d1b 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/constraint/processor/PlacementDispatcher.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/constraint/processor/PlacementDispatcher.java
@@ -87,7 +87,7 @@ class PlacementDispatcher implements
       }
       return retList;
     }
-    return Collections.EMPTY_LIST;
+    return Collections.emptyList();
   }
 
   public List<SchedulingRequestWithPlacementAttempt> pullRejectedRequests(
@@ -104,7 +104,7 @@ class PlacementDispatcher implements
       }
       return retList;
     }
-    return Collections.EMPTY_LIST;
+    return Collections.emptyList();
   }
 
   void clearApplicationState(ApplicationId applicationId) {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/4d4dde51/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/placement/SingleConstraintAppPlacementAllocator.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/placement/SingleConstraintAppPlacementAllocator.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/placement/SingleConstraintAppPlacementAllocator.java
index a04816b..ed07345 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/placement/SingleConstraintAppPlacementAllocator.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/placement/SingleConstraintAppPlacementAllocator.java
@@ -373,9 +373,8 @@ public class SingleConstraintAppPlacementAllocator<N extends SchedulerNode>
   }
 
   @Override
-  @SuppressWarnings("unchecked")
   public Map<String, ResourceRequest> getResourceRequests() {
-    return Collections.EMPTY_MAP;
+    return Collections.emptyMap();
   }
 
   @Override

http://git-wip-us.apache.org/repos/asf/hadoop/blob/4d4dde51/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestResourceTrackerService.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestResourceTrackerService.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestResourceTrackerService.java
index 96e4451..de8f8f6 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestResourceTrackerService.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestResourceTrackerService.java
@@ -723,13 +723,12 @@ public class TestResourceTrackerService extends NodeLabelTestBase {
     }
   }
 
-  @SuppressWarnings("unchecked")
   private NodeStatus getNodeStatusObject(NodeId nodeId) {
     NodeStatus status = Records.newRecord(NodeStatus.class);
     status.setNodeId(nodeId);
     status.setResponseId(0);
-    status.setContainersStatuses(Collections.EMPTY_LIST);
-    status.setKeepAliveApplications(Collections.EMPTY_LIST);
+    status.setContainersStatuses(Collections.emptyList());
+    status.setKeepAliveApplications(Collections.emptyList());
     return status;
   }
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/4d4dde51/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/ProportionalCapacityPreemptionPolicyMockFramework.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/ProportionalCapacityPreemptionPolicyMockFramework.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/ProportionalCapacityPreemptionPolicyMockFramework.java
index ca43a95..a8e2697 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/ProportionalCapacityPreemptionPolicyMockFramework.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/ProportionalCapacityPreemptionPolicyMockFramework.java
@@ -781,7 +781,7 @@ public class ProportionalCapacityPreemptionPolicyMockFramework {
       }
     }
 
-    return Collections.EMPTY_MAP;
+    return Collections.emptyMap();
   }
 
   /**
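
For readers unfamiliar with the distinction: the raw Collections.EMPTY_LIST/EMPTY_SET/EMPTY_MAP constants have raw types, so assigning them to parameterized variables compiles only with an unchecked-conversion warning, which is why the @SuppressWarnings annotations above became removable. A minimal standalone sketch of the difference (hypothetical demo class, not part of this patch):

    import java.util.Collections;
    import java.util.List;

    public class EmptyListDemo {
      public static void main(String[] args) {
        // Raw constant: the assignment needs an unchecked-conversion
        // suppression because EMPTY_LIST has the raw type List.
        @SuppressWarnings("unchecked")
        List<String> raw = Collections.EMPTY_LIST;

        // Typed factory: the type parameter is inferred, no warning is
        // produced, and the same immutable singleton list is returned.
        List<String> typed = Collections.emptyList();

        System.out.println(raw.isEmpty() && typed.isEmpty()); // prints true
      }
    }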




[38/50] [abbrv] hadoop git commit: HADOOP-15247. Move commons-net up to 3.6. Contributed by Steve Loughran.

Posted by ha...@apache.org.
HADOOP-15247. Move commons-net up to 3.6.
Contributed by Steve Loughran.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/004b7223
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/004b7223
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/004b7223

Branch: refs/heads/HDFS-12996
Commit: 004b722372de67635a24e71b264b3b604df4b693
Parents: bdd2a18
Author: Steve Loughran <st...@apache.org>
Authored: Wed Feb 21 10:40:42 2018 +0000
Committer: Steve Loughran <st...@apache.org>
Committed: Wed Feb 21 10:40:42 2018 +0000

----------------------------------------------------------------------
 hadoop-project/pom.xml | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/004b7223/hadoop-project/pom.xml
----------------------------------------------------------------------
diff --git a/hadoop-project/pom.xml b/hadoop-project/pom.xml
index ce51c99..f4ac239 100644
--- a/hadoop-project/pom.xml
+++ b/hadoop-project/pom.xml
@@ -612,7 +612,7 @@
       <dependency>
         <groupId>commons-net</groupId>
         <artifactId>commons-net</artifactId>
-        <version>3.1</version>
+        <version>3.6</version>
       </dependency>
       <dependency>
         <groupId>javax.servlet</groupId>
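
As a quick sanity check after a bump like this, the resolved commons-net version in any module can be inspected with the standard Maven dependency-tree goal (the module path below is only an example):

    mvn -pl hadoop-common-project/hadoop-common dependency:tree -Dincludes=commons-net:commons-net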




[22/50] [abbrv] hadoop git commit: HDFS-12998. SnapshotDiff - Provide an iterator-based listing API for calculating snapshotDiff. Contributed by Shashikant Banerjee

Posted by ha...@apache.org.
HDFS-12998. SnapshotDiff - Provide an iterator-based listing API for calculating snapshotDiff. Contributed by Shashikant Banerjee


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/83e2bb98
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/83e2bb98
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/83e2bb98

Branch: refs/heads/HDFS-12996
Commit: 83e2bb98eea45ddcb598080f68a2f69de1f04485
Parents: 4d4dde5
Author: Tsz-Wo Nicholas Sze <sz...@hortonworks.com>
Authored: Mon Feb 19 11:42:10 2018 +0800
Committer: Tsz-Wo Nicholas Sze <sz...@hortonworks.com>
Committed: Mon Feb 19 11:42:10 2018 +0800

----------------------------------------------------------------------
 .../hadoop/hdfs/DistributedFileSystem.java      |  87 +++++++++++++
 .../snapshot/TestSnapshotDiffReport.java        | 130 +++++++++++++++++++
 2 files changed, 217 insertions(+)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/83e2bb98/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java
index 3883f2f..35b6417 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java
@@ -1994,6 +1994,93 @@ public class DistributedFileSystem extends FileSystem
     }.resolve(this, absF);
   }
 
+  /**
+   * Returns a remote iterator so that follow-up calls are made on demand
+   * while the SnapshotDiffReportListing entries are consumed.
+   * This keeps memory consumption bounded even when the snapshot diff
+   * report is huge.
+   *
+   * @param snapshotDir
+   *          full path of the directory where snapshots are taken
+   * @param fromSnapshot
+   *          snapshot name of the from point. Null indicates the current
+   *          tree.
+   * @param toSnapshot
+   *          snapshot name of the to point. Null indicates the current
+   *          tree.
+   * @return Remote iterator
+   */
+  public RemoteIterator
+      <SnapshotDiffReportListing> snapshotDiffReportListingRemoteIterator(
+      final Path snapshotDir, final String fromSnapshot,
+      final String toSnapshot) throws IOException {
+    Path absF = fixRelativePart(snapshotDir);
+    return new FileSystemLinkResolver
+        <RemoteIterator<SnapshotDiffReportListing>>() {
+      @Override
+      public RemoteIterator<SnapshotDiffReportListing> doCall(final Path p)
+          throws IOException {
+        return new SnapshotDiffReportListingIterator(
+            getPathName(p), fromSnapshot, toSnapshot);
+      }
+
+      @Override
+      public RemoteIterator<SnapshotDiffReportListing> next(final FileSystem fs,
+          final Path p) throws IOException {
+        return ((DistributedFileSystem) fs)
+            .snapshotDiffReportListingRemoteIterator(p, fromSnapshot,
+                toSnapshot);
+      }
+    }.resolve(this, absF);
+
+  }
+
+  /**
+   * This class defines an iterator that returns
+   * the SnapshotDiffReportListing for a snapshottable directory
+   * between two given snapshots.
+   */
+  private final class SnapshotDiffReportListingIterator implements
+      RemoteIterator<SnapshotDiffReportListing> {
+    private final String snapshotDir;
+    private final String fromSnapshot;
+    private final String toSnapshot;
+
+    private byte[] startPath;
+    private int index;
+    private boolean hasNext = true;
+
+    private SnapshotDiffReportListingIterator(String snapshotDir,
+        String fromSnapshot, String toSnapshot) {
+      this.snapshotDir = snapshotDir;
+      this.fromSnapshot = fromSnapshot;
+      this.toSnapshot = toSnapshot;
+      this.startPath = DFSUtilClient.EMPTY_BYTES;
+      this.index = -1;
+    }
+
+    @Override
+    public boolean hasNext() {
+      return hasNext;
+    }
+
+    @Override
+    public SnapshotDiffReportListing next() throws IOException {
+      if (!hasNext) {
+        throw new java.util.NoSuchElementException(
+            "No more entry in SnapshotDiffReport for " + snapshotDir);
+      }
+      final SnapshotDiffReportListing part =
+          dfs.getSnapshotDiffReportListing(snapshotDir, fromSnapshot,
+              toSnapshot, startPath, index);
+      startPath = part.getLastPath();
+      index = part.getLastIndex();
+      hasNext =
+          !(Arrays.equals(startPath, DFSUtilClient.EMPTY_BYTES) && index == -1);
+      return part;
+    }
+  }
+
   private SnapshotDiffReport getSnapshotDiffReportInternal(
       final String snapshotDir, final String fromSnapshot,
       final String toSnapshot) throws IOException {
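
To make the intended call pattern concrete, here is a sketch of how a client could drive the new iterator; the path, snapshot names, and helper method are illustrative only, not part of the patch:

    import java.io.IOException;

    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.fs.RemoteIterator;
    import org.apache.hadoop.hdfs.DistributedFileSystem;
    import org.apache.hadoop.hdfs.protocol.SnapshotDiffReportListing;

    class SnapshotDiffClientSketch {
      // dfs is assumed to be an already-initialized DistributedFileSystem.
      static void printDiffBatches(DistributedFileSystem dfs)
          throws IOException {
        RemoteIterator<SnapshotDiffReportListing> it =
            dfs.snapshotDiffReportListingRemoteIterator(
                new Path("/snapdir"), "s0", "s1");
        while (it.hasNext()) {
          // Each next() fetches only the next chunk of diff entries from
          // the NameNode, so client memory stays bounded for huge diffs.
          SnapshotDiffReportListing batch = it.next();
          System.out.println("modified=" + batch.getModifyList().size()
              + " created=" + batch.getCreateList().size()
              + " deleted=" + batch.getDeleteList().size());
        }
      }
    }

The collected batches can then be merged into a single report with SnapshotDiffReportGenerator, as the new test below demonstrates.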

http://git-wip-us.apache.org/repos/asf/hadoop/blob/83e2bb98/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestSnapshotDiffReport.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestSnapshotDiffReport.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestSnapshotDiffReport.java
index a4fb8ab..3bfcfbf 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestSnapshotDiffReport.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestSnapshotDiffReport.java
@@ -28,11 +28,15 @@ import java.util.Date;
 import java.util.EnumSet;
 import java.util.HashMap;
 import java.util.Random;
+import java.util.List;
+import java.util.ArrayList;
 
+import org.apache.commons.collections.list.TreeList;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FSDataOutputStream;
 import org.apache.hadoop.fs.Options.Rename;
 import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.RemoteIterator;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.DFSTestUtil;
 import org.apache.hadoop.hdfs.DFSUtil;
@@ -40,14 +44,17 @@ import org.apache.hadoop.hdfs.DistributedFileSystem;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.hdfs.client.HdfsDataOutputStream;
 import org.apache.hadoop.hdfs.client.HdfsDataOutputStream.SyncFlag;
+import org.apache.hadoop.hdfs.client.impl.SnapshotDiffReportGenerator;
 import org.apache.hadoop.hdfs.protocol.SnapshotDiffReport;
 import org.apache.hadoop.hdfs.protocol.SnapshotDiffReport.DiffReportEntry;
 import org.apache.hadoop.hdfs.protocol.SnapshotDiffReport.DiffType;
+import org.apache.hadoop.hdfs.protocol.SnapshotDiffReportListing;
 import org.apache.hadoop.hdfs.protocol.SnapshotException;
 import org.apache.hadoop.hdfs.server.namenode.INodeDirectory;
 import org.apache.hadoop.hdfs.server.namenode.NameNode;
 import org.apache.hadoop.hdfs.server.namenode.NameNodeAdapter;
 import org.apache.hadoop.test.GenericTestUtils;
+import org.apache.hadoop.util.ChunkedArrayList;
 import org.apache.hadoop.util.Time;
 import org.junit.After;
 import org.junit.Assert;
@@ -1409,4 +1416,127 @@ public class TestSnapshotDiffReport {
         new DiffReportEntry(DiffType.DELETE,
             DFSUtil.string2Bytes("dir3/file3")));
   }
+
+  private void verifyDiffReportForGivenReport(Path dirPath, String from,
+      String to, SnapshotDiffReport report, DiffReportEntry... entries)
+      throws IOException {
+    // reverse the order of from and to
+    SnapshotDiffReport inverseReport =
+        hdfs.getSnapshotDiffReport(dirPath, to, from);
+    LOG.info(report.toString());
+    LOG.info(inverseReport.toString() + "\n");
+
+    assertEquals(entries.length, report.getDiffList().size());
+    assertEquals(entries.length, inverseReport.getDiffList().size());
+
+    for (DiffReportEntry entry : entries) {
+      if (entry.getType() == DiffType.MODIFY) {
+        assertTrue(report.getDiffList().contains(entry));
+        assertTrue(inverseReport.getDiffList().contains(entry));
+      } else if (entry.getType() == DiffType.DELETE) {
+        assertTrue(report.getDiffList().contains(entry));
+        assertTrue(inverseReport.getDiffList().contains(
+            new DiffReportEntry(DiffType.CREATE, entry.getSourcePath())));
+      } else if (entry.getType() == DiffType.CREATE) {
+        assertTrue(report.getDiffList().contains(entry));
+        assertTrue(inverseReport.getDiffList().contains(
+            new DiffReportEntry(DiffType.DELETE, entry.getSourcePath())));
+      }
+    }
+  }
+
+  @Test
+  public void testSnapshotDiffReportRemoteIterator() throws Exception {
+    final Path root = new Path("/");
+    hdfs.mkdirs(root);
+    for (int i = 1; i <= 3; i++) {
+      final Path path = new Path(root, "dir" + i);
+      hdfs.mkdirs(path);
+    }
+    for (int i = 1; i <= 3; i++) {
+      final Path path = new Path(root, "dir" + i);
+      for (int j = 1; j < 4; j++) {
+        final Path file = new Path(path, "file" + j);
+        DFSTestUtil.createFile(hdfs, file, BLOCKSIZE, REPLICATION, SEED);
+      }
+    }
+    SnapshotTestHelper.createSnapshot(hdfs, root, "s0");
+    Path targetDir = new Path(root, "dir4");
+    // create directory dir4
+    hdfs.mkdirs(targetDir);
+    // move files from dir1 to dir4
+    Path path = new Path(root, "dir1");
+    for (int j = 1; j < 4; j++) {
+      final Path srcPath = new Path(path, "file" + j);
+      final Path targetPath = new Path(targetDir, "file" + j);
+      hdfs.rename(srcPath, targetPath);
+    }
+    targetDir = new Path(root, "dir3");
+    // overwrite existing files in dir3 with files from dir2
+    path = new Path(root, "dir2");
+    for (int j = 1; j < 4; j++) {
+      final Path srcPath = new Path(path, "file" + j);
+      final Path targetPath = new Path(targetDir, "file" + j);
+      hdfs.rename(srcPath, targetPath, Rename.OVERWRITE);
+    }
+    final Path pathToRename = new Path(root, "dir2");
+    // move dir2 inside dir3
+    hdfs.rename(pathToRename, targetDir);
+    SnapshotTestHelper.createSnapshot(hdfs, root, "s1");
+    RemoteIterator<SnapshotDiffReportListing> iterator =
+        hdfs.snapshotDiffReportListingRemoteIterator(root, "s0", "s1");
+    SnapshotDiffReportGenerator snapshotDiffReport;
+    List<SnapshotDiffReportListing.DiffReportListingEntry> modifiedList =
+        new TreeList();
+    List<SnapshotDiffReportListing.DiffReportListingEntry> createdList =
+        new ChunkedArrayList<>();
+    List<SnapshotDiffReportListing.DiffReportListingEntry> deletedList =
+        new ChunkedArrayList<>();
+    SnapshotDiffReportListing report = null;
+    List<SnapshotDiffReportListing> reportList = new ArrayList<>();
+    while (iterator.hasNext()) {
+      report = iterator.next();
+      reportList.add(report);
+      modifiedList.addAll(report.getModifyList());
+      createdList.addAll(report.getCreateList());
+      deletedList.addAll(report.getDeleteList());
+    }
+    try {
+      iterator.next();
+      Assert.fail("next() should have thrown NoSuchElementException");
+    } catch (Exception e) {
+      Assert.assertTrue(
+          e.getMessage().contains("No more entry in SnapshotDiffReport for /"));
+    }
+    Assert.assertNotEquals(0, reportList.size());
+    // generate the snapshotDiffReport and Verify
+    snapshotDiffReport = new SnapshotDiffReportGenerator("/", "s0", "s1",
+        report.getIsFromEarlier(), modifiedList, createdList, deletedList);
+    verifyDiffReportForGivenReport(root, "s0", "s1",
+        snapshotDiffReport.generateReport(),
+        new DiffReportEntry(DiffType.MODIFY, DFSUtil.string2Bytes("")),
+        new DiffReportEntry(DiffType.CREATE, DFSUtil.string2Bytes("dir4")),
+        new DiffReportEntry(DiffType.RENAME, DFSUtil.string2Bytes("dir2"),
+            DFSUtil.string2Bytes("dir3/dir2")),
+        new DiffReportEntry(DiffType.MODIFY, DFSUtil.string2Bytes("dir1")),
+        new DiffReportEntry(DiffType.RENAME, DFSUtil.string2Bytes("dir1/file1"),
+            DFSUtil.string2Bytes("dir4/file1")),
+        new DiffReportEntry(DiffType.RENAME, DFSUtil.string2Bytes("dir1/file2"),
+            DFSUtil.string2Bytes("dir4/file2")),
+        new DiffReportEntry(DiffType.RENAME, DFSUtil.string2Bytes("dir1/file3"),
+            DFSUtil.string2Bytes("dir4/file3")),
+        new DiffReportEntry(DiffType.MODIFY, DFSUtil.string2Bytes("dir2")),
+        new DiffReportEntry(DiffType.RENAME, DFSUtil.string2Bytes("dir2/file1"),
+            DFSUtil.string2Bytes("dir3/file1")),
+        new DiffReportEntry(DiffType.RENAME, DFSUtil.string2Bytes("dir2/file2"),
+            DFSUtil.string2Bytes("dir3/file2")),
+        new DiffReportEntry(DiffType.RENAME, DFSUtil.string2Bytes("dir2/file3"),
+            DFSUtil.string2Bytes("dir3/file3")),
+        new DiffReportEntry(DiffType.MODIFY, DFSUtil.string2Bytes("dir3")),
+        new DiffReportEntry(DiffType.DELETE,
+            DFSUtil.string2Bytes("dir3/file1")),
+        new DiffReportEntry(DiffType.DELETE,
+            DFSUtil.string2Bytes("dir3/file2")),
+        new DiffReportEntry(DiffType.DELETE,
+            DFSUtil.string2Bytes("dir3/file3")));
+  }
 }




[15/50] [abbrv] hadoop git commit: YARN-7919. Refactor timelineservice-hbase module into submodules. Contributed by Haibo Chen.

Posted by ha...@apache.org.
http://git-wip-us.apache.org/repos/asf/hadoop/blob/9af30d46/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-client/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/reader/SubApplicationEntityReader.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-client/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/reader/SubApplicationEntityReader.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-client/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/reader/SubApplicationEntityReader.java
new file mode 100644
index 0000000..faed348
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-client/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/reader/SubApplicationEntityReader.java
@@ -0,0 +1,489 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.yarn.server.timelineservice.storage.reader;
+
+import java.io.IOException;
+import java.util.EnumSet;
+import java.util.Set;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hbase.client.Connection;
+import org.apache.hadoop.hbase.client.Query;
+import org.apache.hadoop.hbase.client.Result;
+import org.apache.hadoop.hbase.client.ResultScanner;
+import org.apache.hadoop.hbase.client.Scan;
+import org.apache.hadoop.hbase.filter.BinaryComparator;
+import org.apache.hadoop.hbase.filter.CompareFilter.CompareOp;
+import org.apache.hadoop.hbase.filter.FamilyFilter;
+import org.apache.hadoop.hbase.filter.FilterList;
+import org.apache.hadoop.hbase.filter.FilterList.Operator;
+import org.apache.hadoop.hbase.filter.PageFilter;
+import org.apache.hadoop.hbase.filter.QualifierFilter;
+import org.apache.hadoop.yarn.api.records.timelineservice.TimelineEntity;
+import org.apache.hadoop.yarn.server.timelineservice.reader.TimelineDataToRetrieve;
+import org.apache.hadoop.yarn.server.timelineservice.reader.TimelineEntityFilters;
+import org.apache.hadoop.yarn.server.timelineservice.reader.TimelineReaderContext;
+import org.apache.hadoop.yarn.server.timelineservice.reader.TimelineReaderUtils;
+import org.apache.hadoop.yarn.server.timelineservice.reader.filter.TimelineFilterList;
+import org.apache.hadoop.yarn.server.timelineservice.reader.filter.TimelineFilterUtils;
+import org.apache.hadoop.yarn.server.timelineservice.storage.TimelineReader.Field;
+import org.apache.hadoop.yarn.server.timelineservice.storage.common.BaseTableRW;
+import org.apache.hadoop.yarn.server.timelineservice.storage.common.ColumnRWHelper;
+import org.apache.hadoop.yarn.server.timelineservice.storage.common.HBaseTimelineStorageUtils;
+import org.apache.hadoop.yarn.server.timelineservice.storage.common.RowKeyPrefix;
+import org.apache.hadoop.yarn.server.timelineservice.storage.common.TimelineStorageUtils;
+import org.apache.hadoop.yarn.server.timelineservice.storage.subapplication.SubApplicationColumn;
+import org.apache.hadoop.yarn.server.timelineservice.storage.subapplication.SubApplicationColumnFamily;
+import org.apache.hadoop.yarn.server.timelineservice.storage.subapplication.SubApplicationColumnPrefix;
+import org.apache.hadoop.yarn.server.timelineservice.storage.subapplication.SubApplicationRowKey;
+import org.apache.hadoop.yarn.server.timelineservice.storage.subapplication.SubApplicationRowKeyPrefix;
+import org.apache.hadoop.yarn.server.timelineservice.storage.subapplication.SubApplicationTableRW;
+import org.apache.hadoop.yarn.webapp.BadRequestException;
+
+import com.google.common.base.Preconditions;
+
+class SubApplicationEntityReader extends GenericEntityReader {
+  private static final SubApplicationTableRW SUB_APPLICATION_TABLE =
+      new SubApplicationTableRW();
+
+  SubApplicationEntityReader(TimelineReaderContext ctxt,
+      TimelineEntityFilters entityFilters, TimelineDataToRetrieve toRetrieve) {
+    super(ctxt, entityFilters, toRetrieve);
+  }
+
+  SubApplicationEntityReader(TimelineReaderContext ctxt,
+      TimelineDataToRetrieve toRetrieve) {
+    super(ctxt, toRetrieve);
+  }
+
+  /**
+   * Uses the {@link SubApplicationTableRW}.
+   */
+  protected BaseTableRW<?> getTable() {
+    return SUB_APPLICATION_TABLE;
+  }
+
+  @Override
+  protected FilterList constructFilterListBasedOnFilters() throws IOException {
+    // Filters here cannot be null for multiple entity reads as they are set in
+    // augmentParams if null.
+    FilterList listBasedOnFilters = new FilterList();
+    TimelineEntityFilters filters = getFilters();
+    // Create filter list based on created time range and add it to
+    // listBasedOnFilters.
+    long createdTimeBegin = filters.getCreatedTimeBegin();
+    long createdTimeEnd = filters.getCreatedTimeEnd();
+    if (createdTimeBegin != 0 || createdTimeEnd != Long.MAX_VALUE) {
+      listBasedOnFilters.addFilter(TimelineFilterUtils
+          .createSingleColValueFiltersByRange(SubApplicationColumn.CREATED_TIME,
+              createdTimeBegin, createdTimeEnd));
+    }
+    // Create filter list based on metric filters and add it to
+    // listBasedOnFilters.
+    TimelineFilterList metricFilters = filters.getMetricFilters();
+    if (metricFilters != null && !metricFilters.getFilterList().isEmpty()) {
+      listBasedOnFilters.addFilter(TimelineFilterUtils.createHBaseFilterList(
+          SubApplicationColumnPrefix.METRIC, metricFilters));
+    }
+    // Create filter list based on config filters and add it to
+    // listBasedOnFilters.
+    TimelineFilterList configFilters = filters.getConfigFilters();
+    if (configFilters != null && !configFilters.getFilterList().isEmpty()) {
+      listBasedOnFilters.addFilter(TimelineFilterUtils.createHBaseFilterList(
+          SubApplicationColumnPrefix.CONFIG, configFilters));
+    }
+    // Create filter list based on info filters and add it to listBasedOnFilters
+    TimelineFilterList infoFilters = filters.getInfoFilters();
+    if (infoFilters != null && !infoFilters.getFilterList().isEmpty()) {
+      listBasedOnFilters.addFilter(TimelineFilterUtils
+          .createHBaseFilterList(SubApplicationColumnPrefix.INFO, infoFilters));
+    }
+    return listBasedOnFilters;
+  }
+
+  /**
+   * Add {@link QualifierFilter} filters to the filter list for each column
+   * of the sub application table.
+   *
+   * @param list filter list to which qualifier filters have to be added.
+   */
+  protected void updateFixedColumns(FilterList list) {
+    for (SubApplicationColumn column : SubApplicationColumn.values()) {
+      list.addFilter(new QualifierFilter(CompareOp.EQUAL,
+          new BinaryComparator(column.getColumnQualifierBytes())));
+    }
+  }
+
+  /**
+   * Creates a filter list which indicates that only some of the column
+   * qualifiers in the info column family will be returned in the result.
+   *
+   * @return filter list.
+   * @throws IOException if any problem occurs while creating the filter list.
+   */
+  private FilterList createFilterListForColsOfInfoFamily() throws IOException {
+    FilterList infoFamilyColsFilter = new FilterList(Operator.MUST_PASS_ONE);
+    // Add filters for each column in entity table.
+    updateFixedColumns(infoFamilyColsFilter);
+    EnumSet<Field> fieldsToRetrieve = getDataToRetrieve().getFieldsToRetrieve();
+    // If INFO field has to be retrieved, add a filter for fetching columns
+    // with INFO column prefix.
+    if (hasField(fieldsToRetrieve, Field.INFO)) {
+      infoFamilyColsFilter.addFilter(
+          TimelineFilterUtils.createHBaseQualifierFilter(CompareOp.EQUAL,
+              SubApplicationColumnPrefix.INFO));
+    }
+    TimelineFilterList relatesTo = getFilters().getRelatesTo();
+    if (hasField(fieldsToRetrieve, Field.RELATES_TO)) {
+      // If RELATES_TO field has to be retrieved, add a filter for fetching
+      // columns with RELATES_TO column prefix.
+      infoFamilyColsFilter.addFilter(
+          TimelineFilterUtils.createHBaseQualifierFilter(CompareOp.EQUAL,
+              SubApplicationColumnPrefix.RELATES_TO));
+    } else if (relatesTo != null && !relatesTo.getFilterList().isEmpty()) {
+      // Even if fields to retrieve does not contain RELATES_TO, we still
+      // need to have a filter to fetch some of the column qualifiers if
+      // relatesTo filters are specified. relatesTo filters will then be
+      // matched after fetching rows from HBase.
+      Set<String> relatesToCols =
+          TimelineFilterUtils.fetchColumnsFromFilterList(relatesTo);
+      infoFamilyColsFilter.addFilter(createFiltersFromColumnQualifiers(
+          SubApplicationColumnPrefix.RELATES_TO, relatesToCols));
+    }
+    TimelineFilterList isRelatedTo = getFilters().getIsRelatedTo();
+    if (hasField(fieldsToRetrieve, Field.IS_RELATED_TO)) {
+      // If IS_RELATED_TO field has to be retrieved, add a filter for fetching
+      // columns with IS_RELATED_TO column prefix.
+      infoFamilyColsFilter.addFilter(
+          TimelineFilterUtils.createHBaseQualifierFilter(CompareOp.EQUAL,
+              SubApplicationColumnPrefix.IS_RELATED_TO));
+    } else if (isRelatedTo != null && !isRelatedTo.getFilterList().isEmpty()) {
+      // Even if fields to retrieve does not contain IS_RELATED_TO, we still
+      // need to have a filter to fetch some of the column qualifiers if
+      // isRelatedTo filters are specified. isRelatedTo filters will then be
+      // matched after fetching rows from HBase.
+      Set<String> isRelatedToCols =
+          TimelineFilterUtils.fetchColumnsFromFilterList(isRelatedTo);
+      infoFamilyColsFilter.addFilter(createFiltersFromColumnQualifiers(
+          SubApplicationColumnPrefix.IS_RELATED_TO, isRelatedToCols));
+    }
+    TimelineFilterList eventFilters = getFilters().getEventFilters();
+    if (hasField(fieldsToRetrieve, Field.EVENTS)) {
+      // If EVENTS field has to be retrieved, add a filter for fetching columns
+      // with EVENT column prefix.
+      infoFamilyColsFilter.addFilter(
+          TimelineFilterUtils.createHBaseQualifierFilter(CompareOp.EQUAL,
+              SubApplicationColumnPrefix.EVENT));
+    } else if (eventFilters != null
+        && !eventFilters.getFilterList().isEmpty()) {
+      // Even if fields to retrieve does not contain EVENTS, we still need to
+      // have a filter to fetch some of the column qualifiers on the basis of
+      // event filters specified. Event filters will then be matched after
+      // fetching rows from HBase.
+      Set<String> eventCols =
+          TimelineFilterUtils.fetchColumnsFromFilterList(eventFilters);
+      infoFamilyColsFilter.addFilter(createFiltersFromColumnQualifiers(
+          SubApplicationColumnPrefix.EVENT, eventCols));
+    }
+    return infoFamilyColsFilter;
+  }
+
+  /**
+   * Exclude column prefixes via filters which are not required (based on
+   * fields to retrieve) from the info column family. These filters are added
+   * to the filter list which contains a filter for the info column family.
+   *
+   * @param infoColFamilyList filter list for info column family.
+   */
+  private void excludeFieldsFromInfoColFamily(FilterList infoColFamilyList) {
+    EnumSet<Field> fieldsToRetrieve = getDataToRetrieve().getFieldsToRetrieve();
+    // Events not required.
+    if (!hasField(fieldsToRetrieve, Field.EVENTS)) {
+      infoColFamilyList.addFilter(
+          TimelineFilterUtils.createHBaseQualifierFilter(CompareOp.NOT_EQUAL,
+              SubApplicationColumnPrefix.EVENT));
+    }
+    // info not required.
+    if (!hasField(fieldsToRetrieve, Field.INFO)) {
+      infoColFamilyList.addFilter(
+          TimelineFilterUtils.createHBaseQualifierFilter(CompareOp.NOT_EQUAL,
+              SubApplicationColumnPrefix.INFO));
+    }
+    // is related to not required.
+    if (!hasField(fieldsToRetrieve, Field.IS_RELATED_TO)) {
+      infoColFamilyList.addFilter(
+          TimelineFilterUtils.createHBaseQualifierFilter(CompareOp.NOT_EQUAL,
+              SubApplicationColumnPrefix.IS_RELATED_TO));
+    }
+    // relates to not required.
+    if (!hasField(fieldsToRetrieve, Field.RELATES_TO)) {
+      infoColFamilyList.addFilter(
+          TimelineFilterUtils.createHBaseQualifierFilter(CompareOp.NOT_EQUAL,
+              SubApplicationColumnPrefix.RELATES_TO));
+    }
+  }
+
+  /**
+   * Updates filter list based on fields for confs and metrics to retrieve.
+   *
+   * @param listBasedOnFields filter list based on fields.
+   * @throws IOException if any problem occurs while updating filter list.
+   */
+  private void updateFilterForConfsAndMetricsToRetrieve(
+      FilterList listBasedOnFields) throws IOException {
+    TimelineDataToRetrieve dataToRetrieve = getDataToRetrieve();
+    // Please note that if confsToRetrieve is specified, we would have added
+    // CONFS to fields to retrieve in augmentParams() even if not specified.
+    if (dataToRetrieve.getFieldsToRetrieve().contains(Field.CONFIGS)) {
+      // Create a filter list for configs.
+      listBasedOnFields.addFilter(
+          TimelineFilterUtils.createFilterForConfsOrMetricsToRetrieve(
+              dataToRetrieve.getConfsToRetrieve(),
+              SubApplicationColumnFamily.CONFIGS,
+              SubApplicationColumnPrefix.CONFIG));
+    }
+
+    // Please note that if metricsToRetrieve is specified, we would have added
+    // METRICS to fields to retrieve in augmentParams() even if not specified.
+    if (dataToRetrieve.getFieldsToRetrieve().contains(Field.METRICS)) {
+      // Create a filter list for metrics.
+      listBasedOnFields.addFilter(
+          TimelineFilterUtils.createFilterForConfsOrMetricsToRetrieve(
+              dataToRetrieve.getMetricsToRetrieve(),
+              SubApplicationColumnFamily.METRICS,
+              SubApplicationColumnPrefix.METRIC));
+    }
+  }
+
+  @Override
+  protected FilterList constructFilterListBasedOnFields() throws IOException {
+    if (!needCreateFilterListBasedOnFields()) {
+      // Fetch all the columns. No need of a filter.
+      return null;
+    }
+    FilterList listBasedOnFields = new FilterList(Operator.MUST_PASS_ONE);
+    FilterList infoColFamilyList = new FilterList();
+    // By default fetch everything in INFO column family.
+    FamilyFilter infoColumnFamily = new FamilyFilter(CompareOp.EQUAL,
+        new BinaryComparator(SubApplicationColumnFamily.INFO.getBytes()));
+    infoColFamilyList.addFilter(infoColumnFamily);
+    if (fetchPartialColsFromInfoFamily()) {
+      // We can fetch only some of the columns from info family.
+      infoColFamilyList.addFilter(createFilterListForColsOfInfoFamily());
+    } else {
+      // Exclude column prefixes in info column family which are not required
+      // based on fields to retrieve.
+      excludeFieldsFromInfoColFamily(infoColFamilyList);
+    }
+    listBasedOnFields.addFilter(infoColFamilyList);
+    updateFilterForConfsAndMetricsToRetrieve(listBasedOnFields);
+    return listBasedOnFields;
+  }
+
+  @Override
+  protected void validateParams() {
+    Preconditions.checkNotNull(getContext(), "context shouldn't be null");
+    Preconditions.checkNotNull(getDataToRetrieve(),
+        "data to retrieve shouldn't be null");
+    Preconditions.checkNotNull(getContext().getClusterId(),
+        "clusterId shouldn't be null");
+    Preconditions.checkNotNull(getContext().getDoAsUser(),
+        "DoAsUser shouldn't be null");
+    Preconditions.checkNotNull(getContext().getEntityType(),
+        "entityType shouldn't be null");
+  }
+
+  @Override
+  protected void augmentParams(Configuration hbaseConf, Connection conn)
+      throws IOException {
+    getDataToRetrieve().addFieldsBasedOnConfsAndMetricsToRetrieve();
+    createFiltersIfNull();
+  }
+
+  private void setMetricsTimeRange(Query query) {
+    // Set time range for metric values.
+    HBaseTimelineStorageUtils.setMetricsTimeRange(query,
+        SubApplicationColumnFamily.METRICS.getBytes(),
+        getDataToRetrieve().getMetricsTimeBegin(),
+        getDataToRetrieve().getMetricsTimeEnd());
+  }
+
+  @Override
+  protected ResultScanner getResults(Configuration hbaseConf, Connection conn,
+      FilterList filterList) throws IOException {
+
+    // Scan through part of the table to find the entities belonging to one
+    // app and one type.
+    Scan scan = new Scan();
+    TimelineReaderContext context = getContext();
+    if (context.getDoAsUser() == null) {
+      throw new BadRequestException("Invalid user!");
+    }
+
+    RowKeyPrefix<SubApplicationRowKey> subApplicationRowKeyPrefix = null;
+    // Default mode: always scan from the beginning of the entity type.
+    if (getFilters() == null || getFilters().getFromId() == null) {
+      subApplicationRowKeyPrefix = new SubApplicationRowKeyPrefix(
+          context.getDoAsUser(), context.getClusterId(),
+          context.getEntityType(), null, null, null);
+      scan.setRowPrefixFilter(subApplicationRowKeyPrefix.getRowKeyPrefix());
+    } else { // Pagination mode: scan from the given entityIdPrefix!entityId.
+
+      SubApplicationRowKey entityRowKey = null;
+      try {
+        entityRowKey = SubApplicationRowKey
+            .parseRowKeyFromString(getFilters().getFromId());
+      } catch (IllegalArgumentException e) {
+        throw new BadRequestException("Invalid fromid filter provided.");
+      }
+      if (!context.getClusterId().equals(entityRowKey.getClusterId())) {
+        throw new BadRequestException(
+            "fromid doesn't belong to clusterId=" + context.getClusterId());
+      }
+
+      // set start row
+      scan.setStartRow(entityRowKey.getRowKey());
+
+      // get the bytes for stop row
+      subApplicationRowKeyPrefix = new SubApplicationRowKeyPrefix(
+          context.getDoAsUser(), context.getClusterId(),
+          context.getEntityType(), null, null, null);
+
+      // set stop row
+      scan.setStopRow(
+          HBaseTimelineStorageUtils.calculateTheClosestNextRowKeyForPrefix(
+              subApplicationRowKeyPrefix.getRowKeyPrefix()));
+
+      // Set the page filter to the limit. This filter has to be set only in
+      // pagination mode.
+      filterList.addFilter(new PageFilter(getFilters().getLimit()));
+    }
+    setMetricsTimeRange(scan);
+    scan.setMaxVersions(getDataToRetrieve().getMetricsLimit());
+    if (filterList != null && !filterList.getFilters().isEmpty()) {
+      scan.setFilter(filterList);
+    }
+    return getTable().getResultScanner(hbaseConf, conn, scan);
+  }
+
+  @Override
+  protected Result getResult(Configuration hbaseConf, Connection conn,
+      FilterList filterList) throws IOException {
+    throw new UnsupportedOperationException(
+        "we don't support a single entity query");
+  }
+
+  @Override
+  protected TimelineEntity parseEntity(Result result) throws IOException {
+    if (result == null || result.isEmpty()) {
+      return null;
+    }
+    TimelineEntity entity = new TimelineEntity();
+    SubApplicationRowKey parseRowKey =
+        SubApplicationRowKey.parseRowKey(result.getRow());
+    entity.setType(parseRowKey.getEntityType());
+    entity.setId(parseRowKey.getEntityId());
+    entity.setIdPrefix(parseRowKey.getEntityIdPrefix().longValue());
+
+    TimelineEntityFilters filters = getFilters();
+    // fetch created time
+    Long createdTime = (Long) ColumnRWHelper.readResult(result,
+        SubApplicationColumn.CREATED_TIME);
+    entity.setCreatedTime(createdTime);
+
+    EnumSet<Field> fieldsToRetrieve = getDataToRetrieve().getFieldsToRetrieve();
+    // fetch is related to entities and match isRelatedTo filter. If isRelatedTo
+    // filters do not match, entity would be dropped. We have to match filters
+    // locally as relevant HBase filters to filter out rows on the basis of
+    // isRelatedTo are not set in HBase scan.
+    boolean checkIsRelatedTo =
+        filters.getIsRelatedTo() != null
+            && filters.getIsRelatedTo().getFilterList().size() > 0;
+    if (hasField(fieldsToRetrieve, Field.IS_RELATED_TO) || checkIsRelatedTo) {
+      readRelationship(entity, result, SubApplicationColumnPrefix.IS_RELATED_TO,
+          true);
+      if (checkIsRelatedTo && !TimelineStorageUtils.matchIsRelatedTo(entity,
+          filters.getIsRelatedTo())) {
+        return null;
+      }
+      if (!hasField(fieldsToRetrieve, Field.IS_RELATED_TO)) {
+        entity.getIsRelatedToEntities().clear();
+      }
+    }
+
+    // fetch relates to entities and match relatesTo filter. If relatesTo
+    // filters do not match, entity would be dropped. We have to match filters
+    // locally as relevant HBase filters to filter out rows on the basis of
+    // relatesTo are not set in HBase scan.
+    boolean checkRelatesTo =
+        !isSingleEntityRead() && filters.getRelatesTo() != null
+            && filters.getRelatesTo().getFilterList().size() > 0;
+    if (hasField(fieldsToRetrieve, Field.RELATES_TO) || checkRelatesTo) {
+      readRelationship(entity, result, SubApplicationColumnPrefix.RELATES_TO,
+          false);
+      if (checkRelatesTo && !TimelineStorageUtils.matchRelatesTo(entity,
+          filters.getRelatesTo())) {
+        return null;
+      }
+      if (!hasField(fieldsToRetrieve, Field.RELATES_TO)) {
+        entity.getRelatesToEntities().clear();
+      }
+    }
+
+    // fetch info if fieldsToRetrieve contains INFO or ALL.
+    if (hasField(fieldsToRetrieve, Field.INFO)) {
+      readKeyValuePairs(entity, result, SubApplicationColumnPrefix.INFO, false);
+    }
+
+    // fetch configs if fieldsToRetrieve contains CONFIGS or ALL.
+    if (hasField(fieldsToRetrieve, Field.CONFIGS)) {
+      readKeyValuePairs(entity, result, SubApplicationColumnPrefix.CONFIG,
+          true);
+    }
+
+    // fetch events and match event filters if they exist. If the event
+    // filters do not match, the entity is dropped. We have to match the
+    // filters locally, as the HBase filters needed to drop rows on the basis
+    // of events are not set in the HBase scan.
+    boolean checkEvents =
+        !isSingleEntityRead() && filters.getEventFilters() != null
+            && filters.getEventFilters().getFilterList().size() > 0;
+    if (hasField(fieldsToRetrieve, Field.EVENTS) || checkEvents) {
+      readEvents(entity, result, SubApplicationColumnPrefix.EVENT);
+      if (checkEvents && !TimelineStorageUtils.matchEventFilters(entity,
+          filters.getEventFilters())) {
+        return null;
+      }
+      if (!hasField(fieldsToRetrieve, Field.EVENTS)) {
+        entity.getEvents().clear();
+      }
+    }
+
+    // fetch metrics if fieldsToRetrieve contains METRICS or ALL.
+    if (hasField(fieldsToRetrieve, Field.METRICS)) {
+      readMetrics(entity, result, SubApplicationColumnPrefix.METRIC);
+    }
+
+    entity.getInfo().put(TimelineReaderUtils.FROMID_KEY,
+        parseRowKey.getRowKeyAsString());
+    return entity;
+  }
+
+}
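
Aside on the scan setup above: the reader pages through the sub-application
table by bounding the scan between the row-key prefix and the closest row key
that no longer shares that prefix, then capping the page size with a
PageFilter. A minimal, self-contained sketch of that idiom follows; the class
name PrefixScanSketch and the helper closestNextRowKeyForPrefix are
illustrative only, and the helper assumes the conventional "increment the
rightmost non-0xFF byte and truncate" semantics of
calculateTheClosestNextRowKeyForPrefix.

    import org.apache.hadoop.hbase.client.Scan;
    import org.apache.hadoop.hbase.filter.FilterList;
    import org.apache.hadoop.hbase.filter.PageFilter;

    /** A sketch of the prefix-bounded, page-limited scan used above. */
    public final class PrefixScanSketch {

      // Assumed semantics: increment the rightmost byte that is not 0xFF
      // and drop everything after it; all-0xFF means "scan to table end".
      static byte[] closestNextRowKeyForPrefix(byte[] prefix) {
        for (int i = prefix.length - 1; i >= 0; i--) {
          if (prefix[i] != (byte) 0xFF) {
            byte[] next = new byte[i + 1];
            System.arraycopy(prefix, 0, next, 0, i + 1);
            next[i]++;
            return next;
          }
        }
        return new byte[0];
      }

      static Scan newPrefixScan(byte[] rowKeyPrefix, long limit) {
        Scan scan = new Scan();
        scan.setStartRow(rowKeyPrefix);
        scan.setStopRow(closestNextRowKeyForPrefix(rowKeyPrefix));
        FilterList filters = new FilterList();
        filters.addFilter(new PageFilter(limit)); // pagination-mode cap
        scan.setFilter(filters);
        return scan;
      }
    }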

http://git-wip-us.apache.org/repos/asf/hadoop/blob/9af30d46/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-client/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/reader/TimelineEntityReader.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-client/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/reader/TimelineEntityReader.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-client/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/reader/TimelineEntityReader.java
new file mode 100644
index 0000000..3168163
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-client/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/reader/TimelineEntityReader.java
@@ -0,0 +1,464 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.yarn.server.timelineservice.storage.reader;
+
+import java.io.IOException;
+import java.util.EnumSet;
+import java.util.HashMap;
+import java.util.HashSet;
+import java.util.LinkedHashSet;
+import java.util.Map;
+import java.util.NavigableMap;
+import java.util.Set;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hbase.client.Connection;
+import org.apache.hadoop.hbase.client.Result;
+import org.apache.hadoop.hbase.client.ResultScanner;
+import org.apache.hadoop.hbase.filter.BinaryPrefixComparator;
+import org.apache.hadoop.hbase.filter.CompareFilter.CompareOp;
+import org.apache.hadoop.hbase.filter.FilterList;
+import org.apache.hadoop.hbase.filter.FilterList.Operator;
+import org.apache.hadoop.hbase.filter.QualifierFilter;
+import org.apache.hadoop.yarn.api.records.timelineservice.TimelineEntity;
+import org.apache.hadoop.yarn.api.records.timelineservice.TimelineEvent;
+import org.apache.hadoop.yarn.api.records.timelineservice.TimelineMetric;
+import org.apache.hadoop.yarn.server.timelineservice.reader.TimelineDataToRetrieve;
+import org.apache.hadoop.yarn.server.timelineservice.reader.TimelineEntityFilters;
+import org.apache.hadoop.yarn.server.timelineservice.reader.TimelineReaderContext;
+import org.apache.hadoop.yarn.server.timelineservice.storage.TimelineReader.Field;
+import org.apache.hadoop.yarn.server.timelineservice.storage.application.ApplicationColumnPrefix;
+import org.apache.hadoop.yarn.server.timelineservice.storage.common.BaseTable;
+import org.apache.hadoop.yarn.server.timelineservice.storage.common.BaseTableRW;
+import org.apache.hadoop.yarn.server.timelineservice.storage.common.ColumnPrefix;
+import org.apache.hadoop.yarn.server.timelineservice.storage.common.ColumnRWHelper;
+import org.apache.hadoop.yarn.server.timelineservice.storage.common.EventColumnName;
+import org.apache.hadoop.yarn.server.timelineservice.storage.common.EventColumnNameConverter;
+import org.apache.hadoop.yarn.server.timelineservice.storage.common.KeyConverter;
+import org.apache.hadoop.yarn.server.timelineservice.storage.common.Separator;
+import org.apache.hadoop.yarn.server.timelineservice.storage.common.StringKeyConverter;
+import org.apache.hadoop.yarn.server.timelineservice.storage.entity.EntityColumnPrefix;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+/**
+ * The base class for reading and deserializing timeline entities from the
+ * HBase storage. Different subclasses can be defined for the different types
+ * of entities that are being requested.
+ */
+public abstract class TimelineEntityReader extends
+    AbstractTimelineStorageReader {
+  private static final Logger LOG =
+      LoggerFactory.getLogger(TimelineEntityReader.class);
+
+  private final boolean singleEntityRead;
+  private TimelineDataToRetrieve dataToRetrieve;
+  // used only for multiple entity read mode
+  private TimelineEntityFilters filters;
+
+  /**
+   * Main table the entity reader uses.
+   */
+  private BaseTableRW<?> table;
+
+  /**
+   * Used to convert strings key components to and from storage format.
+   */
+  private final KeyConverter<String> stringKeyConverter =
+      new StringKeyConverter();
+
+  /**
+   * Instantiates a reader for multiple-entity reads.
+   *
+   * @param ctxt Reader context which defines the scope in which the query has
+   *     to be made.
+   * @param entityFilters Filters which limit the entities returned.
+   * @param toRetrieve Data to retrieve for each entity.
+   */
+  protected TimelineEntityReader(TimelineReaderContext ctxt,
+      TimelineEntityFilters entityFilters, TimelineDataToRetrieve toRetrieve) {
+    super(ctxt);
+    this.singleEntityRead = false;
+    this.dataToRetrieve = toRetrieve;
+    this.filters = entityFilters;
+
+    this.setTable(getTable());
+  }
+
+  /**
+   * Instantiates a reader for single-entity reads.
+   *
+   * @param ctxt Reader context which defines the scope in which the query has
+   *     to be made.
+   * @param toRetrieve Data to retrieve for each entity.
+   */
+  protected TimelineEntityReader(TimelineReaderContext ctxt,
+      TimelineDataToRetrieve toRetrieve) {
+    super(ctxt);
+    this.singleEntityRead = true;
+    this.dataToRetrieve = toRetrieve;
+
+    this.setTable(getTable());
+  }
+
+  /**
+   * Creates a {@link FilterList} based on fields, confs and metrics to
+   * retrieve. This filter list will be set in the Scan/Get objects to trim
+   * down results fetched from the HBase back-end storage. It is used for both
+   * single and multiple entity reads.
+   *
+   * @return a {@link FilterList} object.
+   * @throws IOException if any problem occurs while creating filter list.
+   */
+  protected abstract FilterList constructFilterListBasedOnFields()
+      throws IOException;
+
+  /**
+   * Creates a {@link FilterList} based on info, config and metric filters.
+   * This filter list will be set in the HBase Scan to trim down results
+   * fetched from the HBase back-end storage. It is used only for multiple
+   * entity reads.
+   *
+   * @return a {@link FilterList} object.
+   * @throws IOException if any problem occurs while creating filter list.
+   */
+  protected abstract FilterList constructFilterListBasedOnFilters()
+      throws IOException;
+
+  /**
+   * Combines filter lists created based on fields and based on filters.
+   *
+   * @return a {@link FilterList} object if it can be constructed. Returns null,
+   * if filter list cannot be created either on the basis of filters or on the
+   * basis of fields.
+   * @throws IOException if any problem occurs while creating filter list.
+   */
+  private FilterList createFilterList() throws IOException {
+    FilterList listBasedOnFilters = constructFilterListBasedOnFilters();
+    boolean hasListBasedOnFilters = listBasedOnFilters != null &&
+        !listBasedOnFilters.getFilters().isEmpty();
+    FilterList listBasedOnFields = constructFilterListBasedOnFields();
+    boolean hasListBasedOnFields = listBasedOnFields != null &&
+        !listBasedOnFields.getFilters().isEmpty();
+    // If filter lists based on both filters and fields can be created,
+    // combine them in a new filter list and return it.
+    // If either one of them has been created, return that filter list.
+    // Return null if neither filter list can be created. This indicates that
+    // no filter list needs to be added to the HBase Scan, either because no
+    // filters are specified for the query or because only the default view of
+    // the entity needs to be returned.
+    if (hasListBasedOnFilters && hasListBasedOnFields) {
+      FilterList list = new FilterList();
+      list.addFilter(listBasedOnFilters);
+      list.addFilter(listBasedOnFields);
+      return list;
+    } else if (hasListBasedOnFilters) {
+      return listBasedOnFilters;
+    } else if (hasListBasedOnFields) {
+      return listBasedOnFields;
+    }
+    return null;
+  }
+
+  protected TimelineDataToRetrieve getDataToRetrieve() {
+    return dataToRetrieve;
+  }
+
+  protected TimelineEntityFilters getFilters() {
+    return filters;
+  }
+
+  /**
+   * Create a {@link TimelineEntityFilters} object with default values for
+   * filters.
+   */
+  protected void createFiltersIfNull() {
+    if (filters == null) {
+      filters = new TimelineEntityFilters.Builder().build();
+    }
+  }
+
+  /**
+   * Reads and deserializes a single timeline entity from the HBase storage.
+   *
+   * @param hbaseConf HBase Configuration.
+   * @param conn HBase Connection.
+   * @return A <cite>TimelineEntity</cite> object.
+   * @throws IOException if there is any exception encountered while reading
+   *     entity.
+   */
+  public TimelineEntity readEntity(Configuration hbaseConf, Connection conn)
+      throws IOException {
+    validateParams();
+    augmentParams(hbaseConf, conn);
+
+    FilterList filterList = constructFilterListBasedOnFields();
+    if (LOG.isDebugEnabled() && filterList != null) {
+      LOG.debug("FilterList created for get is - " + filterList);
+    }
+    Result result = getResult(hbaseConf, conn, filterList);
+    if (result == null || result.isEmpty()) {
+      // Could not find a matching row.
+      LOG.info("Cannot find matching entity of type " +
+          getContext().getEntityType());
+      return null;
+    }
+    return parseEntity(result);
+  }
+
+  /**
+   * Reads and deserializes a set of timeline entities from the HBase storage.
+   * It iterates through all the available results and returns at most the
+   * number of entries specified by the limit, in the entities' natural sort
+   * order.
+   *
+   * @param hbaseConf HBase Configuration.
+   * @param conn HBase Connection.
+   * @return a set of <cite>TimelineEntity</cite> objects.
+   * @throws IOException if any exception is encountered while reading entities.
+   */
+  public Set<TimelineEntity> readEntities(Configuration hbaseConf,
+      Connection conn) throws IOException {
+    validateParams();
+    augmentParams(hbaseConf, conn);
+
+    Set<TimelineEntity> entities = new LinkedHashSet<>();
+    FilterList filterList = createFilterList();
+    if (LOG.isDebugEnabled() && filterList != null) {
+      LOG.debug("FilterList created for scan is - " + filterList);
+    }
+    ResultScanner results = getResults(hbaseConf, conn, filterList);
+    try {
+      for (Result result : results) {
+        TimelineEntity entity = parseEntity(result);
+        if (entity == null) {
+          continue;
+        }
+        entities.add(entity);
+        if (entities.size() == filters.getLimit()) {
+          break;
+        }
+      }
+      return entities;
+    } finally {
+      results.close();
+    }
+  }
+
+  /**
+   * Returns the main table to be used by the entity reader.
+   *
+   * @return A reference to the table.
+   */
+  protected BaseTableRW<?> getTable() {
+    return table;
+  }
+
+  /**
+   * Fetches a {@link Result} instance for a single-entity read.
+   *
+   * @param hbaseConf HBase Configuration.
+   * @param conn HBase Connection.
+   * @param filterList filter list which will be applied to HBase Get.
+   * @return the {@link Result} instance or null if no such record is found.
+   * @throws IOException if any exception is encountered while getting result.
+   */
+  protected abstract Result getResult(Configuration hbaseConf, Connection conn,
+      FilterList filterList) throws IOException;
+
+  /**
+   * Fetches a {@link ResultScanner} for a multi-entity read.
+   *
+   * @param hbaseConf HBase Configuration.
+   * @param conn HBase Connection.
+   * @param filterList filter list which will be applied to HBase Scan.
+   * @return the {@link ResultScanner} instance.
+   * @throws IOException if any exception is encountered while getting results.
+   */
+  protected abstract ResultScanner getResults(Configuration hbaseConf,
+      Connection conn, FilterList filterList) throws IOException;
+
+  /**
+   * Parses the result retrieved from HBase backend and convert it into a
+   * {@link TimelineEntity} object.
+   *
+   * @param result Single row result of a Get/Scan.
+   * @return the <cite>TimelineEntity</cite> instance or null if the entity is
+   *     filtered.
+   * @throws IOException if any exception is encountered while parsing entity.
+   */
+  protected abstract TimelineEntity parseEntity(Result result)
+      throws IOException;
+
+  /**
+   * Helper method for reading and deserializing {@link TimelineMetric} objects
+   * using the specified column prefix. The timeline metrics then are added to
+   * the given timeline entity.
+   *
+   * @param entity {@link TimelineEntity} object.
+   * @param result {@link Result} object retrieved from backend.
+   * @param columnPrefix Metric column prefix
+   * @throws IOException if any exception is encountered while reading metrics.
+   */
+  protected void readMetrics(TimelineEntity entity, Result result,
+      ColumnPrefix<?> columnPrefix) throws IOException {
+    NavigableMap<String, NavigableMap<Long, Number>> metricsResult =
+        ColumnRWHelper.readResultsWithTimestamps(
+            result, columnPrefix, stringKeyConverter);
+    for (Map.Entry<String, NavigableMap<Long, Number>> metricResult:
+        metricsResult.entrySet()) {
+      TimelineMetric metric = new TimelineMetric();
+      metric.setId(metricResult.getKey());
+      // Simply assume that if the value set contains more than one element,
+      // the metric is a TIME_SERIES metric; otherwise it is a SINGLE_VALUE
+      // metric.
+      TimelineMetric.Type metricType = metricResult.getValue().size() > 1 ?
+          TimelineMetric.Type.TIME_SERIES : TimelineMetric.Type.SINGLE_VALUE;
+      metric.setType(metricType);
+      metric.addValues(metricResult.getValue());
+      entity.addMetric(metric);
+    }
+  }
+
+  /**
+   * Checks whether the reader has been created to fetch single entity or
+   * multiple entities.
+   *
+   * @return true, if query is for single entity, false otherwise.
+   */
+  public boolean isSingleEntityRead() {
+    return singleEntityRead;
+  }
+
+  protected void setTable(BaseTableRW<?> baseTable) {
+    this.table = baseTable;
+  }
+
+  /**
+   * Check if a certain field is amongst the fields to retrieve. This method
+   * also checks against {@link Field#ALL}, since its presence implies that
+   * every field, including the one passed, should be retrieved.
+   *
+   * @param fieldsToRetrieve fields to be retrieved.
+   * @param requiredField field to be checked in fieldsToRetrieve.
+   * @return true if has the required field, false otherwise.
+   */
+  protected boolean hasField(EnumSet<Field> fieldsToRetrieve,
+      Field requiredField) {
+    return fieldsToRetrieve.contains(Field.ALL) ||
+        fieldsToRetrieve.contains(requiredField);
+  }
+
+  /**
+   * Create a filter list of qualifier filters based on passed set of columns.
+   *
+   * @param <T> Describes the type of column prefix.
+   * @param colPrefix Column Prefix.
+   * @param columns set of column qualifiers.
+   * @return filter list.
+   */
+  protected <T extends BaseTable<T>> FilterList
+      createFiltersFromColumnQualifiers(
+          ColumnPrefix<T> colPrefix, Set<String> columns) {
+    FilterList list = new FilterList(Operator.MUST_PASS_ONE);
+    for (String column : columns) {
+      // For columns which have compound column qualifiers (e.g. events), we
+      // need to include the required separator.
+      byte[] compoundColQual = createColQualifierPrefix(colPrefix, column);
+      list.addFilter(new QualifierFilter(CompareOp.EQUAL,
+          new BinaryPrefixComparator(colPrefix
+              .getColumnPrefixBytes(compoundColQual))));
+    }
+    return list;
+  }
+
+  protected <T extends BaseTable<T>> byte[] createColQualifierPrefix(
+      ColumnPrefix<T> colPrefix, String column) {
+    if (colPrefix == ApplicationColumnPrefix.EVENT
+        || colPrefix == EntityColumnPrefix.EVENT) {
+      return new EventColumnName(column, null, null).getColumnQualifier();
+    } else {
+      return stringKeyConverter.encode(column);
+    }
+  }
+
+  /**
+   * Helper method for reading relationship.
+   *
+   * @param <T> Describes the type of column prefix.
+   * @param entity entity to fill.
+   * @param result result from HBase.
+   * @param prefix column prefix.
+   * @param isRelatedTo if true, the relationship is added to isRelatedTo;
+   *          otherwise it is added to relatesTo.
+   * @throws IOException if any problem is encountered while reading result.
+   */
+  protected <T extends BaseTable<T>> void readRelationship(
+      TimelineEntity entity, Result result,
+      ColumnPrefix<T> prefix, boolean isRelatedTo) throws IOException {
+    // isRelatedTo and relatesTo are of type Map<String, Set<String>>
+    Map<String, Object> columns = ColumnRWHelper.readResults(
+        result, prefix, stringKeyConverter);
+    for (Map.Entry<String, Object> column : columns.entrySet()) {
+      for (String id : Separator.VALUES.splitEncoded(column.getValue()
+          .toString())) {
+        if (isRelatedTo) {
+          entity.addIsRelatedToEntity(column.getKey(), id);
+        } else {
+          entity.addRelatesToEntity(column.getKey(), id);
+        }
+      }
+    }
+  }
+
+  /**
+   * Read events from the entity table or the application table. The column name
+   * is of the form "eventId=timestamp=infoKey" where "infoKey" may be omitted
+   * if there is no info associated with the event.
+   *
+   * @param <T> Describes the type of column prefix.
+   * @param entity entity to fill.
+   * @param result HBase Result.
+   * @param prefix column prefix.
+   * @throws IOException if any problem is encountered while reading result.
+   */
+  protected static <T extends BaseTable<T>> void readEvents(
+      TimelineEntity entity, Result result,
+      ColumnPrefix<T> prefix) throws IOException {
+    Map<String, TimelineEvent> eventsMap = new HashMap<>();
+    Map<EventColumnName, Object> eventsResult = ColumnRWHelper.readResults(
+        result, prefix, new EventColumnNameConverter());
+    for (Map.Entry<EventColumnName, Object>
+             eventResult : eventsResult.entrySet()) {
+      EventColumnName eventColumnName = eventResult.getKey();
+      String key = eventColumnName.getId() +
+          Long.toString(eventColumnName.getTimestamp());
+      // Retrieve previously seen event to add to it
+      TimelineEvent event = eventsMap.get(key);
+      if (event == null) {
+        // First time we're seeing this event, add it to the eventsMap
+        event = new TimelineEvent();
+        event.setId(eventColumnName.getId());
+        event.setTimestamp(eventColumnName.getTimestamp());
+        eventsMap.put(key, event);
+      }
+      if (eventColumnName.getInfoKey() != null) {
+        event.addInfo(eventColumnName.getInfoKey(), eventResult.getValue());
+      }
+    }
+    Set<TimelineEvent> eventsSet = new HashSet<>(eventsMap.values());
+    entity.addEvents(eventsSet);
+  }
+}
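
Aside on createFilterList() above: it relies on FilterList's default
MUST_PASS_ALL operator when combining the filter-based and field-based lists,
so a row must survive both. A hedged fragment illustrating the combination
(the two inner lists are placeholders, not the committed construction logic):

    import org.apache.hadoop.hbase.filter.FilterList;
    import org.apache.hadoop.hbase.filter.FilterList.Operator;

    // Outer list defaults to MUST_PASS_ALL: a row must satisfy both the
    // value filters and the field-projection filters to be returned.
    FilterList listBasedOnFilters = new FilterList(Operator.MUST_PASS_ALL);
    FilterList listBasedOnFields = new FilterList(Operator.MUST_PASS_ONE);
    // ... populate both lists from the query ...
    FilterList combined = new FilterList();
    combined.addFilter(listBasedOnFilters);
    combined.addFilter(listBasedOnFields);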

http://git-wip-us.apache.org/repos/asf/hadoop/blob/9af30d46/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-client/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/reader/TimelineEntityReaderFactory.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-client/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/reader/TimelineEntityReaderFactory.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-client/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/reader/TimelineEntityReaderFactory.java
new file mode 100644
index 0000000..fa16077
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-client/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/reader/TimelineEntityReaderFactory.java
@@ -0,0 +1,105 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.yarn.server.timelineservice.storage.reader;
+
+import org.apache.hadoop.yarn.api.records.timelineservice.TimelineEntityType;
+import org.apache.hadoop.yarn.server.timelineservice.reader.TimelineDataToRetrieve;
+import org.apache.hadoop.yarn.server.timelineservice.reader.TimelineEntityFilters;
+import org.apache.hadoop.yarn.server.timelineservice.reader.TimelineReaderContext;
+
+/**
+ * Factory methods for instantiating a timeline entity reader.
+ */
+public final class TimelineEntityReaderFactory {
+  private TimelineEntityReaderFactory() {
+  }
+
+  /**
+   * Creates a timeline entity reader instance for reading a single entity with
+   * the specified input.
+   *
+   * @param context Reader context which defines the scope in which the query
+   *     has to be made.
+   * @param dataToRetrieve Data to retrieve for each entity.
+   * @return An implementation of <cite>TimelineEntityReader</cite> object
+   *     depending on entity type.
+   */
+  public static TimelineEntityReader createSingleEntityReader(
+      TimelineReaderContext context, TimelineDataToRetrieve dataToRetrieve) {
+    // currently the types that are handled separately from the generic entity
+    // table are application, flow run, and flow activity entities
+    if (TimelineEntityType.YARN_APPLICATION.matches(context.getEntityType())) {
+      return new ApplicationEntityReader(context, dataToRetrieve);
+    } else if (TimelineEntityType.
+        YARN_FLOW_RUN.matches(context.getEntityType())) {
+      return new FlowRunEntityReader(context, dataToRetrieve);
+    } else if (TimelineEntityType.
+        YARN_FLOW_ACTIVITY.matches(context.getEntityType())) {
+      return new FlowActivityEntityReader(context, dataToRetrieve);
+    } else {
+      // assume we're dealing with a generic entity read
+      return new GenericEntityReader(context, dataToRetrieve);
+    }
+  }
+
+  /**
+   * Creates a timeline entity reader instance for reading set of entities with
+   * the specified input and predicates.
+   *
+   * @param context Reader context which defines the scope in which the query
+   *     has to be made.
+   * @param filters Filters which limit the entities returned.
+   * @param dataToRetrieve Data to retrieve for each entity.
+   * @return An implementation of <cite>TimelineEntityReader</cite> object
+   *     depending on entity type.
+   */
+  public static TimelineEntityReader createMultipleEntitiesReader(
+      TimelineReaderContext context, TimelineEntityFilters filters,
+      TimelineDataToRetrieve dataToRetrieve) {
+    // currently the types that are handled separately from the generic entity
+    // table are application, flow run, and flow activity entities
+    if (TimelineEntityType.YARN_APPLICATION.matches(context.getEntityType())) {
+      return new ApplicationEntityReader(context, filters, dataToRetrieve);
+    } else if (TimelineEntityType.
+        YARN_FLOW_ACTIVITY.matches(context.getEntityType())) {
+      return new FlowActivityEntityReader(context, filters, dataToRetrieve);
+    } else if (TimelineEntityType.
+        YARN_FLOW_RUN.matches(context.getEntityType())) {
+      return new FlowRunEntityReader(context, filters, dataToRetrieve);
+    } else {
+      if (context.getDoAsUser() != null) {
+        return new SubApplicationEntityReader(context, filters, dataToRetrieve);
+      }
+      // assume we're dealing with a generic entity read
+      return new GenericEntityReader(context, filters, dataToRetrieve);
+    }
+  }
+
+  /**
+   * Creates a timeline entity type reader that will read all available entity
+   * types within the specified context.
+   *
+   * @param context Reader context which defines the scope in which the query
+   *                has to be made. Limited to application level only.
+   * @return an <cite>EntityTypeReader</cite> object
+   */
+  public static EntityTypeReader createEntityTypeReader(
+      TimelineReaderContext context) {
+    return new EntityTypeReader(context);
+  }
+}
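
A hedged usage sketch for the factory; context, filters, dataToRetrieve,
hbaseConf and conn are assumed to be in scope with appropriate types. For a
generic entity type, a non-null doAs user in the context routes the query to
the sub-application reader:

    // When context.getDoAsUser() != null and the entity type is not one of
    // the specially handled types, a SubApplicationEntityReader is returned;
    // otherwise a GenericEntityReader serves the query.
    TimelineEntityReader reader = TimelineEntityReaderFactory
        .createMultipleEntitiesReader(context, filters, dataToRetrieve);
    Set<TimelineEntity> entities = reader.readEntities(hbaseConf, conn);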

http://git-wip-us.apache.org/repos/asf/hadoop/blob/9af30d46/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-client/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/reader/package-info.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-client/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/reader/package-info.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-client/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/reader/package-info.java
new file mode 100644
index 0000000..9814d6d
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-client/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/reader/package-info.java
@@ -0,0 +1,28 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/**
+ * Package org.apache.hadoop.yarn.server.timelineservice.storage.reader
+ * contains classes used to read entities from the backend based on the query
+ * type.
+ */
+@InterfaceAudience.Private
+@InterfaceStability.Unstable
+package org.apache.hadoop.yarn.server.timelineservice.storage.reader;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hadoop/blob/9af30d46/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-client/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/subapplication/SubApplicationTableRW.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-client/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/subapplication/SubApplicationTableRW.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-client/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/subapplication/SubApplicationTableRW.java
new file mode 100644
index 0000000..256b24b
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-client/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/subapplication/SubApplicationTableRW.java
@@ -0,0 +1,137 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.yarn.server.timelineservice.storage.subapplication;
+
+import java.io.IOException;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hbase.HColumnDescriptor;
+import org.apache.hadoop.hbase.HTableDescriptor;
+import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.client.Admin;
+import org.apache.hadoop.hbase.regionserver.BloomType;
+import org.apache.hadoop.yarn.conf.YarnConfiguration;
+import org.apache.hadoop.yarn.server.timelineservice.storage.common.BaseTableRW;
+import org.apache.hadoop.yarn.server.timelineservice.storage.common.TimelineHBaseSchemaConstants;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+/**
+ * Create, read and write to the SubApplication table.
+ */
+public class SubApplicationTableRW extends BaseTableRW<SubApplicationTable> {
+  /** sub app prefix. */
+  private static final String PREFIX =
+      YarnConfiguration.TIMELINE_SERVICE_PREFIX + "subapplication";
+
+  /** config param name that specifies the subapplication table name. */
+  public static final String TABLE_NAME_CONF_NAME = PREFIX + ".table.name";
+
+  /**
+   * config param name that specifies the TTL for the metrics column family in
+   * the subapplication table.
+   */
+  private static final String METRICS_TTL_CONF_NAME = PREFIX
+      + ".table.metrics.ttl";
+
+  /**
+   * config param name that specifies the max-versions for the metrics column
+   * family in the subapplication table.
+   */
+  private static final String METRICS_MAX_VERSIONS =
+      PREFIX + ".table.metrics.max-versions";
+
+  /** default value for subapplication table name. */
+  public static final String DEFAULT_TABLE_NAME =
+      "timelineservice.subapplication";
+
+  /** default TTL is 30 days for metrics timeseries. */
+  private static final int DEFAULT_METRICS_TTL = 2592000;
+
+  /** default max number of versions. */
+  private static final int DEFAULT_METRICS_MAX_VERSIONS = 10000;
+
+  private static final Logger LOG = LoggerFactory.getLogger(
+      SubApplicationTableRW.class);
+
+  public SubApplicationTableRW() {
+    super(TABLE_NAME_CONF_NAME, DEFAULT_TABLE_NAME);
+  }
+
+  /*
+   * (non-Javadoc)
+   *
+   * @see
+   * org.apache.hadoop.yarn.server.timelineservice.storage.BaseTableRW#
+   * createTable(org.apache.hadoop.hbase.client.Admin,
+   * org.apache.hadoop.conf.Configuration)
+   */
+  public void createTable(Admin admin, Configuration hbaseConf)
+      throws IOException {
+
+    TableName table = getTableName(hbaseConf);
+    if (admin.tableExists(table)) {
+      // do not disable or delete an existing table, similar to the
+      // approach taken by map-reduce jobs when the output directory
+      // already exists
+      throw new IOException("Table " + table.getNameAsString()
+          + " already exists.");
+    }
+
+    HTableDescriptor subAppTableDescp = new HTableDescriptor(table);
+    HColumnDescriptor infoCF =
+        new HColumnDescriptor(SubApplicationColumnFamily.INFO.getBytes());
+    infoCF.setBloomFilterType(BloomType.ROWCOL);
+    subAppTableDescp.addFamily(infoCF);
+
+    HColumnDescriptor configCF =
+        new HColumnDescriptor(SubApplicationColumnFamily.CONFIGS.getBytes());
+    configCF.setBloomFilterType(BloomType.ROWCOL);
+    configCF.setBlockCacheEnabled(true);
+    subAppTableDescp.addFamily(configCF);
+
+    HColumnDescriptor metricsCF =
+        new HColumnDescriptor(SubApplicationColumnFamily.METRICS.getBytes());
+    subAppTableDescp.addFamily(metricsCF);
+    metricsCF.setBlockCacheEnabled(true);
+    // always keep 1 version (the latest)
+    metricsCF.setMinVersions(1);
+    metricsCF.setMaxVersions(
+        hbaseConf.getInt(METRICS_MAX_VERSIONS, DEFAULT_METRICS_MAX_VERSIONS));
+    metricsCF.setTimeToLive(hbaseConf.getInt(METRICS_TTL_CONF_NAME,
+        DEFAULT_METRICS_TTL));
+    subAppTableDescp.setRegionSplitPolicyClassName(
+        "org.apache.hadoop.hbase.regionserver.KeyPrefixRegionSplitPolicy");
+    subAppTableDescp.setValue("KeyPrefixRegionSplitPolicy.prefix_length",
+        TimelineHBaseSchemaConstants.USERNAME_SPLIT_KEY_PREFIX_LENGTH);
+    admin.createTable(subAppTableDescp,
+        TimelineHBaseSchemaConstants.getUsernameSplits());
+    LOG.info("Status of table creation for " + table.getNameAsString() + "="
+        + admin.tableExists(table));
+  }
+
+  /**
+   * @param metricsTTL time to live parameter for the metrics in this table.
+   * @param hbaseConf configuration in which to set the metrics TTL config
+   *          variable.
+   */
+  public void setMetricsTTL(int metricsTTL, Configuration hbaseConf) {
+    hbaseConf.setInt(METRICS_TTL_CONF_NAME, metricsTTL);
+  }
+
+}
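
A hedged usage sketch for the table above; admin (an HBase Admin) and
hbaseConf are assumed to exist, and the fully qualified TTL key shown assumes
YarnConfiguration.TIMELINE_SERVICE_PREFIX resolves to "yarn.timeline-service.":

    SubApplicationTableRW subAppTable = new SubApplicationTableRW();
    // Shorten the metrics TTL from the 30-day default to 7 days; this sets
    // yarn.timeline-service.subapplication.table.metrics.ttl.
    subAppTable.setMetricsTTL(7 * 24 * 60 * 60, hbaseConf);
    // Throws IOException if the table already exists (see createTable above).
    subAppTable.createTable(admin, hbaseConf);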

http://git-wip-us.apache.org/repos/asf/hadoop/blob/9af30d46/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-client/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/subapplication/package-info.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-client/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/subapplication/package-info.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-client/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/subapplication/package-info.java
new file mode 100644
index 0000000..52cc399
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-client/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/subapplication/package-info.java
@@ -0,0 +1,28 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/**
+ * Package org.apache.hadoop.yarn.server.timelineservice.storage.subapplication
+ * contains classes related to the implementation of the subapplication table.
+ */
+@InterfaceAudience.Private
+@InterfaceStability.Unstable
+package org.apache.hadoop.yarn.server.timelineservice.storage.subapplication;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/9af30d46/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-client/src/test/java/org/apache/hadoop/yarn/server/timelineservice/storage/common/TestHBaseTimelineStorageUtils.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-client/src/test/java/org/apache/hadoop/yarn/server/timelineservice/storage/common/TestHBaseTimelineStorageUtils.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-client/src/test/java/org/apache/hadoop/yarn/server/timelineservice/storage/common/TestHBaseTimelineStorageUtils.java
new file mode 100644
index 0000000..402a89b
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-client/src/test/java/org/apache/hadoop/yarn/server/timelineservice/storage/common/TestHBaseTimelineStorageUtils.java
@@ -0,0 +1,33 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.server.timelineservice.storage.common;
+
+import org.junit.Test;
+
+/**
+ * Unit tests for HBaseTimelineStorageUtils static methods.
+ */
+public class TestHBaseTimelineStorageUtils {
+
+  @Test(expected=NullPointerException.class)
+  public void testGetTimelineServiceHBaseConfNullArgument() throws Exception {
+    HBaseTimelineStorageUtils.getTimelineServiceHBaseConf(null);
+  }
+
+}
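
A possible companion test, not part of the committed patch, exercising the
prefix helper the readers rely on; it assumes the conventional "increment the
rightmost non-0xFF byte and truncate" semantics of
calculateTheClosestNextRowKeyForPrefix:

    import static org.junit.Assert.assertArrayEquals;

    @Test
    public void testCalculateTheClosestNextRowKeyForPrefix() {
      byte[] next = HBaseTimelineStorageUtils
          .calculateTheClosestNextRowKeyForPrefix(new byte[]{0x01, 0x02});
      assertArrayEquals(new byte[]{0x01, 0x03}, next);
    }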

http://git-wip-us.apache.org/repos/asf/hadoop/blob/9af30d46/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-common/pom.xml
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-common/pom.xml b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-common/pom.xml
new file mode 100644
index 0000000..cb0d6e2
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-common/pom.xml
@@ -0,0 +1,132 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!--
+   Licensed to the Apache Software Foundation (ASF) under one or more
+   contributor license agreements.  See the NOTICE file distributed with
+   this work for additional information regarding copyright ownership.
+   The ASF licenses this file to You under the Apache License, Version 2.0
+   (the "License"); you may not use this file except in compliance with
+   the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+-->
+<project xmlns="http://maven.apache.org/POM/4.0.0"
+         xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
+         xsi:schemaLocation="http://maven.apache.org/POM/4.0.0
+                      http://maven.apache.org/xsd/maven-4.0.0.xsd">
+  <parent>
+    <artifactId>hadoop-yarn-server-timelineservice-hbase</artifactId>
+    <groupId>org.apache.hadoop</groupId>
+    <version>3.2.0-SNAPSHOT</version>
+  </parent>
+  <modelVersion>4.0.0</modelVersion>
+
+  <artifactId>hadoop-yarn-server-timelineservice-hbase-common</artifactId>
+  <name>Apache Hadoop YARN TimelineService HBase Common</name>
+  <version>3.2.0-SNAPSHOT</version>
+
+  <properties>
+    <!-- Needed for generating FindBugs warnings using parent pom -->
+    <yarn.basedir>${project.parent.parent.parent.basedir}</yarn.basedir>
+  </properties>
+
+  <dependencies>
+    <dependency>
+      <groupId>org.apache.hadoop</groupId>
+      <artifactId>hadoop-annotations</artifactId>
+      <scope>provided</scope>
+    </dependency>
+
+    <dependency>
+      <groupId>org.apache.hadoop</groupId>
+      <artifactId>hadoop-yarn-api</artifactId>
+      <scope>provided</scope>
+    </dependency>
+
+    <dependency>
+      <groupId>org.apache.hadoop</groupId>
+      <artifactId>hadoop-yarn-server-timelineservice</artifactId>
+    </dependency>
+
+    <!-- This is needed for GenericObjectMapper in GenericConverter -->
+    <dependency>
+      <groupId>org.apache.hadoop</groupId>
+      <artifactId>hadoop-yarn-server-applicationhistoryservice</artifactId>
+      <scope>provided</scope>
+    </dependency>
+
+    <!-- 'mvn dependency:analyze' fails to detect use of this direct
+        dependency -->
+    <dependency>
+      <groupId>org.apache.hadoop</groupId>
+      <artifactId>hadoop-common</artifactId>
+      <type>test-jar</type>
+      <scope>test</scope>
+    </dependency>
+
+    <dependency>
+      <groupId>org.apache.hbase</groupId>
+      <artifactId>hbase-common</artifactId>
+      <exclusions>
+        <exclusion>
+          <groupId>org.apache.hadoop</groupId>
+          <artifactId>hadoop-mapreduce-client-core</artifactId>
+        </exclusion>
+        <exclusion>
+          <groupId>org.mortbay.jetty</groupId>
+          <artifactId>jetty-util</artifactId>
+        </exclusion>
+      </exclusions>
+    </dependency>
+
+    <dependency>
+      <groupId>com.google.guava</groupId>
+      <artifactId>guava</artifactId>
+      <scope>test</scope>
+    </dependency>
+
+    <dependency>
+      <groupId>junit</groupId>
+      <artifactId>junit</artifactId>
+      <scope>test</scope>
+    </dependency>
+  </dependencies>
+
+  <build>
+    <plugins>
+      <plugin>
+        <artifactId>maven-jar-plugin</artifactId>
+        <executions>
+          <execution>
+            <goals>
+              <goal>test-jar</goal>
+            </goals>
+            <phase>test-compile</phase>
+          </execution>
+        </executions>
+      </plugin>
+      <plugin>
+        <groupId>org.apache.maven.plugins</groupId>
+        <artifactId>maven-dependency-plugin</artifactId>
+        <executions>
+          <execution>
+            <phase>package</phase>
+            <goals>
+              <goal>copy-dependencies</goal>
+            </goals>
+            <configuration>
+              <includeScope>runtime</includeScope>
+              <excludeGroupIds>org.slf4j,org.apache.hadoop,com.github.stephenc.findbugs</excludeGroupIds>
+              <outputDirectory>${project.build.directory}/lib</outputDirectory>
+            </configuration>
+          </execution>
+        </executions>
+      </plugin>
+    </plugins>
+  </build>
+</project>
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hadoop/blob/9af30d46/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-common/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/application/ApplicationColumn.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-common/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/application/ApplicationColumn.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-common/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/application/ApplicationColumn.java
new file mode 100644
index 0000000..c3d6a52
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-common/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/application/ApplicationColumn.java
@@ -0,0 +1,101 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.yarn.server.timelineservice.storage.application;
+
+import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.yarn.server.timelineservice.storage.common.Column;
+import org.apache.hadoop.yarn.server.timelineservice.storage.common.ColumnFamily;
+import org.apache.hadoop.yarn.server.timelineservice.storage.common.GenericConverter;
+import org.apache.hadoop.yarn.server.timelineservice.storage.common.LongConverter;
+import org.apache.hadoop.yarn.server.timelineservice.storage.common.Separator;
+import org.apache.hadoop.yarn.server.timelineservice.storage.common.ValueConverter;
+import org.apache.hadoop.yarn.server.timelineservice.storage.flow.Attribute;
+
+/**
+ * Identifies fully qualified columns for the {@link ApplicationTable}.
+ */
+public enum ApplicationColumn implements Column<ApplicationTable> {
+
+  /**
+   * App id.
+   */
+  ID(ApplicationColumnFamily.INFO, "id"),
+
+  /**
+   * When the application was created.
+   */
+  CREATED_TIME(ApplicationColumnFamily.INFO, "created_time",
+      new LongConverter()),
+
+  /**
+   * The version of the flow that this app belongs to.
+   */
+  FLOW_VERSION(ApplicationColumnFamily.INFO, "flow_version");
+
+  private final ColumnFamily<ApplicationTable> columnFamily;
+  private final String columnQualifier;
+  private final byte[] columnQualifierBytes;
+  private final ValueConverter valueConverter;
+
+  private ApplicationColumn(ColumnFamily<ApplicationTable> columnFamily,
+      String columnQualifier) {
+    this(columnFamily, columnQualifier, GenericConverter.getInstance());
+  }
+
+  private ApplicationColumn(ColumnFamily<ApplicationTable> columnFamily,
+      String columnQualifier, ValueConverter converter) {
+    this.columnFamily = columnFamily;
+    this.columnQualifier = columnQualifier;
+    // Future-proof by ensuring the right column prefix hygiene.
+    this.columnQualifierBytes =
+        Bytes.toBytes(Separator.SPACE.encode(columnQualifier));
+    this.valueConverter = converter;
+  }
+
+  /**
+   * @return the column name value
+   */
+  private String getColumnQualifier() {
+    return columnQualifier;
+  }
+
+  @Override
+  public byte[] getColumnQualifierBytes() {
+    return columnQualifierBytes.clone();
+  }
+
+  @Override
+  public byte[] getColumnFamilyBytes() {
+    return columnFamily.getBytes();
+  }
+
+  @Override
+  public ValueConverter getValueConverter() {
+    return valueConverter;
+  }
+
+  @Override
+  public Attribute[] getCombinedAttrsWithAggr(Attribute... attributes) {
+    return attributes;
+  }
+
+  @Override
+  public boolean supplementCellTimestamp() {
+    return false;
+  }
+}
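
A hedged read-side sketch for these columns, mirroring how the readers in this
patch consume them; result is an org.apache.hadoop.hbase.client.Result assumed
to be in scope, and the String cast assumes GenericConverter round-trips the
stored string:

    // CREATED_TIME uses a LongConverter, so the decoded value is a Long.
    Long created = (Long) ColumnRWHelper.readResult(
        result, ApplicationColumn.CREATED_TIME);
    // ID uses the generic converter; the stored app id decodes as a String.
    String appId = (String) ColumnRWHelper.readResult(
        result, ApplicationColumn.ID);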

http://git-wip-us.apache.org/repos/asf/hadoop/blob/9af30d46/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-common/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/application/ApplicationColumnFamily.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-common/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/application/ApplicationColumnFamily.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-common/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/application/ApplicationColumnFamily.java
new file mode 100644
index 0000000..97e5f7b
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-common/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/application/ApplicationColumnFamily.java
@@ -0,0 +1,65 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.yarn.server.timelineservice.storage.application;
+
+import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.yarn.server.timelineservice.storage.common.ColumnFamily;
+import org.apache.hadoop.yarn.server.timelineservice.storage.common.Separator;
+
+/**
+ * Represents the application table column families.
+ */
+public enum ApplicationColumnFamily implements ColumnFamily<ApplicationTable> {
+
+  /**
+   * Info column family houses known columns, specifically ones included in
+   * column family filters.
+   */
+  INFO("i"),
+
+  /**
+   * Configurations are in a separate column family for two reasons: a) the
+   * size of the config values can be very large and b) we expect that config
+   * values are often accessed separately from the metrics and info columns.
+   */
+  CONFIGS("c"),
+
+  /**
+   * Metrics have a separate column family, because they have a separate TTL.
+   */
+  METRICS("m");
+
+  /**
+   * Byte representation of this column family.
+   */
+  private final byte[] bytes;
+
+  /**
+   * @param value create a column family with this name. Must be lower case and
+   *          without spaces.
+   */
+  private ApplicationColumnFamily(String value) {
+    // column families should be lower case and not contain any spaces.
+    this.bytes = Bytes.toBytes(Separator.SPACE.encode(value));
+  }
+
+  public byte[] getBytes() {
+    return Bytes.copy(bytes);
+  }
+
+}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/9af30d46/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-common/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/application/ApplicationColumnPrefix.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-common/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/application/ApplicationColumnPrefix.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-common/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/application/ApplicationColumnPrefix.java
new file mode 100644
index 0000000..89412f4
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-common/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/application/ApplicationColumnPrefix.java
@@ -0,0 +1,150 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.yarn.server.timelineservice.storage.application;
+
+import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.yarn.server.timelineservice.storage.common.ColumnFamily;
+import org.apache.hadoop.yarn.server.timelineservice.storage.common.ColumnHelper;
+import org.apache.hadoop.yarn.server.timelineservice.storage.common.ColumnPrefix;
+import org.apache.hadoop.yarn.server.timelineservice.storage.common.GenericConverter;
+import org.apache.hadoop.yarn.server.timelineservice.storage.common.LongConverter;
+import org.apache.hadoop.yarn.server.timelineservice.storage.common.Separator;
+import org.apache.hadoop.yarn.server.timelineservice.storage.common.ValueConverter;
+import org.apache.hadoop.yarn.server.timelineservice.storage.flow.Attribute;
+
+/**
+ * Identifies partially qualified columns for the application table.
+ */
+public enum ApplicationColumnPrefix implements ColumnPrefix<ApplicationTable> {
+
+  /**
+   * To store TimelineEntity getIsRelatedToEntities values.
+   */
+  IS_RELATED_TO(ApplicationColumnFamily.INFO, "s"),
+
+  /**
+   * To store TimelineEntity getRelatesToEntities values.
+   */
+  RELATES_TO(ApplicationColumnFamily.INFO, "r"),
+
+  /**
+   * To store TimelineEntity info values.
+   */
+  INFO(ApplicationColumnFamily.INFO, "i"),
+
+  /**
+   * Lifecycle events for an application.
+   */
+  EVENT(ApplicationColumnFamily.INFO, "e"),
+
+  /**
+   * Config column stores configuration with config key as the column name.
+   */
+  CONFIG(ApplicationColumnFamily.CONFIGS, null),
+
+  /**
+   * Metrics are stored with the metric name as the column name.
+   */
+  METRIC(ApplicationColumnFamily.METRICS, null, new LongConverter());
+
+  private final ColumnFamily<ApplicationTable> columnFamily;
+
+  /**
+   * Can be null for those cases where the provided column qualifier is the
+   * entire column name.
+   */
+  private final String columnPrefix;
+  private final byte[] columnPrefixBytes;
+  private final ValueConverter valueConverter;
+
+  /**
+   * Private constructor, meant to be used by the enum definition.
+   *
+   * @param columnFamily that this column is stored in.
+   * @param columnPrefix for this column.
+   */
+  private ApplicationColumnPrefix(ColumnFamily<ApplicationTable> columnFamily,
+      String columnPrefix) {
+    this(columnFamily, columnPrefix, GenericConverter.getInstance());
+  }
+
+  /**
+   * Private constructor, meant to be used by the enum definition.
+   *
+   * @param columnFamily that this column is stored in.
+   * @param columnPrefix for this column.
+   * @param converter used to encode/decode values to be stored in HBase for
+   * this column prefix.
+   */
+  private ApplicationColumnPrefix(ColumnFamily<ApplicationTable> columnFamily,
+      String columnPrefix, ValueConverter converter) {
+    this.valueConverter = converter;
+    this.columnFamily = columnFamily;
+    this.columnPrefix = columnPrefix;
+    if (columnPrefix == null) {
+      this.columnPrefixBytes = null;
+    } else {
+      // Future-proof by ensuring the right column prefix hygiene.
+      this.columnPrefixBytes =
+          Bytes.toBytes(Separator.SPACE.encode(columnPrefix));
+    }
+  }
+
+  /**
+   * @return the column name value
+   */
+  private String getColumnPrefix() {
+    return columnPrefix;
+  }
+
+  @Override
+  public byte[] getColumnPrefixBytes(byte[] qualifierPrefix) {
+    return ColumnHelper.getColumnQualifier(
+        this.columnPrefixBytes, qualifierPrefix);
+  }
+
+  @Override
+  public byte[] getColumnPrefixBytes(String qualifierPrefix) {
+    return ColumnHelper.getColumnQualifier(
+        this.columnPrefixBytes, qualifierPrefix);
+  }
+
+  @Override
+  public byte[] getColumnFamilyBytes() {
+    return columnFamily.getBytes();
+  }
+
+  @Override
+  public byte[] getColumnPrefixInBytes() {
+    return columnPrefixBytes != null ? columnPrefixBytes.clone() : null;
+  }
+
+  @Override
+  public Attribute[] getCombinedAttrsWithAggr(Attribute... attributes) {
+    return attributes;
+  }
+
+  @Override
+  public boolean supplementCellTimeStamp() {
+    return false;
+  }
+
+  public ValueConverter getValueConverter() {
+    return valueConverter;
+  }
+}
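For readers unfamiliar with the ColumnPrefix pattern, a short hedged sketch of how a fully qualified column is assembled from the pieces above. The Get and the row key are assumed (standard org.apache.hadoop.hbase.client.Get usage), not something this patch touches:

  // The enum supplies the family ("i" for EVENT) and the prefix ("e");
  // ColumnHelper.getColumnQualifier() joins the prefix with the remainder.
  Get get = new Get(rowKeyBytes);  // rowKeyBytes: assumed, built elsewhere
  get.addColumn(ApplicationColumnPrefix.EVENT.getColumnFamilyBytes(),
      ApplicationColumnPrefix.EVENT.getColumnPrefixBytes("FINISHED"));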




[30/50] [abbrv] hadoop git commit: HDFS-13167. DatanodeAdminManager Improvements. Contributed by BELUGA BEHR.

Posted by ha...@apache.org.
HDFS-13167. DatanodeAdminManager Improvements. Contributed by BELUGA BEHR.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/6f81cc0b
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/6f81cc0b
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/6f81cc0b

Branch: refs/heads/HDFS-12996
Commit: 6f81cc0beea00843b44424417f09d8ee12cd7bae
Parents: 17c592e
Author: Inigo Goiri <in...@apache.org>
Authored: Tue Feb 20 15:18:27 2018 -0800
Committer: Inigo Goiri <in...@apache.org>
Committed: Tue Feb 20 15:18:27 2018 -0800

----------------------------------------------------------------------
 .../blockmanagement/DatanodeAdminManager.java   | 27 ++++++++++----------
 1 file changed, 14 insertions(+), 13 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/6f81cc0b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeAdminManager.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeAdminManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeAdminManager.java
index e338591..a1dff08 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeAdminManager.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeAdminManager.java
@@ -21,8 +21,9 @@ import static com.google.common.base.Preconditions.checkArgument;
 import static org.apache.hadoop.util.Time.monotonicNow;
 
 import java.util.AbstractList;
+import java.util.ArrayDeque;
+import java.util.ArrayList;
 import java.util.Iterator;
-import java.util.LinkedList;
 import java.util.List;
 import java.util.Map;
 import java.util.Queue;
@@ -139,7 +140,7 @@ public class DatanodeAdminManager {
         new ThreadFactoryBuilder().setNameFormat("DatanodeAdminMonitor-%d")
             .setDaemon(true).build());
     outOfServiceNodeBlocks = new TreeMap<>();
-    pendingNodes = new LinkedList<>();
+    pendingNodes = new ArrayDeque<>();
   }
 
   /**
@@ -219,7 +220,7 @@ public class DatanodeAdminManager {
         pendingNodes.add(node);
       }
     } else {
-      LOG.trace("startDecommission: Node {} in {}, nothing to do." +
+      LOG.trace("startDecommission: Node {} in {}, nothing to do.",
           node, node.getAdminState());
     }
   }
@@ -242,7 +243,7 @@ public class DatanodeAdminManager {
       pendingNodes.remove(node);
       outOfServiceNodeBlocks.remove(node);
     } else {
-      LOG.trace("stopDecommission: Node {} in {}, nothing to do." +
+      LOG.trace("stopDecommission: Node {} in {}, nothing to do.",
           node, node.getAdminState());
     }
   }
@@ -272,7 +273,7 @@ public class DatanodeAdminManager {
       // IN_MAINTENANCE to support maintenance expiration.
       pendingNodes.add(node);
     } else {
-      LOG.trace("startMaintenance: Node {} in {}, nothing to do." +
+      LOG.trace("startMaintenance: Node {} in {}, nothing to do.",
           node, node.getAdminState());
     }
   }
@@ -321,7 +322,7 @@ public class DatanodeAdminManager {
       pendingNodes.remove(node);
       outOfServiceNodeBlocks.remove(node);
     } else {
-      LOG.trace("stopMaintenance: Node {} in {}, nothing to do." +
+      LOG.trace("stopMaintenance: Node {} in {}, nothing to do.",
           node, node.getAdminState());
     }
   }
@@ -395,7 +396,7 @@ public class DatanodeAdminManager {
     for (DatanodeStorageInfo storage : storages) {
       final DatanodeDescriptor node = storage.getDatanodeDescriptor();
       nodeList.append(node);
-      nodeList.append(" ");
+      nodeList.append(' ');
     }
     NameNode.blockStateChangeLog.info(
         "Block: " + block + ", Expected Replicas: "
@@ -517,7 +518,7 @@ public class DatanodeAdminManager {
       final Iterator<Map.Entry<DatanodeDescriptor, AbstractList<BlockInfo>>>
           it = new CyclicIteration<>(outOfServiceNodeBlocks,
               iterkey).iterator();
-      final LinkedList<DatanodeDescriptor> toRemove = new LinkedList<>();
+      final List<DatanodeDescriptor> toRemove = new ArrayList<>();
 
       while (it.hasNext() && !exceededNumBlocksPerCheck() && namesystem
           .isRunning()) {
@@ -583,12 +584,12 @@ public class DatanodeAdminManager {
                   "A node is in an invalid state!");
             }
             LOG.debug("Node {} is sufficiently replicated and healthy, "
-                + "marked as {}.", dn.getAdminState());
+                + "marked as {}.", dn, dn.getAdminState());
           } else {
             LOG.debug("Node {} {} healthy."
                 + " It needs to replicate {} more blocks."
                 + " {} is still in progress.", dn,
-                isHealthy? "is": "isn't", blocks.size(), dn.getAdminState());
+                isHealthy ? "is": "isn't", blocks.size(), dn.getAdminState());
           }
         } else {
           LOG.debug("Node {} still has {} blocks to replicate "
@@ -744,10 +745,10 @@ public class DatanodeAdminManager {
         lowRedundancyBlocks++;
         if (bc.isUnderConstruction()) {
           INode ucFile = namesystem.getFSDirectory().getInode(bc.getId());
-          if(!(ucFile instanceof  INodeFile) ||
+          if (!(ucFile instanceof  INodeFile) ||
               !ucFile.asFile().isUnderConstruction()) {
-            LOG.warn("File " + ucFile.getLocalName() + " is not under " +
-                "construction. Skipping add to low redundancy open files!");
+            LOG.warn("File {} is not under construction. Skipping add to " +
+                "low redundancy open files!", ucFile.getLocalName());
           } else {
             lowRedundancyBlocksInOpenFiles++;
             lowRedundancyOpenFiles.add(ucFile.getId());
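Besides swapping LinkedList for ArrayDeque/ArrayList (which avoid per-element node allocations for queue and scratch-list use), the patch fixes a common SLF4J mistake: the format string was concatenated with an argument using '+', leaving the '{}' placeholders short one argument. Restated outside the diff:

  // Before: '+' folds node into the format string, so the two {} placeholders
  // receive only one argument and the logged output is garbled.
  LOG.trace("startDecommission: Node {} in {}, nothing to do." + node,
      node.getAdminState());
  // After: both placeholders are bound, and the message is only assembled
  // when TRACE is actually enabled.
  LOG.trace("startDecommission: Node {} in {}, nothing to do.",
      node, node.getAdminState());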




[32/50] [abbrv] hadoop git commit: YARN-7732. Support Generic AM Simulator from SynthGenerator. (Contributed by Young Chen via curino)

Posted by ha...@apache.org.
YARN-7732. Support Generic AM Simulator from SynthGenerator. (Contributed by Young Chen via curino)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/84cea001
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/84cea001
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/84cea001

Branch: refs/heads/HDFS-12996
Commit: 84cea0011ffe510d24cf9f2952944f7a6fe622cf
Parents: 6f81cc0
Author: Carlo Curino <cu...@apache.org>
Authored: Tue Feb 20 17:00:34 2018 -0800
Committer: Carlo Curino <cu...@apache.org>
Committed: Tue Feb 20 17:00:34 2018 -0800

----------------------------------------------------------------------
 hadoop-tools/hadoop-sls/pom.xml                 |   2 +
 .../org/apache/hadoop/yarn/sls/SLSRunner.java   | 137 +++---
 .../hadoop/yarn/sls/appmaster/AMSimulator.java  |   2 +-
 .../yarn/sls/appmaster/MRAMSimulator.java       |   7 +-
 .../yarn/sls/appmaster/StreamAMSimulator.java   | 273 +++++++++++
 .../hadoop/yarn/sls/appmaster/package-info.java |  21 +
 .../hadoop/yarn/sls/synthetic/SynthJob.java     | 367 ++++++++------
 .../yarn/sls/synthetic/SynthJobClass.java       | 180 -------
 .../sls/synthetic/SynthTraceJobProducer.java    | 487 ++++++++++++++++---
 .../yarn/sls/synthetic/SynthWorkload.java       | 121 -----
 .../hadoop/yarn/sls/BaseSLSRunnerTest.java      |   2 +-
 .../hadoop/yarn/sls/TestSLSGenericSynth.java    |  76 +++
 .../hadoop/yarn/sls/TestSLSStreamAMSynth.java   |  76 +++
 .../hadoop/yarn/sls/TestSynthJobGeneration.java | 213 +++++++-
 .../yarn/sls/appmaster/TestAMSimulator.java     |   2 +-
 .../src/test/resources/sls-runner.xml           |   4 +
 .../hadoop-sls/src/test/resources/syn.json      |   2 +-
 .../src/test/resources/syn_generic.json         |  54 ++
 .../src/test/resources/syn_stream.json          |  46 ++
 19 files changed, 1430 insertions(+), 642 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/84cea001/hadoop-tools/hadoop-sls/pom.xml
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-sls/pom.xml b/hadoop-tools/hadoop-sls/pom.xml
index a7cb9b2..ef5ac54 100644
--- a/hadoop-tools/hadoop-sls/pom.xml
+++ b/hadoop-tools/hadoop-sls/pom.xml
@@ -133,6 +133,8 @@
             <exclude>src/test/resources/simulate.info.html.template</exclude>
             <exclude>src/test/resources/track.html.template</exclude>
             <exclude>src/test/resources/syn.json</exclude>
+            <exclude>src/test/resources/syn_generic.json</exclude>
+            <exclude>src/test/resources/syn_stream.json</exclude>
             <exclude>src/test/resources/inputsls.json</exclude>
             <exclude>src/test/resources/nodes.json</exclude>
             <exclude>src/test/resources/exit-invariants.txt</exclude>

http://git-wip-us.apache.org/repos/asf/hadoop/blob/84cea001/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/SLSRunner.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/SLSRunner.java b/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/SLSRunner.java
index 456602f..951c09d 100644
--- a/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/SLSRunner.java
+++ b/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/SLSRunner.java
@@ -47,13 +47,11 @@ import org.apache.hadoop.classification.InterfaceStability.Unstable;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.conf.Configured;
 import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.mapreduce.TaskType;
 import org.apache.hadoop.metrics2.source.JvmMetrics;
 import org.apache.hadoop.tools.rumen.JobTraceReader;
 import org.apache.hadoop.tools.rumen.LoggedJob;
 import org.apache.hadoop.tools.rumen.LoggedTask;
 import org.apache.hadoop.tools.rumen.LoggedTaskAttempt;
-import org.apache.hadoop.tools.rumen.TaskAttemptInfo;
 import org.apache.hadoop.util.ReflectionUtils;
 import org.apache.hadoop.util.Tool;
 import org.apache.hadoop.util.ToolRunner;
@@ -627,89 +625,66 @@ public class SLSRunner extends Configured implements Tool {
     localConf.set("fs.defaultFS", "file:///");
     long baselineTimeMS = 0;
 
-    try {
+    // if we use the nodeFile this could have been not initialized yet.
+    if (stjp == null) {
+      stjp = new SynthTraceJobProducer(getConf(), new Path(inputTraces[0]));
+    }
 
-      // if we use the nodeFile this could have been not initialized yet.
-      if (stjp == null) {
-        stjp = new SynthTraceJobProducer(getConf(), new Path(inputTraces[0]));
-      }
+    SynthJob job = null;
+    // we use stjp, a reference to the job producer instantiated during node
+    // creation
+    while ((job = (SynthJob) stjp.getNextJob()) != null) {
+      // only support MapReduce currently
+      String user = job.getUser();
+      String jobQueue = job.getQueueName();
+      String oldJobId = job.getJobID().toString();
+      long jobStartTimeMS = job.getSubmissionTime();
 
-      SynthJob job = null;
-      // we use stjp, a reference to the job producer instantiated during node
-      // creation
-      while ((job = (SynthJob) stjp.getNextJob()) != null) {
-        // only support MapReduce currently
-        String user = job.getUser();
-        String jobQueue = job.getQueueName();
-        String oldJobId = job.getJobID().toString();
-        long jobStartTimeMS = job.getSubmissionTime();
-
-        // CARLO: Finish time is only used for logging, omit for now
-        long jobFinishTimeMS = -1L;
-
-        if (baselineTimeMS == 0) {
-          baselineTimeMS = jobStartTimeMS;
-        }
-        jobStartTimeMS -= baselineTimeMS;
-        jobFinishTimeMS -= baselineTimeMS;
-        if (jobStartTimeMS < 0) {
-          LOG.warn("Warning: reset job {} start time to 0.", oldJobId);
-          jobFinishTimeMS = jobFinishTimeMS - jobStartTimeMS;
-          jobStartTimeMS = 0;
-        }
-
-        increaseQueueAppNum(jobQueue);
-
-        List<ContainerSimulator> containerList =
-            new ArrayList<ContainerSimulator>();
-        ArrayList<NodeId> keyAsArray = new ArrayList<NodeId>(nmMap.keySet());
-        Random rand = new Random(stjp.getSeed());
-
-        // map tasks
-        for (int i = 0; i < job.getNumberMaps(); i++) {
-          TaskAttemptInfo tai = job.getTaskAttemptInfo(TaskType.MAP, i, 0);
-          RMNode node =
-              nmMap.get(keyAsArray.get(rand.nextInt(keyAsArray.size())))
-                  .getNode();
-          String hostname = "/" + node.getRackName() + "/" + node.getHostName();
-          long containerLifeTime = tai.getRuntime();
-          Resource containerResource =
-              Resource.newInstance((int) tai.getTaskInfo().getTaskMemory(),
-                  (int) tai.getTaskInfo().getTaskVCores());
-          containerList.add(new ContainerSimulator(containerResource,
-              containerLifeTime, hostname, DEFAULT_MAPPER_PRIORITY, "map"));
-        }
+      // CARLO: Finish time is only used for logging, omit for now
+      long jobFinishTimeMS = jobStartTimeMS + job.getDuration();
 
-        // reduce tasks
-        for (int i = 0; i < job.getNumberReduces(); i++) {
-          TaskAttemptInfo tai = job.getTaskAttemptInfo(TaskType.REDUCE, i, 0);
-          RMNode node =
-              nmMap.get(keyAsArray.get(rand.nextInt(keyAsArray.size())))
-                  .getNode();
-          String hostname = "/" + node.getRackName() + "/" + node.getHostName();
-          long containerLifeTime = tai.getRuntime();
-          Resource containerResource =
-              Resource.newInstance((int) tai.getTaskInfo().getTaskMemory(),
-                  (int) tai.getTaskInfo().getTaskVCores());
-          containerList.add(
-              new ContainerSimulator(containerResource, containerLifeTime,
-                  hostname, DEFAULT_REDUCER_PRIORITY, "reduce"));
-        }
+      if (baselineTimeMS == 0) {
+        baselineTimeMS = jobStartTimeMS;
+      }
+      jobStartTimeMS -= baselineTimeMS;
+      jobFinishTimeMS -= baselineTimeMS;
+      if (jobStartTimeMS < 0) {
+        LOG.warn("Warning: reset job {} start time to 0.", oldJobId);
+        jobFinishTimeMS = jobFinishTimeMS - jobStartTimeMS;
+        jobStartTimeMS = 0;
+      }
 
-        ReservationId reservationId = null;
+      increaseQueueAppNum(jobQueue);
+
+      List<ContainerSimulator> containerList =
+          new ArrayList<ContainerSimulator>();
+      ArrayList<NodeId> keyAsArray = new ArrayList<NodeId>(nmMap.keySet());
+      Random rand = new Random(stjp.getSeed());
+
+      for (SynthJob.SynthTask task : job.getTasks()) {
+        RMNode node = nmMap.get(keyAsArray.get(rand.nextInt(keyAsArray.size())))
+            .getNode();
+        String hostname = "/" + node.getRackName() + "/" + node.getHostName();
+        long containerLifeTime = task.getTime();
+        Resource containerResource = Resource
+            .newInstance((int) task.getMemory(), (int) task.getVcores());
+        containerList.add(
+            new ContainerSimulator(containerResource, containerLifeTime,
+                hostname, task.getPriority(), task.getType()));
+      }
 
-        if (job.hasDeadline()) {
-          reservationId =
-              ReservationId.newInstance(this.rm.getStartTime(), AM_ID);
-        }
 
-        runNewAM(SLSUtils.DEFAULT_JOB_TYPE, user, jobQueue, oldJobId,
-            jobStartTimeMS, jobFinishTimeMS, containerList, reservationId,
-            job.getDeadline(), getAMContainerResource(null));
+      ReservationId reservationId = null;
 
+      if(job.hasDeadline()){
+        reservationId = ReservationId
+            .newInstance(this.rm.getStartTime(), AM_ID);
       }
-    } finally {
-      stjp.close();
+
+      runNewAM(job.getType(), user, jobQueue, oldJobId,
+          jobStartTimeMS, jobFinishTimeMS, containerList, reservationId,
+          job.getDeadline(), getAMContainerResource(null),
+          job.getParams());
     }
   }
 
@@ -753,14 +728,14 @@ public class SLSRunner extends Configured implements Tool {
       Resource amContainerResource) {
     runNewAM(jobType, user, jobQueue, oldJobId, jobStartTimeMS,
         jobFinishTimeMS, containerList, null,  -1,
-        amContainerResource);
+        amContainerResource, null);
   }
 
   private void runNewAM(String jobType, String user,
       String jobQueue, String oldJobId, long jobStartTimeMS,
       long jobFinishTimeMS, List<ContainerSimulator> containerList,
-      ReservationId reservationId, long deadline,
-      Resource amContainerResource) {
+      ReservationId reservationId, long deadline, Resource amContainerResource,
+      Map<String, String> params) {
 
     AMSimulator amSim = (AMSimulator) ReflectionUtils.newInstance(
         amClassMap.get(jobType), new Configuration());
@@ -777,7 +752,7 @@ public class SLSRunner extends Configured implements Tool {
       AM_ID++;
       amSim.init(heartbeatInterval, containerList, rm, this, jobStartTimeMS,
           jobFinishTimeMS, user, jobQueue, isTracked, oldJobId,
-          runner.getStartTimeMS(), amContainerResource);
+          runner.getStartTimeMS(), amContainerResource, params);
       if(reservationId != null) {
         // if we have a ReservationId, delegate reservation creation to
         // AMSim (reservation shape is impl specific)
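One detail worth tracing in the rewritten loop above is the baseline handling: the first submission time observed becomes time zero, and every later job keeps only its offset from that baseline. A self-contained illustration with assumed numbers:

  // Hedged sketch of the baselineTimeMS normalization (numbers are made up).
  long baselineTimeMS = 0;
  for (long jobStartTimeMS : new long[] {5_000L, 7_000L, 12_000L}) {
    if (baselineTimeMS == 0) {
      baselineTimeMS = jobStartTimeMS;
    }
    System.out.println(jobStartTimeMS - baselineTimeMS); // prints 0, 2000, 7000
  }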

http://git-wip-us.apache.org/repos/asf/hadoop/blob/84cea001/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/appmaster/AMSimulator.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/appmaster/AMSimulator.java b/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/appmaster/AMSimulator.java
index 5727b5f..bf85fff 100644
--- a/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/appmaster/AMSimulator.java
+++ b/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/appmaster/AMSimulator.java
@@ -121,7 +121,7 @@ public abstract class AMSimulator extends TaskRunner.Task {
       List<ContainerSimulator> containerList, ResourceManager resourceManager,
       SLSRunner slsRunnner, long startTime, long finishTime, String simUser,
       String simQueue, boolean tracked, String oldApp, long baseTimeMS,
-      Resource amResource) {
+      Resource amResource, Map<String, String> params) {
     super.init(startTime, startTime + 1000000L * heartbeatInterval,
         heartbeatInterval);
     this.user = simUser;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/84cea001/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/appmaster/MRAMSimulator.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/appmaster/MRAMSimulator.java b/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/appmaster/MRAMSimulator.java
index 18a155c..6f0f85f 100644
--- a/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/appmaster/MRAMSimulator.java
+++ b/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/appmaster/MRAMSimulator.java
@@ -65,6 +65,9 @@ public class MRAMSimulator extends AMSimulator {
   scheduled when all maps have finished (not support slow-start currently).
   */
 
+  public static final String MAP_TYPE = "map";
+  public static final String REDUCE_TYPE = "reduce";
+
   private static final int PRIORITY_REDUCE = 10;
   private static final int PRIORITY_MAP = 20;
 
@@ -123,10 +126,10 @@ public class MRAMSimulator extends AMSimulator {
       List<ContainerSimulator> containerList, ResourceManager rm, SLSRunner se,
       long traceStartTime, long traceFinishTime, String user, String queue,
       boolean isTracked, String oldAppId, long baselineStartTimeMS,
-      Resource amContainerResource) {
+      Resource amContainerResource, Map<String, String> params) {
     super.init(heartbeatInterval, containerList, rm, se,
         traceStartTime, traceFinishTime, user, queue, isTracked, oldAppId,
-        baselineStartTimeMS, amContainerResource);
+        baselineStartTimeMS, amContainerResource, params);
     amtype = "mapreduce";
 
     // get map/reduce tasks

http://git-wip-us.apache.org/repos/asf/hadoop/blob/84cea001/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/appmaster/StreamAMSimulator.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/appmaster/StreamAMSimulator.java b/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/appmaster/StreamAMSimulator.java
new file mode 100644
index 0000000..b41f5f2
--- /dev/null
+++ b/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/appmaster/StreamAMSimulator.java
@@ -0,0 +1,273 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.sls.appmaster;
+
+import org.apache.hadoop.classification.InterfaceAudience.Private;
+import org.apache.hadoop.classification.InterfaceStability.Unstable;
+import org.apache.hadoop.security.UserGroupInformation;
+import org.apache.hadoop.security.token.Token;
+import org.apache.hadoop.yarn.api.protocolrecords.AllocateRequest;
+import org.apache.hadoop.yarn.api.protocolrecords.AllocateResponse;
+import org.apache.hadoop.yarn.api.records.Container;
+import org.apache.hadoop.yarn.api.records.ContainerExitStatus;
+import org.apache.hadoop.yarn.api.records.ContainerId;
+import org.apache.hadoop.yarn.api.records.ContainerStatus;
+import org.apache.hadoop.yarn.api.records.ReservationId;
+import org.apache.hadoop.yarn.api.records.Resource;
+import org.apache.hadoop.yarn.api.records.ResourceRequest;
+import org.apache.hadoop.yarn.exceptions.YarnException;
+import org.apache.hadoop.yarn.security.AMRMTokenIdentifier;
+import org.apache.hadoop.yarn.server.resourcemanager.ResourceManager;
+import org.apache.hadoop.yarn.sls.SLSRunner;
+import org.apache.hadoop.yarn.sls.scheduler.ContainerSimulator;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.io.IOException;
+import java.security.PrivilegedExceptionAction;
+import java.util.ArrayList;
+import java.util.HashMap;
+import java.util.LinkedList;
+import java.util.List;
+import java.util.Map;
+
+/**
+ * AMSimulator that simulates streaming services - it keeps tasks
+ * running and resubmits them whenever they fail or complete. It finishes
+ * when the specified duration expires.
+ */
+
+@Private
+@Unstable
+public class StreamAMSimulator extends AMSimulator {
+  /*
+  Vocabulary Used:
+  pending -> requests which are NOT yet sent to RM
+  scheduled -> requests which are sent to RM but not yet assigned
+  assigned -> requests which are assigned to a container
+  completed -> request corresponding to which container has completed
+
+  streams are constantly scheduled. If a streaming job is killed, we restart it
+  */
+
+  private static final int PRIORITY_MAP = 20;
+
+  // pending streams
+  private LinkedList<ContainerSimulator> pendingStreams =
+          new LinkedList<>();
+
+  // scheduled streams
+  private LinkedList<ContainerSimulator> scheduledStreams =
+          new LinkedList<ContainerSimulator>();
+
+  // assigned streams
+  private Map<ContainerId, ContainerSimulator> assignedStreams =
+          new HashMap<ContainerId, ContainerSimulator>();
+
+  // all streams
+  private LinkedList<ContainerSimulator> allStreams =
+          new LinkedList<ContainerSimulator>();
+
+  // finished
+  private boolean isFinished = false;
+  private long duration = 0;
+
+  private static final Logger LOG =
+      LoggerFactory.getLogger(StreamAMSimulator.class);
+
+  @SuppressWarnings("checkstyle:parameternumber")
+  public void init(int heartbeatInterval,
+      List<ContainerSimulator> containerList, ResourceManager rm, SLSRunner se,
+      long traceStartTime, long traceFinishTime, String user, String queue,
+      boolean isTracked, String oldAppId, long baselineStartTimeMS,
+      Resource amContainerResource, Map<String, String> params) {
+    super.init(heartbeatInterval, containerList, rm, se, traceStartTime,
+        traceFinishTime, user, queue, isTracked, oldAppId, baselineStartTimeMS,
+        amContainerResource, params);
+    amtype = "stream";
+
+    allStreams.addAll(containerList);
+
+    duration = traceFinishTime - traceStartTime;
+
+    LOG.info("Added new job with {} streams, running for {}",
+        allStreams.size(), duration);
+  }
+
+  @Override
+  public synchronized void notifyAMContainerLaunched(Container masterContainer)
+      throws Exception {
+    if (null != masterContainer) {
+      restart();
+      super.notifyAMContainerLaunched(masterContainer);
+    }
+  }
+
+  @Override
+  @SuppressWarnings("unchecked")
+  protected void processResponseQueue() throws Exception {
+    while (!responseQueue.isEmpty()) {
+      AllocateResponse response = responseQueue.take();
+
+      // check completed containers
+      if (!response.getCompletedContainersStatuses().isEmpty()) {
+        for (ContainerStatus cs : response.getCompletedContainersStatuses()) {
+          ContainerId containerId = cs.getContainerId();
+          if(assignedStreams.containsKey(containerId)){
+            // One of our containers completed. Regardless of reason,
+            // we want to maintain our streaming process
+            LOG.debug("Application {} has one streamer finished ({}).", appId,
+                containerId);
+            pendingStreams.add(assignedStreams.remove(containerId));
+          } else if (amContainer.getId().equals(containerId)){
+            // Our am container completed
+            if(cs.getExitStatus() == ContainerExitStatus.SUCCESS){
+              // am container released event (am container completed on success)
+              isAMContainerRunning = false;
+              isFinished = true;
+              LOG.info("Application {} goes to finish.", appId);
+            } else {
+              // am container killed - wait for reallocation
+              LOG.info("Application {}'s AM is "
+                  + "going to be killed. Waiting for rescheduling...", appId);
+              isAMContainerRunning = false;
+            }
+          }
+        }
+      }
+
+      // check finished
+      if (isAMContainerRunning &&
+          (System.currentTimeMillis() - simulateStartTimeMS >= duration)) {
+        LOG.debug("Application {} sends out event to clean up"
+                + " its AM container.", appId);
+        isAMContainerRunning = false;
+        isFinished = true;
+        break;
+      }
+
+      // check allocated containers
+      for (Container container : response.getAllocatedContainers()) {
+        if (!scheduledStreams.isEmpty()) {
+          ContainerSimulator cs = scheduledStreams.remove();
+          LOG.debug("Application {} starts to launch a stream ({}).", appId,
+              container.getId());
+          assignedStreams.put(container.getId(), cs);
+          se.getNmMap().get(container.getNodeId()).addNewContainer(container,
+              cs.getLifeTime());
+        }
+      }
+    }
+  }
+
+  /**
+   * Restart the streams, e.g. after the AM container was killed.
+   */
+  private void restart()
+          throws YarnException, IOException, InterruptedException {
+    // clear
+    isFinished = false;
+    pendingStreams.clear();
+    pendingStreams.addAll(allStreams);
+
+    amContainer = null;
+  }
+
+  private List<ContainerSimulator> mergeLists(List<ContainerSimulator> left,
+      List<ContainerSimulator> right) {
+    List<ContainerSimulator> list = new ArrayList<>();
+    list.addAll(left);
+    list.addAll(right);
+    return list;
+  }
+
+  @Override
+  protected void sendContainerRequest()
+          throws YarnException, IOException, InterruptedException {
+
+    // send out request
+    List<ResourceRequest> ask = new ArrayList<>();
+    List<ContainerId> release = new ArrayList<>();
+    if (!isFinished) {
+      if (!pendingStreams.isEmpty()) {
+        ask = packageRequests(mergeLists(pendingStreams, scheduledStreams),
+            PRIORITY_MAP);
+        LOG.debug("Application {} sends out request for {} streams.",
+            appId, pendingStreams.size());
+        scheduledStreams.addAll(pendingStreams);
+        pendingStreams.clear();
+      }
+    }
+
+    if(isFinished){
+      release.addAll(assignedStreams.keySet());
+      ask.clear();
+    }
+
+    final AllocateRequest request = createAllocateRequest(ask, release);
+    if (totalContainers == 0) {
+      request.setProgress(1.0f);
+    } else {
+      request.setProgress((float) finishedContainers / totalContainers);
+    }
+
+    UserGroupInformation ugi =
+        UserGroupInformation.createRemoteUser(appAttemptId.toString());
+    Token<AMRMTokenIdentifier> token = rm.getRMContext().getRMApps()
+        .get(appAttemptId.getApplicationId())
+        .getRMAppAttempt(appAttemptId).getAMRMToken();
+    ugi.addTokenIdentifier(token.decodeIdentifier());
+    AllocateResponse response = ugi.doAs(
+        new PrivilegedExceptionAction<AllocateResponse>() {
+          @Override
+          public AllocateResponse run() throws Exception {
+            return rm.getApplicationMasterService().allocate(request);
+          }
+        });
+    if (response != null) {
+      responseQueue.put(response);
+    }
+  }
+
+  @Override
+  public void initReservation(
+      ReservationId reservationId, long deadline, long now){
+    // Streaming AM currently doesn't do reservations
+    setReservationRequest(null);
+  }
+
+  @Override
+  protected void checkStop() {
+    if (isFinished) {
+      super.setEndTime(System.currentTimeMillis());
+    }
+  }
+
+  @Override
+  public void lastStep() throws Exception {
+    super.lastStep();
+
+    // clear data structures
+    allStreams.clear();
+    assignedStreams.clear();
+    pendingStreams.clear();
+    scheduledStreams.clear();
+    responseQueue.clear();
+  }
+}
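To summarize the control flow of the class above: a completed stream container is simply re-queued until the job's wall-clock duration expires, at which point everything is released. A self-contained sketch with illustrative names (this is not the SLS API):

  import java.util.ArrayDeque;
  import java.util.Arrays;
  import java.util.Queue;

  public class StreamKeepAliveSketch {
    public static void main(String[] args) {
      Queue<String> pending = new ArrayDeque<>(Arrays.asList("stream-0", "stream-1"));
      long start = System.currentTimeMillis();
      long durationMs = 50L;  // stands in for traceFinishTime - traceStartTime
      while (System.currentTimeMillis() - start < durationMs) {
        String finished = pending.poll();  // stand-in for a completed container
        if (finished != null) {
          pending.add(finished);  // resubmit, as processResponseQueue does
        }
      }
      pending.clear();  // duration expired: release all streams (isFinished path)
    }
  }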

http://git-wip-us.apache.org/repos/asf/hadoop/blob/84cea001/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/appmaster/package-info.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/appmaster/package-info.java b/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/appmaster/package-info.java
new file mode 100644
index 0000000..ead315b
--- /dev/null
+++ b/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/appmaster/package-info.java
@@ -0,0 +1,21 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/**
+ * Application Master simulators for the SLS.
+ */
+package org.apache.hadoop.yarn.sls.appmaster;
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hadoop/blob/84cea001/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/synthetic/SynthJob.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/synthetic/SynthJob.java b/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/synthetic/SynthJob.java
index 3ed81e1..27156c7 100644
--- a/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/synthetic/SynthJob.java
+++ b/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/synthetic/SynthJob.java
@@ -19,19 +19,25 @@ package org.apache.hadoop.yarn.sls.synthetic;
 
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
-import org.apache.commons.math3.distribution.LogNormalDistribution;
 import org.apache.commons.math3.random.JDKRandomGenerator;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.mapred.JobConf;
 import org.apache.hadoop.mapred.TaskStatus.State;
 import org.apache.hadoop.mapreduce.InputSplit;
 import org.apache.hadoop.mapreduce.JobID;
-import org.apache.hadoop.mapreduce.MRJobConfig;
 import org.apache.hadoop.mapreduce.TaskType;
-import org.apache.hadoop.tools.rumen.*;
+import org.apache.hadoop.tools.rumen.JobStory;
+import org.apache.hadoop.tools.rumen.MapTaskAttemptInfo;
+import org.apache.hadoop.tools.rumen.ReduceTaskAttemptInfo;
+import org.apache.hadoop.tools.rumen.TaskAttemptInfo;
+import org.apache.hadoop.tools.rumen.TaskInfo;
 import org.apache.hadoop.tools.rumen.Pre21JobHistoryConstants.Values;
+import org.apache.hadoop.yarn.sls.appmaster.MRAMSimulator;
 
-import java.util.Arrays;
+import java.util.ArrayList;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
 import java.util.concurrent.atomic.AtomicInteger;
 
 import static java.util.concurrent.TimeUnit.MILLISECONDS;
@@ -46,6 +52,9 @@ public class SynthJob implements JobStory {
   @SuppressWarnings("StaticVariableName")
   private static Log LOG = LogFactory.getLog(SynthJob.class);
 
+  private static final long MIN_MEMORY = 1024;
+  private static final long MIN_VCORES = 1;
+
   private final Configuration conf;
   private final int id;
 
@@ -53,75 +62,93 @@ public class SynthJob implements JobStory {
   private static final AtomicInteger sequence = new AtomicInteger(0);
   private final String name;
   private final String queueName;
-  private final SynthJobClass jobClass;
+  private final SynthTraceJobProducer.JobDefinition jobDef;
+
+  private String type;
 
   // job timing
   private final long submitTime;
   private final long duration;
   private final long deadline;
 
-  private final int numMapTasks;
-  private final int numRedTasks;
-  private final long mapMaxMemory;
-  private final long reduceMaxMemory;
-  private final long mapMaxVcores;
-  private final long reduceMaxVcores;
-  private final long[] mapRuntime;
-  private final float[] reduceRuntime;
-  private long totMapRuntime;
-  private long totRedRuntime;
+  private Map<String, String> params;
+
+  private long totalSlotTime = 0;
+
+  // task information
+  private List<SynthTask> tasks = new ArrayList<>();
+  private Map<String, List<SynthTask>> taskByType = new HashMap<>();
+  private Map<String, Integer> taskCounts = new HashMap<>();
+  private Map<String, Long> taskMemory = new HashMap<>();
+  private Map<String, Long> taskVcores = new HashMap<>();
+
+  /**
+   * Nested class used to represent a task instance in a job. Each task
+   * corresponds to one container allocation for the job.
+   */
+  public static final class SynthTask{
+    private String type;
+    private long time;
+    private long maxMemory;
+    private long maxVcores;
+    private int priority;
+
+    private SynthTask(String type, long time, long maxMemory, long maxVcores,
+        int priority){
+      this.type = type;
+      this.time = time;
+      this.maxMemory = maxMemory;
+      this.maxVcores = maxVcores;
+      this.priority = priority;
+    }
+
+    public String getType(){
+      return type;
+    }
 
-  public SynthJob(JDKRandomGenerator rand, Configuration conf,
-      SynthJobClass jobClass, long actualSubmissionTime) {
+    public long getTime(){
+      return time;
+    }
 
-    this.conf = conf;
-    this.jobClass = jobClass;
-
-    this.duration = MILLISECONDS.convert(jobClass.getDur(), SECONDS);
-    this.numMapTasks = jobClass.getMtasks();
-    this.numRedTasks = jobClass.getRtasks();
-
-    // sample memory distributions, correct for sub-minAlloc sizes
-    long tempMapMaxMemory = jobClass.getMapMaxMemory();
-    this.mapMaxMemory = tempMapMaxMemory < MRJobConfig.DEFAULT_MAP_MEMORY_MB
-        ? MRJobConfig.DEFAULT_MAP_MEMORY_MB : tempMapMaxMemory;
-    long tempReduceMaxMemory = jobClass.getReduceMaxMemory();
-    this.reduceMaxMemory =
-            tempReduceMaxMemory < MRJobConfig.DEFAULT_REDUCE_MEMORY_MB
-            ? MRJobConfig.DEFAULT_REDUCE_MEMORY_MB : tempReduceMaxMemory;
-
-    // sample vcores distributions, correct for sub-minAlloc sizes
-    long tempMapMaxVCores = jobClass.getMapMaxVcores();
-    this.mapMaxVcores = tempMapMaxVCores < MRJobConfig.DEFAULT_MAP_CPU_VCORES
-        ? MRJobConfig.DEFAULT_MAP_CPU_VCORES : tempMapMaxVCores;
-    long tempReduceMaxVcores = jobClass.getReduceMaxVcores();
-    this.reduceMaxVcores =
-        tempReduceMaxVcores < MRJobConfig.DEFAULT_REDUCE_CPU_VCORES
-            ? MRJobConfig.DEFAULT_REDUCE_CPU_VCORES : tempReduceMaxVcores;
-
-    if (numMapTasks > 0) {
-      conf.setLong(MRJobConfig.MAP_MEMORY_MB, this.mapMaxMemory);
-      conf.set(MRJobConfig.MAP_JAVA_OPTS,
-          "-Xmx" + (this.mapMaxMemory - 100) + "m");
+    public long getMemory(){
+      return maxMemory;
     }
 
-    if (numRedTasks > 0) {
-      conf.setLong(MRJobConfig.REDUCE_MEMORY_MB, this.reduceMaxMemory);
-      conf.set(MRJobConfig.REDUCE_JAVA_OPTS,
-          "-Xmx" + (this.reduceMaxMemory - 100) + "m");
+    public long getVcores(){
+      return maxVcores;
     }
 
-    boolean hasDeadline =
-        (rand.nextDouble() <= jobClass.jobClass.chance_of_reservation);
+    public int getPriority(){
+      return priority;
+    }
+
+    @Override
+    public String toString(){
+      return String.format("[task]\ttype: %1$-10s\ttime: %2$3s\tmemory: "
+              + "%3$4s\tvcores: %4$2s%n", getType(), getTime(), getMemory(),
+          getVcores());
+    }
+  }
 
-    LogNormalDistribution deadlineFactor =
-        SynthUtils.getLogNormalDist(rand, jobClass.jobClass.deadline_factor_avg,
-            jobClass.jobClass.deadline_factor_stddev);
 
-    double deadlineFactorSample =
-        (deadlineFactor != null) ? deadlineFactor.sample() : -1;
+  protected SynthJob(JDKRandomGenerator rand, Configuration conf,
+      SynthTraceJobProducer.JobDefinition jobDef,
+      String queue, long actualSubmissionTime) {
 
-    this.queueName = jobClass.workload.getQueueName();
+    this.conf = conf;
+    this.jobDef = jobDef;
+
+    this.queueName = queue;
+
+    this.duration = MILLISECONDS.convert(jobDef.duration.getInt(),
+        SECONDS);
+
+    boolean hasDeadline =
+        (rand.nextDouble() <= jobDef.reservation.getDouble());
+
+    double deadlineFactorSample = jobDef.deadline_factor.getDouble();
+
+    this.type = jobDef.type;
 
     this.submitTime = MILLISECONDS.convert(actualSubmissionTime, SECONDS);
 
@@ -129,6 +156,8 @@ public class SynthJob implements JobStory {
         hasDeadline ? MILLISECONDS.convert(actualSubmissionTime, SECONDS)
             + (long) Math.ceil(deadlineFactorSample * duration) : -1;
 
+    this.params = jobDef.params;
+
     conf.set(QUEUE_NAME, queueName);
 
     // name and initialize job randomness
@@ -136,79 +165,166 @@ public class SynthJob implements JobStory {
     rand.setSeed(seed);
     id = sequence.getAndIncrement();
 
-    name = String.format(jobClass.getClassName() + "_%06d", id);
+    name = String.format(jobDef.class_name + "_%06d", id);
     LOG.debug(name + " (" + seed + ")");
 
     LOG.info("JOB TIMING`: job: " + name + " submission:" + submitTime
         + " deadline:" + deadline + " duration:" + duration
         + " deadline-submission: " + (deadline - submitTime));
 
-    // generate map and reduce runtimes
-    mapRuntime = new long[numMapTasks];
-    for (int i = 0; i < numMapTasks; i++) {
-      mapRuntime[i] = jobClass.getMapTimeSample();
-      totMapRuntime += mapRuntime[i];
-    }
-    reduceRuntime = new float[numRedTasks];
-    for (int i = 0; i < numRedTasks; i++) {
-      reduceRuntime[i] = jobClass.getReduceTimeSample();
-      totRedRuntime += (long) Math.ceil(reduceRuntime[i]);
+    // Expand tasks
+    for(SynthTraceJobProducer.TaskDefinition task : jobDef.tasks){
+      int num = task.count.getInt();
+      String taskType = task.type;
+      long memory = task.max_memory.getLong();
+      memory = memory < MIN_MEMORY ? MIN_MEMORY: memory;
+      long vcores = task.max_vcores.getLong();
+      vcores = vcores < MIN_VCORES ? MIN_VCORES  : vcores;
+      int priority = task.priority;
+
+      // Save task information by type
+      taskByType.put(taskType, new ArrayList<>());
+      taskCounts.put(taskType, num);
+      taskMemory.put(taskType, memory);
+      taskVcores.put(taskType, vcores);
+
+      for(int i = 0; i < num; ++i){
+        long time = task.time.getLong();
+        totalSlotTime += time;
+        SynthTask t = new SynthTask(taskType, time, memory, vcores,
+            priority);
+        tasks.add(t);
+        taskByType.get(taskType).add(t);
+      }
     }
+
+  }
+
+  public String getType(){
+    return type;
+  }
+
+  public List<SynthTask> getTasks(){
+    return tasks;
   }
 
   public boolean hasDeadline() {
     return deadline > 0;
   }
 
-  @Override
   public String getName() {
     return name;
   }
 
-  @Override
   public String getUser() {
-    return jobClass.getUserName();
+    return jobDef.user_name;
   }
 
-  @Override
   public JobID getJobID() {
     return new JobID("job_mock_" + name, id);
   }
 
+  public long getSubmissionTime() {
+    return submitTime;
+  }
+
+  public String getQueueName() {
+    return queueName;
+  }
+
   @Override
-  public Values getOutcome() {
-    return Values.SUCCESS;
+  public String toString() {
+    StringBuilder sb = new StringBuilder();
+    String res = "\nSynthJob [" + jobDef.class_name + "]: \n"
+        + "\tname: " + getName() + "\n"
+        + "\ttype: " + getType() + "\n"
+        + "\tid: " + id + "\n"
+        + "\tqueue: " + getQueueName() + "\n"
+        + "\tsubmission: " + getSubmissionTime() + "\n"
+        + "\tduration: " + getDuration() + "\n"
+        + "\tdeadline: " + getDeadline() + "\n";
+    sb.append(res);
+    int taskno = 0;
+    for(SynthJob.SynthTask t : getTasks()){
+      sb.append("\t");
+      sb.append(taskno);
+      sb.append(": \t");
+      sb.append(t.toString());
+      taskno++;
+    }
+    return sb.toString();
+  }
+
+  public long getTotalSlotTime() {
+    return totalSlotTime;
+  }
+
+  public long getDuration() {
+    return duration;
+  }
+
+  public long getDeadline() {
+    return deadline;
+  }
+
+  public Map<String, String> getParams() {
+    return params;
   }
 
   @Override
-  public long getSubmissionTime() {
-    return submitTime;
+  public boolean equals(Object other) {
+    if (!(other instanceof SynthJob)) {
+      return false;
+    }
+    SynthJob o = (SynthJob) other;
+    return tasks.equals(o.tasks)
+        && submitTime == o.submitTime
+        && type.equals(o.type)
+        && queueName.equals(o.queueName)
+        && jobDef.class_name.equals(o.jobDef.class_name);
+  }
+
+  @Override
+  public int hashCode() {
+    return jobDef.class_name.hashCode()
+        * (int) submitTime * (int) duration;
+  }
+
+
+  @Override
+  public JobConf getJobConf() {
+    return new JobConf(conf);
   }
 
   @Override
   public int getNumberMaps() {
-    return numMapTasks;
+    return taskCounts.get(MRAMSimulator.MAP_TYPE);
   }
 
   @Override
   public int getNumberReduces() {
-    return numRedTasks;
+    return taskCounts.get(MRAMSimulator.REDUCE_TYPE);
+  }
+
+  @Override
+  public InputSplit[] getInputSplits() {
+    throw new UnsupportedOperationException();
   }
 
   @Override
   public TaskInfo getTaskInfo(TaskType taskType, int taskNumber) {
-    switch (taskType) {
+    switch(taskType){
     case MAP:
-      return new TaskInfo(-1, -1, -1, -1, mapMaxMemory, mapMaxVcores);
+      return new TaskInfo(-1, -1, -1, -1,
+          taskMemory.get(MRAMSimulator.MAP_TYPE),
+          taskVcores.get(MRAMSimulator.MAP_TYPE));
     case REDUCE:
-      return new TaskInfo(-1, -1, -1, -1, reduceMaxMemory, reduceMaxVcores);
+      return new TaskInfo(-1, -1, -1, -1,
+          taskMemory.get(MRAMSimulator.REDUCE_TYPE),
+          taskVcores.get(MRAMSimulator.REDUCE_TYPE));
     default:
-      throw new IllegalArgumentException("Not interested");
+      break;
     }
-  }
-
-  @Override
-  public InputSplit[] getInputSplits() {
     throw new UnsupportedOperationException();
   }
 
@@ -218,17 +334,20 @@ public class SynthJob implements JobStory {
     switch (taskType) {
     case MAP:
       return new MapTaskAttemptInfo(State.SUCCEEDED,
-          getTaskInfo(taskType, taskNumber), mapRuntime[taskNumber], null);
-
+          getTaskInfo(taskType, taskNumber),
+          taskByType.get(MRAMSimulator.MAP_TYPE).get(taskNumber).time,
+          null);
     case REDUCE:
       // We assume uniform split between pull/sort/reduce
       // aligned with naive progress reporting assumptions
       return new ReduceTaskAttemptInfo(State.SUCCEEDED,
           getTaskInfo(taskType, taskNumber),
-          (long) Math.round((reduceRuntime[taskNumber] / 3)),
-          (long) Math.round((reduceRuntime[taskNumber] / 3)),
-          (long) Math.round((reduceRuntime[taskNumber] / 3)), null);
-
+          taskByType.get(MRAMSimulator.MAP_TYPE)
+              .get(taskNumber).time / 3,
+          taskByType.get(MRAMSimulator.MAP_TYPE)
+              .get(taskNumber).time / 3,
+          taskByType.get(MRAMSimulator.MAP_TYPE)
+              .get(taskNumber).time / 3, null);
     default:
       break;
     }
@@ -242,65 +361,7 @@ public class SynthJob implements JobStory {
   }
 
   @Override
-  public org.apache.hadoop.mapred.JobConf getJobConf() {
-    return new JobConf(conf);
-  }
-
-  @Override
-  public String getQueueName() {
-    return queueName;
-  }
-
-  @Override
-  public String toString() {
-    return "SynthJob [\n" + "  workload=" + jobClass.getWorkload().getId()
-        + "\n" + "  jobClass="
-        + jobClass.getWorkload().getClassList().indexOf(jobClass) + "\n"
-        + "  conf=" + conf + ",\n" + "  id=" + id + ",\n" + "  name=" + name
-        + ",\n" + "  mapRuntime=" + Arrays.toString(mapRuntime) + ",\n"
-        + "  reduceRuntime=" + Arrays.toString(reduceRuntime) + ",\n"
-        + "  submitTime=" + submitTime + ",\n" + "  numMapTasks=" + numMapTasks
-        + ",\n" + "  numRedTasks=" + numRedTasks + ",\n" + "  mapMaxMemory="
-        + mapMaxMemory + ",\n" + "  reduceMaxMemory=" + reduceMaxMemory + ",\n"
-        + "  queueName=" + queueName + "\n" + "]";
-  }
-
-  public SynthJobClass getJobClass() {
-    return jobClass;
-  }
-
-  public long getTotalSlotTime() {
-    return totMapRuntime + totRedRuntime;
-  }
-
-  public long getDuration() {
-    return duration;
-  }
-
-  public long getDeadline() {
-    return deadline;
-  }
-
-  @Override
-  public boolean equals(Object other) {
-    if (!(other instanceof SynthJob)) {
-      return false;
-    }
-    SynthJob o = (SynthJob) other;
-    return Arrays.equals(mapRuntime, o.mapRuntime)
-        && Arrays.equals(reduceRuntime, o.reduceRuntime)
-        && submitTime == o.submitTime && numMapTasks == o.numMapTasks
-        && numRedTasks == o.numRedTasks && mapMaxMemory == o.mapMaxMemory
-        && reduceMaxMemory == o.reduceMaxMemory
-        && mapMaxVcores == o.mapMaxVcores
-        && reduceMaxVcores == o.reduceMaxVcores && queueName.equals(o.queueName)
-        && jobClass.equals(o.jobClass) && totMapRuntime == o.totMapRuntime
-        && totRedRuntime == o.totRedRuntime;
-  }
-
-  @Override
-  public int hashCode() {
-    // could have a bad distr; investigate if a relevant use case exists
-    return jobClass.hashCode() * (int) submitTime;
+  public Values getOutcome() {
+    return Values.SUCCESS;
   }
 }
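A quick worked example of the deadline arithmetic in the constructor above, deadline = submitTime + ceil(deadlineFactorSample * duration), all in milliseconds (the numbers are assumed):

  long submitTime = 10_000L;
  long duration = 60_000L;
  double deadlineFactorSample = 1.5;
  long deadline = submitTime + (long) Math.ceil(deadlineFactorSample * duration);
  // deadline == 100_000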

http://git-wip-us.apache.org/repos/asf/hadoop/blob/84cea001/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/synthetic/SynthJobClass.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/synthetic/SynthJobClass.java b/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/synthetic/SynthJobClass.java
deleted file mode 100644
index 439698f..0000000
--- a/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/synthetic/SynthJobClass.java
+++ /dev/null
@@ -1,180 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.yarn.sls.synthetic;
-
-import org.apache.commons.math3.distribution.AbstractRealDistribution;
-import org.apache.commons.math3.distribution.LogNormalDistribution;
-import org.apache.commons.math3.random.JDKRandomGenerator;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.tools.rumen.JobStory;
-import org.apache.hadoop.yarn.sls.synthetic.SynthTraceJobProducer.JobClass;
-import org.apache.hadoop.yarn.sls.synthetic.SynthTraceJobProducer.Trace;
-
-/**
- * This is a class that represent a class of Jobs. It is used to generate an
- * individual job, by picking random durations, task counts, container size,
- * etc.
- */
-public class SynthJobClass {
-
-  private final JDKRandomGenerator rand;
-  private final LogNormalDistribution dur;
-  private final LogNormalDistribution mapRuntime;
-  private final LogNormalDistribution redRuntime;
-  private final LogNormalDistribution mtasks;
-  private final LogNormalDistribution rtasks;
-  private final LogNormalDistribution mapMem;
-  private final LogNormalDistribution redMem;
-  private final LogNormalDistribution mapVcores;
-  private final LogNormalDistribution redVcores;
-
-  private final Trace trace;
-  @SuppressWarnings("VisibilityModifier")
-  protected final SynthWorkload workload;
-  @SuppressWarnings("VisibilityModifier")
-  protected final JobClass jobClass;
-
-  public SynthJobClass(JDKRandomGenerator rand, Trace trace,
-      SynthWorkload workload, int classId) {
-
-    this.trace = trace;
-    this.workload = workload;
-    this.rand = new JDKRandomGenerator();
-    this.rand.setSeed(rand.nextLong());
-    jobClass = trace.workloads.get(workload.getId()).job_classes.get(classId);
-
-    this.dur = SynthUtils.getLogNormalDist(rand, jobClass.dur_avg,
-        jobClass.dur_stddev);
-    this.mapRuntime = SynthUtils.getLogNormalDist(rand, jobClass.mtime_avg,
-        jobClass.mtime_stddev);
-    this.redRuntime = SynthUtils.getLogNormalDist(rand, jobClass.rtime_avg,
-        jobClass.rtime_stddev);
-    this.mtasks = SynthUtils.getLogNormalDist(rand, jobClass.mtasks_avg,
-        jobClass.mtasks_stddev);
-    this.rtasks = SynthUtils.getLogNormalDist(rand, jobClass.rtasks_avg,
-        jobClass.rtasks_stddev);
-
-    this.mapMem = SynthUtils.getLogNormalDist(rand, jobClass.map_max_memory_avg,
-        jobClass.map_max_memory_stddev);
-    this.redMem = SynthUtils.getLogNormalDist(rand,
-        jobClass.reduce_max_memory_avg, jobClass.reduce_max_memory_stddev);
-    this.mapVcores = SynthUtils.getLogNormalDist(rand,
-        jobClass.map_max_vcores_avg, jobClass.map_max_vcores_stddev);
-    this.redVcores = SynthUtils.getLogNormalDist(rand,
-        jobClass.reduce_max_vcores_avg, jobClass.reduce_max_vcores_stddev);
-  }
-
-  public JobStory getJobStory(Configuration conf, long actualSubmissionTime) {
-    return new SynthJob(rand, conf, this, actualSubmissionTime);
-  }
-
-  @Override
-  public String toString() {
-    return "SynthJobClass [workload=" + workload.getName() + ", class="
-        + jobClass.class_name + " job_count=" + jobClass.class_weight + ", dur="
-        + ((dur != null) ? dur.getNumericalMean() : 0) + ", mapRuntime="
-        + ((mapRuntime != null) ? mapRuntime.getNumericalMean() : 0)
-        + ", redRuntime="
-        + ((redRuntime != null) ? redRuntime.getNumericalMean() : 0)
-        + ", mtasks=" + ((mtasks != null) ? mtasks.getNumericalMean() : 0)
-        + ", rtasks=" + ((rtasks != null) ? rtasks.getNumericalMean() : 0)
-        + ", chance_of_reservation=" + jobClass.chance_of_reservation + "]\n";
-
-  }
-
-  public double getClassWeight() {
-    return jobClass.class_weight;
-  }
-
-  public long getDur() {
-    return genLongSample(dur);
-  }
-
-  public int getMtasks() {
-    return genIntSample(mtasks);
-  }
-
-  public int getRtasks() {
-    return genIntSample(rtasks);
-  }
-
-  public long getMapMaxMemory() {
-    return genLongSample(mapMem);
-  }
-
-  public long getReduceMaxMemory() {
-    return genLongSample(redMem);
-  }
-
-  public long getMapMaxVcores() {
-    return genLongSample(mapVcores);
-  }
-
-  public long getReduceMaxVcores() {
-    return genLongSample(redVcores);
-  }
-
-  public SynthWorkload getWorkload() {
-    return workload;
-  }
-
-  public int genIntSample(AbstractRealDistribution dist) {
-    if (dist == null) {
-      return 0;
-    }
-    double baseSample = dist.sample();
-    if (baseSample < 0) {
-      baseSample = 0;
-    }
-    return (int) (Integer.MAX_VALUE & (long) Math.ceil(baseSample));
-  }
-
-  public long genLongSample(AbstractRealDistribution dist) {
-    return dist != null ? (long) Math.ceil(dist.sample()) : 0;
-  }
-
-  @Override
-  public boolean equals(Object other) {
-    if (!(other instanceof SynthJobClass)) {
-      return false;
-    }
-    SynthJobClass o = (SynthJobClass) other;
-    return workload.equals(o.workload);
-  }
-
-  @Override
-  public int hashCode() {
-    return workload.hashCode() * workload.getId();
-  }
-
-  public String getClassName() {
-    return jobClass.class_name;
-  }
-
-  public long getMapTimeSample() {
-    return genLongSample(mapRuntime);
-  }
-
-  public long getReduceTimeSample() {
-    return genLongSample(redRuntime);
-  }
-
-  public String getUserName() {
-    return jobClass.user_name;
-  }
-}
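
The deleted class above generated per-job values by sampling log-normal distributions and clamping the result to a non-negative int (genIntSample). A minimal standalone sketch of that sampling pattern, using commons-math3 directly in place of the SynthUtils helper (whose exact parameterization is not shown in this diff):

import org.apache.commons.math3.distribution.LogNormalDistribution;
import org.apache.commons.math3.random.JDKRandomGenerator;

public class LogNormalSampleSketch {
  // Mirrors the deleted genIntSample: clamp negatives (relevant when a
  // normal distribution is plugged in) and mask the rounded sample into
  // the non-negative int range.
  static int genIntSample(LogNormalDistribution dist) {
    double sample = Math.max(0.0, dist.sample());
    return (int) (Integer.MAX_VALUE & (long) Math.ceil(sample));
  }

  public static void main(String[] args) {
    JDKRandomGenerator rand = new JDKRandomGenerator();
    rand.setSeed(42L);
    // Scale/shape values are illustrative only; SynthUtils derives them
    // from the configured avg/stddev of the job class.
    LogNormalDistribution mtasks = new LogNormalDistribution(rand, 3.0, 0.5);
    System.out.println(genIntSample(mtasks));
  }
}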

http://git-wip-us.apache.org/repos/asf/hadoop/blob/84cea001/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/synthetic/SynthTraceJobProducer.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/synthetic/SynthTraceJobProducer.java b/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/synthetic/SynthTraceJobProducer.java
index c89e4e2..09bc9b9 100644
--- a/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/synthetic/SynthTraceJobProducer.java
+++ b/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/synthetic/SynthTraceJobProducer.java
@@ -19,6 +19,7 @@ package org.apache.hadoop.yarn.sls.synthetic;
 
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
+import org.apache.commons.math3.distribution.AbstractRealDistribution;
 import org.apache.commons.math3.random.JDKRandomGenerator;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FSDataInputStream;
@@ -26,7 +27,11 @@ import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.tools.rumen.JobStory;
 import org.apache.hadoop.tools.rumen.JobStoryProducer;
+import org.apache.hadoop.yarn.exceptions.YarnRuntimeException;
+import org.apache.hadoop.yarn.sls.appmaster.MRAMSimulator;
+import org.codehaus.jackson.annotate.JsonCreator;
 import org.codehaus.jackson.annotate.JsonProperty;
+import org.codehaus.jackson.map.JsonMappingException;
 import org.codehaus.jackson.map.ObjectMapper;
 
 import javax.xml.bind.annotation.XmlRootElement;
@@ -39,7 +44,7 @@ import static org.codehaus.jackson.map.DeserializationConfig.Feature.FAIL_ON_UNK
 
 /**
  * This is a JobStoryProducer that operates from distribution of different
- * workloads. The .json input file is used to determine how many jobs, which
+ * workloads. The .json input file is used to determine how many jobs, which
  * size, number of maps/reducers and their duration, as well as the temporal
  * distributed of submissions. For each parameter we control avg and stdev, and
  * generate values via normal or log-normal distributions.
@@ -55,8 +60,6 @@ public class SynthTraceJobProducer implements JobStoryProducer {
   private final long seed;
 
   private int totalWeight;
-  private final List<Double> weightList;
-  private final Map<Integer, SynthWorkload> workloads;
 
   private final Queue<StoryParams> listStoryParams;
 
@@ -65,6 +68,9 @@ public class SynthTraceJobProducer implements JobStoryProducer {
   public static final String SLS_SYNTHETIC_TRACE_FILE =
       "sls.synthetic" + ".trace_file";
 
+  private final static int DEFAULT_MAPPER_PRIORITY = 20;
+  private final static int DEFAULT_REDUCER_PRIORITY = 10;
+
   public SynthTraceJobProducer(Configuration conf) throws IOException {
     this(conf, new Path(conf.get(SLS_SYNTHETIC_TRACE_FILE)));
   }
@@ -76,8 +82,6 @@ public class SynthTraceJobProducer implements JobStoryProducer {
 
     this.conf = conf;
     this.rand = new JDKRandomGenerator();
-    workloads = new HashMap<Integer, SynthWorkload>();
-    weightList = new ArrayList<Double>();
 
     ObjectMapper mapper = new ObjectMapper();
     mapper.configure(INTERN_FIELD_NAMES, true);
@@ -86,44 +90,132 @@ public class SynthTraceJobProducer implements JobStoryProducer {
     FileSystem ifs = path.getFileSystem(conf);
     FSDataInputStream fileIn = ifs.open(path);
 
+    // Initialize the random generator and the seed
     this.trace = mapper.readValue(fileIn, Trace.class);
-    seed = trace.rand_seed;
-    rand.setSeed(seed);
+    this.seed = trace.rand_seed;
+    this.rand.setSeed(seed);
+    // Initialize the trace
+    this.trace.init(rand);
 
     this.numJobs = new AtomicInteger(trace.num_jobs);
 
-    for (int workloadId = 0; workloadId < trace.workloads
-        .size(); workloadId++) {
-      SynthWorkload workload = new SynthWorkload(workloadId, trace);
-      for (int classId =
-          0; classId < trace.workloads.get(workloadId).job_classes
-              .size(); classId++) {
-        SynthJobClass cls = new SynthJobClass(rand, trace, workload, classId);
-        workload.add(cls);
-      }
-      workloads.put(workloadId, workload);
+    for (Double w : trace.workload_weights) {
+      totalWeight += w;
     }
 
-    for (int i = 0; i < workloads.size(); i++) {
-      double w = workloads.get(i).getWorkloadWeight();
-      totalWeight += w;
-      weightList.add(w);
+    // Initialize our story parameters
+    listStoryParams = createStory();
+
+    LOG.info("Generated " + listStoryParams.size() + " deadlines for "
+        + this.numJobs.get() + " jobs");
+  }
+
+  // StoryParams hold the minimum amount of information needed to completely
+  // specify a job run: job definition, start time, and queue.
+  // This allows us to create "jobs" and then order them according to start time
+  static class StoryParams {
+    // Time the job gets submitted to
+    private long actualSubmissionTime;
+    // The queue the job gets submitted to
+    private String queue;
+    // Definition to construct the job from
+    private JobDefinition jobDef;
+
+    StoryParams(long actualSubmissionTime, String queue, JobDefinition jobDef) {
+      this.actualSubmissionTime = actualSubmissionTime;
+      this.queue = queue;
+      this.jobDef = jobDef;
     }
+  }
 
+
+  private Queue<StoryParams> createStory() {
     // create priority queue to keep start-time sorted
-    listStoryParams =
-        new PriorityQueue<StoryParams>(10, new Comparator<StoryParams>() {
+    Queue<StoryParams> storyQueue =
+        new PriorityQueue<>(this.numJobs.get(), new Comparator<StoryParams>() {
           @Override
           public int compare(StoryParams o1, StoryParams o2) {
             return Math
-                .toIntExact(o2.actualSubmissionTime - o1.actualSubmissionTime);
+                .toIntExact(o1.actualSubmissionTime - o2.actualSubmissionTime);
           }
         });
+    for (int i = 0; i < numJobs.get(); i++) {
+      // Generate a workload
+      Workload wl = trace.generateWorkload();
+      // Save all the parameters needed to completely define a job
+      long actualSubmissionTime = wl.generateSubmissionTime();
+      String queue = wl.queue_name;
+      JobDefinition job = wl.generateJobDefinition();
+      storyQueue.add(new StoryParams(actualSubmissionTime, queue, job));
+    }
+    return storyQueue;
+  }
 
-    // initialize it
-    createStoryParams();
-    LOG.info("Generated " + listStoryParams.size() + " deadlines for "
-        + this.numJobs.get() + " jobs ");
+  @Override
+  public JobStory getNextJob() throws IOException {
+    if (numJobs.decrementAndGet() < 0) {
+      return null;
+    }
+    StoryParams storyParams = listStoryParams.poll();
+    return new SynthJob(rand, conf, storyParams.jobDef, storyParams.queue,
+        storyParams.actualSubmissionTime);
+  }
+
+  @Override
+  public void close(){
+  }
+
+  @Override
+  public String toString() {
+    return "SynthTraceJobProducer [ conf=" + conf + ", numJobs=" + numJobs
+        + ", r=" + rand + ", totalWeight="
+        + totalWeight + ", workloads=" + trace.workloads + "]";
+  }
+
+  public int getNumJobs() {
+    return trace.num_jobs;
+  }
+
+  // Helper to parse and maintain backwards compatibility with
+  // syn json formats
+  private static void validateJobDef(JobDefinition jobDef){
+    if(jobDef.tasks == null) {
+      LOG.info("Detected old JobDefinition format. Converting.");
+      try {
+        jobDef.tasks = new ArrayList<>();
+        jobDef.type = "mapreduce";
+        jobDef.deadline_factor = new Sample(jobDef.deadline_factor_avg,
+            jobDef.deadline_factor_stddev);
+        jobDef.duration = new Sample(jobDef.dur_avg,
+            jobDef.dur_stddev);
+        jobDef.reservation = new Sample(jobDef.chance_of_reservation);
+
+        TaskDefinition map = new TaskDefinition();
+        map.type = MRAMSimulator.MAP_TYPE;
+        map.count = new Sample(jobDef.mtasks_avg, jobDef.mtasks_stddev);
+        map.time = new Sample(jobDef.mtime_avg, jobDef.mtime_stddev);
+        map.max_memory = new Sample((double) jobDef.map_max_memory_avg,
+            jobDef.map_max_memory_stddev);
+        map.max_vcores = new Sample((double) jobDef.map_max_vcores_avg,
+            jobDef.map_max_vcores_stddev);
+        map.priority = DEFAULT_MAPPER_PRIORITY;
+
+        jobDef.tasks.add(map);
+        TaskDefinition reduce = new TaskDefinition();
+        reduce.type = MRAMSimulator.REDUCE_TYPE;
+        reduce.count = new Sample(jobDef.rtasks_avg, jobDef.rtasks_stddev);
+        reduce.time = new Sample(jobDef.rtime_avg, jobDef.rtime_stddev);
+        reduce.max_memory = new Sample((double) jobDef.reduce_max_memory_avg,
+            jobDef.reduce_max_memory_stddev);
+        reduce.max_vcores = new Sample((double) jobDef.reduce_max_vcores_avg,
+            jobDef.reduce_max_vcores_stddev);
+        reduce.priority = DEFAULT_REDUCER_PRIORITY;
+
+        jobDef.tasks.add(reduce);
+      } catch (JsonMappingException e) {
+        LOG.warn("Error converting old JobDefinition format", e);
+      }
+    }
   }
 
   public long getSeed() {
@@ -159,6 +251,25 @@ public class SynthTraceJobProducer implements JobStoryProducer {
     @JsonProperty("workloads")
     List<Workload> workloads;
 
+    List<Double> workload_weights;
+    JDKRandomGenerator rand;
+
+    public void init(JDKRandomGenerator random){
+      this.rand = random;
+      // Pass rand forward
+      for(Workload w : workloads){
+        w.init(rand);
+      }
+      // Initialize workload weights
+      workload_weights = new ArrayList<>();
+      for(Workload w : workloads){
+        workload_weights.add(w.workload_weight);
+      }
+    }
+
+    Workload generateWorkload(){
+      return workloads.get(SynthUtils.getWeighted(workload_weights, rand));
+    }
   }
 
   /**
@@ -174,16 +285,67 @@ public class SynthTraceJobProducer implements JobStoryProducer {
     @JsonProperty("queue_name")
     String queue_name;
     @JsonProperty("job_classes")
-    List<JobClass> job_classes;
+    List<JobDefinition> job_classes;
     @JsonProperty("time_distribution")
     List<TimeSample> time_distribution;
+
+    JDKRandomGenerator rand;
+
+    List<Double> job_weights;
+    List<Double> time_weights;
+
+    public void init(JDKRandomGenerator random){
+      this.rand = random;
+      // Validate and pass rand forward
+      for(JobDefinition def : job_classes){
+        validateJobDef(def);
+        def.init(rand);
+      }
+
+      // Initialize job weights
+      job_weights = new ArrayList<>();
+      for(JobDefinition j : job_classes){
+        job_weights.add(j.class_weight);
+      }
+
+      // Initialize time weights
+      time_weights = new ArrayList<>();
+      for(TimeSample ts : time_distribution){
+        time_weights.add(ts.weight);
+      }
+    }
+
+    public long generateSubmissionTime(){
+      int index = SynthUtils.getWeighted(time_weights, rand);
+      // Retrieve the lower and upper bounds for this time "bucket"
+      int start = time_distribution.get(index).time;
+      // Get the beginning of the next time sample (if it exists)
+      index = (index+1)<time_distribution.size() ? index+1 : index;
+      int end = time_distribution.get(index).time;
+      int range = end-start;
+      // Within this time "bucket", uniformly pick a time if our
+      // range is non-zero, otherwise just use the start time of the bucket
+      return start + (range>0 ? rand.nextInt(range) : 0);
+    }
+
+    public JobDefinition generateJobDefinition(){
+      return job_classes.get(SynthUtils.getWeighted(job_weights, rand));
+    }
+
+    @Override
+    public String toString(){
+      return "\nWorkload " + workload_name + ", weight: " + workload_weight
+          + ", queue: " + queue_name + " "
+          + job_classes.toString().replace("\n", "\n\t");
+    }
   }
 
   /**
    * Class used to parse a job class from file.
    */
   @SuppressWarnings({ "membername", "checkstyle:visibilitymodifier" })
-  public static class JobClass {
+  public static class JobDefinition {
 
     @JsonProperty("class_name")
     String class_name;
@@ -194,6 +356,23 @@ public class SynthTraceJobProducer implements JobStoryProducer {
     @JsonProperty("class_weight")
     double class_weight;
 
+    // am type to launch
+    @JsonProperty("type")
+    String type;
+    @JsonProperty("deadline_factor")
+    Sample deadline_factor;
+    @JsonProperty("duration")
+    Sample duration;
+    @JsonProperty("reservation")
+    Sample reservation;
+
+    @JsonProperty("tasks")
+    List<TaskDefinition> tasks;
+
+    @JsonProperty("params")
+    Map<String, String> params;
+
+    // Old JSON fields for backwards compatibility
     // reservation related params
     @JsonProperty("chance_of_reservation")
     double chance_of_reservation;
@@ -246,71 +425,227 @@ public class SynthTraceJobProducer implements JobStoryProducer {
     @JsonProperty("reduce_max_vcores_stddev")
     double reduce_max_vcores_stddev;
 
+    public void init(JDKRandomGenerator rand){
+      deadline_factor.init(rand);
+      duration.init(rand);
+      reservation.init(rand);
+
+      for(TaskDefinition t : tasks){
+        t.count.init(rand);
+        t.time.init(rand);
+        t.max_memory.init(rand);
+        t.max_vcores.init(rand);
+      }
+    }
+
+    @Override
+    public String toString(){
+      return "\nJobDefinition " + class_name + ", weight: " + class_weight
+          + ", type: " + type + " "
+          + tasks.toString().replace("\n", "\n\t");
+    }
   }
 
   /**
-   * This is used to define time-varying probability of a job start-time (e.g.,
-   * to simulate daily patterns).
+   * A task representing a type of container, e.g. "map" in mapreduce.
    */
   @SuppressWarnings({ "membername", "checkstyle:visibilitymodifier" })
-  public static class TimeSample {
-    // in sec
+  public static class TaskDefinition {
+
+    @JsonProperty("type")
+    String type;
+    @JsonProperty("count")
+    Sample count;
     @JsonProperty("time")
-    int time;
-    @JsonProperty("weight")
-    double jobs;
+    Sample time;
+    @JsonProperty("max_memory")
+    Sample max_memory;
+    @JsonProperty("max_vcores")
+    Sample max_vcores;
+    @JsonProperty("priority")
+    int priority;
+
+    @Override
+    public String toString(){
+      return "\nTaskDefinition " + type
+          + " Count[" + count + "] Time[" + time + "] Memory[" + max_memory
+          + "] Vcores[" + max_vcores + "] Priority[" + priority + "]";
+    }
   }
 
-  static class StoryParams {
-    private SynthJobClass pickedJobClass;
-    private long actualSubmissionTime;
+  /**
+   * Class used to parse value sample information.
+   */
+  @SuppressWarnings({ "membername", "checkstyle:visibilitymodifier" })
+  public static class Sample {
+    private static final Dist DEFAULT_DIST = Dist.LOGNORM;
+
+    private final double val;
+    private final double std;
+    private final Dist dist;
+    private AbstractRealDistribution dist_instance;
+    private final List<String> discrete;
+    private final List<Double> weights;
+    private final Mode mode;
+
+    private JDKRandomGenerator rand;
+
+    private enum Mode{
+      CONST,
+      DIST,
+      DISC
+    }
 
-    StoryParams(SynthJobClass pickedJobClass, long actualSubmissionTime) {
-      this.pickedJobClass = pickedJobClass;
-      this.actualSubmissionTime = actualSubmissionTime;
+    private enum Dist{
+      LOGNORM,
+      NORM
     }
-  }
 
+    public Sample(Double val) throws JsonMappingException{
+      this(val, null);
+    }
 
-  void createStoryParams() {
+    public Sample(Double val, Double std) throws JsonMappingException{
+      this(val, std, null, null, null);
+    }
 
-    for (int i = 0; i < numJobs.get(); i++) {
-      int workload = SynthUtils.getWeighted(weightList, rand);
-      SynthWorkload pickedWorkload = workloads.get(workload);
-      long jobClass =
-          SynthUtils.getWeighted(pickedWorkload.getWeightList(), rand);
-      SynthJobClass pickedJobClass =
-          pickedWorkload.getClassList().get((int) jobClass);
-      long actualSubmissionTime = pickedWorkload.getBaseSubmissionTime(rand);
-      // long actualSubmissionTime = (i + 1) * 10;
-      listStoryParams
-          .add(new StoryParams(pickedJobClass, actualSubmissionTime));
+    @JsonCreator
+    public Sample(@JsonProperty("val") Double val,
+        @JsonProperty("std") Double std, @JsonProperty("dist") String dist,
+        @JsonProperty("discrete") List<String> discrete,
+        @JsonProperty("weights") List<Double> weights)
+        throws JsonMappingException{
+      // Different Modes
+      // - Constant: val must be specified, all else null. Sampling will
+      // return val.
+      // - Distribution: val, std specified, dist optional (defaults to
+      // LogNormal). Sampling will sample from the appropriate distribution
+      // - Discrete: discrete must be set to a list of strings or numbers,
+      // weights optional (defaults to uniform)
+
+      if(val!=null){
+        if(std==null){
+          // Constant
+          if(dist!=null || discrete!=null || weights!=null){
+            throw new JsonMappingException("Instantiation of " + Sample.class
+                + " failed");
+          }
+          mode = Mode.CONST;
+          this.val = val;
+          this.std = 0;
+          this.dist = null;
+          this.discrete = null;
+          this.weights = null;
+        } else {
+          // Distribution
+          if(discrete!=null || weights != null){
+            throw new JsonMappingException("Instantiation of " + Sample.class
+                + " failed");
+          }
+          mode = Mode.DIST;
+          this.val = val;
+          this.std = std;
+          this.dist = dist!=null ? Dist.valueOf(dist) : DEFAULT_DIST;
+          this.discrete = null;
+          this.weights = null;
+        }
+      } else {
+        // Discrete
+        if(discrete==null){
+          throw new JsonMappingException("Instantiation of " + Sample.class
+              + " failed");
+        }
+        mode = Mode.DISC;
+        this.val = 0;
+        this.std = 0;
+        this.dist = null;
+        this.discrete = discrete;
+        if(weights == null){
+          weights = new ArrayList<>(Collections.nCopies(
+              discrete.size(), 1.0));
+        }
+        if(weights.size() != discrete.size()){
+          throw new JsonMappingException("Instantiation of " + Sample.class
+              + " failed");
+        }
+        this.weights = weights;
+      }
     }
-  }
 
-  @Override
-  public JobStory getNextJob() throws IOException {
-    if (numJobs.decrementAndGet() < 0) {
-      return null;
+    public void init(JDKRandomGenerator random){
+      if(this.rand != null){
+        throw new YarnRuntimeException("init called twice");
+      }
+      this.rand = random;
+      if(mode == Mode.DIST){
+        switch(this.dist){
+        case LOGNORM:
+          this.dist_instance = SynthUtils.getLogNormalDist(rand, val, std);
+          return;
+        case NORM:
+          this.dist_instance = SynthUtils.getNormalDist(rand, val, std);
+          return;
+        default:
+          throw new YarnRuntimeException("Unknown distribution " + dist.name());
+        }
+      }
     }
-    StoryParams storyParams = listStoryParams.poll();
-    return storyParams.pickedJobClass.getJobStory(conf,
-        storyParams.actualSubmissionTime);
-  }
 
-  @Override
-  public void close() {
-  }
+    public int getInt(){
+      return Math.toIntExact(getLong());
+    }
 
-  @Override
-  public String toString() {
-    return "SynthTraceJobProducer [ conf=" + conf + ", numJobs=" + numJobs
-        + ", weightList=" + weightList + ", r=" + rand + ", totalWeight="
-        + totalWeight + ", workloads=" + workloads + "]";
-  }
+    public long getLong(){
+      return Math.round(getDouble());
+    }
+
+    public double getDouble(){
+      return Double.parseDouble(getString());
+    }
+
+    public String getString(){
+      if(this.rand == null){
+        throw new YarnRuntimeException("getValue called without init");
+      }
+      switch(mode){
+      case CONST:
+        return Double.toString(val);
+      case DIST:
+        return Double.toString(dist_instance.sample());
+      case DISC:
+        return this.discrete.get(SynthUtils.getWeighted(this.weights, rand));
+      default:
+        throw new YarnRuntimeException("Unknown sampling mode " + mode.name());
+      }
+    }
+
+    @Override
+    public String toString(){
+      switch(mode){
+      case CONST:
+        return "value: " + Double.toString(val);
+      case DIST:
+        return "value: " + this.val + " std: " + this.std + " dist: "
+            + this.dist.name();
+      case DISC:
+        return "discrete: " + this.discrete + ", weights: " + this.weights;
+      default:
+        throw new YarnRuntimeException("Unknown sampling mode " + mode.name());
+      }
+    }
 
-  public int getNumJobs() {
-    return trace.num_jobs;
   }
 
+  /**
+   * This is used to define time-varying probability of a job start-time (e.g.,
+   * to simulate daily patterns).
+   */
+  @SuppressWarnings({ "membername", "checkstyle:visibilitymodifier" })
+  public static class TimeSample {
+    // in sec
+    @JsonProperty("time")
+    int time;
+    @JsonProperty("weight")
+    double weight;
+  }
 }
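
The Sample class introduced above supports three modes: a constant (only val set), a distribution (val plus std, log-normal unless dist says otherwise), and a weighted discrete choice (discrete plus optional weights). A hedged usage sketch, assuming the patched SynthTraceJobProducer is on the classpath; the exact shape of the log-normal draw depends on SynthUtils.getLogNormalDist, which this diff does not show:

import java.util.Arrays;
import org.apache.commons.math3.random.JDKRandomGenerator;
import org.apache.hadoop.yarn.sls.synthetic.SynthTraceJobProducer.Sample;

public class SampleModesSketch {
  public static void main(String[] args) throws Exception {
    JDKRandomGenerator rand = new JDKRandomGenerator();
    rand.setSeed(7L);

    // CONST: only "val" is given; sampling always returns it.
    Sample constant = new Sample(600.0);
    // DIST: "val" and "std" given; the distribution defaults to LOGNORM.
    Sample duration = new Sample(600.0, 60.0);
    // DISC: weighted choice over discrete string values.
    Sample amType = new Sample(null, null, null,
        Arrays.asList("mapreduce", "stream"), Arrays.asList(0.8, 0.2));

    // init(...) must be called exactly once before any get*() call,
    // otherwise getString() throws a YarnRuntimeException.
    constant.init(rand);
    duration.init(rand);
    amType.init(rand);

    System.out.println(constant.getLong());  // always 600
    System.out.println(duration.getLong());  // a log-normal draw
    System.out.println(amType.getString());  // "mapreduce" ~80% of the time
  }
}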

http://git-wip-us.apache.org/repos/asf/hadoop/blob/84cea001/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/synthetic/SynthWorkload.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/synthetic/SynthWorkload.java b/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/synthetic/SynthWorkload.java
deleted file mode 100644
index 9e5fd4e..0000000
--- a/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/synthetic/SynthWorkload.java
+++ /dev/null
@@ -1,121 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.yarn.sls.synthetic;
-
-import org.apache.hadoop.yarn.sls.synthetic.SynthTraceJobProducer.Trace;
-
-import java.util.*;
-
-/**
- * This class represent a workload (made up of multiple SynthJobClass(es)). It
- * also stores the temporal distributions of jobs in this workload.
- */
-public class SynthWorkload {
-
-  private final int id;
-  private final List<SynthJobClass> classList;
-  private final Trace trace;
-  private final SortedMap<Integer, Double> timeWeights;
-
-  public SynthWorkload(int identifier, Trace inTrace) {
-    classList = new ArrayList<SynthJobClass>();
-    this.id = identifier;
-    this.trace = inTrace;
-    timeWeights = new TreeMap<Integer, Double>();
-    for (SynthTraceJobProducer.TimeSample ts : trace.workloads
-        .get(id).time_distribution) {
-      timeWeights.put(ts.time, ts.jobs);
-    }
-  }
-
-  public boolean add(SynthJobClass s) {
-    return classList.add(s);
-  }
-
-  public List<Double> getWeightList() {
-    ArrayList<Double> ret = new ArrayList<Double>();
-    for (SynthJobClass s : classList) {
-      ret.add(s.getClassWeight());
-    }
-    return ret;
-  }
-
-  public int getId() {
-    return id;
-  }
-
-  @Override
-  public boolean equals(Object other) {
-    if (!(other instanceof SynthWorkload)) {
-      return false;
-    }
-    // assume ID determines job classes by construction
-    return getId() == ((SynthWorkload) other).getId();
-  }
-
-  @Override
-  public int hashCode() {
-    return getId();
-  }
-
-  @Override
-  public String toString() {
-    return "SynthWorkload " + trace.workloads.get(id).workload_name + "[\n"
-        + classList + "]\n";
-  }
-
-  public String getName() {
-    return trace.workloads.get(id).workload_name;
-  }
-
-  public double getWorkloadWeight() {
-    return trace.workloads.get(id).workload_weight;
-  }
-
-  public String getQueueName() {
-    return trace.workloads.get(id).queue_name;
-  }
-
-  public long getBaseSubmissionTime(Random rand) {
-
-    // pick based on weights the "bucket" for this start time
-    int position = SynthUtils.getWeighted(timeWeights.values(), rand);
-
-    int[] time = new int[timeWeights.keySet().size()];
-    int index = 0;
-    for (Integer i : timeWeights.keySet()) {
-      time[index++] = i;
-    }
-
-    // uniformly pick a time between start and end time of this bucket
-    int startRange = time[position];
-    int endRange = startRange;
-    // if there is no subsequent bucket pick startRange
-    if (position < timeWeights.keySet().size() - 1) {
-      endRange = time[position + 1];
-      return startRange + rand.nextInt((endRange - startRange));
-    } else {
-      return startRange;
-    }
-  }
-
-  public List<SynthJobClass> getClassList() {
-    return classList;
-  }
-
-}
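
Both the deleted getBaseSubmissionTime above and the new Workload.generateSubmissionTime that replaces it follow the same two-step pattern: pick a time bucket by weight, then pick uniformly within that bucket (the last bucket has no upper bound, so its start time is used). A self-contained sketch of the pattern; the weighted pick below is a plausible stand-in for SynthUtils.getWeighted, whose implementation is not part of this diff:

import java.util.List;
import java.util.Random;

public class BucketedTimeSketch {
  // Stand-in for SynthUtils.getWeighted: index i is chosen with
  // probability weights[i] / sum(weights).
  static int getWeighted(List<Double> weights, Random rand) {
    double total = 0;
    for (double w : weights) {
      total += w;
    }
    double cut = rand.nextDouble() * total;
    double acc = 0;
    for (int i = 0; i < weights.size(); i++) {
      acc += weights.get(i);
      if (cut <= acc) {
        return i;
      }
    }
    return weights.size() - 1;
  }

  // times[i] is the start (in seconds) of bucket i; weights[i] is its
  // relative probability, mirroring the TimeSample entries in the trace.
  static long generateSubmissionTime(int[] times, List<Double> weights,
      Random rand) {
    int index = getWeighted(weights, rand);
    int start = times[index];
    int next = (index + 1 < times.length) ? index + 1 : index;
    int range = times[next] - start;
    return start + (range > 0 ? rand.nextInt(range) : 0);
  }
}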

http://git-wip-us.apache.org/repos/asf/hadoop/blob/84cea001/hadoop-tools/hadoop-sls/src/test/java/org/apache/hadoop/yarn/sls/BaseSLSRunnerTest.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-sls/src/test/java/org/apache/hadoop/yarn/sls/BaseSLSRunnerTest.java b/hadoop-tools/hadoop-sls/src/test/java/org/apache/hadoop/yarn/sls/BaseSLSRunnerTest.java
index 6b369f2..668be14 100644
--- a/hadoop-tools/hadoop-sls/src/test/java/org/apache/hadoop/yarn/sls/BaseSLSRunnerTest.java
+++ b/hadoop-tools/hadoop-sls/src/test/java/org/apache/hadoop/yarn/sls/BaseSLSRunnerTest.java
@@ -125,7 +125,7 @@ public abstract class BaseSLSRunnerTest {
       if (!exceptionList.isEmpty()) {
         sls.stop();
         Assert.fail("TestSLSRunner catched exception from child thread "
-            + "(TaskRunner.Task): " + exceptionList);
+            + "(TaskRunner.TaskDefinition): " + exceptionList);
         break;
       }
       timeout--;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/84cea001/hadoop-tools/hadoop-sls/src/test/java/org/apache/hadoop/yarn/sls/TestSLSGenericSynth.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-sls/src/test/java/org/apache/hadoop/yarn/sls/TestSLSGenericSynth.java b/hadoop-tools/hadoop-sls/src/test/java/org/apache/hadoop/yarn/sls/TestSLSGenericSynth.java
new file mode 100644
index 0000000..79ebe21
--- /dev/null
+++ b/hadoop-tools/hadoop-sls/src/test/java/org/apache/hadoop/yarn/sls/TestSLSGenericSynth.java
@@ -0,0 +1,76 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.sls;
+
+import net.jcip.annotations.NotThreadSafe;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.CapacityScheduler;
+import org.apache.hadoop.yarn.server.resourcemanager.scheduler.fair.FairScheduler;
+import org.junit.Before;
+import org.junit.Test;
+import org.junit.runner.RunWith;
+import org.junit.runners.Parameterized;
+import org.junit.runners.Parameterized.Parameters;
+
+import java.util.Arrays;
+import java.util.Collection;
+
+/**
+ * This test performs simple runs of the SLS with the generic syn json format.
+ */
+@RunWith(value = Parameterized.class)
+@NotThreadSafe
+public class TestSLSGenericSynth extends BaseSLSRunnerTest {
+
+  @Parameters(name = "Testing with: {1}, {0}, (nodeFile {3})")
+  public static Collection<Object[]> data() {
+
+    String capScheduler = CapacityScheduler.class.getCanonicalName();
+    String fairScheduler = FairScheduler.class.getCanonicalName();
+    String synthTraceFile = "src/test/resources/syn_generic.json";
+    String nodeFile = "src/test/resources/nodes.json";
+
+    // Test with both schedulers
+    return Arrays.asList(new Object[][] {
+
+        // covering the no nodeFile case
+        {capScheduler, "SYNTH", synthTraceFile, null },
+
+        // covering new commandline and CapacityScheduler
+        {capScheduler, "SYNTH", synthTraceFile, nodeFile },
+
+        // covering FairScheduler
+        {fairScheduler, "SYNTH", synthTraceFile, nodeFile },
+    });
+  }
+
+  @Before
+  public void setup() {
+    ongoingInvariantFile = "src/test/resources/ongoing-invariants.txt";
+    exitInvariantFile = "src/test/resources/exit-invariants.txt";
+  }
+
+  @Test(timeout = 90000)
+  @SuppressWarnings("all")
+  public void testSimulatorRunning() throws Exception {
+    Configuration conf = new Configuration(false);
+    long timeTillShutdownInsec = 20L;
+    runSLS(conf, timeTillShutdownInsec);
+  }
+}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/84cea001/hadoop-tools/hadoop-sls/src/test/java/org/apache/hadoop/yarn/sls/TestSLSStreamAMSynth.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-sls/src/test/java/org/apache/hadoop/yarn/sls/TestSLSStreamAMSynth.java b/hadoop-tools/hadoop-sls/src/test/java/org/apache/hadoop/yarn/sls/TestSLSStreamAMSynth.java
new file mode 100644
index 0000000..a5d30e0
--- /dev/null
+++ b/hadoop-tools/hadoop-sls/src/test/java/org/apache/hadoop/yarn/sls/TestSLSStreamAMSynth.java
@@ -0,0 +1,76 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.sls;
+
+import net.jcip.annotations.NotThreadSafe;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.CapacityScheduler;
+import org.apache.hadoop.yarn.server.resourcemanager.scheduler.fair.FairScheduler;
+import org.junit.Before;
+import org.junit.Test;
+import org.junit.runner.RunWith;
+import org.junit.runners.Parameterized;
+import org.junit.runners.Parameterized.Parameters;
+
+import java.util.Arrays;
+import java.util.Collection;
+
+/**
+ * This test performs simple runs of the SLS with the stream AM syn json format.
+ */
+@RunWith(value = Parameterized.class)
+@NotThreadSafe
+public class TestSLSStreamAMSynth extends BaseSLSRunnerTest {
+
+  @Parameters(name = "Testing with: {1}, {0}, (nodeFile {3})")
+  public static Collection<Object[]> data() {
+
+    String capScheduler = CapacityScheduler.class.getCanonicalName();
+    String fairScheduler = FairScheduler.class.getCanonicalName();
+    String synthTraceFile = "src/test/resources/syn_stream.json";
+    String nodeFile = "src/test/resources/nodes.json";
+
+    // Test with both schedulers
+    return Arrays.asList(new Object[][] {
+
+        // covering the no nodeFile case
+        {capScheduler, "SYNTH", synthTraceFile, null },
+
+        // covering new commandline and CapacityScheduler
+        {capScheduler, "SYNTH", synthTraceFile, nodeFile },
+
+        // covering FairScheduler
+        {fairScheduler, "SYNTH", synthTraceFile, nodeFile },
+    });
+  }
+
+  @Before
+  public void setup() {
+    ongoingInvariantFile = "src/test/resources/ongoing-invariants.txt";
+    exitInvariantFile = "src/test/resources/exit-invariants.txt";
+  }
+
+  @Test(timeout = 90000)
+  @SuppressWarnings("all")
+  public void testSimulatorRunning() throws Exception {
+    Configuration conf = new Configuration(false);
+    long timeTillShutdownInsec = 20L;
+    runSLS(conf, timeTillShutdownInsec);
+  }
+}
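
Beyond the parameterized SLS runs above, the producer can also be driven directly. A hedged sketch, run from the hadoop-sls module directory so the relative trace path resolves (the path is illustrative):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.tools.rumen.JobStory;
import org.apache.hadoop.yarn.sls.synthetic.SynthTraceJobProducer;

public class SynthProducerSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    conf.set(SynthTraceJobProducer.SLS_SYNTHETIC_TRACE_FILE,
        "src/test/resources/syn_generic.json");
    SynthTraceJobProducer producer = new SynthTraceJobProducer(conf);
    // Jobs come back ordered by submission time; null marks the end.
    JobStory job;
    while ((job = producer.getNextJob()) != null) {
      System.out.println(job.getName() + " @ " + job.getSubmissionTime());
    }
    producer.close();
  }
}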




[42/50] [abbrv] hadoop git commit: HDFS-13136. Avoid taking FSN lock while doing group member lookup for FSD permission check. Contributed by Xiaoyu Yao.

Posted by ha...@apache.org.
HDFS-13136. Avoid taking FSN lock while doing group member lookup for FSD permission check. Contributed by Xiaoyu Yao.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/84a1321f
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/84a1321f
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/84a1321f

Branch: refs/heads/HDFS-12996
Commit: 84a1321f6aa0af6895564a7c47f8f264656f0294
Parents: 3132709
Author: Xiaoyu Yao <xy...@apache.org>
Authored: Thu Feb 15 00:02:05 2018 -0800
Committer: Xiaoyu Yao <xy...@apache.org>
Committed: Thu Feb 22 11:32:32 2018 -0800

----------------------------------------------------------------------
 .../server/namenode/EncryptionZoneManager.java  |   3 +-
 .../hadoop/hdfs/server/namenode/FSDirAclOp.java |  28 ++-
 .../hdfs/server/namenode/FSDirAttrOp.java       |  54 +++---
 .../hdfs/server/namenode/FSDirConcatOp.java     |   5 +-
 .../hdfs/server/namenode/FSDirDeleteOp.java     |   8 +-
 .../server/namenode/FSDirEncryptionZoneOp.java  |  12 +-
 .../hdfs/server/namenode/FSDirMkdirOp.java      |   3 +-
 .../hdfs/server/namenode/FSDirRenameOp.java     |  11 +-
 .../hdfs/server/namenode/FSDirSnapshotOp.java   |  38 ++--
 .../server/namenode/FSDirStatAndListingOp.java  |  35 ++--
 .../hdfs/server/namenode/FSDirXAttrOp.java      |  35 ++--
 .../hdfs/server/namenode/FSNamesystem.java      | 190 ++++++++++++-------
 .../hdfs/server/namenode/NameNodeAdapter.java   |   5 +-
 .../hdfs/server/namenode/TestAuditLogger.java   |   3 +-
 .../namenode/TestAuditLoggerWithCommands.java   |   4 +-
 15 files changed, 243 insertions(+), 191 deletions(-)
----------------------------------------------------------------------
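
The change is mechanical but the intent is worth spelling out: constructing the FSPermissionChecker can trigger a group-membership lookup against an external service, so every call path now builds the checker before taking the namesystem or directory lock and passes it down, instead of calling getPermissionChecker() while holding the lock. A generic sketch of the pattern with stand-in names (PermissionChecker and getStatus are illustrative, not HDFS APIs):

import java.util.concurrent.locks.ReadWriteLock;
import java.util.concurrent.locks.ReentrantReadWriteLock;

public class LockPatternSketch {
  private final ReadWriteLock lock = new ReentrantReadWriteLock();

  // Stand-in for FSPermissionChecker: constructing it may call out to a
  // slow group-mapping service, so it must happen before the lock is taken.
  static final class PermissionChecker {
    final String user;
    PermissionChecker(String user) {
      this.user = user; // a group lookup would happen here
    }
  }

  String getStatus(String src) {
    PermissionChecker pc = new PermissionChecker("alice"); // outside the lock
    lock.readLock().lock();
    try {
      // The lock is held only for the cheap, in-memory part of the operation.
      return doCheckedRead(pc, src);
    } finally {
      lock.readLock().unlock();
    }
  }

  private String doCheckedRead(PermissionChecker pc, String src) {
    return pc.user + " read " + src;
  }
}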


http://git-wip-us.apache.org/repos/asf/hadoop/blob/84a1321f/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/EncryptionZoneManager.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/EncryptionZoneManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/EncryptionZoneManager.java
index 3fcf797..176ae1d 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/EncryptionZoneManager.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/EncryptionZoneManager.java
@@ -154,9 +154,10 @@ public class EncryptionZoneManager {
   public void pauseForTestingAfterNthCheckpoint(final String zone,
       final int count) throws IOException {
     INodesInPath iip;
+    final FSPermissionChecker pc = dir.getPermissionChecker();
     dir.readLock();
     try {
-      iip = dir.resolvePath(dir.getPermissionChecker(), zone, DirOp.READ);
+      iip = dir.resolvePath(pc, zone, DirOp.READ);
     } finally {
       dir.readUnlock();
     }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/84a1321f/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirAclOp.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirAclOp.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirAclOp.java
index cc51430..7b3471d 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirAclOp.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirAclOp.java
@@ -36,11 +36,10 @@ import java.util.List;
 
 class FSDirAclOp {
   static FileStatus modifyAclEntries(
-      FSDirectory fsd, final String srcArg, List<AclEntry> aclSpec)
-      throws IOException {
+      FSDirectory fsd, FSPermissionChecker pc, final String srcArg,
+      List<AclEntry> aclSpec) throws IOException {
     String src = srcArg;
     checkAclsConfigFlag(fsd);
-    FSPermissionChecker pc = fsd.getPermissionChecker();
     INodesInPath iip;
     fsd.writeLock();
     try {
@@ -61,11 +60,10 @@ class FSDirAclOp {
   }
 
   static FileStatus removeAclEntries(
-      FSDirectory fsd, final String srcArg, List<AclEntry> aclSpec)
-      throws IOException {
+      FSDirectory fsd, FSPermissionChecker pc, final String srcArg,
+      List<AclEntry> aclSpec) throws IOException {
     String src = srcArg;
     checkAclsConfigFlag(fsd);
-    FSPermissionChecker pc = fsd.getPermissionChecker();
     INodesInPath iip;
     fsd.writeLock();
     try {
@@ -85,11 +83,10 @@ class FSDirAclOp {
     return fsd.getAuditFileInfo(iip);
   }
 
-  static FileStatus removeDefaultAcl(FSDirectory fsd, final String srcArg)
-      throws IOException {
+  static FileStatus removeDefaultAcl(FSDirectory fsd, FSPermissionChecker pc,
+      final String srcArg) throws IOException {
     String src = srcArg;
     checkAclsConfigFlag(fsd);
-    FSPermissionChecker pc = fsd.getPermissionChecker();
     INodesInPath iip;
     fsd.writeLock();
     try {
@@ -109,11 +106,10 @@ class FSDirAclOp {
     return fsd.getAuditFileInfo(iip);
   }
 
-  static FileStatus removeAcl(FSDirectory fsd, final String srcArg)
-      throws IOException {
+  static FileStatus removeAcl(FSDirectory fsd, FSPermissionChecker pc,
+      final String srcArg) throws IOException {
     String src = srcArg;
     checkAclsConfigFlag(fsd);
-    FSPermissionChecker pc = fsd.getPermissionChecker();
     INodesInPath iip;
     fsd.writeLock();
     try {
@@ -129,11 +125,10 @@ class FSDirAclOp {
   }
 
   static FileStatus setAcl(
-      FSDirectory fsd, final String srcArg, List<AclEntry> aclSpec)
-      throws IOException {
+      FSDirectory fsd, FSPermissionChecker pc, final String srcArg,
+      List<AclEntry> aclSpec) throws IOException {
     String src = srcArg;
     checkAclsConfigFlag(fsd);
-    FSPermissionChecker pc = fsd.getPermissionChecker();
     INodesInPath iip;
     fsd.writeLock();
     try {
@@ -148,9 +143,8 @@ class FSDirAclOp {
   }
 
   static AclStatus getAclStatus(
-      FSDirectory fsd, String src) throws IOException {
+      FSDirectory fsd, FSPermissionChecker pc, String src) throws IOException {
     checkAclsConfigFlag(fsd);
-    FSPermissionChecker pc = fsd.getPermissionChecker();
     fsd.readLock();
     try {
       INodesInPath iip = fsd.resolvePath(pc, src, DirOp.READ);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/84a1321f/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirAttrOp.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirAttrOp.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirAttrOp.java
index 201605f..406fe80 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirAttrOp.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirAttrOp.java
@@ -51,12 +51,11 @@ import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_STORAGE_POLICY_ENABLED_KE
 
 public class FSDirAttrOp {
   static FileStatus setPermission(
-      FSDirectory fsd, final String src, FsPermission permission)
-      throws IOException {
+      FSDirectory fsd, FSPermissionChecker pc, final String src,
+      FsPermission permission) throws IOException {
     if (FSDirectory.isExactReservedName(src)) {
       throw new InvalidPathException(src);
     }
-    FSPermissionChecker pc = fsd.getPermissionChecker();
     INodesInPath iip;
     fsd.writeLock();
     try {
@@ -71,12 +70,11 @@ public class FSDirAttrOp {
   }
 
   static FileStatus setOwner(
-      FSDirectory fsd, String src, String username, String group)
-      throws IOException {
+      FSDirectory fsd, FSPermissionChecker pc, String src, String username,
+      String group) throws IOException {
     if (FSDirectory.isExactReservedName(src)) {
       throw new InvalidPathException(src);
     }
-    FSPermissionChecker pc = fsd.getPermissionChecker();
     INodesInPath iip;
     fsd.writeLock();
     try {
@@ -101,10 +99,8 @@ public class FSDirAttrOp {
   }
 
   static FileStatus setTimes(
-      FSDirectory fsd, String src, long mtime, long atime)
-      throws IOException {
-    FSPermissionChecker pc = fsd.getPermissionChecker();
-
+      FSDirectory fsd, FSPermissionChecker pc, String src, long mtime,
+      long atime) throws IOException {
     INodesInPath iip;
     fsd.writeLock();
     try {
@@ -129,11 +125,10 @@ public class FSDirAttrOp {
   }
 
   static boolean setReplication(
-      FSDirectory fsd, BlockManager bm, String src, final short replication)
-      throws IOException {
+      FSDirectory fsd, FSPermissionChecker pc, BlockManager bm, String src,
+      final short replication) throws IOException {
     bm.verifyReplication(src, replication, null);
     final boolean isFile;
-    FSPermissionChecker pc = fsd.getPermissionChecker();
     fsd.writeLock();
     try {
       final INodesInPath iip = fsd.resolvePath(pc, src, DirOp.WRITE);
@@ -153,33 +148,31 @@ public class FSDirAttrOp {
     return isFile;
   }
 
-  static FileStatus unsetStoragePolicy(FSDirectory fsd, BlockManager bm,
-      String src) throws IOException {
-    return setStoragePolicy(fsd, bm, src,
+  static FileStatus unsetStoragePolicy(FSDirectory fsd, FSPermissionChecker pc,
+      BlockManager bm, String src) throws IOException {
+    return setStoragePolicy(fsd, pc, bm, src,
         HdfsConstants.BLOCK_STORAGE_POLICY_ID_UNSPECIFIED, "unset");
   }
 
-  static FileStatus setStoragePolicy(FSDirectory fsd, BlockManager bm,
-      String src, final String policyName) throws IOException {
+  static FileStatus setStoragePolicy(FSDirectory fsd, FSPermissionChecker pc,
+      BlockManager bm, String src, final String policyName) throws IOException {
     // get the corresponding policy and make sure the policy name is valid
     BlockStoragePolicy policy = bm.getStoragePolicy(policyName);
     if (policy == null) {
       throw new HadoopIllegalArgumentException(
           "Cannot find a block policy with the name " + policyName);
     }
-
-    return setStoragePolicy(fsd, bm, src, policy.getId(), "set");
+    return setStoragePolicy(fsd, pc, bm, src, policy.getId(), "set");
   }
 
-  static FileStatus setStoragePolicy(FSDirectory fsd, BlockManager bm,
-      String src, final byte policyId, final String operation)
+  static FileStatus setStoragePolicy(FSDirectory fsd, FSPermissionChecker pc,
+      BlockManager bm, String src, final byte policyId, final String operation)
       throws IOException {
     if (!fsd.isStoragePolicyEnabled()) {
       throw new IOException(String.format(
           "Failed to %s storage policy since %s is set to false.", operation,
           DFS_STORAGE_POLICY_ENABLED_KEY));
     }
-    FSPermissionChecker pc = fsd.getPermissionChecker();
     INodesInPath iip;
     fsd.writeLock();
     try {
@@ -202,9 +195,8 @@ public class FSDirAttrOp {
     return bm.getStoragePolicies();
   }
 
-  static BlockStoragePolicy getStoragePolicy(FSDirectory fsd, BlockManager bm,
-      String path) throws IOException {
-    FSPermissionChecker pc = fsd.getPermissionChecker();
+  static BlockStoragePolicy getStoragePolicy(FSDirectory fsd,
+      FSPermissionChecker pc, BlockManager bm, String path) throws IOException {
     fsd.readLock();
     try {
       final INodesInPath iip = fsd.resolvePath(pc, path, DirOp.READ_LINK);
@@ -222,9 +214,8 @@ public class FSDirAttrOp {
     }
   }
 
-  static long getPreferredBlockSize(FSDirectory fsd, String src)
-      throws IOException {
-    FSPermissionChecker pc = fsd.getPermissionChecker();
+  static long getPreferredBlockSize(FSDirectory fsd, FSPermissionChecker pc,
+      String src) throws IOException {
     fsd.readLock();
     try {
       final INodesInPath iip = fsd.resolvePath(pc, src, DirOp.READ_LINK);
@@ -240,9 +231,8 @@ public class FSDirAttrOp {
    *
    * Note: This does not support ".inodes" relative path.
    */
-  static void setQuota(FSDirectory fsd, String src, long nsQuota, long ssQuota,
-      StorageType type) throws IOException {
-    FSPermissionChecker pc = fsd.getPermissionChecker();
+  static void setQuota(FSDirectory fsd, FSPermissionChecker pc, String src,
+      long nsQuota, long ssQuota, StorageType type) throws IOException {
     if (fsd.isPermissionEnabled()) {
       pc.checkSuperuserPrivilege();
     }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/84a1321f/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirConcatOp.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirConcatOp.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirConcatOp.java
index 4cc5389..b423a95 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirConcatOp.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirConcatOp.java
@@ -48,14 +48,13 @@ import static org.apache.hadoop.util.Time.now;
  */
 class FSDirConcatOp {
 
-  static FileStatus concat(FSDirectory fsd, String target, String[] srcs,
-    boolean logRetryCache) throws IOException {
+  static FileStatus concat(FSDirectory fsd, FSPermissionChecker pc,
+      String target, String[] srcs, boolean logRetryCache) throws IOException {
     validatePath(target, srcs);
     assert srcs != null;
     if (FSDirectory.LOG.isDebugEnabled()) {
       FSDirectory.LOG.debug("concat {} to {}", Arrays.toString(srcs), target);
     }
-    FSPermissionChecker pc = fsd.getPermissionChecker();
     final INodesInPath targetIIP = fsd.resolvePath(pc, target, DirOp.WRITE);
     // write permission for the target
     if (fsd.isPermissionEnabled()) {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/84a1321f/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirDeleteOp.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirDeleteOp.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirDeleteOp.java
index a83a8b6..1fbb564 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirDeleteOp.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirDeleteOp.java
@@ -88,6 +88,7 @@ class FSDirDeleteOp {
    * For small directory or file the deletion is done in one shot.
    *
    * @param fsn namespace
+   * @param pc FS permission checker
    * @param src path name to be deleted
    * @param recursive boolean true to apply to all sub-directories recursively
    * @param logRetryCache whether to record RPC ids in editlog for retry cache
@@ -96,10 +97,9 @@ class FSDirDeleteOp {
    * @throws IOException
    */
   static BlocksMapUpdateInfo delete(
-      FSNamesystem fsn, String src, boolean recursive, boolean logRetryCache)
-      throws IOException {
+      FSNamesystem fsn, FSPermissionChecker pc, String src, boolean recursive,
+      boolean logRetryCache) throws IOException {
     FSDirectory fsd = fsn.getFSDirectory();
-    FSPermissionChecker pc = fsd.getPermissionChecker();
 
     if (FSDirectory.isExactReservedName(src)) {
       throw new InvalidPathException(src);
@@ -130,7 +130,7 @@ class FSDirDeleteOp {
    * <br>
    *
    * @param fsd the FSDirectory instance
-   * @param src a string representation of a path to an inode
+   * @param iip inodes of a path to be deleted
    * @param mtime the time the inode is removed
    */
   static void deleteForEditLog(FSDirectory fsd, INodesInPath iip, long mtime)

http://git-wip-us.apache.org/repos/asf/hadoop/blob/84a1321f/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirEncryptionZoneOp.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirEncryptionZoneOp.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirEncryptionZoneOp.java
index 943e60d..9fbdaeb 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirEncryptionZoneOp.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirEncryptionZoneOp.java
@@ -693,11 +693,12 @@ final class FSDirEncryptionZoneOp {
    * a different ACL. HDFS should not try to operate on additional ACLs, but
   * rather use the generated ACL it already has.
    */
-  static String getCurrentKeyVersion(final FSDirectory dir, final String zone)
-      throws IOException {
+  static String getCurrentKeyVersion(final FSDirectory dir,
+      final FSPermissionChecker pc, final String zone) throws IOException {
     assert dir.getProvider() != null;
     assert !dir.hasReadLock();
-    final String keyName = FSDirEncryptionZoneOp.getKeyNameForZone(dir, zone);
+    final String keyName = FSDirEncryptionZoneOp.getKeyNameForZone(dir,
+        pc, zone);
     if (keyName == null) {
       throw new IOException(zone + " is not an encryption zone.");
     }
@@ -719,11 +720,10 @@ final class FSDirEncryptionZoneOp {
    * Resolve the zone to an inode, find the encryption zone info associated with
    * that inode, and return the key name. Does not contact the KMS.
    */
-  static String getKeyNameForZone(final FSDirectory dir, final String zone)
-      throws IOException {
+  static String getKeyNameForZone(final FSDirectory dir,
+      final FSPermissionChecker pc, final String zone) throws IOException {
     assert dir.getProvider() != null;
     final INodesInPath iip;
-    final FSPermissionChecker pc = dir.getPermissionChecker();
     dir.readLock();
     try {
       iip = dir.resolvePath(pc, zone, DirOp.READ);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/84a1321f/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirMkdirOp.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirMkdirOp.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirMkdirOp.java
index 89fd8a3..45bb6b4 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirMkdirOp.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirMkdirOp.java
@@ -39,13 +39,12 @@ import static org.apache.hadoop.util.Time.now;
 
 class FSDirMkdirOp {
 
-  static FileStatus mkdirs(FSNamesystem fsn, String src,
+  static FileStatus mkdirs(FSNamesystem fsn, FSPermissionChecker pc, String src,
       PermissionStatus permissions, boolean createParent) throws IOException {
     FSDirectory fsd = fsn.getFSDirectory();
     if(NameNode.stateChangeLog.isDebugEnabled()) {
       NameNode.stateChangeLog.debug("DIR* NameSystem.mkdirs: " + src);
     }
-    FSPermissionChecker pc = fsd.getPermissionChecker();
     fsd.writeLock();
     try {
       INodesInPath iip = fsd.resolvePath(pc, src, DirOp.CREATE);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/84a1321f/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirRenameOp.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirRenameOp.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirRenameOp.java
index bbbb724..efc8da2 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirRenameOp.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirRenameOp.java
@@ -47,14 +47,12 @@ import static org.apache.hadoop.hdfs.protocol.FSLimitException.PathComponentTooL
 class FSDirRenameOp {
   @Deprecated
   static RenameResult renameToInt(
-      FSDirectory fsd, final String src, final String dst,
-      boolean logRetryCache)
-      throws IOException {
+      FSDirectory fsd, FSPermissionChecker pc, final String src,
+      final String dst, boolean logRetryCache) throws IOException {
     if (NameNode.stateChangeLog.isDebugEnabled()) {
       NameNode.stateChangeLog.debug("DIR* NameSystem.renameTo: " + src +
           " to " + dst);
     }
-    FSPermissionChecker pc = fsd.getPermissionChecker();
 
     // Rename does not operate on link targets
     // Do not resolveLink when checking permissions of src and dst
@@ -230,8 +228,8 @@ class FSDirRenameOp {
    * The new rename which has the POSIX semantic.
    */
   static RenameResult renameToInt(
-      FSDirectory fsd, final String srcArg, final String dstArg,
-      boolean logRetryCache, Options.Rename... options)
+      FSDirectory fsd, FSPermissionChecker pc, final String srcArg,
+      final String dstArg, boolean logRetryCache, Options.Rename... options)
       throws IOException {
     String src = srcArg;
     String dst = dstArg;
@@ -239,7 +237,6 @@ class FSDirRenameOp {
       NameNode.stateChangeLog.debug("DIR* NameSystem.renameTo: with options -" +
           " " + src + " to " + dst);
     }
-    final FSPermissionChecker pc = fsd.getPermissionChecker();
 
     BlocksMapUpdateInfo collectedBlocks = new BlocksMapUpdateInfo();
     // returns resolved path

http://git-wip-us.apache.org/repos/asf/hadoop/blob/84a1321f/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirSnapshotOp.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirSnapshotOp.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirSnapshotOp.java
index 4dacbf2..4a72f54 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirSnapshotOp.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirSnapshotOp.java
@@ -80,14 +80,17 @@ class FSDirSnapshotOp {
 
   /**
    * Create a snapshot
+   * @param fsd FS directory
+   * @param pc FS permission checker
    * @param snapshotRoot The directory path where the snapshot is taken
    * @param snapshotName The name of the snapshot
+   * @param logRetryCache whether to record RPC ids in editlog for retry cache
+   *                      rebuilding.
    */
   static String createSnapshot(
-      FSDirectory fsd, SnapshotManager snapshotManager, String snapshotRoot,
-      String snapshotName, boolean logRetryCache)
+      FSDirectory fsd, FSPermissionChecker pc, SnapshotManager snapshotManager,
+      String snapshotRoot, String snapshotName, boolean logRetryCache)
       throws IOException {
-    FSPermissionChecker pc = fsd.getPermissionChecker();
     final INodesInPath iip = fsd.resolvePath(pc, snapshotRoot, DirOp.WRITE);
     if (fsd.isPermissionEnabled()) {
       fsd.checkOwner(pc, iip);
@@ -115,10 +118,9 @@ class FSDirSnapshotOp {
     return snapshotPath;
   }
 
-  static void renameSnapshot(FSDirectory fsd, SnapshotManager snapshotManager,
-      String path, String snapshotOldName, String snapshotNewName,
-      boolean logRetryCache) throws IOException {
-    FSPermissionChecker pc = fsd.getPermissionChecker();
+  static void renameSnapshot(FSDirectory fsd, FSPermissionChecker pc,
+      SnapshotManager snapshotManager, String path, String snapshotOldName,
+      String snapshotNewName, boolean logRetryCache) throws IOException {
     final INodesInPath iip = fsd.resolvePath(pc, path, DirOp.WRITE);
     if (fsd.isPermissionEnabled()) {
       fsd.checkOwner(pc, iip);
@@ -136,8 +138,8 @@ class FSDirSnapshotOp {
   }
 
   static SnapshottableDirectoryStatus[] getSnapshottableDirListing(
-      FSDirectory fsd, SnapshotManager snapshotManager) throws IOException {
-    FSPermissionChecker pc = fsd.getPermissionChecker();
+      FSDirectory fsd, FSPermissionChecker pc, SnapshotManager snapshotManager)
+      throws IOException {
     fsd.readLock();
     try {
       final String user = pc.isSuperUser()? null : pc.getUser();
@@ -148,10 +150,9 @@ class FSDirSnapshotOp {
   }
 
   static SnapshotDiffReport getSnapshotDiffReport(FSDirectory fsd,
-      SnapshotManager snapshotManager, String path,
+      FSPermissionChecker pc, SnapshotManager snapshotManager, String path,
       String fromSnapshot, String toSnapshot) throws IOException {
     SnapshotDiffReport diffs;
-    final FSPermissionChecker pc = fsd.getPermissionChecker();
     fsd.readLock();
     try {
       INodesInPath iip = fsd.resolvePath(pc, path, DirOp.READ);
@@ -167,11 +168,10 @@ class FSDirSnapshotOp {
   }
 
   static SnapshotDiffReportListing getSnapshotDiffReportListing(FSDirectory fsd,
-      SnapshotManager snapshotManager, String path, String fromSnapshot,
-      String toSnapshot, byte[] startPath, int index,
+      FSPermissionChecker pc, SnapshotManager snapshotManager, String path,
+      String fromSnapshot, String toSnapshot, byte[] startPath, int index,
       int snapshotDiffReportLimit) throws IOException {
     SnapshotDiffReportListing diffs;
-    final FSPermissionChecker pc = fsd.getPermissionChecker();
     fsd.readLock();
     try {
       INodesInPath iip = fsd.resolvePath(pc, path, DirOp.READ);
@@ -226,15 +226,19 @@ class FSDirSnapshotOp {
 
   /**
    * Delete a snapshot of a snapshottable directory
+   * @param fsd The FS directory
+   * @param pc The permission checker
+   * @param snapshotManager The snapshot manager
    * @param snapshotRoot The snapshottable directory
    * @param snapshotName The name of the to-be-deleted snapshot
+   * @param logRetryCache whether to record RPC ids in editlog for retry cache
+   *                      rebuilding.
    * @throws IOException
    */
   static INode.BlocksMapUpdateInfo deleteSnapshot(
-      FSDirectory fsd, SnapshotManager snapshotManager, String snapshotRoot,
-      String snapshotName, boolean logRetryCache)
+      FSDirectory fsd, FSPermissionChecker pc, SnapshotManager snapshotManager,
+      String snapshotRoot, String snapshotName, boolean logRetryCache)
       throws IOException {
-    FSPermissionChecker pc = fsd.getPermissionChecker();
     final INodesInPath iip = fsd.resolvePath(pc, snapshotRoot, DirOp.WRITE);
     if (fsd.isPermissionEnabled()) {
       fsd.checkOwner(pc, iip);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/84a1321f/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirStatAndListingOp.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirStatAndListingOp.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirStatAndListingOp.java
index 8b77034..7e22ae1 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirStatAndListingOp.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirStatAndListingOp.java
@@ -51,9 +51,9 @@ import java.util.EnumSet;
 import static org.apache.hadoop.util.Time.now;
 
 class FSDirStatAndListingOp {
-  static DirectoryListing getListingInt(FSDirectory fsd, final String srcArg,
-      byte[] startAfter, boolean needLocation) throws IOException {
-    final FSPermissionChecker pc = fsd.getPermissionChecker();
+  static DirectoryListing getListingInt(FSDirectory fsd, FSPermissionChecker pc,
+      final String srcArg, byte[] startAfter, boolean needLocation)
+      throws IOException {
     final INodesInPath iip = fsd.resolvePath(pc, srcArg, DirOp.READ);
 
     // Get file name when startAfter is an INodePath.  This is not the
@@ -85,7 +85,8 @@ class FSDirStatAndListingOp {
 
   /**
    * Get the file info for a specific file.
-   *
+   * @param fsd The FS directory
+   * @param pc The permission checker
    * @param srcArg The string representation of the path to the file
    * @param resolveLink whether to throw UnresolvedLinkException
    *        if src refers to a symlink
@@ -95,11 +96,10 @@ class FSDirStatAndListingOp {
    * @return object containing information regarding the file
    *         or null if file not found
    */
-  static HdfsFileStatus getFileInfo(FSDirectory fsd, String srcArg,
-      boolean resolveLink, boolean needLocation, boolean needBlockToken)
-      throws IOException {
+  static HdfsFileStatus getFileInfo(FSDirectory fsd, FSPermissionChecker pc,
+      String srcArg, boolean resolveLink, boolean needLocation,
+      boolean needBlockToken) throws IOException {
     DirOp dirOp = resolveLink ? DirOp.READ : DirOp.READ_LINK;
-    FSPermissionChecker pc = fsd.getPermissionChecker();
     final INodesInPath iip;
     if (pc.isSuperUser()) {
       // superuser can only get an ACE if an existing ancestor is a file.
@@ -119,19 +119,18 @@ class FSDirStatAndListingOp {
   /**
    * Returns true if the file is closed
    */
-  static boolean isFileClosed(FSDirectory fsd, String src) throws IOException {
-    FSPermissionChecker pc = fsd.getPermissionChecker();
+  static boolean isFileClosed(FSDirectory fsd, FSPermissionChecker pc,
+      String src) throws IOException {
     final INodesInPath iip = fsd.resolvePath(pc, src, DirOp.READ);
     return !INodeFile.valueOf(iip.getLastINode(), src).isUnderConstruction();
   }
 
   static ContentSummary getContentSummary(
-      FSDirectory fsd, String src) throws IOException {
-    FSPermissionChecker pc = fsd.getPermissionChecker();
+      FSDirectory fsd, FSPermissionChecker pc, String src) throws IOException {
     final INodesInPath iip = fsd.resolvePath(pc, src, DirOp.READ_LINK);
     // getContentSummaryInt() call will check access (if enabled) when
     // traversing all sub directories.
-    return getContentSummaryInt(fsd, iip);
+    return getContentSummaryInt(fsd, pc, iip);
   }
 
   /**
@@ -516,7 +515,7 @@ class FSDirStatAndListingOp {
   }
 
   private static ContentSummary getContentSummaryInt(FSDirectory fsd,
-      INodesInPath iip) throws IOException {
+      FSPermissionChecker pc, INodesInPath iip) throws IOException {
     fsd.readLock();
     try {
       INode targetNode = iip.getLastINode();
@@ -528,8 +527,7 @@ class FSDirStatAndListingOp {
         // processed. 0 means disabled. I.e. blocking for the entire duration.
         ContentSummaryComputationContext cscc =
             new ContentSummaryComputationContext(fsd, fsd.getFSNamesystem(),
-                fsd.getContentCountLimit(), fsd.getContentSleepMicroSec(),
-                fsd.getPermissionChecker());
+                fsd.getContentCountLimit(), fsd.getContentSleepMicroSec(), pc);
         ContentSummary cs = targetNode.computeAndConvertContentSummary(
             iip.getPathSnapshotId(), cscc);
         fsd.addYieldCount(cscc.getYieldCount());
@@ -541,8 +539,7 @@ class FSDirStatAndListingOp {
   }
 
   static QuotaUsage getQuotaUsage(
-      FSDirectory fsd, String src) throws IOException {
-    FSPermissionChecker pc = fsd.getPermissionChecker();
+      FSDirectory fsd, FSPermissionChecker pc, String src) throws IOException {
     final INodesInPath iip;
     fsd.readLock();
     try {
@@ -559,7 +556,7 @@ class FSDirStatAndListingOp {
       return usage;
     } else {
       //If quota isn't set, fall back to getContentSummary.
-      return getContentSummaryInt(fsd, iip);
+      return getContentSummaryInt(fsd, pc, iip);
     }
   }
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/84a1321f/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirXAttrOp.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirXAttrOp.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirXAttrOp.java
index be3092c..24a475f 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirXAttrOp.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirXAttrOp.java
@@ -51,22 +51,27 @@ class FSDirXAttrOp {
 
   /**
    * Set xattr for a file or directory.
-   *
+   * @param fsd
+   *          - FS directory
+   * @param pc
+   *          - FS permission checker
    * @param src
    *          - path on which it sets the xattr
    * @param xAttr
    *          - xAttr details to set
    * @param flag
    *          - xAttrs flags
+   * @param logRetryCache
+   *          - whether to record RPC ids in editlog for retry cache
+   *          rebuilding.
    * @throws IOException
    */
   static FileStatus setXAttr(
-      FSDirectory fsd, String src, XAttr xAttr, EnumSet<XAttrSetFlag> flag,
-      boolean logRetryCache)
+      FSDirectory fsd, FSPermissionChecker pc, String src, XAttr xAttr,
+      EnumSet<XAttrSetFlag> flag, boolean logRetryCache)
       throws IOException {
     checkXAttrsConfigFlag(fsd);
     checkXAttrSize(fsd, xAttr);
-    FSPermissionChecker pc = fsd.getPermissionChecker();
     XAttrPermissionFilter.checkPermissionForApi(
         pc, xAttr, FSDirectory.isReservedRawName(src));
     List<XAttr> xAttrs = Lists.newArrayListWithCapacity(1);
@@ -85,12 +90,10 @@ class FSDirXAttrOp {
     return fsd.getAuditFileInfo(iip);
   }
 
-  static List<XAttr> getXAttrs(FSDirectory fsd, final String srcArg,
-                               List<XAttr> xAttrs)
-      throws IOException {
+  static List<XAttr> getXAttrs(FSDirectory fsd, FSPermissionChecker pc,
+      final String srcArg, List<XAttr> xAttrs) throws IOException {
     String src = srcArg;
     checkXAttrsConfigFlag(fsd);
-    FSPermissionChecker pc = fsd.getPermissionChecker();
     final boolean isRawPath = FSDirectory.isReservedRawName(src);
     boolean getAll = xAttrs == null || xAttrs.isEmpty();
     if (!getAll) {
@@ -131,9 +134,8 @@ class FSDirXAttrOp {
   }
 
   static List<XAttr> listXAttrs(
-      FSDirectory fsd, String src) throws IOException {
+      FSDirectory fsd, FSPermissionChecker pc, String src) throws IOException {
     FSDirXAttrOp.checkXAttrsConfigFlag(fsd);
-    final FSPermissionChecker pc = fsd.getPermissionChecker();
     final boolean isRawPath = FSDirectory.isReservedRawName(src);
     final INodesInPath iip = fsd.resolvePath(pc, src, DirOp.READ);
     if (fsd.isPermissionEnabled()) {
@@ -146,18 +148,23 @@ class FSDirXAttrOp {
 
   /**
    * Remove an xattr for a file or directory.
-   *
+   * @param fsd
+   *          - FS directory
+   * @param pc
+   *          - FS permission checker
    * @param src
    *          - path to remove the xattr from
    * @param xAttr
    *          - xAttr to remove
+   * @param logRetryCache
+   *          - whether to record RPC ids in editlog for retry cache
+   *          rebuilding.
    * @throws IOException
    */
   static FileStatus removeXAttr(
-      FSDirectory fsd, String src, XAttr xAttr, boolean logRetryCache)
-      throws IOException {
+      FSDirectory fsd, FSPermissionChecker pc, String src, XAttr xAttr,
+      boolean logRetryCache) throws IOException {
     FSDirXAttrOp.checkXAttrsConfigFlag(fsd);
-    FSPermissionChecker pc = fsd.getPermissionChecker();
     XAttrPermissionFilter.checkPermissionForApi(
         pc, xAttr, FSDirectory.isReservedRawName(src));
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/84a1321f/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
index b0973a9..d36b122 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
@@ -1869,11 +1869,12 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,
     final String operationName = "setPermission";
     FileStatus auditStat;
     checkOperation(OperationCategory.WRITE);
+    final FSPermissionChecker pc = getPermissionChecker();
     writeLock();
     try {
       checkOperation(OperationCategory.WRITE);
       checkNameNodeSafeMode("Cannot set permission for " + src);
-      auditStat = FSDirAttrOp.setPermission(dir, src, permission);
+      auditStat = FSDirAttrOp.setPermission(dir, pc, src, permission);
     } catch (AccessControlException e) {
       logAuditEvent(false, operationName, src);
       throw e;
@@ -1893,11 +1894,12 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,
     final String operationName = "setOwner";
     FileStatus auditStat;
     checkOperation(OperationCategory.WRITE);
+    final FSPermissionChecker pc = getPermissionChecker();
     writeLock();
     try {
       checkOperation(OperationCategory.WRITE);
       checkNameNodeSafeMode("Cannot set owner for " + src);
-      auditStat = FSDirAttrOp.setOwner(dir, src, username, group);
+      auditStat = FSDirAttrOp.setOwner(dir, pc, src, username, group);
     } catch (AccessControlException e) {
       logAuditEvent(false, operationName, src);
       throw e;
@@ -1917,7 +1919,7 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,
     final String operationName = "open";
     checkOperation(OperationCategory.READ);
     GetBlockLocationsResult res = null;
-    FSPermissionChecker pc = getPermissionChecker();
+    final FSPermissionChecker pc = getPermissionChecker();
     readLock();
     try {
       checkOperation(OperationCategory.READ);
@@ -2030,11 +2032,12 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,
     final String operationName = "concat";
     FileStatus stat = null;
     boolean success = false;
+    final FSPermissionChecker pc = getPermissionChecker();
     writeLock();
     try {
       checkOperation(OperationCategory.WRITE);
       checkNameNodeSafeMode("Cannot concat " + target);
-      stat = FSDirConcatOp.concat(dir, target, srcs, logRetryCache);
+      stat = FSDirConcatOp.concat(dir, pc, target, srcs, logRetryCache);
       success = true;
     } catch (AccessControlException ace) {
       logAuditEvent(success, operationName, Arrays.toString(srcs),
@@ -2058,11 +2061,12 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,
     final String operationName = "setTimes";
     FileStatus auditStat;
     checkOperation(OperationCategory.WRITE);
+    final FSPermissionChecker pc = getPermissionChecker();
     writeLock();
     try {
       checkOperation(OperationCategory.WRITE);
       checkNameNodeSafeMode("Cannot set times " + src);
-      auditStat = FSDirAttrOp.setTimes(dir, src, mtime, atime);
+      auditStat = FSDirAttrOp.setTimes(dir, pc, src, mtime, atime);
     } catch (AccessControlException e) {
       logAuditEvent(false, operationName, src);
       throw e;
@@ -2096,8 +2100,8 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,
         throw new HadoopIllegalArgumentException(
             "Cannot truncate to a negative file size: " + newLength + ".");
       }
-      final FSPermissionChecker pc = getPermissionChecker();
       checkOperation(OperationCategory.WRITE);
+      final FSPermissionChecker pc = getPermissionChecker();
       writeLock();
       BlocksMapUpdateInfo toRemoveBlocks = new BlocksMapUpdateInfo();
       try {
@@ -2166,11 +2170,13 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,
     final String operationName = "setReplication";
     boolean success = false;
     checkOperation(OperationCategory.WRITE);
+    final FSPermissionChecker pc = getPermissionChecker();
     writeLock();
     try {
       checkOperation(OperationCategory.WRITE);
       checkNameNodeSafeMode("Cannot set replication for " + src);
-      success = FSDirAttrOp.setReplication(dir, blockManager, src, replication);
+      success = FSDirAttrOp.setReplication(dir, pc, blockManager, src,
+          replication);
     } catch (AccessControlException e) {
       logAuditEvent(false, operationName, src);
       throw e;
@@ -2194,11 +2200,12 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,
     final String operationName = "setStoragePolicy";
     FileStatus auditStat;
     checkOperation(OperationCategory.WRITE);
+    final FSPermissionChecker pc = getPermissionChecker();
     writeLock();
     try {
       checkOperation(OperationCategory.WRITE);
       checkNameNodeSafeMode("Cannot set storage policy for " + src);
-      auditStat = FSDirAttrOp.setStoragePolicy(dir, blockManager, src,
+      auditStat = FSDirAttrOp.setStoragePolicy(dir, pc, blockManager, src,
                                                policyName);
     } catch (AccessControlException e) {
       logAuditEvent(false, operationName, src);
@@ -2219,11 +2226,12 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,
     final String operationName = "unsetStoragePolicy";
     FileStatus auditStat;
     checkOperation(OperationCategory.WRITE);
+    final FSPermissionChecker pc = getPermissionChecker();
     writeLock();
     try {
       checkOperation(OperationCategory.WRITE);
       checkNameNodeSafeMode("Cannot unset storage policy for " + src);
-      auditStat = FSDirAttrOp.unsetStoragePolicy(dir, blockManager, src);
+      auditStat = FSDirAttrOp.unsetStoragePolicy(dir, pc, blockManager, src);
     } catch (AccessControlException e) {
       logAuditEvent(false, operationName, src);
       throw e;
@@ -2242,10 +2250,11 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,
    */
   BlockStoragePolicy getStoragePolicy(String src) throws IOException {
     checkOperation(OperationCategory.READ);
+    final FSPermissionChecker pc = getPermissionChecker();
     readLock();
     try {
       checkOperation(OperationCategory.READ);
-      return FSDirAttrOp.getStoragePolicy(dir, blockManager, src);
+      return FSDirAttrOp.getStoragePolicy(dir, pc, blockManager, src);
     } finally {
       readUnlock("getStoragePolicy");
     }
@@ -2267,10 +2276,11 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,
 
   long getPreferredBlockSize(String src) throws IOException {
     checkOperation(OperationCategory.READ);
+    final FSPermissionChecker pc = getPermissionChecker();
     readLock();
     try {
       checkOperation(OperationCategory.READ);
-      return FSDirAttrOp.getPreferredBlockSize(dir, src);
+      return FSDirAttrOp.getPreferredBlockSize(dir, pc, src);
     } finally {
       readUnlock("getPreferredBlockSize");
     }
@@ -2374,13 +2384,13 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,
           "ecPolicyName are exclusive parameters. Set both is not allowed!");
     }
 
-    FSPermissionChecker pc = getPermissionChecker();
     INodesInPath iip = null;
     boolean skipSync = true; // until we do something that might create edits
     HdfsFileStatus stat = null;
     BlocksMapUpdateInfo toRemoveBlocks = null;
 
     checkOperation(OperationCategory.WRITE);
+    final FSPermissionChecker pc = getPermissionChecker();
     writeLock();
     try {
       checkOperation(OperationCategory.WRITE);
@@ -2461,8 +2471,8 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,
   boolean recoverLease(String src, String holder, String clientMachine)
       throws IOException {
     boolean skipSync = false;
-    FSPermissionChecker pc = getPermissionChecker();
     checkOperation(OperationCategory.WRITE);
+    final FSPermissionChecker pc = getPermissionChecker();
     writeLock();
     try {
       checkOperation(OperationCategory.WRITE);
@@ -2601,8 +2611,8 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,
     try {
       boolean skipSync = false;
       LastBlockWithStatus lbs = null;
-      final FSPermissionChecker pc = getPermissionChecker();
       checkOperation(OperationCategory.WRITE);
+      final FSPermissionChecker pc = getPermissionChecker();
       writeLock();
       try {
         checkOperation(OperationCategory.WRITE);
@@ -2657,8 +2667,8 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,
 
     LocatedBlock[] onRetryBlock = new LocatedBlock[1];
     FSDirWriteFileOp.ValidateAddBlockResult r;
-    FSPermissionChecker pc = getPermissionChecker();
     checkOperation(OperationCategory.READ);
+    final FSPermissionChecker pc = getPermissionChecker();
     readLock();
     try {
       checkOperation(OperationCategory.READ);
@@ -2708,7 +2718,7 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,
     final List<DatanodeStorageInfo> chosen;
     final BlockType blockType;
     checkOperation(OperationCategory.READ);
-    FSPermissionChecker pc = getPermissionChecker();
+    final FSPermissionChecker pc = getPermissionChecker();
     readLock();
     try {
       checkOperation(OperationCategory.READ);
@@ -2756,7 +2766,7 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,
     NameNode.stateChangeLog.debug(
         "BLOCK* NameSystem.abandonBlock: {} of file {}", b, src);
     checkOperation(OperationCategory.WRITE);
-    FSPermissionChecker pc = getPermissionChecker();
+    final FSPermissionChecker pc = getPermissionChecker();
     writeLock();
     try {
       checkOperation(OperationCategory.WRITE);
@@ -2821,7 +2831,7 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,
     throws IOException {
     boolean success = false;
     checkOperation(OperationCategory.WRITE);
-    FSPermissionChecker pc = getPermissionChecker();
+    final FSPermissionChecker pc = getPermissionChecker();
     writeLock();
     try {
       checkOperation(OperationCategory.WRITE);
@@ -2899,11 +2909,13 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,
       throws IOException {
     final String operationName = "rename";
     FSDirRenameOp.RenameResult ret = null;
+    checkOperation(OperationCategory.WRITE);
+    final FSPermissionChecker pc = getPermissionChecker();
     writeLock();
     try {
       checkOperation(OperationCategory.WRITE);
       checkNameNodeSafeMode("Cannot rename " + src);
-      ret = FSDirRenameOp.renameToInt(dir, src, dst, logRetryCache);
+      ret = FSDirRenameOp.renameToInt(dir, pc, src, dst, logRetryCache);
     } catch (AccessControlException e)  {
       logAuditEvent(false, operationName, src, dst, null);
       throw e;
@@ -2923,11 +2935,14 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,
       throws IOException {
     final String operationName = "rename";
     FSDirRenameOp.RenameResult res = null;
+    checkOperation(OperationCategory.WRITE);
+    final FSPermissionChecker pc = getPermissionChecker();
     writeLock();
     try {
       checkOperation(OperationCategory.WRITE);
       checkNameNodeSafeMode("Cannot rename " + src);
-      res = FSDirRenameOp.renameToInt(dir, src, dst, logRetryCache, options);
+      res = FSDirRenameOp.renameToInt(dir, pc, src, dst, logRetryCache,
+          options);
     } catch (AccessControlException e) {
       logAuditEvent(false, operationName + " (options=" +
           Arrays.toString(options) + ")", src, dst, null);
@@ -2958,13 +2973,15 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,
       throws IOException {
     final String operationName = "delete";
     BlocksMapUpdateInfo toRemovedBlocks = null;
+    checkOperation(OperationCategory.WRITE);
+    final FSPermissionChecker pc = getPermissionChecker();
     writeLock();
     boolean ret = false;
     try {
       checkOperation(OperationCategory.WRITE);
       checkNameNodeSafeMode("Cannot delete " + src);
       toRemovedBlocks = FSDirDeleteOp.delete(
-          this, src, recursive, logRetryCache);
+          this, pc, src, recursive, logRetryCache);
       ret = toRemovedBlocks != null;
     } catch (AccessControlException e) {
       logAuditEvent(false, operationName, src);
@@ -3063,11 +3080,12 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,
     final String operationName = needBlockToken ? "open" : "getfileinfo";
     checkOperation(OperationCategory.READ);
     HdfsFileStatus stat = null;
+    final FSPermissionChecker pc = getPermissionChecker();
     readLock();
     try {
       checkOperation(OperationCategory.READ);
       stat = FSDirStatAndListingOp.getFileInfo(
-          dir, src, resolveLink, needLocation, needBlockToken);
+          dir, pc, src, resolveLink, needLocation, needBlockToken);
     } catch (AccessControlException e) {
       logAuditEvent(false, operationName, src);
       throw e;
@@ -3084,10 +3102,11 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,
   boolean isFileClosed(final String src) throws IOException {
     final String operationName = "isFileClosed";
     checkOperation(OperationCategory.READ);
+    final FSPermissionChecker pc = getPermissionChecker();
     readLock();
     try {
       checkOperation(OperationCategory.READ);
-      return FSDirStatAndListingOp.isFileClosed(dir, src);
+      return FSDirStatAndListingOp.isFileClosed(dir, pc, src);
     } catch (AccessControlException e) {
       logAuditEvent(false, operationName, src);
       throw e;
@@ -3104,11 +3123,13 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,
     final String operationName = "mkdirs";
     FileStatus auditStat = null;
     checkOperation(OperationCategory.WRITE);
+    final FSPermissionChecker pc = getPermissionChecker();
     writeLock();
     try {
       checkOperation(OperationCategory.WRITE);
       checkNameNodeSafeMode("Cannot create directory " + src);
-      auditStat = FSDirMkdirOp.mkdirs(this, src, permissions, createParent);
+      auditStat = FSDirMkdirOp.mkdirs(this, pc, src, permissions,
+          createParent);
     } catch (AccessControlException e) {
       logAuditEvent(false, operationName, src);
       throw e;
@@ -3137,12 +3158,13 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,
   ContentSummary getContentSummary(final String src) throws IOException {
     checkOperation(OperationCategory.READ);
     final String operationName = "contentSummary";
-    readLock();
     boolean success = true;
     ContentSummary cs;
+    final FSPermissionChecker pc = getPermissionChecker();
+    readLock();
     try {
       checkOperation(OperationCategory.READ);
-      cs = FSDirStatAndListingOp.getContentSummary(dir, src);
+      cs = FSDirStatAndListingOp.getContentSummary(dir, pc, src);
     } catch (AccessControlException ace) {
       success = false;
       logAuditEvent(success, operationName, src);
@@ -3172,11 +3194,12 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,
     checkOperation(OperationCategory.READ);
     final String operationName = "quotaUsage";
     QuotaUsage quotaUsage;
+    final FSPermissionChecker pc = getPermissionChecker();
     readLock();
     boolean success = true;
     try {
       checkOperation(OperationCategory.READ);
-      quotaUsage = FSDirStatAndListingOp.getQuotaUsage(dir, src);
+      quotaUsage = FSDirStatAndListingOp.getQuotaUsage(dir, pc, src);
     } catch (AccessControlException ace) {
       success = false;
       logAuditEvent(success, operationName, src);
@@ -3202,12 +3225,13 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,
     }
     checkOperation(OperationCategory.WRITE);
     final String operationName = getQuotaCommand(nsQuota, ssQuota);
+    final FSPermissionChecker pc = getPermissionChecker();
     writeLock();
     boolean success = false;
     try {
       checkOperation(OperationCategory.WRITE);
       checkNameNodeSafeMode("Cannot set quota on " + src);
-      FSDirAttrOp.setQuota(dir, src, nsQuota, ssQuota, type);
+      FSDirAttrOp.setQuota(dir, pc, src, nsQuota, ssQuota, type);
       success = true;
     } catch (AccessControlException ace) {
       logAuditEvent(success, operationName, src);
@@ -3234,8 +3258,7 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,
       throws IOException {
     NameNode.stateChangeLog.info("BLOCK* fsync: " + src + " for " + clientName);
     checkOperation(OperationCategory.WRITE);
-
-    FSPermissionChecker pc = getPermissionChecker();
+    final FSPermissionChecker pc = getPermissionChecker();
     writeLock();
     try {
       checkOperation(OperationCategory.WRITE);
@@ -3739,10 +3762,11 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,
     checkOperation(OperationCategory.READ);
     final String operationName = "listStatus";
     DirectoryListing dl = null;
+    final FSPermissionChecker pc = getPermissionChecker();
     readLock();
     try {
       checkOperation(NameNode.OperationCategory.READ);
-      dl = getListingInt(dir, src, startAfter, needLocation);
+      dl = getListingInt(dir, pc, src, startAfter, needLocation);
     } catch (AccessControlException e) {
       logAuditEvent(false, operationName, src);
       throw e;
@@ -4678,6 +4702,13 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,
     }
   }
 
+  void checkSuperuserPrivilege(FSPermissionChecker pc)
+      throws AccessControlException {
+    if (isPermissionEnabled) {
+      pc.checkSuperuserPrivilege();
+    }
+  }
+
   /**
    * Check to see if we have exceeded the limit on the number
    * of inodes.
@@ -6365,14 +6396,16 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,
    */
   String createSnapshot(String snapshotRoot, String snapshotName,
                         boolean logRetryCache) throws IOException {
+    checkOperation(OperationCategory.WRITE);
     final String operationName = "createSnapshot";
     String snapshotPath = null;
     boolean success = false;
+    final FSPermissionChecker pc = getPermissionChecker();
     writeLock();
     try {
       checkOperation(OperationCategory.WRITE);
       checkNameNodeSafeMode("Cannot create snapshot for " + snapshotRoot);
-      snapshotPath = FSDirSnapshotOp.createSnapshot(dir,
+      snapshotPath = FSDirSnapshotOp.createSnapshot(dir, pc,
           snapshotManager, snapshotRoot, snapshotName, logRetryCache);
       success = true;
     } catch (AccessControlException ace) {
@@ -6399,15 +6432,17 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,
   void renameSnapshot(
       String path, String snapshotOldName, String snapshotNewName,
       boolean logRetryCache) throws IOException {
+    checkOperation(OperationCategory.WRITE);
     final String operationName = "renameSnapshot";
     boolean success = false;
     String oldSnapshotRoot = Snapshot.getSnapshotPath(path, snapshotOldName);
     String newSnapshotRoot = Snapshot.getSnapshotPath(path, snapshotNewName);
+    final FSPermissionChecker pc = getPermissionChecker();
     writeLock();
     try {
       checkOperation(OperationCategory.WRITE);
       checkNameNodeSafeMode("Cannot rename snapshot for " + path);
-      FSDirSnapshotOp.renameSnapshot(dir, snapshotManager, path,
+      FSDirSnapshotOp.renameSnapshot(dir, pc, snapshotManager, path,
           snapshotOldName, snapshotNewName, logRetryCache);
       success = true;
     } catch (AccessControlException ace) {
@@ -6435,10 +6470,12 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,
     SnapshottableDirectoryStatus[] status = null;
     checkOperation(OperationCategory.READ);
     boolean success = false;
+    final FSPermissionChecker pc = getPermissionChecker();
     readLock();
     try {
       checkOperation(OperationCategory.READ);
-      status = FSDirSnapshotOp.getSnapshottableDirListing(dir, snapshotManager);
+      status = FSDirSnapshotOp.getSnapshottableDirListing(dir, pc,
+          snapshotManager);
       success = true;
     } catch (AccessControlException ace) {
       logAuditEvent(success, operationName, null, null, null);
@@ -6475,10 +6512,11 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,
         path : Snapshot.getSnapshotPath(path, fromSnapshot);
     String toSnapshotRoot = (toSnapshot == null || toSnapshot.isEmpty()) ?
         path : Snapshot.getSnapshotPath(path, toSnapshot);
+    final FSPermissionChecker pc = getPermissionChecker();
     readLock();
     try {
       checkOperation(OperationCategory.READ);
-      diffs = FSDirSnapshotOp.getSnapshotDiffReport(dir, snapshotManager,
+      diffs = FSDirSnapshotOp.getSnapshotDiffReport(dir, pc, snapshotManager,
           path, fromSnapshot, toSnapshot);
       success = true;
     } catch (AccessControlException ace) {
@@ -6530,11 +6568,12 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,
     String toSnapshotRoot =
         (toSnapshot == null || toSnapshot.isEmpty()) ? path :
             Snapshot.getSnapshotPath(path, toSnapshot);
+    final FSPermissionChecker pc = getPermissionChecker();
     readLock();
     try {
       checkOperation(OperationCategory.READ);
       diffs = FSDirSnapshotOp
-          .getSnapshotDiffReportListing(dir, snapshotManager, path,
+          .getSnapshotDiffReportListing(dir, pc, snapshotManager, path,
               fromSnapshot, toSnapshot, startPath, index,
               snapshotDiffReportLimit);
       success = true;
@@ -6562,14 +6601,15 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,
     final String operationName = "deleteSnapshot";
     boolean success = false;
     String rootPath = null;
-    writeLock();
     BlocksMapUpdateInfo blocksToBeDeleted = null;
+    final FSPermissionChecker pc = getPermissionChecker();
+    writeLock();
     try {
       checkOperation(OperationCategory.WRITE);
       checkNameNodeSafeMode("Cannot delete snapshot for " + snapshotRoot);
       rootPath = Snapshot.getSnapshotPath(snapshotRoot, snapshotName);
-      blocksToBeDeleted = FSDirSnapshotOp.deleteSnapshot(dir, snapshotManager,
-          snapshotRoot, snapshotName, logRetryCache);
+      blocksToBeDeleted = FSDirSnapshotOp.deleteSnapshot(dir, pc,
+          snapshotManager, snapshotRoot, snapshotName, logRetryCache);
       success = true;
     } catch (AccessControlException ace) {
       logAuditEvent(success, operationName, rootPath, null, null);
@@ -7051,11 +7091,12 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,
     final String operationName = "modifyAclEntries";
     FileStatus auditStat = null;
     checkOperation(OperationCategory.WRITE);
+    final FSPermissionChecker pc = getPermissionChecker();
     writeLock();
     try {
       checkOperation(OperationCategory.WRITE);
       checkNameNodeSafeMode("Cannot modify ACL entries on " + src);
-      auditStat = FSDirAclOp.modifyAclEntries(dir, src, aclSpec);
+      auditStat = FSDirAclOp.modifyAclEntries(dir, pc, src, aclSpec);
     } catch (AccessControlException e) {
       logAuditEvent(false, operationName, src);
       throw e;
@@ -7071,11 +7112,12 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,
     final String operationName = "removeAclEntries";
     checkOperation(OperationCategory.WRITE);
     FileStatus auditStat = null;
+    final FSPermissionChecker pc = getPermissionChecker();
     writeLock();
     try {
       checkOperation(OperationCategory.WRITE);
       checkNameNodeSafeMode("Cannot remove ACL entries on " + src);
-      auditStat = FSDirAclOp.removeAclEntries(dir, src, aclSpec);
+      auditStat = FSDirAclOp.removeAclEntries(dir, pc, src, aclSpec);
     } catch (AccessControlException e) {
       logAuditEvent(false, operationName, src);
       throw e;
@@ -7090,11 +7132,12 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,
     final String operationName = "removeDefaultAcl";
     FileStatus auditStat = null;
     checkOperation(OperationCategory.WRITE);
+    final FSPermissionChecker pc = getPermissionChecker();
     writeLock();
     try {
       checkOperation(OperationCategory.WRITE);
       checkNameNodeSafeMode("Cannot remove default ACL entries on " + src);
-      auditStat = FSDirAclOp.removeDefaultAcl(dir, src);
+      auditStat = FSDirAclOp.removeDefaultAcl(dir, pc, src);
     } catch (AccessControlException e) {
       logAuditEvent(false, operationName, src);
       throw e;
@@ -7109,11 +7152,12 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,
     final String operationName = "removeAcl";
     FileStatus auditStat = null;
     checkOperation(OperationCategory.WRITE);
+    final FSPermissionChecker pc = getPermissionChecker();
     writeLock();
     try {
       checkOperation(OperationCategory.WRITE);
       checkNameNodeSafeMode("Cannot remove ACL on " + src);
-      auditStat = FSDirAclOp.removeAcl(dir, src);
+      auditStat = FSDirAclOp.removeAcl(dir, pc, src);
     } catch (AccessControlException e) {
       logAuditEvent(false, operationName, src);
       throw e;
@@ -7128,11 +7172,12 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,
     final String operationName = "setAcl";
     FileStatus auditStat = null;
     checkOperation(OperationCategory.WRITE);
+    final FSPermissionChecker pc = getPermissionChecker();
     writeLock();
     try {
       checkOperation(OperationCategory.WRITE);
       checkNameNodeSafeMode("Cannot set ACL on " + src);
-      auditStat = FSDirAclOp.setAcl(dir, src, aclSpec);
+      auditStat = FSDirAclOp.setAcl(dir, pc, src, aclSpec);
     } catch (AccessControlException e) {
       logAuditEvent(false, operationName, src);
       throw e;
@@ -7147,10 +7192,11 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,
     final String operationName = "getAclStatus";
     checkOperation(OperationCategory.READ);
     final AclStatus ret;
+    final FSPermissionChecker pc = getPermissionChecker();
     readLock();
     try {
       checkOperation(OperationCategory.READ);
-      ret = FSDirAclOp.getAclStatus(dir, src);
+      ret = FSDirAclOp.getAclStatus(dir, pc, src);
     } catch(AccessControlException ace) {
       logAuditEvent(false, operationName, src);
       throw ace;
@@ -7179,13 +7225,13 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,
     try {
       Metadata metadata = FSDirEncryptionZoneOp.ensureKeyIsInitialized(dir,
           keyName, src);
-      checkSuperuserPrivilege();
-      FSPermissionChecker pc = getPermissionChecker();
+      final FSPermissionChecker pc = getPermissionChecker();
+      checkSuperuserPrivilege(pc);
       checkOperation(OperationCategory.WRITE);
       final FileStatus resultingStat;
       writeLock();
       try {
-        checkSuperuserPrivilege();
+        checkSuperuserPrivilege(pc);
         checkOperation(OperationCategory.WRITE);
         checkNameNodeSafeMode("Cannot create encryption zone on " + src);
         resultingStat = FSDirEncryptionZoneOp.createEncryptionZone(dir, src,
@@ -7240,12 +7286,13 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,
       throws IOException {
     final String operationName = "listEncryptionZones";
     boolean success = false;
-    checkSuperuserPrivilege();
     checkOperation(OperationCategory.READ);
+    final FSPermissionChecker pc = getPermissionChecker();
+    checkSuperuserPrivilege(pc);
     readLock();
     try {
-      checkSuperuserPrivilege();
       checkOperation(OperationCategory.READ);
+      checkSuperuserPrivilege(pc);
       final BatchedListEntries<EncryptionZone> ret =
           FSDirEncryptionZoneOp.listEncryptionZones(dir, prevId);
       success = true;
@@ -7261,11 +7308,12 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,
     boolean success = false;
     try {
       Preconditions.checkNotNull(zone, "zone is null.");
-      checkSuperuserPrivilege();
       checkOperation(OperationCategory.WRITE);
+      final FSPermissionChecker pc = dir.getPermissionChecker();
+      checkSuperuserPrivilege(pc);
       checkNameNodeSafeMode("NameNode in safemode, cannot " + action
           + " re-encryption on zone " + zone);
-      reencryptEncryptionZoneInt(zone, action, logRetryCache);
+      reencryptEncryptionZoneInt(pc, zone, action, logRetryCache);
       success = true;
     } finally {
       logAuditEvent(success, action + "reencryption", zone, null, null);
@@ -7276,12 +7324,13 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,
       final long prevId) throws IOException {
     final String operationName = "listReencryptionStatus";
     boolean success = false;
-    checkSuperuserPrivilege();
     checkOperation(OperationCategory.READ);
+    final FSPermissionChecker pc = getPermissionChecker();
+    checkSuperuserPrivilege(pc);
     readLock();
     try {
-      checkSuperuserPrivilege();
       checkOperation(OperationCategory.READ);
+      checkSuperuserPrivilege(pc);
       final BatchedListEntries<ZoneReencryptionStatus> ret =
           FSDirEncryptionZoneOp.listReencryptionStatus(dir, prevId);
       success = true;
@@ -7292,9 +7341,9 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,
     }
   }
 
-  private void reencryptEncryptionZoneInt(final String zone,
-      final ReencryptAction action, final boolean logRetryCache)
-      throws IOException {
+  private void reencryptEncryptionZoneInt(final FSPermissionChecker pc,
+      final String zone, final ReencryptAction action,
+      final boolean logRetryCache) throws IOException {
     if (getProvider() == null) {
       throw new IOException("No key provider configured, re-encryption "
           + "operation is rejected");
@@ -7302,7 +7351,8 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,
     String keyVersionName = null;
     if (action == ReencryptAction.START) {
       // get zone's latest key version name out of the lock.
-      keyVersionName = FSDirEncryptionZoneOp.getCurrentKeyVersion(dir, zone);
+      keyVersionName =
+          FSDirEncryptionZoneOp.getCurrentKeyVersion(dir, pc, zone);
       if (keyVersionName == null) {
         throw new IOException("Failed to get key version name for " + zone);
       }
@@ -7311,11 +7361,10 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,
     }
     writeLock();
     try {
-      checkSuperuserPrivilege();
+      checkSuperuserPrivilege(pc);
       checkOperation(OperationCategory.WRITE);
       checkNameNodeSafeMode("NameNode in safemode, cannot " + action
           + " re-encryption on zone " + zone);
-      final FSPermissionChecker pc = dir.getPermissionChecker();
       List<XAttr> xattrs;
       dir.writeLock();
       try {
@@ -7550,7 +7599,7 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,
     final String operationName = "getErasureCodingPolicy";
     boolean success = false;
     checkOperation(OperationCategory.READ);
-    FSPermissionChecker pc = getPermissionChecker();
+    final FSPermissionChecker pc = getPermissionChecker();
     readLock();
     try {
       checkOperation(OperationCategory.READ);
@@ -7609,11 +7658,14 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,
       throws IOException {
     final String operationName = "setXAttr";
     FileStatus auditStat = null;
+    checkOperation(OperationCategory.WRITE);
+    final FSPermissionChecker pc = getPermissionChecker();
     writeLock();
     try {
       checkOperation(OperationCategory.WRITE);
       checkNameNodeSafeMode("Cannot set XAttr on " + src);
-      auditStat = FSDirXAttrOp.setXAttr(dir, src, xAttr, flag, logRetryCache);
+      auditStat = FSDirXAttrOp.setXAttr(dir, pc, src, xAttr, flag,
+          logRetryCache);
     } catch (AccessControlException e) {
       logAuditEvent(false, operationName, src);
       throw e;
@@ -7629,10 +7681,11 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,
     final String operationName = "getXAttrs";
     checkOperation(OperationCategory.READ);
     List<XAttr> fsXattrs;
+    final FSPermissionChecker pc = getPermissionChecker();
     readLock();
     try {
       checkOperation(OperationCategory.READ);
-      fsXattrs = FSDirXAttrOp.getXAttrs(dir, src, xAttrs);
+      fsXattrs = FSDirXAttrOp.getXAttrs(dir, pc, src, xAttrs);
     } catch (AccessControlException e) {
       logAuditEvent(false, operationName, src);
       throw e;
@@ -7647,10 +7700,11 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,
     final String operationName = "listXAttrs";
     checkOperation(OperationCategory.READ);
     List<XAttr> fsXattrs;
+    final FSPermissionChecker pc = getPermissionChecker();
     readLock();
     try {
       checkOperation(OperationCategory.READ);
-      fsXattrs = FSDirXAttrOp.listXAttrs(dir, src);
+      fsXattrs = FSDirXAttrOp.listXAttrs(dir, pc, src);
     } catch (AccessControlException e) {
       logAuditEvent(false, operationName, src);
       throw e;
@@ -7665,11 +7719,13 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,
       throws IOException {
     final String operationName = "removeXAttr";
     FileStatus auditStat = null;
+    checkOperation(OperationCategory.WRITE);
+    final FSPermissionChecker pc = getPermissionChecker();
     writeLock();
     try {
       checkOperation(OperationCategory.WRITE);
       checkNameNodeSafeMode("Cannot remove XAttr entry on " + src);
-      auditStat = FSDirXAttrOp.removeXAttr(dir, src, xAttr, logRetryCache);
+      auditStat = FSDirXAttrOp.removeXAttr(dir, pc, src, xAttr, logRetryCache);
     } catch (AccessControlException e) {
       logAuditEvent(false, operationName, src);
       throw e;
@@ -7683,7 +7739,7 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,
   void checkAccess(String src, FsAction mode) throws IOException {
     final String operationName = "checkAccess";
     checkOperation(OperationCategory.READ);
-    FSPermissionChecker pc = getPermissionChecker();
+    final FSPermissionChecker pc = getPermissionChecker();
     readLock();
     try {
       checkOperation(OperationCategory.READ);
@@ -7934,6 +7990,8 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,
         .size();
   }
 
+  // This method logs operationName without superuser privilege.
+  // It should be called without holding the FSN lock.
   void checkSuperuserPrivilege(String operationName)
       throws IOException {
     try {
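
[Editor's note] Every FSNamesystem hunk above converges on the same ordering: check the operation category, build the permission checker, and only then take the namesystem lock, re-checking state once inside it. A self-contained sketch of that discipline using a plain ReentrantReadWriteLock; Namesystem-style names here (Checker, deleteOp) are illustrative, not the real classes:

import java.io.IOException;
import java.util.concurrent.locks.ReentrantReadWriteLock;

public class LockOrderSketch {
    // Hypothetical stand-in for FSPermissionChecker.
    static class Checker {
        void check(String src) throws IOException {
            // permission enforcement would go here
        }
    }

    private final ReentrantReadWriteLock fsLock = new ReentrantReadWriteLock();

    private Checker getPermissionChecker() throws IOException {
        // In the real code this can throw AccessControlException;
        // calling it here keeps that failure path outside the write lock.
        return new Checker();
    }

    void deleteOp(String src) throws IOException {
        // 1. cheap pre-checks before any locking (checkOperation in the diff)
        // 2. checker construction before the lock
        final Checker pc = getPermissionChecker();
        fsLock.writeLock().lock();
        try {
            // 3. re-validate under the lock, then do the guarded work
            pc.check(src);
            // ... mutate the namespace ...
        } finally {
            fsLock.writeLock().unlock();
        }
    }

    public static void main(String[] args) throws IOException {
        new LockOrderSketch().deleteOp("/tmp/x");
    }
}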

http://git-wip-us.apache.org/repos/asf/hadoop/blob/84a1321f/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/NameNodeAdapter.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/NameNodeAdapter.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/NameNodeAdapter.java
index 11d7959..a71538d 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/NameNodeAdapter.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/NameNodeAdapter.java
@@ -75,10 +75,13 @@ public class NameNodeAdapter {
       boolean resolveLink, boolean needLocation, boolean needBlockToken)
       throws AccessControlException, UnresolvedLinkException, StandbyException,
       IOException {
+    final FSPermissionChecker pc =
+        namenode.getNamesystem().getPermissionChecker();
     namenode.getNamesystem().readLock();
     try {
       return FSDirStatAndListingOp.getFileInfo(namenode.getNamesystem()
-          .getFSDirectory(), src, resolveLink, needLocation, needBlockToken);
+          .getFSDirectory(), pc, src, resolveLink, needLocation,
+          needBlockToken);
     } finally {
       namenode.getNamesystem().readUnlock();
     }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/84a1321f/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestAuditLogger.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestAuditLogger.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestAuditLogger.java
index c422f32..5b4f1f4 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestAuditLogger.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestAuditLogger.java
@@ -75,6 +75,7 @@ import static org.junit.Assert.assertFalse;
 import static org.junit.Assert.assertNull;
 import static org.junit.Assert.assertTrue;
 import static org.junit.Assert.fail;
+import static org.mockito.Matchers.any;
 import static org.mockito.Mockito.doThrow;
 
 /**
@@ -415,7 +416,7 @@ public class TestAuditLogger {
 
       final FSDirectory mockedDir = Mockito.spy(dir);
       AccessControlException ex = new AccessControlException();
-      doThrow(ex).when(mockedDir).getPermissionChecker();
+      doThrow(ex).when(mockedDir).checkTraverse(any(), any(), any());
       cluster.getNamesystem().setFSDirectory(mockedDir);
       assertTrue(DummyAuditLogger.initialized);
       DummyAuditLogger.resetLogCount();

http://git-wip-us.apache.org/repos/asf/hadoop/blob/84a1321f/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestAuditLoggerWithCommands.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestAuditLoggerWithCommands.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestAuditLoggerWithCommands.java
index 4eda88f..41ee03f 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestAuditLoggerWithCommands.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestAuditLoggerWithCommands.java
@@ -53,6 +53,8 @@ import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_DELEGATION_TOKEN
 import static org.junit.Assert.assertTrue;
 import static org.junit.Assert.fail;
 import org.mockito.Mockito;
+
+import static org.mockito.Matchers.any;
 import static org.mockito.Mockito.doThrow;
 import static org.mockito.Mockito.spy;
 import static org.mockito.Mockito.when;
@@ -618,7 +620,7 @@ public class TestAuditLoggerWithCommands {
     final FSDirectory dir = cluster.getNamesystem().getFSDirectory();
     final FSDirectory mockedDir = Mockito.spy(dir);
     AccessControlException ex = new AccessControlException();
-    doThrow(ex).when(mockedDir).getPermissionChecker();
+    doThrow(ex).when(mockedDir).checkTraverse(any(), any(), any());
     cluster.getNamesystem().setFSDirectory(mockedDir);
     String aceGetAclStatus =
         ".*allowed=false.*ugi=theDoctor.*cmd=getAclStatus.*";




[10/50] [abbrv] hadoop git commit: YARN-7919. Refactor timelineservice-hbase module into submodules. Contributed by Haibo Chen.

Posted by ha...@apache.org.
http://git-wip-us.apache.org/repos/asf/hadoop/blob/9af30d46/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-server/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/flow/FlowRunCoprocessor.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-server/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/flow/FlowRunCoprocessor.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-server/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/flow/FlowRunCoprocessor.java
new file mode 100644
index 0000000..41a371b
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-server/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/flow/FlowRunCoprocessor.java
@@ -0,0 +1,277 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.yarn.server.timelineservice.storage.flow;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.List;
+import java.util.Map;
+import java.util.NavigableMap;
+import java.util.TreeMap;
+
+import org.apache.hadoop.hbase.Cell;
+import org.apache.hadoop.hbase.CellUtil;
+import org.apache.hadoop.hbase.CoprocessorEnvironment;
+import org.apache.hadoop.hbase.HConstants;
+import org.apache.hadoop.hbase.KeyValue;
+import org.apache.hadoop.hbase.Tag;
+import org.apache.hadoop.hbase.client.Durability;
+import org.apache.hadoop.hbase.client.Get;
+import org.apache.hadoop.hbase.client.Put;
+import org.apache.hadoop.hbase.client.Scan;
+import org.apache.hadoop.hbase.coprocessor.BaseRegionObserver;
+import org.apache.hadoop.hbase.coprocessor.ObserverContext;
+import org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment;
+import org.apache.hadoop.hbase.regionserver.Region;
+import org.apache.hadoop.hbase.regionserver.InternalScanner;
+import org.apache.hadoop.hbase.regionserver.RegionScanner;
+import org.apache.hadoop.hbase.regionserver.ScanType;
+import org.apache.hadoop.hbase.regionserver.Store;
+import org.apache.hadoop.hbase.regionserver.StoreFile;
+import org.apache.hadoop.hbase.regionserver.compactions.CompactionRequest;
+import org.apache.hadoop.hbase.regionserver.wal.WALEdit;
+import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.yarn.server.timelineservice.storage.common.HBaseTimelineServerUtils;
+import org.apache.hadoop.yarn.server.timelineservice.storage.common.TimestampGenerator;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+/**
+ * Coprocessor for flow run table.
+ */
+public class FlowRunCoprocessor extends BaseRegionObserver {
+
+  private static final Logger LOG =
+      LoggerFactory.getLogger(FlowRunCoprocessor.class);
+
+  private Region region;
+  /**
+   * Generates a timestamp that is unique per row within a region; there is
+   * one generator per region.
+   */
+  private final TimestampGenerator timestampGenerator =
+      new TimestampGenerator();
+
+  @Override
+  public void start(CoprocessorEnvironment e) throws IOException {
+    if (e instanceof RegionCoprocessorEnvironment) {
+      RegionCoprocessorEnvironment env = (RegionCoprocessorEnvironment) e;
+      this.region = env.getRegion();
+    }
+  }
+
+  /*
+   * (non-Javadoc)
+   *
+   * This method adds the tags onto the cells in the Put. It is presumed that
+   * all the cells in one Put have the same set of Tags. The existing cell
+   * timestamp is overwritten for non-metric cells and each such cell gets a new
+   * unique timestamp generated by {@link TimestampGenerator}
+   *
+   * @see
+   * org.apache.hadoop.hbase.coprocessor.BaseRegionObserver#prePut(org.apache
+   * .hadoop.hbase.coprocessor.ObserverContext,
+   * org.apache.hadoop.hbase.client.Put,
+   * org.apache.hadoop.hbase.regionserver.wal.WALEdit,
+   * org.apache.hadoop.hbase.client.Durability)
+   */
+  @Override
+  public void prePut(ObserverContext<RegionCoprocessorEnvironment> e, Put put,
+      WALEdit edit, Durability durability) throws IOException {
+    Map<String, byte[]> attributes = put.getAttributesMap();
+    // Assumption is that all the cells in a put are the same operation.
+    List<Tag> tags = new ArrayList<>();
+    if ((attributes != null) && (attributes.size() > 0)) {
+      for (Map.Entry<String, byte[]> attribute : attributes.entrySet()) {
+        Tag t = HBaseTimelineServerUtils.getTagFromAttribute(attribute);
+        if (t != null) {
+          tags.add(t);
+        }
+      }
+      byte[] tagByteArray = Tag.fromList(tags);
+      NavigableMap<byte[], List<Cell>> newFamilyMap = new TreeMap<>(
+          Bytes.BYTES_COMPARATOR);
+      for (Map.Entry<byte[], List<Cell>> entry : put.getFamilyCellMap()
+          .entrySet()) {
+        List<Cell> newCells = new ArrayList<>(entry.getValue().size());
+        for (Cell cell : entry.getValue()) {
+          // for each cell in the put add the tags
+          // Assumption is that all the cells in
+          // one put are the same operation
+          // also, get a unique cell timestamp for non-metric cells
+          // this way we don't inadvertently overwrite cell versions
+          long cellTimestamp = getCellTimestamp(cell.getTimestamp(), tags);
+          newCells.add(CellUtil.createCell(CellUtil.cloneRow(cell),
+              CellUtil.cloneFamily(cell), CellUtil.cloneQualifier(cell),
+              cellTimestamp, KeyValue.Type.Put, CellUtil.cloneValue(cell),
+              tagByteArray));
+        }
+        newFamilyMap.put(entry.getKey(), newCells);
+      } // for each entry
+      // Update the family map for the Put
+      put.setFamilyCellMap(newFamilyMap);
+    }
+  }
+
+  /**
+   * Determines whether the current cell's timestamp is to be used or a new
+   * unique cell timestamp is to be generated. This is done to avoid
+   * inadvertently overwriting cells when writes come in very fast. But for
+   * metric cells, the cell timestamp signifies the metric timestamp, so we
+   * don't want to overwrite it.
+   *
+   * @param timestamp
+   * @param tags
+   * @return cell timestamp
+   */
+  private long getCellTimestamp(long timestamp, List<Tag> tags) {
+    // if ts not set (hbase sets to HConstants.LATEST_TIMESTAMP by default)
+    // then use the generator
+    if (timestamp == HConstants.LATEST_TIMESTAMP) {
+      return timestampGenerator.getUniqueTimestamp();
+    } else {
+      return timestamp;
+    }
+  }
+
+  /*
+   * (non-Javadoc)
+   *
+   * Creates a {@link FlowScanner} Scan so that it can correctly process the
+   * contents of {@link FlowRunTable}.
+   *
+   * @see
+   * org.apache.hadoop.hbase.coprocessor.BaseRegionObserver#preGetOp(org.apache
+   * .hadoop.hbase.coprocessor.ObserverContext,
+   * org.apache.hadoop.hbase.client.Get, java.util.List)
+   */
+  @Override
+  public void preGetOp(ObserverContext<RegionCoprocessorEnvironment> e,
+      Get get, List<Cell> results) throws IOException {
+    Scan scan = new Scan(get);
+    scan.setMaxVersions();
+    RegionScanner scanner = null;
+    try {
+      scanner = new FlowScanner(e.getEnvironment(), scan,
+          region.getScanner(scan), FlowScannerOperation.READ);
+      scanner.next(results);
+      e.bypass();
+    } finally {
+      if (scanner != null) {
+        scanner.close();
+      }
+    }
+  }
+
+  /*
+   * (non-Javadoc)
+   *
+   * Ensures that max versions are set for the Scan so that metrics can be
+   * correctly aggregated and min/max can be correctly determined.
+   *
+   * @see
+   * org.apache.hadoop.hbase.coprocessor.BaseRegionObserver#preScannerOpen(org
+   * .apache.hadoop.hbase.coprocessor.ObserverContext,
+   * org.apache.hadoop.hbase.client.Scan,
+   * org.apache.hadoop.hbase.regionserver.RegionScanner)
+   */
+  @Override
+  public RegionScanner preScannerOpen(
+      ObserverContext<RegionCoprocessorEnvironment> e, Scan scan,
+      RegionScanner scanner) throws IOException {
+    // set max versions for scan to see all
+    // versions to aggregate for metrics
+    scan.setMaxVersions();
+    return scanner;
+  }
+
+  /*
+   * (non-Javadoc)
+   *
+   * Creates a {@link FlowScanner} Scan so that it can correctly process the
+   * contents of {@link FlowRunTable}.
+   *
+   * @see
+   * org.apache.hadoop.hbase.coprocessor.BaseRegionObserver#postScannerOpen(
+   * org.apache.hadoop.hbase.coprocessor.ObserverContext,
+   * org.apache.hadoop.hbase.client.Scan,
+   * org.apache.hadoop.hbase.regionserver.RegionScanner)
+   */
+  @Override
+  public RegionScanner postScannerOpen(
+      ObserverContext<RegionCoprocessorEnvironment> e, Scan scan,
+      RegionScanner scanner) throws IOException {
+    return new FlowScanner(e.getEnvironment(), scan,
+        scanner, FlowScannerOperation.READ);
+  }
+
+  @Override
+  public InternalScanner preFlush(
+      ObserverContext<RegionCoprocessorEnvironment> c, Store store,
+      InternalScanner scanner) throws IOException {
+    if (LOG.isDebugEnabled()) {
+      if (store != null) {
+        LOG.debug("preFlush store = " + store.getColumnFamilyName()
+            + " flushableSize=" + store.getFlushableSize()
+            + " flushedCellsCount=" + store.getFlushedCellsCount()
+            + " compactedCellsCount=" + store.getCompactedCellsCount()
+            + " majorCompactedCellsCount="
+            + store.getMajorCompactedCellsCount() + " memstoreFlushSize="
+            + store.getMemstoreFlushSize() + " memstoreSize="
+            + store.getMemStoreSize() + " size=" + store.getSize()
+            + " storeFilesCount=" + store.getStorefilesCount());
+      }
+    }
+    return new FlowScanner(c.getEnvironment(), scanner,
+        FlowScannerOperation.FLUSH);
+  }
+
+  @Override
+  public void postFlush(ObserverContext<RegionCoprocessorEnvironment> c,
+      Store store, StoreFile resultFile) {
+    if (LOG.isDebugEnabled()) {
+      if (store != null) {
+        LOG.debug("postFlush store = " + store.getColumnFamilyName()
+            + " flushableSize=" + store.getFlushableSize()
+            + " flushedCellsCount=" + store.getFlushedCellsCount()
+            + " compactedCellsCount=" + store.getCompactedCellsCount()
+            + " majorCompactedCellsCount="
+            + store.getMajorCompactedCellsCount() + " memstoreFlushSize="
+            + store.getMemstoreFlushSize() + " memstoreSize="
+            + store.getMemStoreSize() + " size=" + store.getSize()
+            + " storeFilesCount=" + store.getStorefilesCount());
+      }
+    }
+  }
+
+  @Override
+  public InternalScanner preCompact(
+      ObserverContext<RegionCoprocessorEnvironment> e, Store store,
+      InternalScanner scanner, ScanType scanType, CompactionRequest request)
+      throws IOException {
+
+    FlowScannerOperation requestOp = FlowScannerOperation.MINOR_COMPACTION;
+    if (request != null) {
+      requestOp = (request.isMajor() ? FlowScannerOperation.MAJOR_COMPACTION
+          : FlowScannerOperation.MINOR_COMPACTION);
+      LOG.info("Compactionrequest= " + request.toString() + " "
+          + requestOp.toString() + " RegionName=" + e.getEnvironment()
+              .getRegion().getRegionInfo().getRegionNameAsString());
+    }
+    return new FlowScanner(e.getEnvironment(), scanner, requestOp);
+  }
+}
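
A coprocessor like this only takes effect once it is attached to the table. A minimal sketch of one way to do that with the HBase 1.x client API; the table name, column family, and descriptor setup here are illustrative assumptions, not the actual flow run schema code:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.HColumnDescriptor;
    import org.apache.hadoop.hbase.HTableDescriptor;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class RegisterObserverSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
          // Table and family names are illustrative, not the real schema.
          HTableDescriptor desc = new HTableDescriptor(
              TableName.valueOf("timelineservice.flowrun"));
          desc.addFamily(new HColumnDescriptor("i"));
          // Attaching the observer makes the prePut/preGetOp/preFlush/
          // preCompact hooks above run inside the region server.
          desc.addCoprocessor(
              "org.apache.hadoop.yarn.server.timelineservice"
                  + ".storage.flow.FlowRunCoprocessor");
          admin.createTable(desc);
        }
      }
    }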

http://git-wip-us.apache.org/repos/asf/hadoop/blob/9af30d46/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-server/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/flow/FlowScanner.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-server/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/flow/FlowScanner.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-server/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/flow/FlowScanner.java
new file mode 100644
index 0000000..7f09e51
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-server/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/flow/FlowScanner.java
@@ -0,0 +1,723 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.server.timelineservice.storage.flow;
+
+import java.io.Closeable;
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.HashSet;
+import java.util.List;
+import java.util.Set;
+import java.util.SortedSet;
+import java.util.TreeSet;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hbase.Cell;
+import org.apache.hadoop.hbase.CellUtil;
+import org.apache.hadoop.hbase.HRegionInfo;
+import org.apache.hadoop.hbase.KeyValue;
+import org.apache.hadoop.hbase.Tag;
+import org.apache.hadoop.hbase.client.Scan;
+import org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment;
+import org.apache.hadoop.hbase.regionserver.InternalScanner;
+import org.apache.hadoop.hbase.regionserver.Region;
+import org.apache.hadoop.hbase.regionserver.RegionScanner;
+import org.apache.hadoop.hbase.regionserver.ScannerContext;
+import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.hbase.util.Bytes.ByteArrayComparator;
+import org.apache.hadoop.yarn.conf.YarnConfiguration;
+import org.apache.hadoop.yarn.server.timelineservice.storage.common.GenericConverter;
+import org.apache.hadoop.yarn.server.timelineservice.storage.common.HBaseTimelineServerUtils;
+import org.apache.hadoop.yarn.server.timelineservice.storage.common.NumericValueConverter;
+import org.apache.hadoop.yarn.server.timelineservice.storage.common.Separator;
+import org.apache.hadoop.yarn.server.timelineservice.storage.common.TimestampGenerator;
+import org.apache.hadoop.yarn.server.timelineservice.storage.common.ValueConverter;
+
+import com.google.common.annotations.VisibleForTesting;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+/**
+ * Invoked via the coprocessor when a Get or a Scan is issued for flow run
+ * table. Looks through the list of cells per row, checks their tags and does
+ * operation on those cells as per the cell tags. Transforms reads of the stored
+ * metrics into calculated sums for each column. Also finds the min and max for
+ * start and end times in a flow run.
+ */
+class FlowScanner implements RegionScanner, Closeable {
+
+  private static final Logger LOG =
+      LoggerFactory.getLogger(FlowScanner.class);
+
+  /**
+   * Use a special application id to represent the flow id; this is needed
+   * since TimestampGenerator parses the app id to generate a cell timestamp.
+   */
+  private static final String FLOW_APP_ID = "application_00000000000_0000";
+
+  private final Region region;
+  private final InternalScanner flowRunScanner;
+  private final int batchSize;
+  private final long appFinalValueRetentionThreshold;
+  private RegionScanner regionScanner;
+  private boolean hasMore;
+  private byte[] currentRow;
+  private List<Cell> availableCells = new ArrayList<>();
+  private int currentIndex;
+  private FlowScannerOperation action = FlowScannerOperation.READ;
+
+  FlowScanner(RegionCoprocessorEnvironment env, InternalScanner internalScanner,
+      FlowScannerOperation action) {
+    this(env, null, internalScanner, action);
+  }
+
+  FlowScanner(RegionCoprocessorEnvironment env, Scan incomingScan,
+      InternalScanner internalScanner, FlowScannerOperation action) {
+    this.batchSize = incomingScan == null ? -1 : incomingScan.getBatch();
+    // TODO initialize other scan attributes like Scan#maxResultSize
+    this.flowRunScanner = internalScanner;
+    if (internalScanner instanceof RegionScanner) {
+      this.regionScanner = (RegionScanner) internalScanner;
+    }
+    this.action = action;
+    if (env == null) {
+      this.appFinalValueRetentionThreshold =
+          YarnConfiguration.DEFAULT_APP_FINAL_VALUE_RETENTION_THRESHOLD;
+      this.region = null;
+    } else {
+      this.region = env.getRegion();
+      Configuration hbaseConf = env.getConfiguration();
+      this.appFinalValueRetentionThreshold = hbaseConf.getLong(
+          YarnConfiguration.APP_FINAL_VALUE_RETENTION_THRESHOLD,
+          YarnConfiguration.DEFAULT_APP_FINAL_VALUE_RETENTION_THRESHOLD);
+    }
+    if (LOG.isDebugEnabled()) {
+      LOG.debug(" batch size=" + batchSize);
+    }
+  }
+
+
+  /*
+   * (non-Javadoc)
+   *
+   * @see org.apache.hadoop.hbase.regionserver.RegionScanner#getRegionInfo()
+   */
+  @Override
+  public HRegionInfo getRegionInfo() {
+    return region.getRegionInfo();
+  }
+
+  @Override
+  public boolean nextRaw(List<Cell> cells) throws IOException {
+    return nextRaw(cells, ScannerContext.newBuilder().build());
+  }
+
+  @Override
+  public boolean nextRaw(List<Cell> cells, ScannerContext scannerContext)
+      throws IOException {
+    return nextInternal(cells, scannerContext);
+  }
+
+  @Override
+  public boolean next(List<Cell> cells) throws IOException {
+    return next(cells, ScannerContext.newBuilder().build());
+  }
+
+  @Override
+  public boolean next(List<Cell> cells, ScannerContext scannerContext)
+      throws IOException {
+    return nextInternal(cells, scannerContext);
+  }
+
+  /**
+   * Get value converter associated with a column or a column prefix. If nothing
+   * matches, generic converter is returned.
+   * @param colQualifierBytes
+   * @return value converter implementation.
+   */
+  private static ValueConverter getValueConverter(byte[] colQualifierBytes) {
+    // Iterate over all the column prefixes for flow run table and get the
+    // appropriate converter for the column qualifier passed if prefix matches.
+    for (FlowRunColumnPrefix colPrefix : FlowRunColumnPrefix.values()) {
+      byte[] colPrefixBytes = colPrefix.getColumnPrefixBytes("");
+      if (Bytes.compareTo(colPrefixBytes, 0, colPrefixBytes.length,
+          colQualifierBytes, 0, colPrefixBytes.length) == 0) {
+        return colPrefix.getValueConverter();
+      }
+    }
+    // Iterate over all the columns for flow run table and get the
+    // appropriate converter for the column qualifier passed if match occurs.
+    for (FlowRunColumn column : FlowRunColumn.values()) {
+      if (Bytes.compareTo(
+          column.getColumnQualifierBytes(), colQualifierBytes) == 0) {
+        return column.getValueConverter();
+      }
+    }
+    // Return generic converter if nothing matches.
+    return GenericConverter.getInstance();
+  }
+
+  /**
+   * This method loops through the cells in a given row of the
+   * {@link FlowRunTable}. It looks at the tags of each cell to figure out how
+   * to process the contents. It then calculates the sum or min or max for each
+   * column or returns the cell as is.
+   *
+   * @param cells
+   * @param scannerContext
+   * @return true if next row is available for the scanner, false otherwise
+   * @throws IOException
+   */
+  private boolean nextInternal(List<Cell> cells, ScannerContext scannerContext)
+      throws IOException {
+    Cell cell = null;
+    startNext();
+    // Loop through all the cells in this row
+    // For min/max/metrics we do need to scan the entire set of cells to get the
+    // right one
+    // But with flush/compaction, the number of cells being scanned will go down
+    // cells are grouped per column qualifier then sorted by cell timestamp
+    // (latest to oldest) per column qualifier
+    // So all cells in one qualifier come one after the other before we see the
+    // next column qualifier
+    ByteArrayComparator comp = new ByteArrayComparator();
+    byte[] previousColumnQualifier = Separator.EMPTY_BYTES;
+    AggregationOperation currentAggOp = null;
+    SortedSet<Cell> currentColumnCells = new TreeSet<>(KeyValue.COMPARATOR);
+    Set<String> alreadySeenAggDim = new HashSet<>();
+    int addedCnt = 0;
+    long currentTimestamp = System.currentTimeMillis();
+    ValueConverter converter = null;
+    int limit = batchSize;
+
+    while (limit <= 0 || addedCnt < limit) {
+      cell = peekAtNextCell(scannerContext);
+      if (cell == null) {
+        break;
+      }
+      byte[] currentColumnQualifier = CellUtil.cloneQualifier(cell);
+      if (previousColumnQualifier == null) {
+        // first time in loop
+        previousColumnQualifier = currentColumnQualifier;
+      }
+
+      converter = getValueConverter(currentColumnQualifier);
+      if (comp.compare(previousColumnQualifier, currentColumnQualifier) != 0) {
+        addedCnt += emitCells(cells, currentColumnCells, currentAggOp,
+            converter, currentTimestamp);
+        resetState(currentColumnCells, alreadySeenAggDim);
+        previousColumnQualifier = currentColumnQualifier;
+        currentAggOp = getCurrentAggOp(cell);
+        converter = getValueConverter(currentColumnQualifier);
+      }
+      collectCells(currentColumnCells, currentAggOp, cell, alreadySeenAggDim,
+          converter, scannerContext);
+      nextCell(scannerContext);
+    }
+    if ((!currentColumnCells.isEmpty()) && ((limit <= 0 || addedCnt < limit))) {
+      addedCnt += emitCells(cells, currentColumnCells, currentAggOp, converter,
+          currentTimestamp);
+      if (LOG.isDebugEnabled()) {
+        if (addedCnt > 0) {
+          LOG.debug("emitted cells. " + addedCnt + " for " + this.action
+              + " rowKey="
+              + FlowRunRowKey.parseRowKey(CellUtil.cloneRow(cells.get(0))));
+        } else {
+          LOG.debug("emitted no cells for " + this.action);
+        }
+      }
+    }
+    return hasMore();
+  }
+
+  private AggregationOperation getCurrentAggOp(Cell cell) {
+    List<Tag> tags = Tag.asList(cell.getTagsArray(), cell.getTagsOffset(),
+        cell.getTagsLength());
+    // We assume that all the operations for a particular column are the same
+    return HBaseTimelineServerUtils.getAggregationOperationFromTagsList(tags);
+  }
+
+  /**
+   * resets the parameters to an initialized state for next loop iteration.
+   */
+  private void resetState(SortedSet<Cell> currentColumnCells,
+      Set<String> alreadySeenAggDim) {
+    currentColumnCells.clear();
+    alreadySeenAggDim.clear();
+  }
+
+  private void collectCells(SortedSet<Cell> currentColumnCells,
+      AggregationOperation currentAggOp, Cell cell,
+      Set<String> alreadySeenAggDim, ValueConverter converter,
+      ScannerContext scannerContext) throws IOException {
+
+    if (currentAggOp == null) {
+      // not a min/max/metric cell, so just return it as is
+      currentColumnCells.add(cell);
+      return;
+    }
+
+    switch (currentAggOp) {
+    case GLOBAL_MIN:
+      if (currentColumnCells.size() == 0) {
+        currentColumnCells.add(cell);
+      } else {
+        Cell currentMinCell = currentColumnCells.first();
+        Cell newMinCell = compareCellValues(currentMinCell, cell, currentAggOp,
+            (NumericValueConverter) converter);
+        if (!currentMinCell.equals(newMinCell)) {
+          currentColumnCells.remove(currentMinCell);
+          currentColumnCells.add(newMinCell);
+        }
+      }
+      break;
+    case GLOBAL_MAX:
+      if (currentColumnCells.size() == 0) {
+        currentColumnCells.add(cell);
+      } else {
+        Cell currentMaxCell = currentColumnCells.first();
+        Cell newMaxCell = compareCellValues(currentMaxCell, cell, currentAggOp,
+            (NumericValueConverter) converter);
+        if (!currentMaxCell.equals(newMaxCell)) {
+          currentColumnCells.remove(currentMaxCell);
+          currentColumnCells.add(newMaxCell);
+        }
+      }
+      break;
+    case SUM:
+    case SUM_FINAL:
+      if (LOG.isTraceEnabled()) {
+        LOG.trace("In collect cells "
+            + " FlowSannerOperation="
+            + this.action
+            + " currentAggOp="
+            + currentAggOp
+            + " cell qualifier="
+            + Bytes.toString(CellUtil.cloneQualifier(cell))
+            + " cell value= "
+            + converter.decodeValue(CellUtil.cloneValue(cell))
+            + " timestamp=" + cell.getTimestamp());
+      }
+
+      // only if this app has not been seen yet, add to current column cells
+      List<Tag> tags = Tag.asList(cell.getTagsArray(), cell.getTagsOffset(),
+          cell.getTagsLength());
+      String aggDim = HBaseTimelineServerUtils
+          .getAggregationCompactionDimension(tags);
+      if (!alreadySeenAggDim.contains(aggDim)) {
+        // if this agg dimension has already been seen,
+        // since they show up in sorted order
+        // we drop the rest which are older
+        // in other words, this cell is older than previously seen cells
+        // for that agg dim
+        // but when this agg dim is not seen,
+        // consider this cell in our working set
+        currentColumnCells.add(cell);
+        alreadySeenAggDim.add(aggDim);
+      }
+      break;
+    default:
+      break;
+    } // end of switch case
+  }
+
+  /*
+   * Processes the cells in input param currentColumnCells and populates
+   * List<Cell> cells as the output based on the input AggregationOperation
+   * parameter.
+   */
+  private int emitCells(List<Cell> cells, SortedSet<Cell> currentColumnCells,
+      AggregationOperation currentAggOp, ValueConverter converter,
+      long currentTimestamp) throws IOException {
+    if ((currentColumnCells == null) || (currentColumnCells.size() == 0)) {
+      return 0;
+    }
+    if (currentAggOp == null) {
+      cells.addAll(currentColumnCells);
+      return currentColumnCells.size();
+    }
+    if (LOG.isTraceEnabled()) {
+      LOG.trace("In emitCells " + this.action + " currentColumnCells size= "
+          + currentColumnCells.size() + " currentAggOp" + currentAggOp);
+    }
+
+    switch (currentAggOp) {
+    case GLOBAL_MIN:
+    case GLOBAL_MAX:
+      cells.addAll(currentColumnCells);
+      return currentColumnCells.size();
+    case SUM:
+    case SUM_FINAL:
+      switch (action) {
+      case FLUSH:
+      case MINOR_COMPACTION:
+        cells.addAll(currentColumnCells);
+        return currentColumnCells.size();
+      case READ:
+        Cell sumCell = processSummation(currentColumnCells,
+            (NumericValueConverter) converter);
+        cells.add(sumCell);
+        return 1;
+      case MAJOR_COMPACTION:
+        List<Cell> finalCells = processSummationMajorCompaction(
+            currentColumnCells, (NumericValueConverter) converter,
+            currentTimestamp);
+        cells.addAll(finalCells);
+        return finalCells.size();
+      default:
+        cells.addAll(currentColumnCells);
+        return currentColumnCells.size();
+      }
+    default:
+      cells.addAll(currentColumnCells);
+      return currentColumnCells.size();
+    }
+  }
+
+  /*
+   * Returns a cell whose value is the sum of all cell values in the input set.
+   * The new cell created has the timestamp of the most recent metric cell. The
+   * sum of a metric for a flow run is the summation at the point of the last
+   * metric update in that flow till that time.
+   */
+  private Cell processSummation(SortedSet<Cell> currentColumnCells,
+      NumericValueConverter converter) throws IOException {
+    Number sum = 0;
+    Number currentValue = 0;
+    long ts = 0L;
+    long mostCurrentTimestamp = 0L;
+    Cell mostRecentCell = null;
+    for (Cell cell : currentColumnCells) {
+      currentValue = (Number) converter.decodeValue(CellUtil.cloneValue(cell));
+      ts = cell.getTimestamp();
+      if (mostCurrentTimestamp < ts) {
+        mostCurrentTimestamp = ts;
+        mostRecentCell = cell;
+      }
+      sum = converter.add(sum, currentValue);
+    }
+    byte[] sumBytes = converter.encodeValue(sum);
+    Cell sumCell =
+        HBaseTimelineServerUtils.createNewCell(mostRecentCell, sumBytes);
+    return sumCell;
+  }
+
+
+  /**
+   * Returns a list of cells that contains:
+   *
+   * A) the latest cells for applications that haven't finished yet, and
+   * B) a summation cell for the flow, based on applications that have
+   *    completed and are older than a certain time.
+   *
+   * The new cell created has the timestamp of the most recent metric cell. The
+   * sum of a metric for a flow run is the summation at the point of the last
+   * metric update in that flow till that time.
+   */
+  @VisibleForTesting
+  List<Cell> processSummationMajorCompaction(
+      SortedSet<Cell> currentColumnCells, NumericValueConverter converter,
+      long currentTimestamp)
+      throws IOException {
+    Number sum = 0;
+    Number currentValue = 0;
+    long ts = 0L;
+    boolean summationDone = false;
+    List<Cell> finalCells = new ArrayList<Cell>();
+    if (currentColumnCells == null) {
+      return finalCells;
+    }
+
+    if (LOG.isDebugEnabled()) {
+      LOG.debug("In processSummationMajorCompaction,"
+          + " will drop cells older than " + currentTimestamp
+          + " CurrentColumnCells size=" + currentColumnCells.size());
+    }
+
+    for (Cell cell : currentColumnCells) {
+      AggregationOperation cellAggOp = getCurrentAggOp(cell);
+      // if this is the existing flow sum cell
+      List<Tag> tags = Tag.asList(cell.getTagsArray(), cell.getTagsOffset(),
+          cell.getTagsLength());
+      String appId = HBaseTimelineServerUtils
+          .getAggregationCompactionDimension(tags);
+      if (FLOW_APP_ID.equals(appId)) {
+        // equals(), not ==: appId is a String rebuilt from the cell tags.
+        // Fold the previously written flow-level sum cell into the sum.
+        sum = converter.add(sum, (Number) converter.decodeValue(
+            CellUtil.cloneValue(cell)));
+        summationDone = true;
+        if (LOG.isTraceEnabled()) {
+          LOG.trace("reading flow app id sum=" + sum);
+        }
+      } else {
+        currentValue = (Number) converter.decodeValue(CellUtil
+            .cloneValue(cell));
+        // read the timestamp truncated by the generator
+        ts =  TimestampGenerator.getTruncatedTimestamp(cell.getTimestamp());
+        if ((cellAggOp == AggregationOperation.SUM_FINAL)
+            && ((ts + this.appFinalValueRetentionThreshold)
+                < currentTimestamp)) {
+          sum = converter.add(sum, currentValue);
+          summationDone = true;
+          if (LOG.isTraceEnabled()) {
+            LOG.trace("MAJOR COMPACTION loop sum= " + sum
+                + " discarding now: " + " qualifier="
+                + Bytes.toString(CellUtil.cloneQualifier(cell)) + " value="
+                + converter.decodeValue(CellUtil.cloneValue(cell))
+                + " timestamp=" + cell.getTimestamp() + " " + this.action);
+          }
+        } else {
+          // not a final value but it's the latest cell for this app
+          // so include this cell in the list of cells to write back
+          finalCells.add(cell);
+        }
+      }
+    }
+    if (summationDone) {
+      Cell anyCell = currentColumnCells.first();
+      List<Tag> tags = new ArrayList<Tag>();
+      Tag t = new Tag(AggregationOperation.SUM_FINAL.getTagType(),
+          Bytes.toBytes(FLOW_APP_ID));
+      tags.add(t);
+      t = new Tag(AggregationCompactionDimension.APPLICATION_ID.getTagType(),
+          Bytes.toBytes(FLOW_APP_ID));
+      tags.add(t);
+      byte[] tagByteArray = Tag.fromList(tags);
+      Cell sumCell = HBaseTimelineServerUtils.createNewCell(
+          CellUtil.cloneRow(anyCell),
+          CellUtil.cloneFamily(anyCell),
+          CellUtil.cloneQualifier(anyCell),
+          TimestampGenerator.getSupplementedTimestamp(
+              System.currentTimeMillis(), FLOW_APP_ID),
+              converter.encodeValue(sum), tagByteArray);
+      finalCells.add(sumCell);
+      if (LOG.isTraceEnabled()) {
+        LOG.trace("MAJOR COMPACTION final sum= " + sum + " for "
+            + Bytes.toString(CellUtil.cloneQualifier(sumCell))
+            + " " + this.action);
+      }
+      LOG.info("After major compaction for qualifier="
+          + Bytes.toString(CellUtil.cloneQualifier(sumCell))
+          + " with currentColumnCells.size="
+          + currentColumnCells.size()
+          + " returning finalCells.size=" + finalCells.size()
+          + " with sum=" + sum.longValue()
+          + " with cell timestamp " + sumCell.getTimestamp());
+    } else {
+      String qualifier = "";
+      LOG.info("After major compaction for qualifier=" + qualifier
+          + " with currentColumnCells.size="
+          + currentColumnCells.size()
+          + " returning finalCells.size=" + finalCells.size()
+          + " with zero sum="
+          + sum.longValue());
+    }
+    return finalCells;
+  }
+
+  /**
+   * Determines which cell is to be returned based on the values in each cell
+   * and the comparison operation MIN or MAX.
+   *
+   * @param previouslyChosenCell
+   * @param currentCell
+   * @param currentAggOp
+   * @return the cell which is the min (or max) cell
+   * @throws IOException
+   */
+  private Cell compareCellValues(Cell previouslyChosenCell, Cell currentCell,
+      AggregationOperation currentAggOp, NumericValueConverter converter)
+      throws IOException {
+    if (previouslyChosenCell == null) {
+      return currentCell;
+    }
+    try {
+      Number previouslyChosenCellValue = (Number)converter.decodeValue(
+          CellUtil.cloneValue(previouslyChosenCell));
+      Number currentCellValue = (Number) converter.decodeValue(CellUtil
+          .cloneValue(currentCell));
+      switch (currentAggOp) {
+      case GLOBAL_MIN:
+        if (converter.compare(
+            currentCellValue, previouslyChosenCellValue) < 0) {
+          // new value is minimum, hence return this cell
+          return currentCell;
+        } else {
+          // previously chosen value is minimum, hence return previous min cell
+          return previouslyChosenCell;
+        }
+      case GLOBAL_MAX:
+        if (converter.compare(
+            currentCellValue, previouslyChosenCellValue) > 0) {
+          // new value is max, hence return this cell
+          return currentCell;
+        } else {
+          // previously chosen value is max, hence return previous max cell
+          return previouslyChosenCell;
+        }
+      default:
+        return currentCell;
+      }
+    } catch (IllegalArgumentException iae) {
+      LOG.error("caught iae during conversion to long ", iae);
+      return currentCell;
+    }
+  }
+
+  @Override
+  public void close() throws IOException {
+    if (flowRunScanner != null) {
+      flowRunScanner.close();
+    } else {
+      LOG.warn("scanner close called but scanner is null");
+    }
+  }
+
+  /**
+   * Called to signal the start of the next() call by the scanner.
+   */
+  public void startNext() {
+    currentRow = null;
+  }
+
+  /**
+   * Returns whether or not the underlying scanner has more rows.
+   */
+  public boolean hasMore() {
+    return currentIndex < availableCells.size() || hasMore;
+  }
+
+  /**
+   * Returns the next available cell for the current row and advances the
+   * pointer to the next cell. This method can be called multiple times in a row
+   * to advance through all the available cells.
+   *
+   * @param scannerContext
+   *          context information for the batch of cells under consideration
+   * @return the next available cell or null if no more cells are available for
+   *         the current row
+   * @throws IOException
+   */
+  public Cell nextCell(ScannerContext scannerContext) throws IOException {
+    Cell cell = peekAtNextCell(scannerContext);
+    if (cell != null) {
+      currentIndex++;
+    }
+    return cell;
+  }
+
+  /**
+   * Returns the next available cell for the current row, without advancing the
+   * pointer. Calling this method multiple times in a row will continue to
+   * return the same cell.
+   *
+   * @param scannerContext
+   *          context information for the batch of cells under consideration
+   * @return the next available cell or null if no more cells are available for
+   *         the current row
+   * @throws IOException if any problem is encountered while grabbing the next
+   *     cell.
+   */
+  public Cell peekAtNextCell(ScannerContext scannerContext) throws IOException {
+    if (currentIndex >= availableCells.size()) {
+      // done with current batch
+      availableCells.clear();
+      currentIndex = 0;
+      hasMore = flowRunScanner.next(availableCells, scannerContext);
+    }
+    Cell cell = null;
+    if (currentIndex < availableCells.size()) {
+      cell = availableCells.get(currentIndex);
+      if (currentRow == null) {
+        currentRow = CellUtil.cloneRow(cell);
+      } else if (!CellUtil.matchingRow(cell, currentRow)) {
+        // moved on to the next row
+        // don't use the current cell
+        // also signal no more cells for this row
+        return null;
+      }
+    }
+    return cell;
+  }
+
+  /*
+   * (non-Javadoc)
+   *
+   * @see org.apache.hadoop.hbase.regionserver.RegionScanner#getMaxResultSize()
+   */
+  @Override
+  public long getMaxResultSize() {
+    if (regionScanner == null) {
+      throw new IllegalStateException(
+          "RegionScanner.isFilterDone() called when the flow "
+              + "scanner's scanner is not a RegionScanner");
+    }
+    return regionScanner.getMaxResultSize();
+  }
+
+  /*
+   * (non-Javadoc)
+   *
+   * @see org.apache.hadoop.hbase.regionserver.RegionScanner#getMvccReadPoint()
+   */
+  @Override
+  public long getMvccReadPoint() {
+    if (regionScanner == null) {
+      throw new IllegalStateException(
+          "RegionScanner.isFilterDone() called when the flow "
+              + "scanner's internal scanner is not a RegionScanner");
+    }
+    return regionScanner.getMvccReadPoint();
+  }
+
+  /*
+   * (non-Javadoc)
+   *
+   * @see org.apache.hadoop.hbase.regionserver.RegionScanner#isFilterDone()
+   */
+  @Override
+  public boolean isFilterDone() throws IOException {
+    if (regionScanner == null) {
+      throw new IllegalStateException(
+          "RegionScanner.isFilterDone() called when the flow "
+              + "scanner's internal scanner is not a RegionScanner");
+    }
+    return regionScanner.isFilterDone();
+  }
+
+  /*
+   * (non-Javadoc)
+   *
+   * @see org.apache.hadoop.hbase.regionserver.RegionScanner#reseek(byte[])
+   */
+  @Override
+  public boolean reseek(byte[] bytes) throws IOException {
+    if (regionScanner == null) {
+      throw new IllegalStateException(
+          "RegionScanner.reseek() called when the flow "
+              + "scanner's internal scanner is not a RegionScanner");
+    }
+    return regionScanner.reseek(bytes);
+  }
+
+  @Override
+  public int getBatch() {
+    return batchSize;
+  }
+}
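
The heart of nextInternal above is a peek/advance cycle that walks a qualifier-sorted cell stream and emits one aggregate per qualifier change, flushing the trailing group at the end. The same shape in a self-contained, illustrative form:

    import java.util.ArrayList;
    import java.util.Arrays;
    import java.util.Iterator;
    import java.util.List;

    public class GroupByQualifierSketch {
      // Consume a stream sorted by key and emit one aggregate per key
      // change, flushing the trailing group at the end -- the same shape
      // as FlowScanner#nextInternal's per-qualifier emit/reset cycle.
      static List<String> groupByKey(Iterator<String[]> sorted) {
        List<String> out = new ArrayList<>();
        String prevKey = null;
        StringBuilder agg = new StringBuilder();
        while (sorted.hasNext()) {
          String[] kv = sorted.next();        // kv[0] = key, kv[1] = value
          if (prevKey != null && !prevKey.equals(kv[0])) {
            out.add(prevKey + "=" + agg);     // key changed: emit aggregate
            agg.setLength(0);
          }
          prevKey = kv[0];
          agg.append(kv[1]);
        }
        if (prevKey != null) {
          out.add(prevKey + "=" + agg);       // flush the trailing group
        }
        return out;
      }

      public static void main(String[] args) {
        Iterator<String[]> cells = Arrays.asList(
            new String[]{"q1", "a"}, new String[]{"q1", "b"},
            new String[]{"q2", "c"}).iterator();
        System.out.println(groupByKey(cells)); // [q1=ab, q2=c]
      }
    }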

http://git-wip-us.apache.org/repos/asf/hadoop/blob/9af30d46/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-server/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/flow/FlowScannerOperation.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-server/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/flow/FlowScannerOperation.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-server/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/flow/FlowScannerOperation.java
new file mode 100644
index 0000000..73c666f
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-server/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/flow/FlowScannerOperation.java
@@ -0,0 +1,46 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.yarn.server.timelineservice.storage.flow;
+
+
+/**
+ * Identifies the scanner operation on the {@link FlowRunTable}.
+ */
+public enum FlowScannerOperation {
+
+  /**
+   * If the scanner is opened for reading
+   * during preGet or preScan.
+   */
+  READ,
+
+  /**
+   * If the scanner is opened during preFlush.
+   */
+  FLUSH,
+
+  /**
+   * If the scanner is opened during minor Compaction.
+   */
+  MINOR_COMPACTION,
+
+  /**
+   * If the scanner is opened during major Compaction.
+   */
+  MAJOR_COMPACTION
+}
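
As a summary of how FlowScanner#emitCells treats these values, a hypothetical helper (not part of the patch): only READ and MAJOR_COMPACTION materialize SUM/SUM_FINAL aggregates, while FLUSH and MINOR_COMPACTION pass cells through unchanged.

    public class FlowScannerOperationSketch {
      enum Op { READ, FLUSH, MINOR_COMPACTION, MAJOR_COMPACTION }

      // Mirrors FlowScanner#emitCells: READ collapses SUM/SUM_FINAL cells
      // into one sum cell, MAJOR_COMPACTION writes back a compacted sum,
      // and FLUSH/MINOR_COMPACTION leave the raw cells untouched.
      static boolean materializesSums(Op op) {
        return op == Op.READ || op == Op.MAJOR_COMPACTION;
      }

      public static void main(String[] args) {
        for (Op op : Op.values()) {
          System.out.println(op + " -> " + materializesSums(op));
        }
      }
    }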

http://git-wip-us.apache.org/repos/asf/hadoop/blob/9af30d46/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-server/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/flow/package-info.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-server/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/flow/package-info.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-server/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/flow/package-info.java
new file mode 100644
index 0000000..04963f3
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-server/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/flow/package-info.java
@@ -0,0 +1,29 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/**
+ * Package org.apache.hadoop.yarn.server.timelineservice.storage.flow
+ * contains classes related to implementation for flow related tables, viz. flow
+ * run table and flow activity table.
+ */
+@InterfaceAudience.Private
+@InterfaceStability.Unstable
+package org.apache.hadoop.yarn.server.timelineservice.storage.flow;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/9af30d46/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-server/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/package-info.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-server/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/package-info.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-server/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/package-info.java
new file mode 100644
index 0000000..e78db2a
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-server/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/package-info.java
@@ -0,0 +1,28 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/**
+ * Package org.apache.hadoop.yarn.server.timelineservice.storage contains
+ * classes which define and implement reading and writing to backend storage.
+ */
+@InterfaceAudience.Private
+@InterfaceStability.Unstable
+package org.apache.hadoop.yarn.server.timelineservice.storage;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/9af30d46/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/pom.xml
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/pom.xml b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/pom.xml
index 6369864..7e5a803 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/pom.xml
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/pom.xml
@@ -18,197 +18,24 @@
 <project xmlns="http://maven.apache.org/POM/4.0.0"
          xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
          xsi:schemaLocation="http://maven.apache.org/POM/4.0.0
-                             http://maven.apache.org/xsd/maven-4.0.0.xsd">
+                      http://maven.apache.org/xsd/maven-4.0.0.xsd">
   <parent>
     <artifactId>hadoop-yarn-server</artifactId>
     <groupId>org.apache.hadoop</groupId>
     <version>3.2.0-SNAPSHOT</version>
   </parent>
+
   <modelVersion>4.0.0</modelVersion>
   <artifactId>hadoop-yarn-server-timelineservice-hbase</artifactId>
+  <version>3.2.0-SNAPSHOT</version>
   <name>Apache Hadoop YARN TimelineService HBase Backend</name>
+  <packaging>pom</packaging>
 
-  <properties>
-    <!-- Needed for generating FindBugs warnings using parent pom -->
-    <yarn.basedir>${project.parent.parent.basedir}</yarn.basedir>
-  </properties>
-
-  <dependencies>
-    <dependency>
-      <groupId>commons-logging</groupId>
-      <artifactId>commons-logging</artifactId>
-    </dependency>
-
-    <dependency>
-      <groupId>commons-lang</groupId>
-      <artifactId>commons-lang</artifactId>
-    </dependency>
-
-    <dependency>
-      <groupId>commons-cli</groupId>
-      <artifactId>commons-cli</artifactId>
-    </dependency>
-
-    <dependency>
-      <groupId>com.google.guava</groupId>
-      <artifactId>guava</artifactId>
-    </dependency>
-
-    <dependency>
-      <groupId>org.apache.hadoop</groupId>
-      <artifactId>hadoop-annotations</artifactId>
-      <scope>provided</scope>
-    </dependency>
-
-    <dependency>
-      <groupId>org.apache.hadoop</groupId>
-      <artifactId>hadoop-common</artifactId>
-      <scope>provided</scope>
-    </dependency>
-
-    <!-- 'mvn dependency:analyze' fails to detect use of this dependency -->
-    <dependency>
-      <groupId>org.apache.hadoop</groupId>
-      <artifactId>hadoop-common</artifactId>
-      <type>test-jar</type>
-      <scope>test</scope>
-    </dependency>
-
-    <dependency>
-      <groupId>org.apache.hadoop</groupId>
-      <artifactId>hadoop-yarn-api</artifactId>
-      <scope>provided</scope>
-    </dependency>
-
-    <dependency>
-      <groupId>org.apache.hadoop</groupId>
-      <artifactId>hadoop-yarn-common</artifactId>
-      <scope>provided</scope>
-    </dependency>
-
-    <dependency>
-      <groupId>org.apache.hadoop</groupId>
-      <artifactId>hadoop-yarn-server-applicationhistoryservice</artifactId>
-      <scope>provided</scope>
-    </dependency>
-
-    <dependency>
-      <groupId>org.apache.hadoop</groupId>
-      <artifactId>hadoop-yarn-server-timelineservice</artifactId>
-    </dependency>
-
-    <dependency>
-      <groupId>org.apache.hbase</groupId>
-      <artifactId>hbase-common</artifactId>
-      <exclusions>
-        <exclusion>
-          <groupId>org.apache.hadoop</groupId>
-          <artifactId>hadoop-mapreduce-client-core</artifactId>
-        </exclusion>
-        <exclusion>
-          <groupId>org.mortbay.jetty</groupId>
-          <artifactId>jetty-util</artifactId>
-        </exclusion>
-      </exclusions>
-    </dependency>
-
-    <dependency>
-      <groupId>org.apache.hbase</groupId>
-      <artifactId>hbase-client</artifactId>
-      <exclusions>
-        <exclusion>
-          <groupId>org.apache.hadoop</groupId>
-          <artifactId>hadoop-mapreduce-client-core</artifactId>
-        </exclusion>
-      </exclusions>
-    </dependency>
-
-    <dependency>
-      <groupId>org.apache.hbase</groupId>
-      <artifactId>hbase-server</artifactId>
-      <scope>provided</scope>
-      <exclusions>
-        <exclusion>
-          <groupId>org.apache.hadoop</groupId>
-          <artifactId>hadoop-hdfs</artifactId>
-        </exclusion>
-        <exclusion>
-          <groupId>org.apache.hadoop</groupId>
-          <artifactId>hadoop-hdfs-client</artifactId>
-        </exclusion>
-        <exclusion>
-          <groupId>org.apache.hadoop</groupId>
-          <artifactId>hadoop-client</artifactId>
-        </exclusion>
-        <exclusion>
-          <groupId>org.apache.hadoop</groupId>
-          <artifactId>hadoop-mapreduce-client-core</artifactId>
-        </exclusion>
-        <exclusion>
-          <groupId>org.mortbay.jetty</groupId>
-          <artifactId>jetty</artifactId>
-        </exclusion>
-        <exclusion>
-          <groupId>org.mortbay.jetty</groupId>
-          <artifactId>jetty-util</artifactId>
-        </exclusion>
-        <exclusion>
-          <groupId>org.mortbay.jetty</groupId>
-          <artifactId>jetty-sslengine</artifactId>
-        </exclusion>
-      </exclusions>
-    </dependency>
+  <modules>
+    <module>hadoop-yarn-server-timelineservice-hbase-client</module>
+    <module>hadoop-yarn-server-timelineservice-hbase-common</module>
+    <module>hadoop-yarn-server-timelineservice-hbase-server</module>
+  </modules>
 
-    <dependency>
-      <groupId>junit</groupId>
-      <artifactId>junit</artifactId>
-      <scope>test</scope>
-    </dependency>
-  </dependencies>
 
-  <build>
-    <plugins>
-      <plugin>
-        <artifactId>maven-jar-plugin</artifactId>
-        <executions>
-          <execution>
-            <goals>
-              <goal>test-jar</goal>
-            </goals>
-            <phase>test-compile</phase>
-          </execution>
-        </executions>
-      </plugin>
-      <plugin>
-        <groupId>org.apache.maven.plugins</groupId>
-        <artifactId>maven-javadoc-plugin</artifactId>
-        <configuration>
-          <additionnalDependencies>
-            <additionnalDependency>
-              <groupId>junit</groupId>
-              <artifactId>junit</artifactId>
-              <version>4.11</version>
-            </additionnalDependency>
-          </additionnalDependencies>
-        </configuration>
-      </plugin>
-      <plugin>
-        <groupId>org.apache.maven.plugins</groupId>
-        <artifactId>maven-dependency-plugin</artifactId>
-        <executions>
-          <execution>
-            <phase>package</phase>
-            <goals>
-              <goal>copy-dependencies</goal>
-            </goals>
-            <configuration>
-              <includeScope>runtime</includeScope>
-              <excludeGroupIds>org.slf4j,org.apache.hadoop,com.github.stephenc.findbugs</excludeGroupIds>
-              <outputDirectory>${project.build.directory}/lib</outputDirectory>
-            </configuration>
-          </execution>
-        </executions>
-      </plugin>
-    </plugins>
-  </build>
-</project>
+</project>
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hadoop/blob/9af30d46/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/reader/filter/TimelineFilterUtils.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/reader/filter/TimelineFilterUtils.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/reader/filter/TimelineFilterUtils.java
deleted file mode 100644
index 8b46d32..0000000
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/reader/filter/TimelineFilterUtils.java
+++ /dev/null
@@ -1,308 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.yarn.server.timelineservice.reader.filter;
-
-import java.io.IOException;
-import java.util.HashSet;
-import java.util.Set;
-
-import org.apache.hadoop.hbase.filter.BinaryComparator;
-import org.apache.hadoop.hbase.filter.BinaryPrefixComparator;
-import org.apache.hadoop.hbase.filter.FamilyFilter;
-import org.apache.hadoop.hbase.filter.CompareFilter.CompareOp;
-import org.apache.hadoop.hbase.filter.Filter;
-import org.apache.hadoop.hbase.filter.FilterList;
-import org.apache.hadoop.hbase.filter.FilterList.Operator;
-import org.apache.hadoop.yarn.server.timelineservice.storage.common.Column;
-import org.apache.hadoop.yarn.server.timelineservice.storage.common.ColumnFamily;
-import org.apache.hadoop.yarn.server.timelineservice.storage.common.ColumnPrefix;
-import org.apache.hadoop.hbase.filter.QualifierFilter;
-import org.apache.hadoop.hbase.filter.SingleColumnValueFilter;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-/**
- * Set of utility methods used by timeline filter classes.
- */
-public final class TimelineFilterUtils {
-
-  private static final Logger LOG =
-      LoggerFactory.getLogger(TimelineFilterUtils.class);
-
-  private TimelineFilterUtils() {
-  }
-
-  /**
-   * Returns the equivalent HBase filter list's {@link Operator}.
-   *
-   * @param op timeline filter list operator.
-   * @return HBase filter list's Operator.
-   */
-  private static Operator getHBaseOperator(TimelineFilterList.Operator op) {
-    switch (op) {
-    case AND:
-      return Operator.MUST_PASS_ALL;
-    case OR:
-      return Operator.MUST_PASS_ONE;
-    default:
-      throw new IllegalArgumentException("Invalid operator");
-    }
-  }
-
-  /**
-   * Returns the equivalent HBase compare filter's {@link CompareOp}.
-   *
-   * @param op timeline compare op.
-   * @return HBase compare filter's CompareOp.
-   */
-  private static CompareOp getHBaseCompareOp(
-      TimelineCompareOp op) {
-    switch (op) {
-    case LESS_THAN:
-      return CompareOp.LESS;
-    case LESS_OR_EQUAL:
-      return CompareOp.LESS_OR_EQUAL;
-    case EQUAL:
-      return CompareOp.EQUAL;
-    case NOT_EQUAL:
-      return CompareOp.NOT_EQUAL;
-    case GREATER_OR_EQUAL:
-      return CompareOp.GREATER_OR_EQUAL;
-    case GREATER_THAN:
-      return CompareOp.GREATER;
-    default:
-      throw new IllegalArgumentException("Invalid compare operator");
-    }
-  }
-
-  /**
-   * Converts a {@link TimelinePrefixFilter} to an equivalent HBase
-   * {@link QualifierFilter}.
-   * @param colPrefix column prefix used to encode the filter's qualifier prefix.
-   * @param filter timeline prefix filter to be converted.
-   * @return a {@link QualifierFilter} object
-   */
-  private static <T> Filter createHBaseColQualPrefixFilter(
-      ColumnPrefix<T> colPrefix, TimelinePrefixFilter filter) {
-    return new QualifierFilter(getHBaseCompareOp(filter.getCompareOp()),
-        new BinaryPrefixComparator(
-            colPrefix.getColumnPrefixBytes(filter.getPrefix())));
-  }
-
-  /**
-   * Creates an HBase {@link QualifierFilter} for the passed column prefix and
-   * compare op.
-   *
-   * @param <T> Describes the type of column prefix.
-   * @param compareOp compare op.
-   * @param columnPrefix column prefix.
-   * @return a column qualifier filter.
-   */
-  public static <T> Filter createHBaseQualifierFilter(CompareOp compareOp,
-      ColumnPrefix<T> columnPrefix) {
-    return new QualifierFilter(compareOp,
-        new BinaryPrefixComparator(
-            columnPrefix.getColumnPrefixBytes("")));
-  }
-
-  /**
-   * Create filters for confs or metrics to retrieve. This list includes a
-   * configs/metrics family filter and relevant filters for confs/metrics to
-   * retrieve, if present.
-   *
-   * @param <T> Describes the type of column prefix.
-   * @param confsOrMetricToRetrieve configs/metrics to retrieve.
-   * @param columnFamily config or metric column family.
-   * @param columnPrefix config or metric column prefix.
-   * @return a filter list.
-   * @throws IOException if any problem occurs while creating the filters.
-   */
-  public static <T> Filter createFilterForConfsOrMetricsToRetrieve(
-      TimelineFilterList confsOrMetricToRetrieve, ColumnFamily<T> columnFamily,
-      ColumnPrefix<T> columnPrefix) throws IOException {
-    Filter familyFilter = new FamilyFilter(CompareOp.EQUAL,
-        new BinaryComparator(columnFamily.getBytes()));
-    if (confsOrMetricToRetrieve != null &&
-        !confsOrMetricToRetrieve.getFilterList().isEmpty()) {
-      // If confsOrMetricsToRetrieve are specified, create a filter list based
-      // on it and family filter.
-      FilterList filter = new FilterList(familyFilter);
-      filter.addFilter(
-          createHBaseFilterList(columnPrefix, confsOrMetricToRetrieve));
-      return filter;
-    } else {
-      // Only the family filter needs to be added.
-      return familyFilter;
-    }
-  }
-
-  /**
-   * Creates two HBase {@link SingleColumnValueFilter} filters for the specified
-   * value range represented by the start and end values, and wraps them inside
-   * a filter list. Start and end values must not be null.
-   *
-   * @param <T> Describes the type of column prefix.
-   * @param column Column for which single column value filter is to be created.
-   * @param startValue Start value.
-   * @param endValue End value.
-   * @return 2 single column value filters wrapped in a filter list.
-   * @throws IOException if any problem is encountered while encoding value.
-   */
-  public static <T> FilterList createSingleColValueFiltersByRange(
-      Column<T> column, Object startValue, Object endValue) throws IOException {
-    FilterList list = new FilterList();
-    Filter singleColValFilterStart = createHBaseSingleColValueFilter(
-        column.getColumnFamilyBytes(), column.getColumnQualifierBytes(),
-        column.getValueConverter().encodeValue(startValue),
-        CompareOp.GREATER_OR_EQUAL, true);
-    list.addFilter(singleColValFilterStart);
-
-    Filter singleColValFilterEnd = createHBaseSingleColValueFilter(
-        column.getColumnFamilyBytes(), column.getColumnQualifierBytes(),
-        column.getValueConverter().encodeValue(endValue),
-        CompareOp.LESS_OR_EQUAL, true);
-    list.addFilter(singleColValFilterEnd);
-    return list;
-  }
-
-  /**
-   * Creates an HBase {@link SingleColumnValueFilter} with the specified column.
-   * @param <T> Describes the type of column prefix.
-   * @param column Column which value to be filtered.
-   * @param value Value to be filtered.
-   * @param op Compare operator
-   * @return a SingleColumnValue Filter
-   * @throws IOException if any exception.
-   */
-  public static <T> Filter createHBaseSingleColValueFilter(Column<T> column,
-      Object value, CompareOp op) throws IOException {
-    Filter singleColValFilter = createHBaseSingleColValueFilter(
-        column.getColumnFamilyBytes(), column.getColumnQualifierBytes(),
-        column.getValueConverter().encodeValue(value), op, true);
-    return singleColValFilter;
-  }
-
-  /**
-   * Creates an HBase {@link SingleColumnValueFilter}.
-   *
-   * @param columnFamily Column Family represented as bytes.
-   * @param columnQualifier Column Qualifier represented as bytes.
-   * @param value Value.
-   * @param compareOp Compare operator.
-   * @param filterIfMissing This flag decides if we should filter the row if the
-   *     specified column is missing. This is based on the filter's keyMustExist
-   *     field.
-   * @return a {@link SingleColumnValueFilter} object
-   * @throws IOException
-   */
-  private static SingleColumnValueFilter createHBaseSingleColValueFilter(
-      byte[] columnFamily, byte[] columnQualifier, byte[] value,
-      CompareOp compareOp, boolean filterIfMissing) throws IOException {
-    SingleColumnValueFilter singleColValFilter =
-        new SingleColumnValueFilter(columnFamily, columnQualifier, compareOp,
-        new BinaryComparator(value));
-    singleColValFilter.setLatestVersionOnly(true);
-    singleColValFilter.setFilterIfMissing(filterIfMissing);
-    return singleColValFilter;
-  }
-
-  /**
-   * Fetches columns from a filter list containing exists and multi-value
-   * equality filters. This is done to fetch only the required columns from the
-   * back-end and then match event filters or relationships in the reader.
-   *
-   * @param filterList filter list.
-   * @return set of columns.
-   */
-  public static Set<String> fetchColumnsFromFilterList(
-      TimelineFilterList filterList) {
-    Set<String> strSet = new HashSet<String>();
-    for (TimelineFilter filter : filterList.getFilterList()) {
-      switch(filter.getFilterType()) {
-      case LIST:
-        strSet.addAll(fetchColumnsFromFilterList((TimelineFilterList)filter));
-        break;
-      case KEY_VALUES:
-        strSet.add(((TimelineKeyValuesFilter)filter).getKey());
-        break;
-      case EXISTS:
-        strSet.add(((TimelineExistsFilter)filter).getValue());
-        break;
-      default:
-        LOG.info("Unexpected filter type " + filter.getFilterType());
-        break;
-      }
-    }
-    return strSet;
-  }
-
-  /**
-   * Creates an equivalent HBase {@link FilterList} from a {@link TimelineFilterList},
-   * converting the different timeline filters (of type {@link TimelineFilter})
-   * into their equivalent HBase filters.
-   *
-   * @param <T> Describes the type of column prefix.
-   * @param colPrefix column prefix which will be used for conversion.
-   * @param filterList timeline filter list which has to be converted.
-   * @return A {@link FilterList} object.
-   * @throws IOException if any problem occurs while creating the filter list.
-   */
-  public static <T> FilterList createHBaseFilterList(ColumnPrefix<T> colPrefix,
-      TimelineFilterList filterList) throws IOException {
-    FilterList list =
-        new FilterList(getHBaseOperator(filterList.getOperator()));
-    for (TimelineFilter filter : filterList.getFilterList()) {
-      switch(filter.getFilterType()) {
-      case LIST:
-        list.addFilter(createHBaseFilterList(colPrefix,
-            (TimelineFilterList)filter));
-        break;
-      case PREFIX:
-        list.addFilter(createHBaseColQualPrefixFilter(colPrefix,
-            (TimelinePrefixFilter)filter));
-        break;
-      case COMPARE:
-        TimelineCompareFilter compareFilter = (TimelineCompareFilter)filter;
-        list.addFilter(
-            createHBaseSingleColValueFilter(
-                colPrefix.getColumnFamilyBytes(),
-                colPrefix.getColumnPrefixBytes(compareFilter.getKey()),
-                colPrefix.getValueConverter().
-                    encodeValue(compareFilter.getValue()),
-                getHBaseCompareOp(compareFilter.getCompareOp()),
-                compareFilter.getKeyMustExist()));
-        break;
-      case KEY_VALUE:
-        TimelineKeyValueFilter kvFilter = (TimelineKeyValueFilter)filter;
-        list.addFilter(
-            createHBaseSingleColValueFilter(
-                colPrefix.getColumnFamilyBytes(),
-                colPrefix.getColumnPrefixBytes(kvFilter.getKey()),
-                colPrefix.getValueConverter().encodeValue(kvFilter.getValue()),
-                getHBaseCompareOp(kvFilter.getCompareOp()),
-                kvFilter.getKeyMustExist()));
-        break;
-      default:
-        LOG.info("Unexpected filter type " + filter.getFilterType());
-        break;
-      }
-    }
-    return list;
-  }
-}

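For readers tracking where TimelineFilterUtils lands after the module split, the conversion path above is easiest to see end to end with a small driver. The sketch below is illustrative only and is not part of this patch: the TimelineFilterList(Operator, TimelineFilter...) and TimelinePrefixFilter(TimelineCompareOp, String) constructor shapes are assumptions not shown in this diff, and EntityColumnPrefix.CONFIG merely stands in for any concrete ColumnPrefix.

    import java.io.IOException;

    import org.apache.hadoop.hbase.filter.FilterList;
    import org.apache.hadoop.yarn.server.timelineservice.reader.filter.TimelineCompareOp;
    import org.apache.hadoop.yarn.server.timelineservice.reader.filter.TimelineFilterList;
    import org.apache.hadoop.yarn.server.timelineservice.reader.filter.TimelineFilterList.Operator;
    import org.apache.hadoop.yarn.server.timelineservice.reader.filter.TimelineFilterUtils;
    import org.apache.hadoop.yarn.server.timelineservice.reader.filter.TimelinePrefixFilter;
    import org.apache.hadoop.yarn.server.timelineservice.storage.entity.EntityColumnPrefix;

    public class FilterConversionSketch {
      public static void main(String[] args) throws IOException {
        // Match config columns whose qualifiers start with either prefix;
        // the constructor shapes here are assumptions (see note above).
        TimelineFilterList prefixes = new TimelineFilterList(Operator.OR,
            new TimelinePrefixFilter(TimelineCompareOp.EQUAL, "yarn."),
            new TimelinePrefixFilter(TimelineCompareOp.EQUAL, "mapreduce."));

        // OR at the timeline level maps to MUST_PASS_ONE at the HBase level,
        // as getHBaseOperator() above shows.
        FilterList hbaseFilters = TimelineFilterUtils.createHBaseFilterList(
            EntityColumnPrefix.CONFIG, prefixes);
        System.out.println(hbaseFilters);
      }
    }

The resulting FilterList is a plain HBase Filter, so it can be set on a Scan or Get in the usual way.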
http://git-wip-us.apache.org/repos/asf/hadoop/blob/9af30d46/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/reader/filter/package-info.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/reader/filter/package-info.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/reader/filter/package-info.java
deleted file mode 100644
index f7c0705..0000000
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/reader/filter/package-info.java
+++ /dev/null
@@ -1,28 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-/**
- * Package org.apache.hadoop.yarn.server.timelineservice.reader.filter stores
- * timeline filter implementations.
- */
-@InterfaceAudience.Private
-@InterfaceStability.Unstable
-package org.apache.hadoop.yarn.server.timelineservice.reader.filter;
-
-import org.apache.hadoop.classification.InterfaceAudience;
-import org.apache.hadoop.classification.InterfaceStability;
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hadoop/blob/9af30d46/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/HBaseTimelineReaderImpl.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/HBaseTimelineReaderImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/HBaseTimelineReaderImpl.java
deleted file mode 100644
index 1ebfab2..0000000
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/HBaseTimelineReaderImpl.java
+++ /dev/null
@@ -1,96 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.yarn.server.timelineservice.storage;
-
-
-import java.io.IOException;
-import java.util.Set;
-
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hbase.client.Connection;
-import org.apache.hadoop.hbase.client.ConnectionFactory;
-import org.apache.hadoop.service.AbstractService;
-import org.apache.hadoop.yarn.api.records.timelineservice.TimelineEntity;
-import org.apache.hadoop.yarn.server.timelineservice.reader.TimelineDataToRetrieve;
-import org.apache.hadoop.yarn.server.timelineservice.reader.TimelineEntityFilters;
-import org.apache.hadoop.yarn.server.timelineservice.reader.TimelineReaderContext;
-import org.apache.hadoop.yarn.server.timelineservice.storage.common.HBaseTimelineStorageUtils;
-import org.apache.hadoop.yarn.server.timelineservice.storage.reader.EntityTypeReader;
-import org.apache.hadoop.yarn.server.timelineservice.storage.reader.TimelineEntityReader;
-import org.apache.hadoop.yarn.server.timelineservice.storage.reader.TimelineEntityReaderFactory;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-/**
- * HBase based implementation for {@link TimelineReader}.
- */
-public class HBaseTimelineReaderImpl
-    extends AbstractService implements TimelineReader {
-
-  private static final Logger LOG = LoggerFactory
-      .getLogger(HBaseTimelineReaderImpl.class);
-
-  private Configuration hbaseConf = null;
-  private Connection conn;
-
-  public HBaseTimelineReaderImpl() {
-    super(HBaseTimelineReaderImpl.class.getName());
-  }
-
-  @Override
-  public void serviceInit(Configuration conf) throws Exception {
-    super.serviceInit(conf);
-    hbaseConf = HBaseTimelineStorageUtils.getTimelineServiceHBaseConf(conf);
-    conn = ConnectionFactory.createConnection(hbaseConf);
-  }
-
-  @Override
-  protected void serviceStop() throws Exception {
-    if (conn != null) {
-      LOG.info("closing the hbase Connection");
-      conn.close();
-    }
-    super.serviceStop();
-  }
-
-  @Override
-  public TimelineEntity getEntity(TimelineReaderContext context,
-      TimelineDataToRetrieve dataToRetrieve) throws IOException {
-    TimelineEntityReader reader =
-        TimelineEntityReaderFactory.createSingleEntityReader(context,
-            dataToRetrieve);
-    return reader.readEntity(hbaseConf, conn);
-  }
-
-  @Override
-  public Set<TimelineEntity> getEntities(TimelineReaderContext context,
-      TimelineEntityFilters filters, TimelineDataToRetrieve dataToRetrieve)
-      throws IOException {
-    TimelineEntityReader reader =
-        TimelineEntityReaderFactory.createMultipleEntitiesReader(context,
-            filters, dataToRetrieve);
-    return reader.readEntities(hbaseConf, conn);
-  }
-
-  @Override
-  public Set<String> getEntityTypes(TimelineReaderContext context)
-      throws IOException {
-    EntityTypeReader reader = new EntityTypeReader(context);
-    return reader.readEntityTypes(hbaseConf, conn);
-  }
-}

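The reader is a plain Hadoop service, so its lifecycle is the usual init/start/stop sequence around the getEntity/getEntities calls shown above. A minimal sketch, assuming only what the class above exposes; TimelineReaderContext and TimelineDataToRetrieve construction are left to the caller, since their constructors are not part of this diff:

    import java.io.IOException;

    import org.apache.hadoop.yarn.api.records.timelineservice.TimelineEntity;
    import org.apache.hadoop.yarn.conf.YarnConfiguration;
    import org.apache.hadoop.yarn.server.timelineservice.reader.TimelineDataToRetrieve;
    import org.apache.hadoop.yarn.server.timelineservice.reader.TimelineReaderContext;
    import org.apache.hadoop.yarn.server.timelineservice.storage.HBaseTimelineReaderImpl;

    public class ReaderLifecycleSketch {

      /** Fetches a single entity once the caller has built the context. */
      static TimelineEntity fetch(HBaseTimelineReaderImpl reader,
          TimelineReaderContext context, TimelineDataToRetrieve toRetrieve)
          throws IOException {
        return reader.getEntity(context, toRetrieve);
      }

      public static void main(String[] args) {
        HBaseTimelineReaderImpl reader = new HBaseTimelineReaderImpl();
        // init() drives serviceInit(), which opens the HBase Connection.
        reader.init(new YarnConfiguration());
        reader.start();
        // ... build a TimelineReaderContext and call fetch(...) here ...
        // stop() drives serviceStop(), which closes the Connection.
        reader.stop();
      }
    }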



[09/50] [abbrv] hadoop git commit: YARN-7919. Refactor timelineservice-hbase module into submodules. Contributed by Haibo Chen.

Posted by ha...@apache.org.
http://git-wip-us.apache.org/repos/asf/hadoop/blob/9af30d46/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/HBaseTimelineWriterImpl.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/HBaseTimelineWriterImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/HBaseTimelineWriterImpl.java
deleted file mode 100644
index f938185..0000000
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/HBaseTimelineWriterImpl.java
+++ /dev/null
@@ -1,593 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.yarn.server.timelineservice.storage;
-
-import java.io.IOException;
-import java.util.Map;
-import java.util.Set;
-
-import org.apache.hadoop.classification.InterfaceAudience;
-import org.apache.hadoop.classification.InterfaceStability;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hbase.client.Connection;
-import org.apache.hadoop.hbase.client.ConnectionFactory;
-import org.apache.hadoop.security.UserGroupInformation;
-import org.apache.hadoop.service.AbstractService;
-import org.apache.hadoop.yarn.api.records.timelineservice.ApplicationEntity;
-import org.apache.hadoop.yarn.api.records.timelineservice.TimelineEntities;
-import org.apache.hadoop.yarn.api.records.timelineservice.TimelineEntity;
-import org.apache.hadoop.yarn.api.records.timelineservice.TimelineEvent;
-import org.apache.hadoop.yarn.api.records.timelineservice.TimelineMetric;
-import org.apache.hadoop.yarn.api.records.timelineservice.TimelineWriteResponse;
-import org.apache.hadoop.yarn.server.metrics.ApplicationMetricsConstants;
-import org.apache.hadoop.yarn.server.timelineservice.collector.TimelineCollectorContext;
-import org.apache.hadoop.yarn.server.timelineservice.storage.application.ApplicationColumn;
-import org.apache.hadoop.yarn.server.timelineservice.storage.application.ApplicationColumnPrefix;
-import org.apache.hadoop.yarn.server.timelineservice.storage.application.ApplicationRowKey;
-import org.apache.hadoop.yarn.server.timelineservice.storage.application.ApplicationTable;
-import org.apache.hadoop.yarn.server.timelineservice.storage.apptoflow.AppToFlowColumnPrefix;
-import org.apache.hadoop.yarn.server.timelineservice.storage.apptoflow.AppToFlowRowKey;
-import org.apache.hadoop.yarn.server.timelineservice.storage.apptoflow.AppToFlowTable;
-import org.apache.hadoop.yarn.server.timelineservice.storage.common.ColumnPrefix;
-import org.apache.hadoop.yarn.server.timelineservice.storage.common.EventColumnName;
-import org.apache.hadoop.yarn.server.timelineservice.storage.common.HBaseTimelineStorageUtils;
-import org.apache.hadoop.yarn.server.timelineservice.storage.common.KeyConverter;
-import org.apache.hadoop.yarn.server.timelineservice.storage.common.LongKeyConverter;
-import org.apache.hadoop.yarn.server.timelineservice.storage.common.Separator;
-import org.apache.hadoop.yarn.server.timelineservice.storage.common.StringKeyConverter;
-import org.apache.hadoop.yarn.server.timelineservice.storage.common.TypedBufferedMutator;
-import org.apache.hadoop.yarn.server.timelineservice.storage.entity.EntityColumn;
-import org.apache.hadoop.yarn.server.timelineservice.storage.entity.EntityColumnPrefix;
-import org.apache.hadoop.yarn.server.timelineservice.storage.entity.EntityRowKey;
-import org.apache.hadoop.yarn.server.timelineservice.storage.entity.EntityTable;
-import org.apache.hadoop.yarn.server.timelineservice.storage.flow.AggregationCompactionDimension;
-import org.apache.hadoop.yarn.server.timelineservice.storage.flow.AggregationOperation;
-import org.apache.hadoop.yarn.server.timelineservice.storage.flow.Attribute;
-import org.apache.hadoop.yarn.server.timelineservice.storage.flow.FlowActivityColumnPrefix;
-import org.apache.hadoop.yarn.server.timelineservice.storage.flow.FlowActivityRowKey;
-import org.apache.hadoop.yarn.server.timelineservice.storage.flow.FlowActivityTable;
-import org.apache.hadoop.yarn.server.timelineservice.storage.flow.FlowRunColumn;
-import org.apache.hadoop.yarn.server.timelineservice.storage.flow.FlowRunColumnPrefix;
-import org.apache.hadoop.yarn.server.timelineservice.storage.flow.FlowRunRowKey;
-import org.apache.hadoop.yarn.server.timelineservice.storage.flow.FlowRunTable;
-import org.apache.hadoop.yarn.server.timelineservice.storage.subapplication.SubApplicationColumn;
-import org.apache.hadoop.yarn.server.timelineservice.storage.subapplication.SubApplicationColumnPrefix;
-import org.apache.hadoop.yarn.server.timelineservice.storage.subapplication.SubApplicationRowKey;
-import org.apache.hadoop.yarn.server.timelineservice.storage.subapplication.SubApplicationTable;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-/**
- * This implements an HBase based backend for storing the timeline entity
- * information.
- * It writes to multiple tables in the backend.
- */
-@InterfaceAudience.Private
-@InterfaceStability.Unstable
-public class HBaseTimelineWriterImpl extends AbstractService implements
-    TimelineWriter {
-
-  private static final Logger LOG = LoggerFactory
-      .getLogger(HBaseTimelineWriterImpl.class);
-
-  private Connection conn;
-  private TypedBufferedMutator<EntityTable> entityTable;
-  private TypedBufferedMutator<AppToFlowTable> appToFlowTable;
-  private TypedBufferedMutator<ApplicationTable> applicationTable;
-  private TypedBufferedMutator<FlowActivityTable> flowActivityTable;
-  private TypedBufferedMutator<FlowRunTable> flowRunTable;
-  private TypedBufferedMutator<SubApplicationTable> subApplicationTable;
-
-  /**
-   * Used to convert string key components to and from storage format.
-   */
-  private final KeyConverter<String> stringKeyConverter =
-      new StringKeyConverter();
-
-  /**
-   * Used to convert Long key components to and from storage format.
-   */
-  private final KeyConverter<Long> longKeyConverter = new LongKeyConverter();
-
-  private enum Tables {
-    APPLICATION_TABLE, ENTITY_TABLE, SUBAPPLICATION_TABLE
-  };
-
-  public HBaseTimelineWriterImpl() {
-    super(HBaseTimelineWriterImpl.class.getName());
-  }
-
-  /**
-   * Initializes the HBase connection and the table mutators used for writing.
-   */
-  @Override
-  protected void serviceInit(Configuration conf) throws Exception {
-    super.serviceInit(conf);
-    Configuration hbaseConf =
-        HBaseTimelineStorageUtils.getTimelineServiceHBaseConf(conf);
-    conn = ConnectionFactory.createConnection(hbaseConf);
-    entityTable = new EntityTable().getTableMutator(hbaseConf, conn);
-    appToFlowTable = new AppToFlowTable().getTableMutator(hbaseConf, conn);
-    applicationTable = new ApplicationTable().getTableMutator(hbaseConf, conn);
-    flowRunTable = new FlowRunTable().getTableMutator(hbaseConf, conn);
-    flowActivityTable =
-        new FlowActivityTable().getTableMutator(hbaseConf, conn);
-    subApplicationTable =
-        new SubApplicationTable().getTableMutator(hbaseConf, conn);
-
-    UserGroupInformation ugi = UserGroupInformation.isSecurityEnabled() ?
-        UserGroupInformation.getLoginUser() :
-        UserGroupInformation.getCurrentUser();
-    LOG.info("Initialized HBaseTimelineWriterImpl UGI to " + ugi);
-  }
-
-  /**
-   * Stores all of the information in TimelineEntities to the timeline store.
-   */
-  @Override
-  public TimelineWriteResponse write(TimelineCollectorContext context,
-      TimelineEntities data, UserGroupInformation callerUgi)
-      throws IOException {
-
-    TimelineWriteResponse putStatus = new TimelineWriteResponse();
-
-    String clusterId = context.getClusterId();
-    String userId = context.getUserId();
-    String flowName = context.getFlowName();
-    String flowVersion = context.getFlowVersion();
-    long flowRunId = context.getFlowRunId();
-    String appId = context.getAppId();
-    String subApplicationUser = callerUgi.getShortUserName();
-
-    // defensive coding to avoid NPE during row key construction
-    if ((flowName == null) || (appId == null) || (clusterId == null)
-        || (userId == null)) {
-      LOG.warn("Found null for one of: flowName=" + flowName + " appId=" + appId
-          + " userId=" + userId + " clusterId=" + clusterId
-          + " . Not proceeding with writing to hbase");
-      return putStatus;
-    }
-
-    for (TimelineEntity te : data.getEntities()) {
-
-      // a set can have at most 1 null
-      if (te == null) {
-        continue;
-      }
-
-      // if the entity is the application, the destination is the application
-      // table
-      boolean isApplication = ApplicationEntity.isApplicationEntity(te);
-      byte[] rowKey;
-      if (isApplication) {
-        ApplicationRowKey applicationRowKey =
-            new ApplicationRowKey(clusterId, userId, flowName, flowRunId,
-                appId);
-        rowKey = applicationRowKey.getRowKey();
-        store(rowKey, te, flowVersion, Tables.APPLICATION_TABLE);
-      } else {
-        EntityRowKey entityRowKey =
-            new EntityRowKey(clusterId, userId, flowName, flowRunId, appId,
-                te.getType(), te.getIdPrefix(), te.getId());
-        rowKey = entityRowKey.getRowKey();
-        store(rowKey, te, flowVersion, Tables.ENTITY_TABLE);
-      }
-
-      if (!isApplication && !userId.equals(subApplicationUser)) {
-        SubApplicationRowKey subApplicationRowKey =
-            new SubApplicationRowKey(subApplicationUser, clusterId,
-                te.getType(), te.getIdPrefix(), te.getId(), userId);
-        rowKey = subApplicationRowKey.getRowKey();
-        store(rowKey, te, flowVersion, Tables.SUBAPPLICATION_TABLE);
-      }
-
-      if (isApplication) {
-        TimelineEvent event =
-            ApplicationEntity.getApplicationEvent(te,
-                ApplicationMetricsConstants.CREATED_EVENT_TYPE);
-        FlowRunRowKey flowRunRowKey =
-            new FlowRunRowKey(clusterId, userId, flowName, flowRunId);
-        if (event != null) {
-          onApplicationCreated(flowRunRowKey, clusterId, appId, userId,
-              flowVersion, te, event.getTimestamp());
-        }
-        // if it's an application entity, store metrics
-        storeFlowMetricsAppRunning(flowRunRowKey, appId, te);
-        // if application has finished, store its finish time and write final
-        // values of all metrics
-        event = ApplicationEntity.getApplicationEvent(te,
-            ApplicationMetricsConstants.FINISHED_EVENT_TYPE);
-        if (event != null) {
-          onApplicationFinished(flowRunRowKey, flowVersion, appId, te,
-              event.getTimestamp());
-        }
-      }
-    }
-    return putStatus;
-  }
-
-  private void onApplicationCreated(FlowRunRowKey flowRunRowKey,
-      String clusterId, String appId, String userId, String flowVersion,
-      TimelineEntity te, long appCreatedTimeStamp)
-      throws IOException {
-
-    String flowName = flowRunRowKey.getFlowName();
-    Long flowRunId = flowRunRowKey.getFlowRunId();
-
-    // store in App to flow table
-    AppToFlowRowKey appToFlowRowKey = new AppToFlowRowKey(appId);
-    byte[] rowKey = appToFlowRowKey.getRowKey();
-    AppToFlowColumnPrefix.FLOW_NAME.store(rowKey, appToFlowTable, clusterId,
-        null, flowName);
-    AppToFlowColumnPrefix.FLOW_RUN_ID.store(rowKey, appToFlowTable, clusterId,
-        null, flowRunId);
-    AppToFlowColumnPrefix.USER_ID.store(rowKey, appToFlowTable, clusterId, null,
-        userId);
-
-    // store in flow run table
-    storeAppCreatedInFlowRunTable(flowRunRowKey, appId, te);
-
-    // store in flow activity table
-    byte[] flowActivityRowKeyBytes =
-        new FlowActivityRowKey(flowRunRowKey.getClusterId(),
-            appCreatedTimeStamp, flowRunRowKey.getUserId(), flowName)
-            .getRowKey();
-    byte[] qualifier = longKeyConverter.encode(flowRunRowKey.getFlowRunId());
-    FlowActivityColumnPrefix.RUN_ID.store(flowActivityRowKeyBytes,
-        flowActivityTable, qualifier, null, flowVersion,
-        AggregationCompactionDimension.APPLICATION_ID.getAttribute(appId));
-  }
-
-  /*
-   * updates the {@link FlowRunTable} with Application Created information
-   */
-  private void storeAppCreatedInFlowRunTable(FlowRunRowKey flowRunRowKey,
-      String appId, TimelineEntity te) throws IOException {
-    byte[] rowKey = flowRunRowKey.getRowKey();
-    FlowRunColumn.MIN_START_TIME.store(rowKey, flowRunTable, null,
-        te.getCreatedTime(),
-        AggregationCompactionDimension.APPLICATION_ID.getAttribute(appId));
-  }
-
-
-  /*
-   * updates the {@link FlowRunTable} and {@link FlowActivityTable} when an
-   * application has finished
-   */
-  private void onApplicationFinished(FlowRunRowKey flowRunRowKey,
-      String flowVersion, String appId, TimelineEntity te,
-      long appFinishedTimeStamp) throws IOException {
-    // store in flow run table
-    storeAppFinishedInFlowRunTable(flowRunRowKey, appId, te,
-        appFinishedTimeStamp);
-
-    // indicate in the flow activity table that the app has finished
-    byte[] rowKey =
-        new FlowActivityRowKey(flowRunRowKey.getClusterId(),
-            appFinishedTimeStamp, flowRunRowKey.getUserId(),
-            flowRunRowKey.getFlowName()).getRowKey();
-    byte[] qualifier = longKeyConverter.encode(flowRunRowKey.getFlowRunId());
-    FlowActivityColumnPrefix.RUN_ID.store(rowKey, flowActivityTable, qualifier,
-        null, flowVersion,
-        AggregationCompactionDimension.APPLICATION_ID.getAttribute(appId));
-  }
-
-  /*
-   * Update the {@link FlowRunTable} with Application Finished information
-   */
-  private void storeAppFinishedInFlowRunTable(FlowRunRowKey flowRunRowKey,
-      String appId, TimelineEntity te, long appFinishedTimeStamp)
-      throws IOException {
-    byte[] rowKey = flowRunRowKey.getRowKey();
-    Attribute attributeAppId =
-        AggregationCompactionDimension.APPLICATION_ID.getAttribute(appId);
-    FlowRunColumn.MAX_END_TIME.store(rowKey, flowRunTable, null,
-        appFinishedTimeStamp, attributeAppId);
-
-    // store the final value of metrics since application has finished
-    Set<TimelineMetric> metrics = te.getMetrics();
-    if (metrics != null) {
-      storeFlowMetrics(rowKey, metrics, attributeAppId,
-          AggregationOperation.SUM_FINAL.getAttribute());
-    }
-  }
-
-  /*
-   * Updates the {@link FlowRunTable} with Application Metrics
-   */
-  private void storeFlowMetricsAppRunning(FlowRunRowKey flowRunRowKey,
-      String appId, TimelineEntity te) throws IOException {
-    Set<TimelineMetric> metrics = te.getMetrics();
-    if (metrics != null) {
-      byte[] rowKey = flowRunRowKey.getRowKey();
-      storeFlowMetrics(rowKey, metrics,
-          AggregationCompactionDimension.APPLICATION_ID.getAttribute(appId),
-          AggregationOperation.SUM.getAttribute());
-    }
-  }
-
-  private void storeFlowMetrics(byte[] rowKey, Set<TimelineMetric> metrics,
-      Attribute... attributes) throws IOException {
-    for (TimelineMetric metric : metrics) {
-      byte[] metricColumnQualifier = stringKeyConverter.encode(metric.getId());
-      Map<Long, Number> timeseries = metric.getValues();
-      for (Map.Entry<Long, Number> timeseriesEntry : timeseries.entrySet()) {
-        Long timestamp = timeseriesEntry.getKey();
-        FlowRunColumnPrefix.METRIC.store(rowKey, flowRunTable,
-            metricColumnQualifier, timestamp, timeseriesEntry.getValue(),
-            attributes);
-      }
-    }
-  }
-
-  /**
-   * Stores the Relations from the {@linkplain TimelineEntity} object.
-   */
-  private <T> void storeRelations(byte[] rowKey,
-      Map<String, Set<String>> connectedEntities, ColumnPrefix<T> columnPrefix,
-      TypedBufferedMutator<T> table) throws IOException {
-    if (connectedEntities != null) {
-      for (Map.Entry<String, Set<String>> connectedEntity : connectedEntities
-          .entrySet()) {
-        // id3?id4?id5
-        String compoundValue =
-            Separator.VALUES.joinEncoded(connectedEntity.getValue());
-        columnPrefix.store(rowKey, table,
-            stringKeyConverter.encode(connectedEntity.getKey()), null,
-            compoundValue);
-      }
-    }
-  }
-
-  /**
-   * Stores information from the {@linkplain TimelineEntity} object.
-   */
-  private void store(byte[] rowKey, TimelineEntity te,
-      String flowVersion,
-      Tables table) throws IOException {
-    switch (table) {
-    case APPLICATION_TABLE:
-      ApplicationColumn.ID.store(rowKey, applicationTable, null, te.getId());
-      ApplicationColumn.CREATED_TIME.store(rowKey, applicationTable, null,
-          te.getCreatedTime());
-      ApplicationColumn.FLOW_VERSION.store(rowKey, applicationTable, null,
-          flowVersion);
-      storeInfo(rowKey, te.getInfo(), flowVersion, ApplicationColumnPrefix.INFO,
-          applicationTable);
-      storeMetrics(rowKey, te.getMetrics(), ApplicationColumnPrefix.METRIC,
-          applicationTable);
-      storeEvents(rowKey, te.getEvents(), ApplicationColumnPrefix.EVENT,
-          applicationTable);
-      storeConfig(rowKey, te.getConfigs(), ApplicationColumnPrefix.CONFIG,
-          applicationTable);
-      storeRelations(rowKey, te.getIsRelatedToEntities(),
-          ApplicationColumnPrefix.IS_RELATED_TO, applicationTable);
-      storeRelations(rowKey, te.getRelatesToEntities(),
-          ApplicationColumnPrefix.RELATES_TO, applicationTable);
-      break;
-    case ENTITY_TABLE:
-      EntityColumn.ID.store(rowKey, entityTable, null, te.getId());
-      EntityColumn.TYPE.store(rowKey, entityTable, null, te.getType());
-      EntityColumn.CREATED_TIME.store(rowKey, entityTable, null,
-          te.getCreatedTime());
-      EntityColumn.FLOW_VERSION.store(rowKey, entityTable, null, flowVersion);
-      storeInfo(rowKey, te.getInfo(), flowVersion, EntityColumnPrefix.INFO,
-          entityTable);
-      storeMetrics(rowKey, te.getMetrics(), EntityColumnPrefix.METRIC,
-          entityTable);
-      storeEvents(rowKey, te.getEvents(), EntityColumnPrefix.EVENT,
-          entityTable);
-      storeConfig(rowKey, te.getConfigs(), EntityColumnPrefix.CONFIG,
-          entityTable);
-      storeRelations(rowKey, te.getIsRelatedToEntities(),
-          EntityColumnPrefix.IS_RELATED_TO, entityTable);
-      storeRelations(rowKey, te.getRelatesToEntities(),
-          EntityColumnPrefix.RELATES_TO, entityTable);
-      break;
-    case SUBAPPLICATION_TABLE:
-      SubApplicationColumn.ID.store(rowKey, subApplicationTable, null,
-          te.getId());
-      SubApplicationColumn.TYPE.store(rowKey, subApplicationTable, null,
-          te.getType());
-      SubApplicationColumn.CREATED_TIME.store(rowKey, subApplicationTable, null,
-          te.getCreatedTime());
-      SubApplicationColumn.FLOW_VERSION.store(rowKey, subApplicationTable, null,
-          flowVersion);
-      storeInfo(rowKey, te.getInfo(), flowVersion,
-          SubApplicationColumnPrefix.INFO, subApplicationTable);
-      storeMetrics(rowKey, te.getMetrics(), SubApplicationColumnPrefix.METRIC,
-          subApplicationTable);
-      storeEvents(rowKey, te.getEvents(), SubApplicationColumnPrefix.EVENT,
-          subApplicationTable);
-      storeConfig(rowKey, te.getConfigs(), SubApplicationColumnPrefix.CONFIG,
-          subApplicationTable);
-      storeRelations(rowKey, te.getIsRelatedToEntities(),
-          SubApplicationColumnPrefix.IS_RELATED_TO, subApplicationTable);
-      storeRelations(rowKey, te.getRelatesToEntities(),
-          SubApplicationColumnPrefix.RELATES_TO, subApplicationTable);
-      break;
-    default:
-      LOG.info("Invalid table name provided.");
-      break;
-    }
-  }
-
-  /**
-   * Stores the info entries from the {@linkplain TimelineEntity}.
-   */
-  private <T> void storeInfo(byte[] rowKey, Map<String, Object> info,
-      String flowVersion, ColumnPrefix<T> columnPrefix,
-      TypedBufferedMutator<T> table) throws IOException {
-    if (info != null) {
-      for (Map.Entry<String, Object> entry : info.entrySet()) {
-        columnPrefix.store(rowKey, table,
-            stringKeyConverter.encode(entry.getKey()), null, entry.getValue());
-      }
-    }
-  }
-
-  /**
-   * Stores the config information from the {@linkplain TimelineEntity}.
-   */
-  private <T> void storeConfig(byte[] rowKey, Map<String, String> config,
-      ColumnPrefix<T> columnPrefix, TypedBufferedMutator<T> table)
-      throws IOException {
-    if (config != null) {
-      for (Map.Entry<String, String> entry : config.entrySet()) {
-        byte[] configKey = stringKeyConverter.encode(entry.getKey());
-        columnPrefix.store(rowKey, table, configKey, null, entry.getValue());
-      }
-    }
-  }
-
-  /**
-   * Stores the {@linkplain TimelineMetric} information from the
-   * {@linkplain TimelineEntity} object.
-   */
-  private <T> void storeMetrics(byte[] rowKey, Set<TimelineMetric> metrics,
-      ColumnPrefix<T> columnPrefix, TypedBufferedMutator<T> table)
-      throws IOException {
-    if (metrics != null) {
-      for (TimelineMetric metric : metrics) {
-        byte[] metricColumnQualifier =
-            stringKeyConverter.encode(metric.getId());
-        Map<Long, Number> timeseries = metric.getValues();
-        for (Map.Entry<Long, Number> timeseriesEntry : timeseries.entrySet()) {
-          Long timestamp = timeseriesEntry.getKey();
-          columnPrefix.store(rowKey, table, metricColumnQualifier, timestamp,
-              timeseriesEntry.getValue());
-        }
-      }
-    }
-  }
-
-  /**
-   * Stores the events from the {@linkplain TimelineEntity} object.
-   */
-  private <T> void storeEvents(byte[] rowKey, Set<TimelineEvent> events,
-      ColumnPrefix<T> columnPrefix, TypedBufferedMutator<T> table)
-      throws IOException {
-    if (events != null) {
-      for (TimelineEvent event : events) {
-        if (event != null) {
-          String eventId = event.getId();
-          if (eventId != null) {
-            long eventTimestamp = event.getTimestamp();
-            // if the timestamp is not set, use the current timestamp
-            if (eventTimestamp == TimelineEvent.INVALID_TIMESTAMP) {
-              LOG.warn("timestamp is not set for event " + eventId +
-                  "! Using the current timestamp");
-              eventTimestamp = System.currentTimeMillis();
-            }
-            Map<String, Object> eventInfo = event.getInfo();
-            if ((eventInfo == null) || (eventInfo.size() == 0)) {
-              byte[] columnQualifierBytes =
-                  new EventColumnName(eventId, eventTimestamp, null)
-                      .getColumnQualifier();
-              columnPrefix.store(rowKey, table, columnQualifierBytes, null,
-                  Separator.EMPTY_BYTES);
-            } else {
-              for (Map.Entry<String, Object> info : eventInfo.entrySet()) {
-                // eventId=infoKey
-                byte[] columnQualifierBytes =
-                    new EventColumnName(eventId, eventTimestamp, info.getKey())
-                        .getColumnQualifier();
-                columnPrefix.store(rowKey, table, columnQualifierBytes, null,
-                    info.getValue());
-              } // for info: eventInfo
-            }
-          }
-        }
-      } // event : events
-    }
-  }
-
-  /*
-   * (non-Javadoc)
-   *
-   * @see
-   * org.apache.hadoop.yarn.server.timelineservice.storage
-   * .TimelineWriter#aggregate
-   * (org.apache.hadoop.yarn.api.records.timelineservice.TimelineEntity,
-   * org.apache
-   * .hadoop.yarn.server.timelineservice.storage.TimelineAggregationTrack)
-   */
-  @Override
-  public TimelineWriteResponse aggregate(TimelineEntity data,
-      TimelineAggregationTrack track) throws IOException {
-    return null;
-  }
-
-  /*
-   * (non-Javadoc)
-   *
-   * @see
-   * org.apache.hadoop.yarn.server.timelineservice.storage.TimelineWriter#flush
-   * ()
-   */
-  @Override
-  public void flush() throws IOException {
-    // flush all buffered mutators
-    entityTable.flush();
-    appToFlowTable.flush();
-    applicationTable.flush();
-    flowRunTable.flush();
-    flowActivityTable.flush();
-    subApplicationTable.flush();
-  }
-
-  /**
-   * Closes the HBase connections. The close APIs perform flushing and release any
-   * resources held.
-   */
-  @Override
-  protected void serviceStop() throws Exception {
-    if (entityTable != null) {
-      LOG.info("closing the entity table");
-      // The close API performs flushing and releases any resources held
-      entityTable.close();
-    }
-    if (appToFlowTable != null) {
-      LOG.info("closing the app_flow table");
-      // The close API performs flushing and releases any resources held
-      appToFlowTable.close();
-    }
-    if (applicationTable != null) {
-      LOG.info("closing the application table");
-      applicationTable.close();
-    }
-    if (flowRunTable != null) {
-      LOG.info("closing the flow run table");
-      // The close API performs flushing and releases any resources held
-      flowRunTable.close();
-    }
-    if (flowActivityTable != null) {
-      LOG.info("closing the flowActivity table");
-      // The close API performs flushing and releases any resources held
-      flowActivityTable.close();
-    }
-    if (subApplicationTable != null) {
-      subApplicationTable.close();
-    }
-    if (conn != null) {
-      LOG.info("closing the hbase Connection");
-      conn.close();
-    }
-    super.serviceStop();
-  }
-}

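The writer follows the same service pattern: write() fans each entity out to the application, entity, sub-application and flow tables, and flush() pushes out the buffered mutators. A minimal sketch under the same caveats; TimelineCollectorContext and TimelineEntities construction are not shown in this diff and are left to the caller:

    import java.io.IOException;

    import org.apache.hadoop.security.UserGroupInformation;
    import org.apache.hadoop.yarn.api.records.timelineservice.TimelineEntities;
    import org.apache.hadoop.yarn.conf.YarnConfiguration;
    import org.apache.hadoop.yarn.server.timelineservice.collector.TimelineCollectorContext;
    import org.apache.hadoop.yarn.server.timelineservice.storage.HBaseTimelineWriterImpl;

    public class WriterLifecycleSketch {

      /** Writes one batch and forces the buffered mutators out. */
      static void writeAndFlush(HBaseTimelineWriterImpl writer,
          TimelineCollectorContext context, TimelineEntities entities)
          throws IOException {
        // write() silently returns if clusterId, userId, flowName or appId
        // is null, so the context must be fully populated.
        writer.write(context, entities, UserGroupInformation.getCurrentUser());
        writer.flush();
      }

      public static void main(String[] args) {
        HBaseTimelineWriterImpl writer = new HBaseTimelineWriterImpl();
        // init() drives serviceInit(), which opens one buffered mutator per table.
        writer.init(new YarnConfiguration());
        writer.start();
        // ... build a TimelineCollectorContext plus a TimelineEntities batch
        // and call writeAndFlush(...) here ...
        // stop() drives serviceStop(), which flushes and closes everything.
        writer.stop();
      }
    }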
http://git-wip-us.apache.org/repos/asf/hadoop/blob/9af30d46/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/TimelineSchemaCreator.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/TimelineSchemaCreator.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/TimelineSchemaCreator.java
deleted file mode 100644
index c9f7cec..0000000
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/TimelineSchemaCreator.java
+++ /dev/null
@@ -1,367 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.yarn.server.timelineservice.storage;
-
-import java.io.IOException;
-import java.util.ArrayList;
-import java.util.List;
-
-import org.apache.commons.cli.CommandLine;
-import org.apache.commons.cli.CommandLineParser;
-import org.apache.commons.cli.HelpFormatter;
-import org.apache.commons.cli.Option;
-import org.apache.commons.cli.Options;
-import org.apache.commons.cli.ParseException;
-import org.apache.commons.cli.PosixParser;
-import org.apache.commons.lang.StringUtils;
-import org.apache.hadoop.classification.InterfaceAudience;
-import org.apache.hadoop.classification.InterfaceStability;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hbase.client.Admin;
-import org.apache.hadoop.hbase.client.Connection;
-import org.apache.hadoop.hbase.client.ConnectionFactory;
-import org.apache.hadoop.util.GenericOptionsParser;
-import org.apache.hadoop.yarn.conf.YarnConfiguration;
-import org.apache.hadoop.yarn.server.timelineservice.storage.application.ApplicationTable;
-import org.apache.hadoop.yarn.server.timelineservice.storage.apptoflow.AppToFlowTable;
-import org.apache.hadoop.yarn.server.timelineservice.storage.common.HBaseTimelineStorageUtils;
-import org.apache.hadoop.yarn.server.timelineservice.storage.entity.EntityTable;
-import org.apache.hadoop.yarn.server.timelineservice.storage.flow.FlowActivityTable;
-import org.apache.hadoop.yarn.server.timelineservice.storage.flow.FlowRunTable;
-import org.apache.hadoop.yarn.server.timelineservice.storage.subapplication.SubApplicationTable;
-
-import com.google.common.annotations.VisibleForTesting;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-/**
- * This creates the schema for an HBase based backend for storing application
- * timeline information.
- */
-@InterfaceAudience.Private
-@InterfaceStability.Unstable
-public final class TimelineSchemaCreator {
-  private TimelineSchemaCreator() {
-  }
-
-  final static String NAME = TimelineSchemaCreator.class.getSimpleName();
-  private static final Logger LOG =
-      LoggerFactory.getLogger(TimelineSchemaCreator.class);
-  private static final String SKIP_EXISTING_TABLE_OPTION_SHORT = "s";
-  private static final String APP_METRICS_TTL_OPTION_SHORT = "ma";
-  private static final String SUB_APP_METRICS_TTL_OPTION_SHORT = "msa";
-  private static final String APP_TABLE_NAME_SHORT = "a";
-  private static final String SUB_APP_TABLE_NAME_SHORT = "sa";
-  private static final String APP_TO_FLOW_TABLE_NAME_SHORT = "a2f";
-  private static final String ENTITY_METRICS_TTL_OPTION_SHORT = "me";
-  private static final String ENTITY_TABLE_NAME_SHORT = "e";
-  private static final String HELP_SHORT = "h";
-  private static final String CREATE_TABLES_SHORT = "c";
-
-  public static void main(String[] args) throws Exception {
-
-    LOG.info("Starting the schema creation");
-    Configuration hbaseConf =
-        HBaseTimelineStorageUtils.getTimelineServiceHBaseConf(
-            new YarnConfiguration());
-    // Grab input args and allow for -Dxyz style arguments
-    String[] otherArgs = new GenericOptionsParser(hbaseConf, args)
-        .getRemainingArgs();
-
-    // Grab the arguments we're looking for.
-    CommandLine commandLine = parseArgs(otherArgs);
-
-    if (commandLine.hasOption(HELP_SHORT)) {
-      // -help option has the highest precedence
-      printUsage();
-    } else if (commandLine.hasOption(CREATE_TABLES_SHORT)) {
-      // Grab the entityTableName argument
-      String entityTableName = commandLine.getOptionValue(
-          ENTITY_TABLE_NAME_SHORT);
-      if (StringUtils.isNotBlank(entityTableName)) {
-        hbaseConf.set(EntityTable.TABLE_NAME_CONF_NAME, entityTableName);
-      }
-      // Grab the entity metrics TTL
-      String entityTableMetricsTTL = commandLine.getOptionValue(
-          ENTITY_METRICS_TTL_OPTION_SHORT);
-      if (StringUtils.isNotBlank(entityTableMetricsTTL)) {
-        int entityMetricsTTL = Integer.parseInt(entityTableMetricsTTL);
-        new EntityTable().setMetricsTTL(entityMetricsTTL, hbaseConf);
-      }
-      // Grab the appToflowTableName argument
-      String appToflowTableName = commandLine.getOptionValue(
-          APP_TO_FLOW_TABLE_NAME_SHORT);
-      if (StringUtils.isNotBlank(appToflowTableName)) {
-        hbaseConf.set(AppToFlowTable.TABLE_NAME_CONF_NAME, appToflowTableName);
-      }
-      // Grab the applicationTableName argument
-      String applicationTableName = commandLine.getOptionValue(
-          APP_TABLE_NAME_SHORT);
-      if (StringUtils.isNotBlank(applicationTableName)) {
-        hbaseConf.set(ApplicationTable.TABLE_NAME_CONF_NAME,
-            applicationTableName);
-      }
-      // Grab the application metrics TTL
-      String applicationTableMetricsTTL = commandLine.getOptionValue(
-          APP_METRICS_TTL_OPTION_SHORT);
-      if (StringUtils.isNotBlank(applicationTableMetricsTTL)) {
-        int appMetricsTTL = Integer.parseInt(applicationTableMetricsTTL);
-        new ApplicationTable().setMetricsTTL(appMetricsTTL, hbaseConf);
-      }
-
-      // Grab the subApplicationTableName argument
-      String subApplicationTableName = commandLine.getOptionValue(
-          SUB_APP_TABLE_NAME_SHORT);
-      if (StringUtils.isNotBlank(subApplicationTableName)) {
-        hbaseConf.set(SubApplicationTable.TABLE_NAME_CONF_NAME,
-            subApplicationTableName);
-      }
-      // Grab the subApplication metrics TTL
-      String subApplicationTableMetricsTTL = commandLine
-          .getOptionValue(SUB_APP_METRICS_TTL_OPTION_SHORT);
-      if (StringUtils.isNotBlank(subApplicationTableMetricsTTL)) {
-        int subAppMetricsTTL = Integer.parseInt(subApplicationTableMetricsTTL);
-        new SubApplicationTable().setMetricsTTL(subAppMetricsTTL, hbaseConf);
-      }
-
-      // create all table schemas in hbase
-      final boolean skipExisting = commandLine.hasOption(
-          SKIP_EXISTING_TABLE_OPTION_SHORT);
-      createAllSchemas(hbaseConf, skipExisting);
-    } else {
-      // print usage information if -create is not specified
-      printUsage();
-    }
-  }
-
-  /**
-   * Parse command-line arguments.
-   *
-   * @param args
-   *          command line arguments passed to program.
-   * @return parsed command line.
-   * @throws ParseException
-   */
-  private static CommandLine parseArgs(String[] args) throws ParseException {
-    Options options = new Options();
-
-    // Input
-    Option o = new Option(HELP_SHORT, "help", false, "print help information");
-    o.setRequired(false);
-    options.addOption(o);
-
-    o = new Option(CREATE_TABLES_SHORT, "create", false,
-        "a mandatory option to create hbase tables");
-    o.setRequired(false);
-    options.addOption(o);
-
-    o = new Option(ENTITY_TABLE_NAME_SHORT, "entityTableName", true,
-        "entity table name");
-    o.setArgName("entityTableName");
-    o.setRequired(false);
-    options.addOption(o);
-
-    o = new Option(ENTITY_METRICS_TTL_OPTION_SHORT, "entityMetricsTTL", true,
-        "TTL for metrics column family");
-    o.setArgName("entityMetricsTTL");
-    o.setRequired(false);
-    options.addOption(o);
-
-    o = new Option(APP_TO_FLOW_TABLE_NAME_SHORT, "appToflowTableName", true,
-        "app to flow table name");
-    o.setArgName("appToflowTableName");
-    o.setRequired(false);
-    options.addOption(o);
-
-    o = new Option(APP_TABLE_NAME_SHORT, "applicationTableName", true,
-        "application table name");
-    o.setArgName("applicationTableName");
-    o.setRequired(false);
-    options.addOption(o);
-
-    o = new Option(APP_METRICS_TTL_OPTION_SHORT, "applicationMetricsTTL", true,
-        "TTL for metrics column family");
-    o.setArgName("applicationMetricsTTL");
-    o.setRequired(false);
-    options.addOption(o);
-
-    o = new Option(SUB_APP_TABLE_NAME_SHORT, "subApplicationTableName", true,
-        "subApplication table name");
-    o.setArgName("subApplicationTableName");
-    o.setRequired(false);
-    options.addOption(o);
-
-    o = new Option(SUB_APP_METRICS_TTL_OPTION_SHORT, "subApplicationMetricsTTL",
-        true, "TTL for metrics column family");
-    o.setArgName("subApplicationMetricsTTL");
-    o.setRequired(false);
-    options.addOption(o);
-
-    // Options without an argument
-    // No need to set arg name since we do not need an argument here
-    o = new Option(SKIP_EXISTING_TABLE_OPTION_SHORT, "skipExistingTable",
-        false, "skip existing Hbase tables and continue to create new tables");
-    o.setRequired(false);
-    options.addOption(o);
-
-    CommandLineParser parser = new PosixParser();
-    CommandLine commandLine = null;
-    try {
-      commandLine = parser.parse(options, args);
-    } catch (Exception e) {
-      LOG.error("ERROR: " + e.getMessage() + "\n");
-      HelpFormatter formatter = new HelpFormatter();
-      formatter.printHelp(NAME + " ", options, true);
-      System.exit(-1);
-    }
-
-    return commandLine;
-  }
-
-  private static void printUsage() {
-    StringBuilder usage = new StringBuilder("Command Usage: \n");
-    usage.append("TimelineSchemaCreator [-help] Display help info" +
-        " for all commands. Or\n");
-    usage.append("TimelineSchemaCreator -create [OPTIONAL_OPTIONS]" +
-        " Create hbase tables.\n\n");
-    usage.append("The Optional options for creating tables include: \n");
-    usage.append("[-entityTableName <Entity Table Name>] " +
-        "The name of the Entity table\n");
-    usage.append("[-entityMetricsTTL <Entity Table Metrics TTL>]" +
-        " TTL for metrics in the Entity table\n");
-    usage.append("[-appToflowTableName <AppToflow Table Name>]" +
-        " The name of the AppToFlow table\n");
-    usage.append("[-applicationTableName <Application Table Name>]" +
-        " The name of the Application table\n");
-    usage.append("[-applicationMetricsTTL <Application Table Metrics TTL>]" +
-        " TTL for metrics in the Application table\n");
-    usage.append("[-subApplicationTableName <SubApplication Table Name>]" +
-        " The name of the SubApplication table\n");
-    usage.append("[-subApplicationMetricsTTL " +
-        " <SubApplication Table Metrics TTL>]" +
-        " TTL for metrics in the SubApplication table\n");
-    usage.append("[-skipExistingTable] Whether to skip existing" +
-        " hbase tables\n");
-    System.out.println(usage.toString());
-  }
-
-  /**
-   * Create all table schemas and log success or exception if failed.
-   * @param hbaseConf the hbase configuration to create tables with
-   * @param skipExisting whether to skip existing hbase tables
-   */
-  private static void createAllSchemas(Configuration hbaseConf,
-      boolean skipExisting) {
-    List<Exception> exceptions = new ArrayList<>();
-    try {
-      if (skipExisting) {
-        LOG.info("Will skip existing tables and continue on htable creation "
-            + "exceptions!");
-      }
-      createAllTables(hbaseConf, skipExisting);
-      LOG.info("Successfully created HBase schema. ");
-    } catch (IOException e) {
-      LOG.error("Error in creating hbase tables: ", e);
-      exceptions.add(e);
-    }
-
-    if (exceptions.size() > 0) {
-      LOG.warn("Schema creation finished with the following exceptions");
-      for (Exception e : exceptions) {
-        LOG.warn(e.getMessage());
-      }
-      System.exit(-1);
-    } else {
-      LOG.info("Schema creation finished successfully");
-    }
-  }
-
-  @VisibleForTesting
-  public static void createAllTables(Configuration hbaseConf,
-      boolean skipExisting) throws IOException {
-
-    Connection conn = null;
-    try {
-      conn = ConnectionFactory.createConnection(hbaseConf);
-      Admin admin = conn.getAdmin();
-      if (admin == null) {
-        throw new IOException("Cannot create table since admin is null");
-      }
-      try {
-        new EntityTable().createTable(admin, hbaseConf);
-      } catch (IOException e) {
-        if (skipExisting) {
-          LOG.warn("Skip and continue on: " + e.getMessage());
-        } else {
-          throw e;
-        }
-      }
-      try {
-        new AppToFlowTable().createTable(admin, hbaseConf);
-      } catch (IOException e) {
-        if (skipExisting) {
-          LOG.warn("Skip and continue on: " + e.getMessage());
-        } else {
-          throw e;
-        }
-      }
-      try {
-        new ApplicationTable().createTable(admin, hbaseConf);
-      } catch (IOException e) {
-        if (skipExisting) {
-          LOG.warn("Skip and continue on: " + e.getMessage());
-        } else {
-          throw e;
-        }
-      }
-      try {
-        new FlowRunTable().createTable(admin, hbaseConf);
-      } catch (IOException e) {
-        if (skipExisting) {
-          LOG.warn("Skip and continue on: " + e.getMessage());
-        } else {
-          throw e;
-        }
-      }
-      try {
-        new FlowActivityTable().createTable(admin, hbaseConf);
-      } catch (IOException e) {
-        if (skipExisting) {
-          LOG.warn("Skip and continue on: " + e.getMessage());
-        } else {
-          throw e;
-        }
-      }
-      try {
-        new SubApplicationTable().createTable(admin, hbaseConf);
-      } catch (IOException e) {
-        if (skipExisting) {
-          LOG.warn("Skip and continue on: " + e.getMessage());
-        } else {
-          throw e;
-        }
-      }
-    } finally {
-      if (conn != null) {
-        conn.close();
-      }
-    }
-  }
-
-
-}

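For orientation, the options parsed above correspond to a command line along these lines; the TTL value is illustrative, and the fully qualified class name is the one documented for Timeline Service v.2:

    bin/hadoop org.apache.hadoop.yarn.server.timelineservice.storage.TimelineSchemaCreator \
        -create -entityMetricsTTL 604800 -skipExistingTable
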
http://git-wip-us.apache.org/repos/asf/hadoop/blob/9af30d46/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/application/ApplicationColumn.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/application/ApplicationColumn.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/application/ApplicationColumn.java
deleted file mode 100644
index 00eaa7e..0000000
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/application/ApplicationColumn.java
+++ /dev/null
@@ -1,108 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.yarn.server.timelineservice.storage.application;
-
-import java.io.IOException;
-
-import org.apache.hadoop.hbase.client.Result;
-import org.apache.hadoop.hbase.util.Bytes;
-import org.apache.hadoop.yarn.server.timelineservice.storage.common.Column;
-import org.apache.hadoop.yarn.server.timelineservice.storage.common.ColumnFamily;
-import org.apache.hadoop.yarn.server.timelineservice.storage.common.ColumnHelper;
-import org.apache.hadoop.yarn.server.timelineservice.storage.common.GenericConverter;
-import org.apache.hadoop.yarn.server.timelineservice.storage.common.LongConverter;
-import org.apache.hadoop.yarn.server.timelineservice.storage.common.Separator;
-import org.apache.hadoop.yarn.server.timelineservice.storage.common.TypedBufferedMutator;
-import org.apache.hadoop.yarn.server.timelineservice.storage.common.ValueConverter;
-import org.apache.hadoop.yarn.server.timelineservice.storage.flow.Attribute;
-
-/**
- * Identifies fully qualified columns for the {@link ApplicationTable}.
- */
-public enum ApplicationColumn implements Column<ApplicationTable> {
-
-  /**
-   * App id.
-   */
-  ID(ApplicationColumnFamily.INFO, "id"),
-
-  /**
-   * When the application was created.
-   */
-  CREATED_TIME(ApplicationColumnFamily.INFO, "created_time",
-      new LongConverter()),
-
-  /**
-   * The version of the flow that this app belongs to.
-   */
-  FLOW_VERSION(ApplicationColumnFamily.INFO, "flow_version");
-
-  private final ColumnHelper<ApplicationTable> column;
-  private final ColumnFamily<ApplicationTable> columnFamily;
-  private final String columnQualifier;
-  private final byte[] columnQualifierBytes;
-
-  private ApplicationColumn(ColumnFamily<ApplicationTable> columnFamily,
-      String columnQualifier) {
-    this(columnFamily, columnQualifier, GenericConverter.getInstance());
-  }
-
-  private ApplicationColumn(ColumnFamily<ApplicationTable> columnFamily,
-      String columnQualifier, ValueConverter converter) {
-    this.columnFamily = columnFamily;
-    this.columnQualifier = columnQualifier;
-    // Future-proof by ensuring the right column prefix hygiene.
-    this.columnQualifierBytes =
-        Bytes.toBytes(Separator.SPACE.encode(columnQualifier));
-    this.column = new ColumnHelper<ApplicationTable>(columnFamily, converter);
-  }
-
-  /**
-   * @return the column name value
-   */
-  private String getColumnQualifier() {
-    return columnQualifier;
-  }
-
-  public void store(byte[] rowKey,
-      TypedBufferedMutator<ApplicationTable> tableMutator, Long timestamp,
-      Object inputValue, Attribute... attributes) throws IOException {
-    column.store(rowKey, tableMutator, columnQualifierBytes, timestamp,
-        inputValue, attributes);
-  }
-
-  public Object readResult(Result result) throws IOException {
-    return column.readResult(result, columnQualifierBytes);
-  }
-
-  @Override
-  public byte[] getColumnQualifierBytes() {
-    return columnQualifierBytes.clone();
-  }
-
-  @Override
-  public byte[] getColumnFamilyBytes() {
-    return columnFamily.getBytes();
-  }
-
-  @Override
-  public ValueConverter getValueConverter() {
-    return column.getValueConverter();
-  }
-
-}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/9af30d46/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/application/ApplicationColumnFamily.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/application/ApplicationColumnFamily.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/application/ApplicationColumnFamily.java
deleted file mode 100644
index 97e5f7b..0000000
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/application/ApplicationColumnFamily.java
+++ /dev/null
@@ -1,65 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.yarn.server.timelineservice.storage.application;
-
-import org.apache.hadoop.hbase.util.Bytes;
-import org.apache.hadoop.yarn.server.timelineservice.storage.common.ColumnFamily;
-import org.apache.hadoop.yarn.server.timelineservice.storage.common.Separator;
-
-/**
- * Represents the application table column families.
- */
-public enum ApplicationColumnFamily implements ColumnFamily<ApplicationTable> {
-
-  /**
-   * Info column family houses known columns, specifically ones included in
-   * column family filters.
-   */
-  INFO("i"),
-
-  /**
-   * Configurations are in a separate column family for two reasons: a) the size
-   * of the config values can be very large and b) we expect that config values
-   * are often accessed separately from other metrics and info columns.
-   */
-  CONFIGS("c"),
-
-  /**
-   * Metrics have a separate column family, because they have a separate TTL.
-   */
-  METRICS("m");
-
-  /**
-   * Byte representation of this column family.
-   */
-  private final byte[] bytes;
-
-  /**
-   * @param value create a column family with this name. Must be lower case and
-   *          without spaces.
-   */
-  private ApplicationColumnFamily(String value) {
-    // column families should be lower case and not contain any spaces.
-    this.bytes = Bytes.toBytes(Separator.SPACE.encode(value));
-  }
-
-  public byte[] getBytes() {
-    return Bytes.copy(bytes);
-  }
-
-}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/9af30d46/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/application/ApplicationColumnPrefix.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/application/ApplicationColumnPrefix.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/application/ApplicationColumnPrefix.java
deleted file mode 100644
index 8297dc5..0000000
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/application/ApplicationColumnPrefix.java
+++ /dev/null
@@ -1,236 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.yarn.server.timelineservice.storage.application;
-
-import java.io.IOException;
-import java.util.Map;
-import java.util.NavigableMap;
-
-import org.apache.hadoop.hbase.client.Result;
-import org.apache.hadoop.hbase.util.Bytes;
-import org.apache.hadoop.yarn.server.timelineservice.storage.common.ColumnFamily;
-import org.apache.hadoop.yarn.server.timelineservice.storage.common.ColumnHelper;
-import org.apache.hadoop.yarn.server.timelineservice.storage.common.ColumnPrefix;
-import org.apache.hadoop.yarn.server.timelineservice.storage.common.GenericConverter;
-import org.apache.hadoop.yarn.server.timelineservice.storage.common.KeyConverter;
-import org.apache.hadoop.yarn.server.timelineservice.storage.common.LongConverter;
-import org.apache.hadoop.yarn.server.timelineservice.storage.common.Separator;
-import org.apache.hadoop.yarn.server.timelineservice.storage.common.TypedBufferedMutator;
-import org.apache.hadoop.yarn.server.timelineservice.storage.common.ValueConverter;
-import org.apache.hadoop.yarn.server.timelineservice.storage.flow.Attribute;
-
-/**
- * Identifies partially qualified columns for the application table.
- */
-public enum ApplicationColumnPrefix implements ColumnPrefix<ApplicationTable> {
-
-  /**
-   * To store TimelineEntity getIsRelatedToEntities values.
-   */
-  IS_RELATED_TO(ApplicationColumnFamily.INFO, "s"),
-
-  /**
-   * To store TimelineEntity getRelatesToEntities values.
-   */
-  RELATES_TO(ApplicationColumnFamily.INFO, "r"),
-
-  /**
-   * To store TimelineEntity info values.
-   */
-  INFO(ApplicationColumnFamily.INFO, "i"),
-
-  /**
-   * Lifecycle events for an application.
-   */
-  EVENT(ApplicationColumnFamily.INFO, "e"),
-
-  /**
-   * Config column stores configuration with config key as the column name.
-   */
-  CONFIG(ApplicationColumnFamily.CONFIGS, null),
-
-  /**
-   * Metrics are stored with the metric name as the column name.
-   */
-  METRIC(ApplicationColumnFamily.METRICS, null, new LongConverter());
-
-  private final ColumnHelper<ApplicationTable> column;
-  private final ColumnFamily<ApplicationTable> columnFamily;
-
-  /**
-   * Can be null for those cases where the provided column qualifier is the
-   * entire column name.
-   */
-  private final String columnPrefix;
-  private final byte[] columnPrefixBytes;
-
-  /**
-   * Private constructor, meant to be used by the enum definition.
-   *
-   * @param columnFamily that this column is stored in.
-   * @param columnPrefix for this column.
-   */
-  private ApplicationColumnPrefix(ColumnFamily<ApplicationTable> columnFamily,
-      String columnPrefix) {
-    this(columnFamily, columnPrefix, GenericConverter.getInstance());
-  }
-
-  /**
-   * Private constructor, meant to be used by the enum definition.
-   *
-   * @param columnFamily that this column is stored in.
-   * @param columnPrefix for this column.
-   * @param converter used to encode/decode values to be stored in HBase for
-   * this column prefix.
-   */
-  private ApplicationColumnPrefix(ColumnFamily<ApplicationTable> columnFamily,
-      String columnPrefix, ValueConverter converter) {
-    column = new ColumnHelper<ApplicationTable>(columnFamily, converter);
-    this.columnFamily = columnFamily;
-    this.columnPrefix = columnPrefix;
-    if (columnPrefix == null) {
-      this.columnPrefixBytes = null;
-    } else {
-      // Future-proof by ensuring the right column prefix hygiene.
-      this.columnPrefixBytes =
-          Bytes.toBytes(Separator.SPACE.encode(columnPrefix));
-    }
-  }
-
-  /**
-   * @return the column name value
-   */
-  private String getColumnPrefix() {
-    return columnPrefix;
-  }
-
-  @Override
-  public byte[] getColumnPrefixBytes(byte[] qualifierPrefix) {
-    return ColumnHelper.getColumnQualifier(
-        this.columnPrefixBytes, qualifierPrefix);
-  }
-
-  @Override
-  public byte[] getColumnPrefixBytes(String qualifierPrefix) {
-    return ColumnHelper.getColumnQualifier(
-        this.columnPrefixBytes, qualifierPrefix);
-  }
-
-  @Override
-  public byte[] getColumnFamilyBytes() {
-    return columnFamily.getBytes();
-  }
-
-  /*
-   * (non-Javadoc)
-   *
-   * @see
-   * org.apache.hadoop.yarn.server.timelineservice.storage.common.ColumnPrefix
-   * #store(byte[],
-   * org.apache.hadoop.yarn.server.timelineservice.storage.common.
-   * TypedBufferedMutator, java.lang.String, java.lang.Long, java.lang.Object)
-   */
-  public void store(byte[] rowKey,
-      TypedBufferedMutator<ApplicationTable> tableMutator, byte[] qualifier,
-      Long timestamp, Object inputValue, Attribute... attributes)
-      throws IOException {
-
-    // Null check
-    if (qualifier == null) {
-      throw new IOException("Cannot store column with null qualifier in "
-          + tableMutator.getName().getNameAsString());
-    }
-
-    byte[] columnQualifier = getColumnPrefixBytes(qualifier);
-
-    column.store(rowKey, tableMutator, columnQualifier, timestamp, inputValue,
-        attributes);
-  }
-
-  /*
-   * (non-Javadoc)
-   *
-   * @see
-   * org.apache.hadoop.yarn.server.timelineservice.storage.common.ColumnPrefix
-   * #store(byte[],
-   * org.apache.hadoop.yarn.server.timelineservice.storage.common.
-   * TypedBufferedMutator, java.lang.String, java.lang.Long, java.lang.Object)
-   */
-  public void store(byte[] rowKey,
-      TypedBufferedMutator<ApplicationTable> tableMutator, String qualifier,
-      Long timestamp, Object inputValue, Attribute...attributes)
-      throws IOException {
-
-    // Null check
-    if (qualifier == null) {
-      throw new IOException("Cannot store column with null qualifier in "
-          + tableMutator.getName().getNameAsString());
-    }
-
-    byte[] columnQualifier = getColumnPrefixBytes(qualifier);
-
-    column.store(rowKey, tableMutator, columnQualifier, timestamp, inputValue,
-        attributes);
-  }
-
-  /*
-   * (non-Javadoc)
-   *
-   * @see
-   * org.apache.hadoop.yarn.server.timelineservice.storage.common.ColumnPrefix
-   * #readResult(org.apache.hadoop.hbase.client.Result, java.lang.String)
-   */
-  public Object readResult(Result result, String qualifier) throws IOException {
-    byte[] columnQualifier =
-        ColumnHelper.getColumnQualifier(this.columnPrefixBytes, qualifier);
-    return column.readResult(result, columnQualifier);
-  }
-
-  public ValueConverter getValueConverter() {
-    return column.getValueConverter();
-  }
-
-  /*
-   * (non-Javadoc)
-   *
-   * @see
-   * org.apache.hadoop.yarn.server.timelineservice.storage.common.ColumnPrefix
-   * #readResults(org.apache.hadoop.hbase.client.Result,
-   * org.apache.hadoop.yarn.server.timelineservice.storage.common.KeyConverter)
-   */
-  public <K> Map<K, Object> readResults(Result result,
-      KeyConverter<K> keyConverter) throws IOException {
-    return column.readResults(result, columnPrefixBytes, keyConverter);
-  }
-
-  /*
-   * (non-Javadoc)
-   *
-   * @see
-   * org.apache.hadoop.yarn.server.timelineservice.storage.common.ColumnPrefix
-   * #readResultsWithTimestamps(org.apache.hadoop.hbase.client.Result,
-   * org.apache.hadoop.yarn.server.timelineservice.storage.common.KeyConverter)
-   */
-  public <K, V> NavigableMap<K, NavigableMap<Long, V>>
-      readResultsWithTimestamps(Result result, KeyConverter<K> keyConverter)
-      throws IOException {
-    return column.readResultsWithTimestamps(result, columnPrefixBytes,
-        keyConverter);
-  }
-
-}

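The prefix scheme above packs many dynamic columns into a single family: a short fixed prefix ("s", "r", "i", "e") is joined to a caller-supplied qualifier with the "!" separator, so all events, info keys and relations of an entity stay grouped and scannable by prefix, while CONFIG and METRIC use the bare key as the column name. A simplified sketch of the idea; the helper below is an illustrative stand-in for the Separator/ColumnHelper plumbing shown above:

    import java.nio.charset.StandardCharsets;

    /** Simplified illustration of prefix-qualified column names. */
    public final class ColumnPrefixSketch {

      // Illustrative stand-in for ColumnHelper.getColumnQualifier joining a
      // prefix and a qualifier with Separator.QUALIFIERS ("!").
      static byte[] qualifier(String prefix, String name) {
        return (prefix + "!" + name).getBytes(StandardCharsets.UTF_8);
      }

      public static void main(String[] args) {
        // An application's launch event lands in the info family under a
        // column named like "e!APP_LAUNCHED", next to "i!..." and "r!..." keys.
        byte[] q = qualifier("e", "APP_LAUNCHED");
        System.out.println(new String(q, StandardCharsets.UTF_8)); // e!APP_LAUNCHED
      }
    }
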
http://git-wip-us.apache.org/repos/asf/hadoop/blob/9af30d46/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/application/ApplicationRowKey.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/application/ApplicationRowKey.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/application/ApplicationRowKey.java
deleted file mode 100644
index e89a6a7..0000000
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/application/ApplicationRowKey.java
+++ /dev/null
@@ -1,251 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.yarn.server.timelineservice.storage.application;
-
-import java.util.List;
-
-import org.apache.hadoop.hbase.util.Bytes;
-import org.apache.hadoop.yarn.server.timelineservice.reader.TimelineReaderUtils;
-import org.apache.hadoop.yarn.server.timelineservice.storage.common.AppIdKeyConverter;
-import org.apache.hadoop.yarn.server.timelineservice.storage.common.KeyConverter;
-import org.apache.hadoop.yarn.server.timelineservice.storage.common.KeyConverterToString;
-import org.apache.hadoop.yarn.server.timelineservice.storage.common.LongConverter;
-import org.apache.hadoop.yarn.server.timelineservice.storage.common.Separator;
-
-/**
- * Represents a rowkey for the application table.
- */
-public class ApplicationRowKey {
-  private final String clusterId;
-  private final String userId;
-  private final String flowName;
-  private final Long flowRunId;
-  private final String appId;
-  private final ApplicationRowKeyConverter appRowKeyConverter =
-      new ApplicationRowKeyConverter();
-
-  public ApplicationRowKey(String clusterId, String userId, String flowName,
-      Long flowRunId, String appId) {
-    this.clusterId = clusterId;
-    this.userId = userId;
-    this.flowName = flowName;
-    this.flowRunId = flowRunId;
-    this.appId = appId;
-  }
-
-  public String getClusterId() {
-    return clusterId;
-  }
-
-  public String getUserId() {
-    return userId;
-  }
-
-  public String getFlowName() {
-    return flowName;
-  }
-
-  public Long getFlowRunId() {
-    return flowRunId;
-  }
-
-  public String getAppId() {
-    return appId;
-  }
-
-  /**
-   * Constructs a row key for the application table as follows:
-   * {@code clusterId!userName!flowName!flowRunId!appId}.
-   *
-   * @return byte array with the row key
-   */
-  public byte[] getRowKey() {
-    return appRowKeyConverter.encode(this);
-  }
-
-  /**
-   * Given the raw row key as bytes, returns the row key as an object.
-   *
-   * @param rowKey Byte representation of row key.
-   * @return An <cite>ApplicationRowKey</cite> object.
-   */
-  public static ApplicationRowKey parseRowKey(byte[] rowKey) {
-    return new ApplicationRowKeyConverter().decode(rowKey);
-  }
-
-  /**
-   * Constructs a row key for the application table as follows:
-   * {@code clusterId!userName!flowName!flowRunId!appId}.
-   * @return String representation of row key.
-   */
-  public String getRowKeyAsString() {
-    return appRowKeyConverter.encodeAsString(this);
-  }
-
-  /**
-   * Given the encoded row key as string, returns the row key as an object.
-   * @param encodedRowKey String representation of row key.
-   * @return A <cite>ApplicationRowKey</cite> object.
-   */
-  public static ApplicationRowKey parseRowKeyFromString(String encodedRowKey) {
-    return new ApplicationRowKeyConverter().decodeFromString(encodedRowKey);
-  }
-
-  /**
-   * Encodes and decodes row key for application table. The row key is of the
-   * form: clusterId!userName!flowName!flowRunId!appId. flowRunId is a long,
-   * appId is encoded and decoded using {@link AppIdKeyConverter} and rest are
-   * strings.
-   * <p>
-   */
-  final private static class ApplicationRowKeyConverter implements
-      KeyConverter<ApplicationRowKey>, KeyConverterToString<ApplicationRowKey> {
-
-    private final KeyConverter<String> appIDKeyConverter =
-        new AppIdKeyConverter();
-
-    /**
-     * Intended for use in ApplicationRowKey only.
-     */
-    private ApplicationRowKeyConverter() {
-    }
-
-    /**
-     * Application row key is of the form
-     * clusterId!userName!flowName!flowRunId!appId with each segment separated
-     * by !. The sizes below indicate the size of each of these segments in
-     * sequence. clusterId, userName and flowName are strings. flowRunId is a
-     * long, hence 8 bytes in size. The app id is represented as 12 bytes: the
-     * cluster timestamp part of the app id takes 8 bytes (long) and the
-     * sequence id takes 4 bytes (int). Strings are variable in size (i.e. they
-     * end whenever a separator is encountered). This is used while decoding
-     * and helps in determining where to split.
-     */
-    private static final int[] SEGMENT_SIZES = {Separator.VARIABLE_SIZE,
-        Separator.VARIABLE_SIZE, Separator.VARIABLE_SIZE, Bytes.SIZEOF_LONG,
-        AppIdKeyConverter.getKeySize() };
-
-    /*
-     * (non-Javadoc)
-     *
-     * Encodes ApplicationRowKey object into a byte array with each
-     * component/field in ApplicationRowKey separated by Separator#QUALIFIERS.
-     * This leads to an application table row key of the form
-     * clusterId!userName!flowName!flowRunId!appId If flowRunId in passed
-     * ApplicationRowKey object is null (and the fields preceding it i.e.
-     * clusterId, userId and flowName are not null), this returns a row key
-     * prefix of the form clusterId!userName!flowName! and if appId in
-     * ApplicationRowKey is null (other 4 components all are not null), this
-     * returns a row key prefix of the form
-     * clusterId!userName!flowName!flowRunId! flowRunId is inverted while
-     * encoding as it helps maintain a descending order for row keys in the
-     * application table.
-     *
-     * @see
-     * org.apache.hadoop.yarn.server.timelineservice.storage.common
-     * .KeyConverter#encode(java.lang.Object)
-     */
-    @Override
-    public byte[] encode(ApplicationRowKey rowKey) {
-      byte[] cluster =
-          Separator.encode(rowKey.getClusterId(), Separator.SPACE,
-              Separator.TAB, Separator.QUALIFIERS);
-      byte[] user =
-          Separator.encode(rowKey.getUserId(), Separator.SPACE, Separator.TAB,
-              Separator.QUALIFIERS);
-      byte[] flow =
-          Separator.encode(rowKey.getFlowName(), Separator.SPACE,
-              Separator.TAB, Separator.QUALIFIERS);
-      byte[] first = Separator.QUALIFIERS.join(cluster, user, flow);
-      // Note that flowRunId is a long, so we can't encode them all at the same
-      // time.
-      if (rowKey.getFlowRunId() == null) {
-        return Separator.QUALIFIERS.join(first, Separator.EMPTY_BYTES);
-      }
-      byte[] second =
-          Bytes.toBytes(LongConverter.invertLong(
-              rowKey.getFlowRunId()));
-      if (rowKey.getAppId() == null || rowKey.getAppId().isEmpty()) {
-        return Separator.QUALIFIERS.join(first, second, Separator.EMPTY_BYTES);
-      }
-      byte[] third = appIDKeyConverter.encode(rowKey.getAppId());
-      return Separator.QUALIFIERS.join(first, second, third);
-    }
-
-    /*
-     * (non-Javadoc)
-     *
-     * Decodes an application row key of the form
-     * clusterId!userName!flowName!flowRunId!appId represented in byte format
-     * and converts it into an ApplicationRowKey object. flowRunId is inverted
-     * while decoding as it was inverted while encoding.
-     *
-     * @see
-     * org.apache.hadoop.yarn.server.timelineservice.storage.common
-     * .KeyConverter#decode(byte[])
-     */
-    @Override
-    public ApplicationRowKey decode(byte[] rowKey) {
-      byte[][] rowKeyComponents =
-          Separator.QUALIFIERS.split(rowKey, SEGMENT_SIZES);
-      if (rowKeyComponents.length != 5) {
-        throw new IllegalArgumentException("the row key is not valid for "
-            + "an application");
-      }
-      String clusterId =
-          Separator.decode(Bytes.toString(rowKeyComponents[0]),
-              Separator.QUALIFIERS, Separator.TAB, Separator.SPACE);
-      String userId =
-          Separator.decode(Bytes.toString(rowKeyComponents[1]),
-              Separator.QUALIFIERS, Separator.TAB, Separator.SPACE);
-      String flowName =
-          Separator.decode(Bytes.toString(rowKeyComponents[2]),
-              Separator.QUALIFIERS, Separator.TAB, Separator.SPACE);
-      Long flowRunId =
-          LongConverter.invertLong(Bytes.toLong(rowKeyComponents[3]));
-      String appId = appIDKeyConverter.decode(rowKeyComponents[4]);
-      return new ApplicationRowKey(clusterId, userId, flowName, flowRunId,
-          appId);
-    }
-
-    @Override
-    public String encodeAsString(ApplicationRowKey key) {
-      if (key.clusterId == null || key.userId == null || key.flowName == null
-          || key.flowRunId == null || key.appId == null) {
-        throw new IllegalArgumentException();
-      }
-      return TimelineReaderUtils
-          .joinAndEscapeStrings(new String[] {key.clusterId, key.userId,
-              key.flowName, key.flowRunId.toString(), key.appId});
-    }
-
-    @Override
-    public ApplicationRowKey decodeFromString(String encodedRowKey) {
-      List<String> split = TimelineReaderUtils.split(encodedRowKey);
-      if (split == null || split.size() != 5) {
-        throw new IllegalArgumentException(
-            "Invalid row key for application table.");
-      }
-      Long flowRunId = Long.valueOf(split.get(3));
-      return new ApplicationRowKey(split.get(0), split.get(1), split.get(2),
-          flowRunId, split.get(4));
-    }
-  }
-
-}

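A quick aside on the inversion applied to flowRunId above: HBase scans row keys in ascending unsigned-byte order, so storing Long.MAX_VALUE minus the run id makes newer (larger) run ids sort first. A minimal, self-contained sketch of the idea behind LongConverter.invertLong; the demo class itself is illustrative:

    import org.apache.hadoop.hbase.util.Bytes;

    /** Demo: inverted longs sort newest-first under HBase's byte ordering. */
    public final class InvertLongDemo {

      // Same idea as LongConverter.invertLong used by the encoder above.
      static long invert(long value) {
        return Long.MAX_VALUE - value;
      }

      public static void main(String[] args) {
        byte[] olderRun = Bytes.toBytes(invert(1000L));
        byte[] newerRun = Bytes.toBytes(invert(2000L));
        // Bytes.compareTo applies the same unsigned lexicographic order HBase
        // uses for row keys; the newer run compares as smaller, so a forward
        // scan returns it first.
        System.out.println(Bytes.compareTo(newerRun, olderRun) < 0); // true
      }
    }
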
http://git-wip-us.apache.org/repos/asf/hadoop/blob/9af30d46/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/application/ApplicationRowKeyPrefix.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/application/ApplicationRowKeyPrefix.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/application/ApplicationRowKeyPrefix.java
deleted file mode 100644
index f61b0e9..0000000
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/application/ApplicationRowKeyPrefix.java
+++ /dev/null
@@ -1,69 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.yarn.server.timelineservice.storage.application;
-
-import org.apache.hadoop.yarn.server.timelineservice.storage.common.RowKeyPrefix;
-
-/**
- * Represents a partial rowkey (without flowName or without flowName and
- * flowRunId) for the application table.
- */
-public class ApplicationRowKeyPrefix extends ApplicationRowKey implements
-    RowKeyPrefix<ApplicationRowKey> {
-
-  /**
-   * Creates a prefix which generates the following rowKeyPrefixes for the
-   * application table: {@code clusterId!userName!flowName!}.
-   *
-   * @param clusterId the cluster on which applications ran
-   * @param userId the user that ran applications
-   * @param flowName the name of the flow that was run by the user on the
-   *          cluster
-   */
-  public ApplicationRowKeyPrefix(String clusterId, String userId,
-      String flowName) {
-    super(clusterId, userId, flowName, null, null);
-  }
-
-  /**
-   * Creates a prefix which generates the following rowKeyPrefixes for the
-   * application table: {@code clusterId!userName!flowName!flowRunId!}.
-   *
-   * @param clusterId identifying the cluster
-   * @param userId identifying the user
-   * @param flowName identifying the flow
-   * @param flowRunId identifying the instance of this flow
-   */
-  public ApplicationRowKeyPrefix(String clusterId, String userId,
-      String flowName, Long flowRunId) {
-    super(clusterId, userId, flowName, flowRunId, null);
-  }
-
-  /*
-   * (non-Javadoc)
-   *
-   * @see
-   * org.apache.hadoop.yarn.server.timelineservice.storage.application.
-   * RowKeyPrefix#getRowKeyPrefix()
-   */
-  @Override
-  public byte[] getRowKeyPrefix() {
-    return super.getRowKey();
-  }
-
-}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/9af30d46/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/application/ApplicationTable.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/application/ApplicationTable.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/application/ApplicationTable.java
deleted file mode 100644
index 4da720e..0000000
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/application/ApplicationTable.java
+++ /dev/null
@@ -1,170 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.yarn.server.timelineservice.storage.application;
-
-import java.io.IOException;
-
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hbase.HColumnDescriptor;
-import org.apache.hadoop.hbase.HTableDescriptor;
-import org.apache.hadoop.hbase.TableName;
-import org.apache.hadoop.hbase.client.Admin;
-import org.apache.hadoop.hbase.regionserver.BloomType;
-import org.apache.hadoop.yarn.conf.YarnConfiguration;
-import org.apache.hadoop.yarn.server.timelineservice.storage.common.BaseTable;
-import org.apache.hadoop.yarn.server.timelineservice.storage.common.TimelineHBaseSchemaConstants;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-/**
- * The application table has column families info, config and metrics. Info
- * stores information about a YARN application entity, config stores
- * configuration data of a YARN application, metrics stores the metrics of a
- * YARN application. This table is entirely analogous to the entity table but
- * created for better performance.
- *
- * Example application table record:
- *
- * <pre>
- * |-------------------------------------------------------------------------|
- * |  Row       | Column Family                | Column Family| Column Family|
- * |  key       | info                         | metrics      | config       |
- * |-------------------------------------------------------------------------|
- * | clusterId! | id:appId                     | metricId1:   | configKey1:  |
- * | userName!  |                              | metricValue1 | configValue1 |
- * | flowName!  | created_time:                | @timestamp1  |              |
- * | flowRunId! | 1392993084018                |              | configKey2:  |
- * | AppId      |                              | metricId1:   | configValue2 |
- * |            | i!infoKey:                   | metricValue2 |              |
- * |            | infoValue                    | @timestamp2  |              |
- * |            |                              |              |              |
- * |            | r!relatesToKey:              | metricId2:   |              |
- * |            | id3=id4=id5                  | metricValue1 |              |
- * |            |                              | @timestamp2  |              |
- * |            | s!isRelatedToKey:            |              |              |
- * |            | id7=id9=id6                  |              |              |
- * |            |                              |              |              |
- * |            | e!eventId=timestamp=infoKey: |              |              |
- * |            | eventInfoValue               |              |              |
- * |            |                              |              |              |
- * |            | flowVersion:                 |              |              |
- * |            | versionValue                 |              |              |
- * |-------------------------------------------------------------------------|
- * </pre>
- */
-public class ApplicationTable extends BaseTable<ApplicationTable> {
-  /** application prefix. */
-  private static final String PREFIX =
-      YarnConfiguration.TIMELINE_SERVICE_PREFIX + "application";
-
-  /** config param name that specifies the application table name. */
-  public static final String TABLE_NAME_CONF_NAME = PREFIX + ".table.name";
-
-  /**
-   * config param name that specifies the TTL for metrics column family in
-   * application table.
-   */
-  private static final String METRICS_TTL_CONF_NAME = PREFIX
-      + ".table.metrics.ttl";
-
-  /**
-   * config param name that specifies max-versions for metrics column family in
-   * entity table.
-   */
-  private static final String METRICS_MAX_VERSIONS =
-      PREFIX + ".table.metrics.max-versions";
-
-  /** default value for application table name. */
-  private static final String DEFAULT_TABLE_NAME =
-      "timelineservice.application";
-
-  /** default TTL is 30 days for metrics timeseries. */
-  private static final int DEFAULT_METRICS_TTL = 2592000;
-
-  /** default max number of versions. */
-  private static final int DEFAULT_METRICS_MAX_VERSIONS = 10000;
-
-  private static final Logger LOG =
-      LoggerFactory.getLogger(ApplicationTable.class);
-
-  public ApplicationTable() {
-    super(TABLE_NAME_CONF_NAME, DEFAULT_TABLE_NAME);
-  }
-
-  /*
-   * (non-Javadoc)
-   *
-   * @see
-   * org.apache.hadoop.yarn.server.timelineservice.storage.BaseTable#createTable
-   * (org.apache.hadoop.hbase.client.Admin,
-   * org.apache.hadoop.conf.Configuration)
-   */
-  public void createTable(Admin admin, Configuration hbaseConf)
-      throws IOException {
-
-    TableName table = getTableName(hbaseConf);
-    if (admin.tableExists(table)) {
-      // do not disable / delete existing table
-      // similar to the approach taken by map-reduce jobs when
-      // output directory exists
-      throw new IOException("Table " + table.getNameAsString()
-          + " already exists.");
-    }
-
-    HTableDescriptor applicationTableDescp = new HTableDescriptor(table);
-    HColumnDescriptor infoCF =
-        new HColumnDescriptor(ApplicationColumnFamily.INFO.getBytes());
-    infoCF.setBloomFilterType(BloomType.ROWCOL);
-    applicationTableDescp.addFamily(infoCF);
-
-    HColumnDescriptor configCF =
-        new HColumnDescriptor(ApplicationColumnFamily.CONFIGS.getBytes());
-    configCF.setBloomFilterType(BloomType.ROWCOL);
-    configCF.setBlockCacheEnabled(true);
-    applicationTableDescp.addFamily(configCF);
-
-    HColumnDescriptor metricsCF =
-        new HColumnDescriptor(ApplicationColumnFamily.METRICS.getBytes());
-    applicationTableDescp.addFamily(metricsCF);
-    metricsCF.setBlockCacheEnabled(true);
-    // always keep 1 version (the latest)
-    metricsCF.setMinVersions(1);
-    metricsCF.setMaxVersions(
-        hbaseConf.getInt(METRICS_MAX_VERSIONS, DEFAULT_METRICS_MAX_VERSIONS));
-    metricsCF.setTimeToLive(hbaseConf.getInt(METRICS_TTL_CONF_NAME,
-        DEFAULT_METRICS_TTL));
-    applicationTableDescp.setRegionSplitPolicyClassName(
-        "org.apache.hadoop.hbase.regionserver.KeyPrefixRegionSplitPolicy");
-    applicationTableDescp.setValue("KeyPrefixRegionSplitPolicy.prefix_length",
-        TimelineHBaseSchemaConstants.USERNAME_SPLIT_KEY_PREFIX_LENGTH);
-    admin.createTable(applicationTableDescp,
-        TimelineHBaseSchemaConstants.getUsernameSplits());
-    LOG.info("Status of table creation for " + table.getNameAsString() + "="
-        + admin.tableExists(table));
-  }
-
-  /**
-   * @param metricsTTL time to live parameter for the metrics in this table.
-   * @param hbaseConf configuration in which to set the metrics TTL config
-   *          variable.
-   */
-  public void setMetricsTTL(int metricsTTL, Configuration hbaseConf) {
-    hbaseConf.setInt(METRICS_TTL_CONF_NAME, metricsTTL);
-  }
-
-}

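The 2592000 default above is 30 days expressed in seconds (30 * 24 * 60 * 60). A hedged sketch of how a caller could shorten the metrics TTL before schema creation via the setMetricsTTL hook; the seven-day figure is an example, and the TimelineSchemaCreator package is assumed from the diffs earlier in this message:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.yarn.server.timelineservice.storage.TimelineSchemaCreator;
    import org.apache.hadoop.yarn.server.timelineservice.storage.application.ApplicationTable;

    /** Sketch: trim the application table's metrics TTL, then create the schema. */
    public final class MetricsTtlExample {
      public static void main(String[] args) throws Exception {
        Configuration hbaseConf = HBaseConfiguration.create();
        int sevenDays = 7 * 24 * 60 * 60; // 604800 seconds, instead of the 30-day default
        new ApplicationTable().setMetricsTTL(sevenDays, hbaseConf);
        // createAllTables (shown earlier in this message) reads the TTL from
        // hbaseConf when it builds the metrics column family.
        TimelineSchemaCreator.createAllTables(hbaseConf, /* skipExisting = */ true);
      }
    }
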
http://git-wip-us.apache.org/repos/asf/hadoop/blob/9af30d46/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/application/package-info.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/application/package-info.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/application/package-info.java
deleted file mode 100644
index 03f508f..0000000
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/application/package-info.java
+++ /dev/null
@@ -1,28 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-/**
- * Package org.apache.hadoop.yarn.server.timelineservice.storage.application
- * contains classes related to implementation for application table.
- */
-@InterfaceAudience.Private
-@InterfaceStability.Unstable
-package org.apache.hadoop.yarn.server.timelineservice.storage.application;
-
-import org.apache.hadoop.classification.InterfaceAudience;
-import org.apache.hadoop.classification.InterfaceStability;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/9af30d46/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/apptoflow/AppToFlowColumn.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/apptoflow/AppToFlowColumn.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/apptoflow/AppToFlowColumn.java
deleted file mode 100644
index 67497fc..0000000
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/apptoflow/AppToFlowColumn.java
+++ /dev/null
@@ -1,101 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.yarn.server.timelineservice.storage.apptoflow;
-
-
-import org.apache.hadoop.hbase.client.Result;
-import org.apache.hadoop.hbase.util.Bytes;
-import org.apache.hadoop.yarn.server.timelineservice.storage.common.Column;
-import org.apache.hadoop.yarn.server.timelineservice.storage.common.ColumnFamily;
-import org.apache.hadoop.yarn.server.timelineservice.storage.common.ColumnHelper;
-import org.apache.hadoop.yarn.server.timelineservice.storage.common.Separator;
-import org.apache.hadoop.yarn.server.timelineservice.storage.common.TypedBufferedMutator;
-import org.apache.hadoop.yarn.server.timelineservice.storage.common.ValueConverter;
-import org.apache.hadoop.yarn.server.timelineservice.storage.flow.Attribute;
-
-import java.io.IOException;
-
-/**
- * Identifies fully qualified columns for the {@link AppToFlowTable}.
- */
-public enum AppToFlowColumn implements Column<AppToFlowTable> {
-
-  /**
-   * The flow ID.
-   */
-  FLOW_ID(AppToFlowColumnFamily.MAPPING, "flow_id"),
-
-  /**
-   * The flow run ID.
-   */
-  FLOW_RUN_ID(AppToFlowColumnFamily.MAPPING, "flow_run_id"),
-
-  /**
-   * The user.
-   */
-  USER_ID(AppToFlowColumnFamily.MAPPING, "user_id");
-
-  private final ColumnHelper<AppToFlowTable> column;
-  private final ColumnFamily<AppToFlowTable> columnFamily;
-  private final String columnQualifier;
-  private final byte[] columnQualifierBytes;
-
-  AppToFlowColumn(ColumnFamily<AppToFlowTable> columnFamily,
-      String columnQualifier) {
-    this.columnFamily = columnFamily;
-    this.columnQualifier = columnQualifier;
-    // Future-proof by ensuring the right column prefix hygiene.
-    this.columnQualifierBytes =
-        Bytes.toBytes(Separator.SPACE.encode(columnQualifier));
-    this.column = new ColumnHelper<AppToFlowTable>(columnFamily);
-  }
-
-  /**
-   * @return the column name value
-   */
-  private String getColumnQualifier() {
-    return columnQualifier;
-  }
-
-  @Override
-  public byte[] getColumnQualifierBytes() {
-    return columnQualifierBytes.clone();
-  }
-
-  public void store(byte[] rowKey,
-      TypedBufferedMutator<AppToFlowTable> tableMutator, Long timestamp,
-      Object inputValue, Attribute... attributes) throws IOException {
-    column.store(rowKey, tableMutator, columnQualifierBytes, timestamp,
-        inputValue, attributes);
-  }
-
-  @Override
-  public byte[] getColumnFamilyBytes() {
-    return columnFamily.getBytes();
-  }
-
-  @Override
-  public ValueConverter getValueConverter() {
-    return column.getValueConverter();
-  }
-
-  public Object readResult(Result result) throws IOException {
-    return column.readResult(result, columnQualifierBytes);
-  }
-
-}


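The three columns above exist so that a reader holding only an application id can recover the flow it belongs to. A short illustrative sketch, assuming the caller has already fetched the HBase Result for the application's row in this table:

    import java.io.IOException;

    import org.apache.hadoop.hbase.client.Result;
    import org.apache.hadoop.yarn.server.timelineservice.storage.apptoflow.AppToFlowColumn;

    /** Sketch: recover an application's flow context from its app-to-flow row. */
    public final class FlowContextLookup {

      // Assumes `result` was read from the app-to-flow table for one application.
      static String flowContext(Result result) throws IOException {
        Object flowId = AppToFlowColumn.FLOW_ID.readResult(result);
        Object flowRunId = AppToFlowColumn.FLOW_RUN_ID.readResult(result);
        Object userId = AppToFlowColumn.USER_ID.readResult(result);
        return userId + "'s flow " + flowId + ", run " + flowRunId;
      }
    }
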


[33/50] [abbrv] hadoop git commit: HADOOP-15235. Authentication Tokens should use HMAC instead of MAC (rkanter)

Posted by ha...@apache.org.
HADOOP-15235. Authentication Tokens should use HMAC instead of MAC (rkanter)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/324e5a7c
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/324e5a7c
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/324e5a7c

Branch: refs/heads/HDFS-12996
Commit: 324e5a7cf2bdb6f93e7c6fd9023817528f243dcf
Parents: 84cea00
Author: Robert Kanter <rk...@apache.org>
Authored: Tue Feb 20 17:24:37 2018 -0800
Committer: Robert Kanter <rk...@apache.org>
Committed: Tue Feb 20 17:24:37 2018 -0800

----------------------------------------------------------------------
 .../security/authentication/util/Signer.java    | 22 +++++++++++++-------
 1 file changed, 14 insertions(+), 8 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/324e5a7c/hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/util/Signer.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/util/Signer.java b/hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/util/Signer.java
index aa63e40..e7b19a4 100644
--- a/hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/util/Signer.java
+++ b/hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/util/Signer.java
@@ -14,8 +14,11 @@
 package org.apache.hadoop.security.authentication.util;
 
 import org.apache.commons.codec.binary.Base64;
+import org.apache.commons.codec.binary.StringUtils;
 
-import java.nio.charset.Charset;
+import javax.crypto.Mac;
+import javax.crypto.spec.SecretKeySpec;
+import java.security.InvalidKeyException;
 import java.security.MessageDigest;
 import java.security.NoSuchAlgorithmException;
 
@@ -24,6 +27,7 @@ import java.security.NoSuchAlgorithmException;
  */
 public class Signer {
   private static final String SIGNATURE = "&s=";
+  private static final String SIGNING_ALGORITHM = "HmacSHA256";
 
   private SignerSecretProvider secretProvider;
 
@@ -86,25 +90,27 @@ public class Signer {
    */
   protected String computeSignature(byte[] secret, String str) {
     try {
-      MessageDigest md = MessageDigest.getInstance("SHA");
-      md.update(str.getBytes(Charset.forName("UTF-8")));
-      md.update(secret);
-      byte[] digest = md.digest();
-      return new Base64(0).encodeToString(digest);
-    } catch (NoSuchAlgorithmException ex) {
+      SecretKeySpec key = new SecretKeySpec((secret), SIGNING_ALGORITHM);
+      Mac mac = Mac.getInstance(SIGNING_ALGORITHM);
+      mac.init(key);
+      byte[] sig = mac.doFinal(StringUtils.getBytesUtf8(str));
+      return new Base64(0).encodeToString(sig);
+    } catch (NoSuchAlgorithmException | InvalidKeyException ex) {
       throw new RuntimeException("It should not happen, " + ex.getMessage(), ex);
     }
   }
 
   protected void checkSignatures(String rawValue, String originalSignature)
       throws SignerException {
+    byte[] originalSignatureBytes = StringUtils.getBytesUtf8(originalSignature);
     boolean isValid = false;
     byte[][] secrets = secretProvider.getAllSecrets();
     for (int i = 0; i < secrets.length; i++) {
       byte[] secret = secrets[i];
       if (secret != null) {
         String currentSignature = computeSignature(secret, rawValue);
-        if (originalSignature.equals(currentSignature)) {
+        if (MessageDigest.isEqual(originalSignatureBytes,
+            StringUtils.getBytesUtf8(currentSignature))) {
           isValid = true;
           break;
         }


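To make the change concrete: the old code signed with an unkeyed SHA digest of the cookie text followed by the secret, and compared signatures with String.equals, which can leak via timing how many leading characters matched. The new scheme is a keyed MAC (HmacSHA256) checked with MessageDigest.isEqual, which does not short-circuit on the first mismatching byte. A standalone sketch of that scheme; it uses java.util.Base64 instead of commons-codec, and the secret and payload values are illustrative:

    import java.nio.charset.StandardCharsets;
    import java.security.MessageDigest;
    import java.util.Base64;
    import javax.crypto.Mac;
    import javax.crypto.spec.SecretKeySpec;

    /** Standalone sketch of HMAC signing plus constant-time verification. */
    public final class HmacSketch {

      static String sign(byte[] secret, String payload) throws Exception {
        Mac mac = Mac.getInstance("HmacSHA256");
        mac.init(new SecretKeySpec(secret, "HmacSHA256"));
        return Base64.getEncoder()
            .encodeToString(mac.doFinal(payload.getBytes(StandardCharsets.UTF_8)));
      }

      public static void main(String[] args) throws Exception {
        byte[] secret = "demo-secret".getBytes(StandardCharsets.UTF_8); // illustrative
        String original = sign(secret, "u=alice&t=kerberos");
        String recomputed = sign(secret, "u=alice&t=kerberos");
        // Compare the UTF-8 bytes without short-circuiting on a mismatch.
        System.out.println(MessageDigest.isEqual(
            original.getBytes(StandardCharsets.UTF_8),
            recomputed.getBytes(StandardCharsets.UTF_8))); // true
      }
    }
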


[34/50] [abbrv] hadoop git commit: HADOOP-12897. KerberosAuthenticator.authenticate to include URL on IO failures. Contributed by Ajay Kumar.

Posted by ha...@apache.org.
HADOOP-12897. KerberosAuthenticator.authenticate to include URL on IO failures. Contributed by Ajay Kumar.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/b0d3c877
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/b0d3c877
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/b0d3c877

Branch: refs/heads/HDFS-12996
Commit: b0d3c877e30312820124cac2eff737fddac9e484
Parents: 324e5a7
Author: Arpit Agarwal <ar...@apache.org>
Authored: Tue Feb 20 18:18:58 2018 -0800
Committer: Arpit Agarwal <ar...@apache.org>
Committed: Tue Feb 20 18:18:58 2018 -0800

----------------------------------------------------------------------
 .../client/KerberosAuthenticator.java           | 80 +++++++++++++-------
 .../client/TestKerberosAuthenticator.java       | 29 +++++++
 .../hadoop/http/TestHttpServerWithSpengo.java   |  5 +-
 .../org/apache/hadoop/log/TestLogLevel.java     | 18 ++++-
 .../delegation/web/TestWebDelegationToken.java  |  4 +-
 5 files changed, 101 insertions(+), 35 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/b0d3c877/hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/client/KerberosAuthenticator.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/client/KerberosAuthenticator.java b/hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/client/KerberosAuthenticator.java
index 942d13c..64d4330 100644
--- a/hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/client/KerberosAuthenticator.java
+++ b/hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/client/KerberosAuthenticator.java
@@ -13,6 +13,8 @@
  */
 package org.apache.hadoop.security.authentication.client;
 
+import com.google.common.annotations.VisibleForTesting;
+import java.lang.reflect.Constructor;
 import org.apache.commons.codec.binary.Base64;
 import org.apache.hadoop.security.authentication.server.HttpConstants;
 import org.apache.hadoop.security.authentication.util.AuthToken;
@@ -177,41 +179,65 @@ public class KerberosAuthenticator implements Authenticator {
    */
   @Override
   public void authenticate(URL url, AuthenticatedURL.Token token)
-    throws IOException, AuthenticationException {
+      throws IOException, AuthenticationException {
     if (!token.isSet()) {
       this.url = url;
       base64 = new Base64(0);
-      HttpURLConnection conn = token.openConnection(url, connConfigurator);
-      conn.setRequestMethod(AUTH_HTTP_METHOD);
-      conn.connect();
-      
-      boolean needFallback = false;
-      if (conn.getResponseCode() == HttpURLConnection.HTTP_OK) {
-        LOG.debug("JDK performed authentication on our behalf.");
-        // If the JDK already did the SPNEGO back-and-forth for
-        // us, just pull out the token.
-        AuthenticatedURL.extractToken(conn, token);
-        if (isTokenKerberos(token)) {
-          return;
+      try {
+        HttpURLConnection conn = token.openConnection(url, connConfigurator);
+        conn.setRequestMethod(AUTH_HTTP_METHOD);
+        conn.connect();
+
+        boolean needFallback = false;
+        if (conn.getResponseCode() == HttpURLConnection.HTTP_OK) {
+          LOG.debug("JDK performed authentication on our behalf.");
+          // If the JDK already did the SPNEGO back-and-forth for
+          // us, just pull out the token.
+          AuthenticatedURL.extractToken(conn, token);
+          if (isTokenKerberos(token)) {
+            return;
+          }
+          needFallback = true;
         }
-        needFallback = true;
-      }
-      if (!needFallback && isNegotiate(conn)) {
-        LOG.debug("Performing our own SPNEGO sequence.");
-        doSpnegoSequence(token);
-      } else {
-        LOG.debug("Using fallback authenticator sequence.");
-        Authenticator auth = getFallBackAuthenticator();
-        // Make sure that the fallback authenticator has the same
-        // ConnectionConfigurator, since the method might be overridden.
-        // Otherwise the fallback authenticator might not have the information
-        // to make the connection (e.g., SSL certificates)
-        auth.setConnectionConfigurator(connConfigurator);
-        auth.authenticate(url, token);
+        if (!needFallback && isNegotiate(conn)) {
+          LOG.debug("Performing our own SPNEGO sequence.");
+          doSpnegoSequence(token);
+        } else {
+          LOG.debug("Using fallback authenticator sequence.");
+          Authenticator auth = getFallBackAuthenticator();
+          // Make sure that the fallback authenticator has the same
+          // ConnectionConfigurator, since the method might be overridden.
+          // Otherwise the fallback authenticator might not have the
+          // information to make the connection (e.g., SSL certificates)
+          auth.setConnectionConfigurator(connConfigurator);
+          auth.authenticate(url, token);
+        }
+      } catch (IOException ex){
+        throw wrapExceptionWithMessage(ex,
+            "Error while authenticating with endpoint: " + url);
+      } catch (AuthenticationException ex){
+        throw wrapExceptionWithMessage(ex,
+            "Error while authenticating with endpoint: " + url);
       }
     }
   }
 
+  @VisibleForTesting
+  static <T extends Exception> T wrapExceptionWithMessage(
+      T exception, String msg) {
+    Class<? extends Throwable> exceptionClass = exception.getClass();
+    try {
+      Constructor<? extends Throwable> ctor = exceptionClass
+          .getConstructor(String.class);
+      Throwable t = ctor.newInstance(msg);
+      return (T) (t.initCause(exception));
+    } catch (Throwable e) {
+      LOG.debug("Unable to wrap exception of type {}, it has "
+          + "no (String) constructor.", exceptionClass, e);
+      return exception;
+    }
+  }
+
   /**
    * If the specified URL does not support SPNEGO authentication, a fallback {@link Authenticator} will be used.
    * <p>
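
The wrapExceptionWithMessage helper added above keeps the caller-visible exception type intact: it reflectively looks for a (String) constructor on the concrete class, builds a new instance carrying the endpoint message, and chains the original as the cause; types without such a constructor are returned unchanged. A standalone sketch of that reflection pattern (names here are illustrative, not the Hadoop method):

    import java.lang.reflect.Constructor;

    public class WrapSketch {
      // Rewrap 'cause' in a new exception of the same concrete type with
      // message 'msg', preserving the original as the cause. Types without
      // a (String) constructor come back unchanged.
      @SuppressWarnings("unchecked")
      static <T extends Exception> T wrap(T cause, String msg) {
        try {
          Constructor<? extends Exception> ctor =
              cause.getClass().getConstructor(String.class);
          Exception wrapped = ctor.newInstance(msg);
          return (T) wrapped.initCause(cause);
        } catch (ReflectiveOperationException e) {
          // e.g. java.nio.charset.CharacterCodingException has no such ctor
          return cause;
        }
      }

      public static void main(String[] args) {
        java.io.IOException ex = wrap(
            new java.io.IOException("connection reset"),
            "Error while authenticating with endpoint: https://example:50470");
        System.out.println(ex.getMessage());            // endpoint message
        System.out.println(ex.getCause().getMessage()); // connection reset
      }
    }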

http://git-wip-us.apache.org/repos/asf/hadoop/blob/b0d3c877/hadoop-common-project/hadoop-auth/src/test/java/org/apache/hadoop/security/authentication/client/TestKerberosAuthenticator.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-auth/src/test/java/org/apache/hadoop/security/authentication/client/TestKerberosAuthenticator.java b/hadoop-common-project/hadoop-auth/src/test/java/org/apache/hadoop/security/authentication/client/TestKerberosAuthenticator.java
index 7db53ba..4aabb34 100644
--- a/hadoop-common-project/hadoop-auth/src/test/java/org/apache/hadoop/security/authentication/client/TestKerberosAuthenticator.java
+++ b/hadoop-common-project/hadoop-auth/src/test/java/org/apache/hadoop/security/authentication/client/TestKerberosAuthenticator.java
@@ -20,6 +20,9 @@ import static org.apache.hadoop.security.authentication.server.KerberosAuthentic
 import static org.apache.hadoop.security.authentication.server.KerberosAuthenticationHandler.KEYTAB;
 import static org.apache.hadoop.security.authentication.server.KerberosAuthenticationHandler.NAME_RULES;
 
+import java.io.IOException;
+import java.nio.charset.CharacterCodingException;
+import javax.security.sasl.AuthenticationException;
 import org.apache.hadoop.minikdc.KerberosSecurityTestcase;
 import org.apache.hadoop.security.authentication.KerberosTestUtils;
 import org.apache.hadoop.security.authentication.server.AuthenticationFilter;
@@ -218,4 +221,30 @@ public class TestKerberosAuthenticator extends KerberosSecurityTestcase {
     });
   }
 
+  @Test(timeout = 60000)
+  public void testWrapExceptionWithMessage() {
+    IOException ex;
+    ex = new IOException("Induced exception");
+    ex = KerberosAuthenticator.wrapExceptionWithMessage(ex, "Error while "
+        + "authenticating with endpoint: localhost");
+    Assert.assertEquals("Induced exception", ex.getCause().getMessage());
+    Assert.assertEquals("Error while authenticating with endpoint: localhost",
+        ex.getMessage());
+
+    ex = new AuthenticationException("Auth exception");
+    ex = KerberosAuthenticator.wrapExceptionWithMessage(ex, "Error while "
+        + "authenticating with endpoint: localhost");
+    Assert.assertEquals("Auth exception", ex.getCause().getMessage());
+    Assert.assertEquals("Error while authenticating with endpoint: localhost",
+        ex.getMessage());
+
+    // Test for an Exception with no (String) constructor: the helper
+    // logs the failure and returns the original exception unchanged
+    ex = new CharacterCodingException();
+    Exception ex2 = KerberosAuthenticator.wrapExceptionWithMessage(ex,
+        "Error while authenticating with endpoint: localhost");
+    Assert.assertTrue(ex instanceof CharacterCodingException);
+    Assert.assertTrue(ex.equals(ex2));
+  }
+
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/b0d3c877/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/http/TestHttpServerWithSpengo.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/http/TestHttpServerWithSpengo.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/http/TestHttpServerWithSpengo.java
index 8f5dd04..e1d7302 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/http/TestHttpServerWithSpengo.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/http/TestHttpServerWithSpengo.java
@@ -383,8 +383,9 @@ public class TestHttpServerWithSpengo {
           Assert.fail("should fail with no credentials");
         } catch (AuthenticationException ae) {
           Assert.assertNotNull(ae.getCause());
-          Assert.assertEquals(GSSException.class, ae.getCause().getClass());
-          GSSException gsse = (GSSException)ae.getCause();
+          Assert.assertEquals(GSSException.class,
+              ae.getCause().getCause().getClass());
+          GSSException gsse = (GSSException)ae.getCause().getCause();
           Assert.assertEquals(GSSException.NO_CRED, gsse.getMajor());
         } catch (Throwable t) {
           Assert.fail("Unexpected exception" + t);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/b0d3c877/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/log/TestLogLevel.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/log/TestLogLevel.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/log/TestLogLevel.java
index 30bf726..16b4071 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/log/TestLogLevel.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/log/TestLogLevel.java
@@ -356,7 +356,10 @@ public class TestLogLevel extends KerberosSecurityTestcase {
       fail("A HTTPS Client should not have succeeded in connecting to a " +
           "HTTP server");
     } catch (SSLException e) {
-      GenericTestUtils.assertExceptionContains("Unrecognized SSL message", e);
+      GenericTestUtils.assertExceptionContains("Error while authenticating "
+          + "with endpoint", e);
+      GenericTestUtils.assertExceptionContains("Unrecognized SSL message", e
+          .getCause());
     }
   }
 
@@ -374,7 +377,10 @@ public class TestLogLevel extends KerberosSecurityTestcase {
       fail("A HTTPS Client should not have succeeded in connecting to a " +
           "HTTP server");
     } catch (SSLException e) {
-      GenericTestUtils.assertExceptionContains("Unrecognized SSL message", e);
+      GenericTestUtils.assertExceptionContains("Error while authenticating "
+          + "with endpoint", e);
+      GenericTestUtils.assertExceptionContains("Unrecognized SSL message", e
+          .getCause());
     }
   }
 
@@ -393,8 +399,10 @@ public class TestLogLevel extends KerberosSecurityTestcase {
       fail("A HTTP Client should not have succeeded in connecting to a " +
           "HTTPS server");
     } catch (SocketException e) {
+      GenericTestUtils.assertExceptionContains("Error while authenticating "
+          + "with endpoint", e);
       GenericTestUtils.assertExceptionContains(
-          "Unexpected end of file from server", e);
+          "Unexpected end of file from server", e.getCause());
     }
   }
 
@@ -413,8 +421,10 @@ public class TestLogLevel extends KerberosSecurityTestcase {
       fail("A HTTP Client should not have succeeded in connecting to a " +
           "HTTPS server");
     }  catch (SocketException e) {
+      GenericTestUtils.assertExceptionContains("Error while authenticating "
+          + "with endpoint", e);
       GenericTestUtils.assertExceptionContains(
-          "Unexpected end of file from server", e);
+          "Unexpected end of file from server", e.getCause());
     }
   }
 }
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hadoop/blob/b0d3c877/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/token/delegation/web/TestWebDelegationToken.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/token/delegation/web/TestWebDelegationToken.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/token/delegation/web/TestWebDelegationToken.java
index c564b97..1fcc6fa 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/token/delegation/web/TestWebDelegationToken.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/token/delegation/web/TestWebDelegationToken.java
@@ -364,7 +364,7 @@ public class TestWebDelegationToken {
         aUrl.getDelegationToken(nonAuthURL, token, FOO_USER);
         Assert.fail();
       } catch (Exception ex) {
-        Assert.assertTrue(ex.getMessage().contains("401"));
+        Assert.assertTrue(ex.getCause().getMessage().contains("401"));
       }
 
       aUrl.getDelegationToken(authURL, token, FOO_USER);
@@ -776,7 +776,7 @@ public class TestWebDelegationToken {
         aUrl.getDelegationToken(url, token, FOO_USER, doAsUser);
         Assert.fail();
       } catch (AuthenticationException ex) {
-        Assert.assertTrue(ex.getMessage().contains("GSSException"));
+        Assert.assertTrue(ex.getCause().getMessage().contains("GSSException"));
       }
 
       doAsKerberosUser("client", keytabFile.getAbsolutePath(),



[06/50] [abbrv] hadoop git commit: YARN-7919. Refactor timelineservice-hbase module into submodules. Contributed by Haibo Chen.

Posted by ha...@apache.org.
http://git-wip-us.apache.org/repos/asf/hadoop/blob/9af30d46/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/entity/EntityColumnPrefix.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/entity/EntityColumnPrefix.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/entity/EntityColumnPrefix.java
deleted file mode 100644
index d385108..0000000
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/entity/EntityColumnPrefix.java
+++ /dev/null
@@ -1,249 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.yarn.server.timelineservice.storage.entity;
-
-import java.io.IOException;
-import java.util.Map;
-import java.util.NavigableMap;
-
-import org.apache.hadoop.hbase.client.Result;
-import org.apache.hadoop.hbase.util.Bytes;
-import org.apache.hadoop.yarn.server.timelineservice.storage.common.ColumnFamily;
-import org.apache.hadoop.yarn.server.timelineservice.storage.common.ColumnHelper;
-import org.apache.hadoop.yarn.server.timelineservice.storage.common.ColumnPrefix;
-import org.apache.hadoop.yarn.server.timelineservice.storage.common.GenericConverter;
-import org.apache.hadoop.yarn.server.timelineservice.storage.common.KeyConverter;
-import org.apache.hadoop.yarn.server.timelineservice.storage.common.LongConverter;
-import org.apache.hadoop.yarn.server.timelineservice.storage.common.Separator;
-import org.apache.hadoop.yarn.server.timelineservice.storage.common.TypedBufferedMutator;
-import org.apache.hadoop.yarn.server.timelineservice.storage.common.ValueConverter;
-import org.apache.hadoop.yarn.server.timelineservice.storage.flow.Attribute;
-
-/**
- * Identifies partially qualified columns for the entity table.
- */
-public enum EntityColumnPrefix implements ColumnPrefix<EntityTable> {
-
-  /**
-   * To store TimelineEntity getIsRelatedToEntities values.
-   */
-  IS_RELATED_TO(EntityColumnFamily.INFO, "s"),
-
-  /**
-   * To store TimelineEntity getRelatesToEntities values.
-   */
-  RELATES_TO(EntityColumnFamily.INFO, "r"),
-
-  /**
-   * To store TimelineEntity info values.
-   */
-  INFO(EntityColumnFamily.INFO, "i"),
-
-  /**
-   * Lifecycle events for an entity.
-   */
-  EVENT(EntityColumnFamily.INFO, "e", true),
-
-  /**
-   * Config column stores configuration with config key as the column name.
-   */
-  CONFIG(EntityColumnFamily.CONFIGS, null),
-
-  /**
-   * Metrics are stored with the metric name as the column name.
-   */
-  METRIC(EntityColumnFamily.METRICS, null, new LongConverter());
-
-  private final ColumnHelper<EntityTable> column;
-  private final ColumnFamily<EntityTable> columnFamily;
-
-  /**
-   * Can be null for those cases where the provided column qualifier is the
-   * entire column name.
-   */
-  private final String columnPrefix;
-  private final byte[] columnPrefixBytes;
-
-  /**
-   * Private constructor, meant to be used by the enum definition.
-   *
-   * @param columnFamily that this column is stored in.
-   * @param columnPrefix for this column.
-   */
-  EntityColumnPrefix(ColumnFamily<EntityTable> columnFamily,
-      String columnPrefix) {
-    this(columnFamily, columnPrefix, false, GenericConverter.getInstance());
-  }
-
-  EntityColumnPrefix(ColumnFamily<EntityTable> columnFamily,
-      String columnPrefix, boolean compoundColQual) {
-    this(columnFamily, columnPrefix, compoundColQual,
-        GenericConverter.getInstance());
-  }
-
-  EntityColumnPrefix(ColumnFamily<EntityTable> columnFamily,
-      String columnPrefix, ValueConverter converter) {
-    this(columnFamily, columnPrefix, false, converter);
-  }
-
-  /**
-   * Private constructor, meant to be used by the enum definition.
-   *
-   * @param columnFamily that this column is stored in.
-   * @param columnPrefix for this column.
-   * @param converter used to encode/decode values to be stored in HBase for
-   * this column prefix.
-   */
-  EntityColumnPrefix(ColumnFamily<EntityTable> columnFamily,
-      String columnPrefix, boolean compoundColQual, ValueConverter converter) {
-    column = new ColumnHelper<EntityTable>(columnFamily, converter);
-    this.columnFamily = columnFamily;
-    this.columnPrefix = columnPrefix;
-    if (columnPrefix == null) {
-      this.columnPrefixBytes = null;
-    } else {
-      // Future-proof by ensuring the right column prefix hygiene.
-      this.columnPrefixBytes =
-          Bytes.toBytes(Separator.SPACE.encode(columnPrefix));
-    }
-  }
-
-  /**
-   * @return the column name value
-   */
-  public String getColumnPrefix() {
-    return columnPrefix;
-  }
-
-  @Override
-  public byte[] getColumnPrefixBytes(byte[] qualifierPrefix) {
-    return ColumnHelper.getColumnQualifier(
-        this.columnPrefixBytes, qualifierPrefix);
-  }
-
-  @Override
-  public byte[] getColumnPrefixBytes(String qualifierPrefix) {
-    return ColumnHelper.getColumnQualifier(
-        this.columnPrefixBytes, qualifierPrefix);
-  }
-
-  @Override
-  public byte[] getColumnFamilyBytes() {
-    return columnFamily.getBytes();
-  }
-
-  @Override
-  public ValueConverter getValueConverter() {
-    return column.getValueConverter();
-  }
-
-  /*
-   * (non-Javadoc)
-   *
-   * @see
-   * org.apache.hadoop.yarn.server.timelineservice.storage.common.ColumnPrefix
-   * #store(byte[],
-   * org.apache.hadoop.yarn.server.timelineservice.storage.common.
-   * TypedBufferedMutator, java.lang.String, java.lang.Long, java.lang.Object,
-   * org.apache.hadoop.yarn.server.timelineservice.storage.flow.Attribute[])
-   */
-  public void store(byte[] rowKey,
-      TypedBufferedMutator<EntityTable> tableMutator, String qualifier,
-      Long timestamp, Object inputValue, Attribute... attributes)
-      throws IOException {
-
-    // Null check
-    if (qualifier == null) {
-      throw new IOException("Cannot store column with null qualifier in "
-          + tableMutator.getName().getNameAsString());
-    }
-
-    byte[] columnQualifier = getColumnPrefixBytes(qualifier);
-
-    column.store(rowKey, tableMutator, columnQualifier, timestamp, inputValue,
-        attributes);
-  }
-
-  /*
-   * (non-Javadoc)
-   *
-   * @see
-   * org.apache.hadoop.yarn.server.timelineservice.storage.common.ColumnPrefix
-   * #store(byte[],
-   * org.apache.hadoop.yarn.server.timelineservice.storage.common.
-   * TypedBufferedMutator, java.lang.String, java.lang.Long, java.lang.Object)
-   */
-  public void store(byte[] rowKey,
-      TypedBufferedMutator<EntityTable> tableMutator, byte[] qualifier,
-      Long timestamp, Object inputValue, Attribute... attributes)
-      throws IOException {
-
-    // Null check
-    if (qualifier == null) {
-      throw new IOException("Cannot store column with null qualifier in "
-          + tableMutator.getName().getNameAsString());
-    }
-
-    byte[] columnQualifier = getColumnPrefixBytes(qualifier);
-
-    column.store(rowKey, tableMutator, columnQualifier, timestamp, inputValue,
-        attributes);
-  }
-
-  /*
-   * (non-Javadoc)
-   *
-   * @see
-   * org.apache.hadoop.yarn.server.timelineservice.storage.common.ColumnPrefix
-   * #readResult(org.apache.hadoop.hbase.client.Result, java.lang.String)
-   */
-  public Object readResult(Result result, String qualifier) throws IOException {
-    byte[] columnQualifier =
-        ColumnHelper.getColumnQualifier(this.columnPrefixBytes, qualifier);
-    return column.readResult(result, columnQualifier);
-  }
-
-  /*
-   * (non-Javadoc)
-   *
-   * @see
-   * org.apache.hadoop.yarn.server.timelineservice.storage.common.ColumnPrefix
-   * #readResults(org.apache.hadoop.hbase.client.Result,
-   * org.apache.hadoop.yarn.server.timelineservice.storage.common.KeyConverter)
-   */
-  public <K> Map<K, Object> readResults(Result result,
-      KeyConverter<K> keyConverter) throws IOException {
-    return column.readResults(result, columnPrefixBytes, keyConverter);
-  }
-
-  /*
-   * (non-Javadoc)
-   *
-   * @see
-   * org.apache.hadoop.yarn.server.timelineservice.storage.common.ColumnPrefix
-   * #readResultsWithTimestamps(org.apache.hadoop.hbase.client.Result,
-   * org.apache.hadoop.yarn.server.timelineservice.storage.common.KeyConverter)
-   */
-  public <K, V> NavigableMap<K, NavigableMap<Long, V>>
-      readResultsWithTimestamps(Result result, KeyConverter<K> keyConverter)
-      throws IOException {
-    return column.readResultsWithTimestamps(result, columnPrefixBytes,
-        keyConverter);
-  }
-
-}
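
This file (and the others below) is deleted here only because YARN-7919 relocates it into a submodule, but the pattern it implements is worth a gloss: each logical field is stored under a one-letter, column-family-local prefix ("i", "r", "s", "e", ...) joined to the user-visible key, so all cells of one kind sort together within the row. A rough sketch of prefix-joined qualifiers, assuming '!' as the separator byte (the real code goes through the Separator utility, which also escapes):

    import java.nio.charset.StandardCharsets;

    public class QualifierSketch {
      private static final byte SEP = '!'; // stand-in for Separator.QUALIFIERS

      // qualifier = prefix + SEP + key, e.g. "i" + "!" + "infoKey"
      static byte[] qualifier(String prefix, String key) {
        byte[] p = prefix.getBytes(StandardCharsets.UTF_8);
        byte[] k = key.getBytes(StandardCharsets.UTF_8);
        byte[] out = new byte[p.length + 1 + k.length];
        System.arraycopy(p, 0, out, 0, p.length);
        out[p.length] = SEP;
        System.arraycopy(k, 0, out, p.length + 1, k.length);
        return out;
      }

      public static void main(String[] args) {
        System.out.println(new String(qualifier("i", "infoKey"),
            StandardCharsets.UTF_8)); // i!infoKey
      }
    }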

http://git-wip-us.apache.org/repos/asf/hadoop/blob/9af30d46/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/entity/EntityRowKey.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/entity/EntityRowKey.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/entity/EntityRowKey.java
deleted file mode 100644
index b85a9b0..0000000
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/entity/EntityRowKey.java
+++ /dev/null
@@ -1,299 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.yarn.server.timelineservice.storage.entity;
-
-import java.util.List;
-
-import org.apache.hadoop.hbase.util.Bytes;
-import org.apache.hadoop.yarn.server.timelineservice.reader.TimelineReaderUtils;
-import org.apache.hadoop.yarn.server.timelineservice.storage.common.AppIdKeyConverter;
-import org.apache.hadoop.yarn.server.timelineservice.storage.common.KeyConverter;
-import org.apache.hadoop.yarn.server.timelineservice.storage.common.KeyConverterToString;
-import org.apache.hadoop.yarn.server.timelineservice.storage.common.LongConverter;
-import org.apache.hadoop.yarn.server.timelineservice.storage.common.Separator;
-
-/**
- * Represents a rowkey for the entity table.
- */
-public class EntityRowKey {
-  private final String clusterId;
-  private final String userId;
-  private final String flowName;
-  private final Long flowRunId;
-  private final String appId;
-  private final String entityType;
-  private final Long entityIdPrefix;
-  private final String entityId;
-  private final EntityRowKeyConverter entityRowKeyConverter =
-      new EntityRowKeyConverter();
-
-  public EntityRowKey(String clusterId, String userId, String flowName,
-      Long flowRunId, String appId, String entityType, Long entityIdPrefix,
-      String entityId) {
-    this.clusterId = clusterId;
-    this.userId = userId;
-    this.flowName = flowName;
-    this.flowRunId = flowRunId;
-    this.appId = appId;
-    this.entityType = entityType;
-    this.entityIdPrefix = entityIdPrefix;
-    this.entityId = entityId;
-  }
-
-  public String getClusterId() {
-    return clusterId;
-  }
-
-  public String getUserId() {
-    return userId;
-  }
-
-  public String getFlowName() {
-    return flowName;
-  }
-
-  public Long getFlowRunId() {
-    return flowRunId;
-  }
-
-  public String getAppId() {
-    return appId;
-  }
-
-  public String getEntityType() {
-    return entityType;
-  }
-
-  public String getEntityId() {
-    return entityId;
-  }
-
-  public Long getEntityIdPrefix() {
-    return entityIdPrefix;
-  }
-
-  /**
-   * Constructs a row key for the entity table as follows:
-   * {@code userName!clusterId!flowName!flowRunId!AppId!entityType!entityId}.
-   * Typically used while querying a specific entity.
-   *
-   * @return byte array with the row key.
-   */
-  public byte[] getRowKey() {
-    return entityRowKeyConverter.encode(this);
-  }
-
-  /**
-   * Given the raw row key as bytes, returns the row key as an object.
-   * @param rowKey byte representation of row key.
-   * @return An <cite>EntityRowKey</cite> object.
-   */
-  public static EntityRowKey parseRowKey(byte[] rowKey) {
-    return new EntityRowKeyConverter().decode(rowKey);
-  }
-
-  /**
-   * Constructs a row key for the entity table as follows:
-   * <p>
-   * {@code userName!clusterId!flowName!flowRunId!AppId!
-   * entityType!entityIdPrefix!entityId}.
-   * </p>
-   * @return String representation of row key.
-   */
-  public String getRowKeyAsString() {
-    return entityRowKeyConverter.encodeAsString(this);
-  }
-
-  /**
-   * Given the encoded row key as string, returns the row key as an object.
-   * @param encodedRowKey String representation of row key.
-   * @return A <cite>EntityRowKey</cite> object.
-   */
-  public static EntityRowKey parseRowKeyFromString(String encodedRowKey) {
-    return new EntityRowKeyConverter().decodeFromString(encodedRowKey);
-  }
-
-  /**
-   * Encodes and decodes row key for entity table. The row key is of the form :
-   * userName!clusterId!flowName!flowRunId!appId!entityType!entityId. flowRunId
-   * is a long, appId is encoded/decoded using {@link AppIdKeyConverter} and
-   * rest are strings.
-   * <p>
-   */
-  final private static class EntityRowKeyConverter implements
-      KeyConverter<EntityRowKey>, KeyConverterToString<EntityRowKey> {
-
-    private final AppIdKeyConverter appIDKeyConverter = new AppIdKeyConverter();
-
-    private EntityRowKeyConverter() {
-    }
-
-    /**
-     * Entity row key is of the form
-     * userName!clusterId!flowName!flowRunId!appId!entityType!entityId with each
-     * segment separated by !. The sizes below indicate sizes of each one of
-     * these segments in sequence. clusterId, userName, flowName, entityType and
-     * entityId are strings. flowRunId is a long hence 8 bytes in size. App id
-     * is represented as 12 bytes with cluster timestamp part of appId being 8
-     * bytes (long) and seq id being 4 bytes (int). Strings are variable in size
-     * (i.e. end whenever separator is encountered). This is used while decoding
-     * and helps in determining where to split.
-     */
-    private static final int[] SEGMENT_SIZES = {Separator.VARIABLE_SIZE,
-        Separator.VARIABLE_SIZE, Separator.VARIABLE_SIZE, Bytes.SIZEOF_LONG,
-        AppIdKeyConverter.getKeySize(), Separator.VARIABLE_SIZE,
-        Bytes.SIZEOF_LONG, Separator.VARIABLE_SIZE };
-
-    /*
-     * (non-Javadoc)
-     *
-     * Encodes EntityRowKey object into a byte array with each component/field
-     * in EntityRowKey separated by Separator#QUALIFIERS. This leads to an
-     * entity table row key of the form
-     * userName!clusterId!flowName!flowRunId!appId!entityType!entityId If
-     * entityType in passed EntityRowKey object is null (and the fields
-     * preceding it i.e. clusterId, userId and flowName, flowRunId and appId
-     * are not null), this returns a row key prefix of the form
-     * userName!clusterId!flowName!flowRunId!appId! and if entityId in
-     * EntityRowKey is null (other 6 components are not null), this returns a
-     * row key prefix of the form
-     * userName!clusterId!flowName!flowRunId!appId!entityType! flowRunId is
-     * inverted while encoding as it helps maintain a descending order for row
-     * keys in entity table.
-     *
-     * @see org.apache.hadoop.yarn.server.timelineservice.storage.common
-     * .KeyConverter#encode(java.lang.Object)
-     */
-    @Override
-    public byte[] encode(EntityRowKey rowKey) {
-      byte[] user =
-          Separator.encode(rowKey.getUserId(), Separator.SPACE, Separator.TAB,
-              Separator.QUALIFIERS);
-      byte[] cluster =
-          Separator.encode(rowKey.getClusterId(), Separator.SPACE,
-              Separator.TAB, Separator.QUALIFIERS);
-      byte[] flow =
-          Separator.encode(rowKey.getFlowName(), Separator.SPACE,
-              Separator.TAB, Separator.QUALIFIERS);
-      byte[] first = Separator.QUALIFIERS.join(user, cluster, flow);
-      // Note that flowRunId is a long, so we can't encode them all at the same
-      // time.
-      byte[] second =
-          Bytes.toBytes(LongConverter.invertLong(rowKey.getFlowRunId()));
-      byte[] third = appIDKeyConverter.encode(rowKey.getAppId());
-      if (rowKey.getEntityType() == null) {
-        return Separator.QUALIFIERS.join(first, second, third,
-            Separator.EMPTY_BYTES);
-      }
-      byte[] entityType =
-          Separator.encode(rowKey.getEntityType(), Separator.SPACE,
-              Separator.TAB, Separator.QUALIFIERS);
-
-      if (rowKey.getEntityIdPrefix() == null) {
-        return Separator.QUALIFIERS.join(first, second, third, entityType,
-            Separator.EMPTY_BYTES);
-      }
-
-      byte[] entityIdPrefix = Bytes.toBytes(rowKey.getEntityIdPrefix());
-
-      if (rowKey.getEntityId() == null) {
-        return Separator.QUALIFIERS.join(first, second, third, entityType,
-            entityIdPrefix, Separator.EMPTY_BYTES);
-      }
-
-      byte[] entityId = Separator.encode(rowKey.getEntityId(), Separator.SPACE,
-          Separator.TAB, Separator.QUALIFIERS);
-
-      byte[] fourth =
-          Separator.QUALIFIERS.join(entityType, entityIdPrefix, entityId);
-
-      return Separator.QUALIFIERS.join(first, second, third, fourth);
-    }
-
-    /*
-     * (non-Javadoc)
-     *
-     * Decodes an application row key of the form
-     * userName!clusterId!flowName!flowRunId!appId!entityType!entityId
-     * represented in byte format and converts it into an EntityRowKey object.
-     * flowRunId is inverted while decoding as it was inverted while encoding.
-     *
-     * @see
-     * org.apache.hadoop.yarn.server.timelineservice.storage.common
-     * .KeyConverter#decode(byte[])
-     */
-    @Override
-    public EntityRowKey decode(byte[] rowKey) {
-      byte[][] rowKeyComponents =
-          Separator.QUALIFIERS.split(rowKey, SEGMENT_SIZES);
-      if (rowKeyComponents.length != 8) {
-        throw new IllegalArgumentException("the row key is not valid for "
-            + "an entity");
-      }
-      String userId =
-          Separator.decode(Bytes.toString(rowKeyComponents[0]),
-              Separator.QUALIFIERS, Separator.TAB, Separator.SPACE);
-      String clusterId =
-          Separator.decode(Bytes.toString(rowKeyComponents[1]),
-              Separator.QUALIFIERS, Separator.TAB, Separator.SPACE);
-      String flowName =
-          Separator.decode(Bytes.toString(rowKeyComponents[2]),
-              Separator.QUALIFIERS, Separator.TAB, Separator.SPACE);
-      Long flowRunId =
-          LongConverter.invertLong(Bytes.toLong(rowKeyComponents[3]));
-      String appId = appIDKeyConverter.decode(rowKeyComponents[4]);
-      String entityType =
-          Separator.decode(Bytes.toString(rowKeyComponents[5]),
-              Separator.QUALIFIERS, Separator.TAB, Separator.SPACE);
-
-      Long entityPrefixId = Bytes.toLong(rowKeyComponents[6]);
-
-      String entityId =
-          Separator.decode(Bytes.toString(rowKeyComponents[7]),
-              Separator.QUALIFIERS, Separator.TAB, Separator.SPACE);
-      return new EntityRowKey(clusterId, userId, flowName, flowRunId, appId,
-          entityType, entityPrefixId, entityId);
-    }
-
-    @Override
-    public String encodeAsString(EntityRowKey key) {
-      if (key.clusterId == null || key.userId == null || key.flowName == null
-          || key.flowRunId == null || key.appId == null
-          || key.entityType == null || key.entityIdPrefix == null
-          || key.entityId == null) {
-        throw new IllegalArgumentException();
-      }
-      return TimelineReaderUtils
-          .joinAndEscapeStrings(new String[] {key.clusterId, key.userId,
-              key.flowName, key.flowRunId.toString(), key.appId, key.entityType,
-              key.entityIdPrefix.toString(), key.entityId});
-    }
-
-    @Override
-    public EntityRowKey decodeFromString(String encodedRowKey) {
-      List<String> split = TimelineReaderUtils.split(encodedRowKey);
-      if (split == null || split.size() != 8) {
-        throw new IllegalArgumentException("Invalid row key for entity table.");
-      }
-      Long flowRunId = Long.valueOf(split.get(3));
-      Long entityIdPrefix = Long.valueOf(split.get(6));
-      return new EntityRowKey(split.get(0), split.get(1), split.get(2),
-          flowRunId, split.get(4), split.get(5), entityIdPrefix, split.get(7));
-    }
-  }
-}
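
One encoding detail above is easy to miss: flowRunId is stored inverted (the code calls LongConverter.invertLong while encoding and decoding) so that HBase's ascending lexicographic row order yields newest-run-first scans. A small sketch of why the usual Long.MAX_VALUE - id inversion achieves that for non-negative ids; the helper names and the assumption about invertLong's body are mine:

    public class InvertedKeySketch {
      // Assumed inversion: larger (newer) ids map to smaller values,
      // so their big-endian bytes sort first.
      static long invert(long v) {
        return Long.MAX_VALUE - v;
      }

      // Big-endian encoding, like org.apache.hadoop.hbase.util.Bytes.toBytes(long).
      static byte[] toBytes(long v) {
        byte[] b = new byte[8];
        for (int i = 7; i >= 0; i--) { b[i] = (byte) v; v >>>= 8; }
        return b;
      }

      // Unsigned lexicographic comparison, which is how HBase orders row keys.
      static int compareUnsigned(byte[] x, byte[] y) {
        for (int i = 0; i < x.length; i++) {
          int c = (x[i] & 0xff) - (y[i] & 0xff);
          if (c != 0) { return c; }
        }
        return 0;
      }

      public static void main(String[] args) {
        byte[] older = toBytes(invert(1000L));
        byte[] newer = toBytes(invert(2000L));
        System.out.println(compareUnsigned(newer, older) < 0); // true: newest first
      }
    }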

http://git-wip-us.apache.org/repos/asf/hadoop/blob/9af30d46/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/entity/EntityRowKeyPrefix.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/entity/EntityRowKeyPrefix.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/entity/EntityRowKeyPrefix.java
deleted file mode 100644
index 47a1789..0000000
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/entity/EntityRowKeyPrefix.java
+++ /dev/null
@@ -1,77 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.yarn.server.timelineservice.storage.entity;
-
-import org.apache.hadoop.yarn.server.timelineservice.storage.common.RowKeyPrefix;
-
-/**
- * Represents a partial rowkey without the entityId or without entityType and
- * entityId for the entity table.
- *
- */
-public class EntityRowKeyPrefix extends EntityRowKey implements
-    RowKeyPrefix<EntityRowKey> {
-
-  /**
-   * Creates a prefix which generates the following rowKeyPrefixes for the
-   * entity table:
-   * {@code userName!clusterId!flowName!flowRunId!AppId!entityType!}.
-   * @param clusterId identifying the cluster
-   * @param userId identifying the user
-   * @param flowName identifying the flow
-   * @param flowRunId identifying the individual run of this flow
-   * @param appId identifying the application
-   * @param entityType which entity type
-   * @param entityIdPrefix for entityId
-   * @param entityId for an entity
-   */
-  public EntityRowKeyPrefix(String clusterId, String userId, String flowName,
-      Long flowRunId, String appId, String entityType, Long entityIdPrefix,
-      String entityId) {
-    super(clusterId, userId, flowName, flowRunId, appId, entityType,
-        entityIdPrefix, entityId);
-  }
-
-  /**
-   * Creates a prefix which generates the following rowKeyPrefixes for the
-   * entity table:
-   * {@code userName!clusterId!flowName!flowRunId!AppId!}.
-   *
-   * @param clusterId identifying the cluster
-   * @param userId identifying the user
-   * @param flowName identifying the flow
-   * @param flowRunId identifying the individual run of this flow
-   * @param appId identifying the application
-   */
-  public EntityRowKeyPrefix(String clusterId, String userId, String flowName,
-      Long flowRunId, String appId) {
-    this(clusterId, userId, flowName, flowRunId, appId, null, null, null);
-  }
-
-  /*
-   * (non-Javadoc)
-   *
-   * @see
-   * org.apache.hadoop.yarn.server.timelineservice.storage.application.
-   * RowKeyPrefix#getRowKeyPrefix()
-   */
-  public byte[] getRowKeyPrefix() {
-    return super.getRowKey();
-  }
-
-}
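
The point of a row-key prefix class like the one above is to drive range scans: every entity of an application (optionally narrowed by type) shares the same leading bytes. A hedged sketch of how such a prefix is typically consumed, assuming an already-open HBase Connection and the default table name defined in EntityTable; the method name is mine:

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.Result;
    import org.apache.hadoop.hbase.client.ResultScanner;
    import org.apache.hadoop.hbase.client.Scan;
    import org.apache.hadoop.hbase.client.Table;

    public class PrefixScanSketch {
      // rowKeyPrefix would come from, e.g.,
      // new EntityRowKeyPrefix(cluster, user, flow, runId, appId).getRowKeyPrefix()
      static void scanEntities(Connection conn, byte[] rowKeyPrefix) throws Exception {
        Scan scan = new Scan().setRowPrefixFilter(rowKeyPrefix);
        try (Table table =
                 conn.getTable(TableName.valueOf("timelineservice.entity"));
             ResultScanner scanner = table.getScanner(scan)) {
          for (Result r : scanner) {
            System.out.println(r.getRow().length + "-byte row key matched");
          }
        }
      }
    }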

http://git-wip-us.apache.org/repos/asf/hadoop/blob/9af30d46/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/entity/EntityTable.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/entity/EntityTable.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/entity/EntityTable.java
deleted file mode 100644
index 988bba2..0000000
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/entity/EntityTable.java
+++ /dev/null
@@ -1,170 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.yarn.server.timelineservice.storage.entity;
-
-import java.io.IOException;
-
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hbase.HColumnDescriptor;
-import org.apache.hadoop.hbase.HTableDescriptor;
-import org.apache.hadoop.hbase.TableName;
-import org.apache.hadoop.hbase.client.Admin;
-import org.apache.hadoop.hbase.regionserver.BloomType;
-import org.apache.hadoop.yarn.conf.YarnConfiguration;
-import org.apache.hadoop.yarn.server.timelineservice.storage.common.BaseTable;
-import org.apache.hadoop.yarn.server.timelineservice.storage.common.TimelineHBaseSchemaConstants;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-/**
- * The entity table has column families info, config and metrics. Info stores
- * information about a timeline entity object; config stores configuration data
- * of a timeline entity object; metrics stores the metrics of a timeline
- * entity object.
- *
- * Example entity table record:
- *
- * <pre>
- * |-------------------------------------------------------------------------|
- * |  Row       | Column Family                | Column Family| Column Family|
- * |  key       | info                         | metrics      | config       |
- * |-------------------------------------------------------------------------|
- * | userName!  | id:entityId                  | metricId1:   | configKey1:  |
- * | clusterId! |                              | metricValue1 | configValue1 |
- * | flowName!  | type:entityType              | @timestamp1  |              |
- * | flowRunId! |                              |              | configKey2:  |
- * | AppId!     | created_time:                | metricId1:   | configValue2 |
- * | entityType!| 1392993084018                | metricValue2 |              |
- * | idPrefix!  |                              | @timestamp2  |              |
- * | entityId   | i!infoKey:                   |              |              |
- * |            | infoValue                    | metricId1:   |              |
- * |            |                              | metricValue1 |              |
- * |            | r!relatesToKey:              | @timestamp2  |              |
- * |            | id3=id4=id5                  |              |              |
- * |            |                              |              |              |
- * |            | s!isRelatedToKey             |              |              |
- * |            | id7=id9=id6                  |              |              |
- * |            |                              |              |              |
- * |            | e!eventId=timestamp=infoKey: |              |              |
- * |            | eventInfoValue               |              |              |
- * |            |                              |              |              |
- * |            | flowVersion:                 |              |              |
- * |            | versionValue                 |              |              |
- * |-------------------------------------------------------------------------|
- * </pre>
- */
-public class EntityTable extends BaseTable<EntityTable> {
-  /** entity prefix. */
-  private static final String PREFIX =
-      YarnConfiguration.TIMELINE_SERVICE_PREFIX + "entity";
-
-  /** config param name that specifies the entity table name. */
-  public static final String TABLE_NAME_CONF_NAME = PREFIX + ".table.name";
-
-  /**
-   * config param name that specifies the TTL for metrics column family in
-   * entity table.
-   */
-  private static final String METRICS_TTL_CONF_NAME = PREFIX
-      + ".table.metrics.ttl";
-
-  /**
-   * config param name that specifies max-versions for metrics column family in
-   * entity table.
-   */
-  private static final String METRICS_MAX_VERSIONS =
-      PREFIX + ".table.metrics.max-versions";
-
-  /** default value for entity table name. */
-  public static final String DEFAULT_TABLE_NAME = "timelineservice.entity";
-
-  /** default TTL is 30 days for metrics timeseries. */
-  private static final int DEFAULT_METRICS_TTL = 2592000;
-
-  /** default max number of versions. */
-  private static final int DEFAULT_METRICS_MAX_VERSIONS = 10000;
-
-  private static final Logger LOG =
-      LoggerFactory.getLogger(EntityTable.class);
-
-  public EntityTable() {
-    super(TABLE_NAME_CONF_NAME, DEFAULT_TABLE_NAME);
-  }
-
-  /*
-   * (non-Javadoc)
-   *
-   * @see
-   * org.apache.hadoop.yarn.server.timelineservice.storage.BaseTable#createTable
-   * (org.apache.hadoop.hbase.client.Admin,
-   * org.apache.hadoop.conf.Configuration)
-   */
-  public void createTable(Admin admin, Configuration hbaseConf)
-      throws IOException {
-
-    TableName table = getTableName(hbaseConf);
-    if (admin.tableExists(table)) {
-      // do not disable / delete existing table
-      // similar to the approach taken by map-reduce jobs when
-      // output directory exists
-      throw new IOException("Table " + table.getNameAsString()
-          + " already exists.");
-    }
-
-    HTableDescriptor entityTableDescp = new HTableDescriptor(table);
-    HColumnDescriptor infoCF =
-        new HColumnDescriptor(EntityColumnFamily.INFO.getBytes());
-    infoCF.setBloomFilterType(BloomType.ROWCOL);
-    entityTableDescp.addFamily(infoCF);
-
-    HColumnDescriptor configCF =
-        new HColumnDescriptor(EntityColumnFamily.CONFIGS.getBytes());
-    configCF.setBloomFilterType(BloomType.ROWCOL);
-    configCF.setBlockCacheEnabled(true);
-    entityTableDescp.addFamily(configCF);
-
-    HColumnDescriptor metricsCF =
-        new HColumnDescriptor(EntityColumnFamily.METRICS.getBytes());
-    entityTableDescp.addFamily(metricsCF);
-    metricsCF.setBlockCacheEnabled(true);
-    // always keep 1 version (the latest)
-    metricsCF.setMinVersions(1);
-    metricsCF.setMaxVersions(
-        hbaseConf.getInt(METRICS_MAX_VERSIONS, DEFAULT_METRICS_MAX_VERSIONS));
-    metricsCF.setTimeToLive(hbaseConf.getInt(METRICS_TTL_CONF_NAME,
-        DEFAULT_METRICS_TTL));
-    entityTableDescp.setRegionSplitPolicyClassName(
-        "org.apache.hadoop.hbase.regionserver.KeyPrefixRegionSplitPolicy");
-    entityTableDescp.setValue("KeyPrefixRegionSplitPolicy.prefix_length",
-        TimelineHBaseSchemaConstants.USERNAME_SPLIT_KEY_PREFIX_LENGTH);
-    admin.createTable(entityTableDescp,
-        TimelineHBaseSchemaConstants.getUsernameSplits());
-    LOG.info("Status of table creation for " + table.getNameAsString() + "="
-        + admin.tableExists(table));
-  }
-
-  /**
-   * @param metricsTTL time to live parameter for the metrics in this table.
-   * @param hbaseConf configuration in which to set the metrics TTL config
-   *          variable.
-   */
-  public void setMetricsTTL(int metricsTTL, Configuration hbaseConf) {
-    hbaseConf.setInt(METRICS_TTL_CONF_NAME, metricsTTL);
-  }
-
-}
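
As a usage note on the schema above: metric retention is purely configuration-driven, so operators can shrink the 30-day default before the table is first created. A hedged example, assuming YarnConfiguration.TIMELINE_SERVICE_PREFIX resolves to "yarn.timeline-service." so that the full key matches METRICS_TTL_CONF_NAME above:

    import org.apache.hadoop.conf.Configuration;

    public class MetricsTtlSketch {
      public static void main(String[] args) {
        Configuration conf = new Configuration();
        // Same key EntityTable reads via METRICS_TTL_CONF_NAME.
        conf.setInt("yarn.timeline-service.entity.table.metrics.ttl",
            7 * 24 * 3600); // keep metric timeseries for 7 days
        System.out.println(
            conf.getInt("yarn.timeline-service.entity.table.metrics.ttl", -1));
      }
    }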

http://git-wip-us.apache.org/repos/asf/hadoop/blob/9af30d46/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/entity/package-info.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/entity/package-info.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/entity/package-info.java
deleted file mode 100644
index bb0e331..0000000
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/entity/package-info.java
+++ /dev/null
@@ -1,28 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-/**
- * Package org.apache.hadoop.yarn.server.timelineservice.storage.entity
- * contains classes related to implementation for entity table.
- */
-@InterfaceAudience.Private
-@InterfaceStability.Unstable
-package org.apache.hadoop.yarn.server.timelineservice.storage.entity;
-
-import org.apache.hadoop.classification.InterfaceAudience;
-import org.apache.hadoop.classification.InterfaceStability;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/9af30d46/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/flow/AggregationCompactionDimension.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/flow/AggregationCompactionDimension.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/flow/AggregationCompactionDimension.java
deleted file mode 100644
index 4e2cf2d..0000000
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/flow/AggregationCompactionDimension.java
+++ /dev/null
@@ -1,63 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.yarn.server.timelineservice.storage.flow;
-
-import org.apache.hadoop.hbase.util.Bytes;
-
-/**
- * Identifies the compaction dimensions for the data in the
- * {@link FlowRunTable}.
- */
-public enum AggregationCompactionDimension {
-
-  /**
-   * the application id.
-   */
-  APPLICATION_ID((byte) 101);
-
-  private byte tagType;
-  private byte[] inBytes;
-
-  private AggregationCompactionDimension(byte tagType) {
-    this.tagType = tagType;
-    this.inBytes = Bytes.toBytes(this.name());
-  }
-
-  public Attribute getAttribute(String attributeValue) {
-    return new Attribute(this.name(), Bytes.toBytes(attributeValue));
-  }
-
-  public byte getTagType() {
-    return tagType;
-  }
-
-  public byte[] getInBytes() {
-    return this.inBytes.clone();
-  }
-
-  public static AggregationCompactionDimension
-      getAggregationCompactionDimension(String aggCompactDimStr) {
-    for (AggregationCompactionDimension aggDim : AggregationCompactionDimension
-        .values()) {
-      if (aggDim.name().equals(aggCompactDimStr)) {
-        return aggDim;
-      }
-    }
-    return null;
-  }
-}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/9af30d46/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/flow/AggregationOperation.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/flow/AggregationOperation.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/flow/AggregationOperation.java
deleted file mode 100644
index 40cdd2c..0000000
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/flow/AggregationOperation.java
+++ /dev/null
@@ -1,94 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.yarn.server.timelineservice.storage.flow;
-
-import org.apache.hadoop.hbase.util.Bytes;
-
-/**
- * Identifies the attributes to be set for puts into the {@link FlowRunTable}.
- * The numbers used for tagType are prime numbers.
- */
-public enum AggregationOperation {
-
-  /**
-   * When the flow was started.
-   */
-  GLOBAL_MIN((byte) 71),
-
-  /**
-   * When the flow ended.
-   */
-  GLOBAL_MAX((byte) 73),
-
-  /**
-   * The metrics of the flow.
-   */
-  SUM((byte) 79),
-
-  /**
-   * Metric values from applications that have finished running; these
-   * are folded once into the final sum.
-   */
-  SUM_FINAL((byte) 83),
-
-  /**
-   * Min value as per the latest timestamp
-   * seen for a given app.
-   */
-  LATEST_MIN((byte) 89),
-
-  /**
-   * Max value as per the latest timestamp
-   * seen for a given app.
-   */
-  LATEST_MAX((byte) 97);
-
-  private byte tagType;
-  private byte[] inBytes;
-
-  private AggregationOperation(byte tagType) {
-    this.tagType = tagType;
-    this.inBytes = Bytes.toBytes(this.name());
-  }
-
-  public Attribute getAttribute() {
-    return new Attribute(this.name(), this.inBytes);
-  }
-
-  public byte getTagType() {
-    return tagType;
-  }
-
-  public byte[] getInBytes() {
-    return this.inBytes.clone();
-  }
-
-  /**
-   * returns the AggregationOperation enum that represents that string.
-   * @param aggOpStr Aggregation operation.
-   * @return the AggregationOperation enum that represents that string
-   */
-  public static AggregationOperation getAggregationOperation(String aggOpStr) {
-    for (AggregationOperation aggOp : AggregationOperation.values()) {
-      if (aggOp.name().equals(aggOpStr)) {
-        return aggOp;
-      }
-    }
-    return null;
-  }
-
-}
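
For orientation, a minimal usage sketch of this enum (illustrative; it
assumes the AggregationOperation and Attribute classes shown above):

  // Look an operation up by name and build the cell attribute for a put.
  AggregationOperation op = AggregationOperation.getAggregationOperation("SUM");
  if (op != null) {
    Attribute attr = op.getAttribute();   // name "SUM", value Bytes.toBytes("SUM")
    byte tagType = op.getTagType();       // 79; each operation uses a distinct prime
  }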

http://git-wip-us.apache.org/repos/asf/hadoop/blob/9af30d46/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/flow/Attribute.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/flow/Attribute.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/flow/Attribute.java
deleted file mode 100644
index d3de518..0000000
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/flow/Attribute.java
+++ /dev/null
@@ -1,39 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.yarn.server.timelineservice.storage.flow;
-
-/**
- * Defines the attribute tuple to be set for puts into the {@link FlowRunTable}.
- */
-public class Attribute {
-  private final String name;
-  private final byte[] value;
-
-  public Attribute(String name, byte[] value) {
-    this.name = name;
-    this.value = value.clone();
-  }
-
-  public String getName() {
-    return name;
-  }
-
-  public byte[] getValue() {
-    return value.clone();
-  }
-}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/9af30d46/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/flow/FlowActivityColumnFamily.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/flow/FlowActivityColumnFamily.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/flow/FlowActivityColumnFamily.java
deleted file mode 100644
index f9eb5b4..0000000
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/flow/FlowActivityColumnFamily.java
+++ /dev/null
@@ -1,55 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.yarn.server.timelineservice.storage.flow;
-
-import org.apache.hadoop.hbase.util.Bytes;
-import org.apache.hadoop.yarn.server.timelineservice.storage.common.ColumnFamily;
-import org.apache.hadoop.yarn.server.timelineservice.storage.common.Separator;
-
-/**
- * Represents the flow activity table column families.
- */
-public enum FlowActivityColumnFamily
-    implements ColumnFamily<FlowActivityTable> {
-
-  /**
-   * Info column family houses known columns, specifically ones included in
-   * columnfamily filters.
-   */
-  INFO("i");
-
-  /**
-   * Byte representation of this column family.
-   */
-  private final byte[] bytes;
-
-  /**
-   * @param value
-   *          create a column family with this name. Must be lower case and
-   *          without spaces.
-   */
-  private FlowActivityColumnFamily(String value) {
-    // column families should be lower case and not contain any spaces.
-    this.bytes = Bytes.toBytes(Separator.SPACE.encode(value));
-  }
-
-  public byte[] getBytes() {
-    return Bytes.copy(bytes);
-  }
-
-}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hadoop/blob/9af30d46/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/flow/FlowActivityColumnPrefix.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/flow/FlowActivityColumnPrefix.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/flow/FlowActivityColumnPrefix.java
deleted file mode 100644
index 706b002..0000000
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/flow/FlowActivityColumnPrefix.java
+++ /dev/null
@@ -1,221 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.yarn.server.timelineservice.storage.flow;
-
-import java.io.IOException;
-import java.util.Map;
-import java.util.NavigableMap;
-
-import org.apache.hadoop.hbase.client.Result;
-import org.apache.hadoop.hbase.util.Bytes;
-import org.apache.hadoop.yarn.server.timelineservice.storage.common.ColumnFamily;
-import org.apache.hadoop.yarn.server.timelineservice.storage.common.ColumnHelper;
-import org.apache.hadoop.yarn.server.timelineservice.storage.common.ColumnPrefix;
-import org.apache.hadoop.yarn.server.timelineservice.storage.common.HBaseTimelineStorageUtils;
-import org.apache.hadoop.yarn.server.timelineservice.storage.common.KeyConverter;
-import org.apache.hadoop.yarn.server.timelineservice.storage.common.Separator;
-import org.apache.hadoop.yarn.server.timelineservice.storage.common.TypedBufferedMutator;
-import org.apache.hadoop.yarn.server.timelineservice.storage.common.ValueConverter;
-
-/**
- * Identifies partially qualified columns for the {@link FlowActivityTable}.
- */
-public enum FlowActivityColumnPrefix
-    implements ColumnPrefix<FlowActivityTable> {
-
-  /**
-   * To store run ids of the flows.
-   */
-  RUN_ID(FlowActivityColumnFamily.INFO, "r", null);
-
-  private final ColumnHelper<FlowActivityTable> column;
-  private final ColumnFamily<FlowActivityTable> columnFamily;
-
-  /**
-   * Can be null for those cases where the provided column qualifier is the
-   * entire column name.
-   */
-  private final String columnPrefix;
-  private final byte[] columnPrefixBytes;
-
-  private final AggregationOperation aggOp;
-
-  /**
-   * Private constructor, meant to be used by the enum definition.
-   *
-   * @param columnFamily
-   *          that this column is stored in.
-   * @param columnPrefix
-   *          for this column.
-   */
-  private FlowActivityColumnPrefix(
-      ColumnFamily<FlowActivityTable> columnFamily, String columnPrefix,
-      AggregationOperation aggOp) {
-    this(columnFamily, columnPrefix, aggOp, false);
-  }
-
-  private FlowActivityColumnPrefix(
-      ColumnFamily<FlowActivityTable> columnFamily, String columnPrefix,
-      AggregationOperation aggOp, boolean compoundColQual) {
-    column = new ColumnHelper<FlowActivityTable>(columnFamily);
-    this.columnFamily = columnFamily;
-    this.columnPrefix = columnPrefix;
-    if (columnPrefix == null) {
-      this.columnPrefixBytes = null;
-    } else {
-      // Future-proof by ensuring the right column prefix hygiene.
-      this.columnPrefixBytes = Bytes.toBytes(Separator.SPACE
-          .encode(columnPrefix));
-    }
-    this.aggOp = aggOp;
-  }
-
-  /**
-   * @return the column name value
-   */
-  public String getColumnPrefix() {
-    return columnPrefix;
-  }
-
-  @Override
-  public byte[] getColumnPrefixBytes(byte[] qualifierPrefix) {
-    return ColumnHelper.getColumnQualifier(
-        this.columnPrefixBytes, qualifierPrefix);
-  }
-
-  @Override
-  public byte[] getColumnPrefixBytes(String qualifierPrefix) {
-    return ColumnHelper.getColumnQualifier(
-        this.columnPrefixBytes, qualifierPrefix);
-  }
-
-  public byte[] getColumnPrefixBytes() {
-    return columnPrefixBytes.clone();
-  }
-
-  @Override
-  public byte[] getColumnFamilyBytes() {
-    return columnFamily.getBytes();
-  }
-
-  @Override
-  public ValueConverter getValueConverter() {
-    return column.getValueConverter();
-  }
-
-  public AggregationOperation getAttribute() {
-    return aggOp;
-  }
-
-  /*
-   * (non-Javadoc)
-   *
-   * @see
-   * org.apache.hadoop.yarn.server.timelineservice.storage.common.ColumnPrefix
-   * #store(byte[],
-   * org.apache.hadoop.yarn.server.timelineservice.storage.common.
-   * TypedBufferedMutator, byte[], java.lang.Long, java.lang.Object,
-   * org.apache.hadoop.yarn.server.timelineservice.storage.flow.Attribute[])
-   */
-  @Override
-  public void store(byte[] rowKey,
-      TypedBufferedMutator<FlowActivityTable> tableMutator, byte[] qualifier,
-      Long timestamp, Object inputValue, Attribute... attributes)
-      throws IOException {
-    // Null check
-    if (qualifier == null) {
-      throw new IOException("Cannot store column with null qualifier in "
-          + tableMutator.getName().getNameAsString());
-    }
-
-    byte[] columnQualifier = getColumnPrefixBytes(qualifier);
-    Attribute[] combinedAttributes =
-        HBaseTimelineStorageUtils.combineAttributes(attributes, this.aggOp);
-    column.store(rowKey, tableMutator, columnQualifier, timestamp, inputValue,
-        combinedAttributes);
-  }
-
-  /*
-   * (non-Javadoc)
-   *
-   * @see
-   * org.apache.hadoop.yarn.server.timelineservice.storage.common.ColumnPrefix
-   * #readResult(org.apache.hadoop.hbase.client.Result, java.lang.String)
-   */
-  public Object readResult(Result result, String qualifier) throws IOException {
-    byte[] columnQualifier = ColumnHelper.getColumnQualifier(
-        this.columnPrefixBytes, qualifier);
-    return column.readResult(result, columnQualifier);
-  }
-
-  /*
-   * (non-Javadoc)
-   *
-   * @see
-   * org.apache.hadoop.yarn.server.timelineservice.storage.common.ColumnPrefix
-   * #readResults(org.apache.hadoop.hbase.client.Result,
-   * org.apache.hadoop.yarn.server.timelineservice.storage.common.KeyConverter)
-   */
-  public <K> Map<K, Object> readResults(Result result,
-      KeyConverter<K> keyConverter) throws IOException {
-    return column.readResults(result, columnPrefixBytes, keyConverter);
-  }
-
-  /*
-   * (non-Javadoc)
-   *
-   * @see
-   * org.apache.hadoop.yarn.server.timelineservice.storage.common.ColumnPrefix
-   * #readResultsWithTimestamps(org.apache.hadoop.hbase.client.Result,
-   * org.apache.hadoop.yarn.server.timelineservice.storage.common.KeyConverter)
-   */
-  public <K, V> NavigableMap<K, NavigableMap<Long, V>>
-      readResultsWithTimestamps(Result result, KeyConverter<K> keyConverter)
-      throws IOException {
-    return column.readResultsWithTimestamps(result, columnPrefixBytes,
-        keyConverter);
-  }
-
-  /*
-   * (non-Javadoc)
-   *
-   * @see
-   * org.apache.hadoop.yarn.server.timelineservice.storage.common.ColumnPrefix
-   * #store(byte[],
-   * org.apache.hadoop.yarn.server.timelineservice.storage.common.
-   * TypedBufferedMutator, java.lang.String, java.lang.Long, java.lang.Object,
-   * org.apache.hadoop.yarn.server.timelineservice.storage.flow.Attribute[])
-   */
-  @Override
-  public void store(byte[] rowKey,
-      TypedBufferedMutator<FlowActivityTable> tableMutator, String qualifier,
-      Long timestamp, Object inputValue, Attribute... attributes)
-      throws IOException {
-    // Null check
-    if (qualifier == null) {
-      throw new IOException("Cannot store column with null qualifier in "
-          + tableMutator.getName().getNameAsString());
-    }
-
-    byte[] columnQualifier = getColumnPrefixBytes(qualifier);
-    Attribute[] combinedAttributes =
-        HBaseTimelineStorageUtils.combineAttributes(attributes, this.aggOp);
-    column.store(rowKey, tableMutator, columnQualifier, timestamp, inputValue,
-        combinedAttributes);
-  }
-}
\ No newline at end of file
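
A hedged sketch of how the RUN_ID prefix above is typically written (the
mutator, run id, flow version and row key values here are assumed for
illustration only):

  // Writes a column of the form r!<runId> = <flowVersion>, matching the
  // example record shown in the FlowActivityTable javadoc further below.
  FlowActivityColumnPrefix.RUN_ID.store(rowKey, flowActivityTableMutator,
      Bytes.toBytes(runId), null, flowVersion);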

http://git-wip-us.apache.org/repos/asf/hadoop/blob/9af30d46/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/flow/FlowActivityRowKey.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/flow/FlowActivityRowKey.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/flow/FlowActivityRowKey.java
deleted file mode 100644
index b8a5dba..0000000
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/flow/FlowActivityRowKey.java
+++ /dev/null
@@ -1,247 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.yarn.server.timelineservice.storage.flow;
-
-import java.util.List;
-
-import org.apache.hadoop.hbase.util.Bytes;
-import org.apache.hadoop.yarn.server.timelineservice.reader.TimelineReaderUtils;
-import org.apache.hadoop.yarn.server.timelineservice.storage.common.HBaseTimelineStorageUtils;
-import org.apache.hadoop.yarn.server.timelineservice.storage.common.KeyConverter;
-import org.apache.hadoop.yarn.server.timelineservice.storage.common.LongConverter;
-import org.apache.hadoop.yarn.server.timelineservice.storage.common.KeyConverterToString;
-import org.apache.hadoop.yarn.server.timelineservice.storage.common.Separator;
-
-/**
- * Represents a rowkey for the flow activity table.
- */
-public class FlowActivityRowKey {
-
-  private final String clusterId;
-  private final Long dayTs;
-  private final String userId;
-  private final String flowName;
-  private final FlowActivityRowKeyConverter
-      flowActivityRowKeyConverter = new FlowActivityRowKeyConverter();
-
-  /**
-   * @param clusterId identifying the cluster
-   * @param dayTs to be converted to the top of the day timestamp
-   * @param userId identifying user
-   * @param flowName identifying the flow
-   */
-  public FlowActivityRowKey(String clusterId, Long dayTs, String userId,
-      String flowName) {
-    this(clusterId, dayTs, userId, flowName, true);
-  }
-
-  /**
-   * @param clusterId identifying the cluster
-   * @param timestamp when the flow activity happened. May be converted to the
-   *          top of the day depending on the convertDayTsToTopOfDay argument.
-   * @param userId identifying user
-   * @param flowName identifying the flow
-   * @param convertDayTsToTopOfDay if true and timestamp isn't null, then
-   *          timestamp will be converted to the top-of-the day timestamp
-   */
-  protected FlowActivityRowKey(String clusterId, Long timestamp, String userId,
-      String flowName, boolean convertDayTsToTopOfDay) {
-    this.clusterId = clusterId;
-    if (convertDayTsToTopOfDay && (timestamp != null)) {
-      this.dayTs = HBaseTimelineStorageUtils.getTopOfTheDayTimestamp(timestamp);
-    } else {
-      this.dayTs = timestamp;
-    }
-    this.userId = userId;
-    this.flowName = flowName;
-  }
-
-  public String getClusterId() {
-    return clusterId;
-  }
-
-  public Long getDayTimestamp() {
-    return dayTs;
-  }
-
-  public String getUserId() {
-    return userId;
-  }
-
-  public String getFlowName() {
-    return flowName;
-  }
-
-  /**
-   * Constructs a row key for the flow activity table as follows:
-   * {@code clusterId!dayTimestamp!user!flowName}.
-   *
-   * @return byte array for the row key
-   */
-  public byte[] getRowKey() {
-    return flowActivityRowKeyConverter.encode(this);
-  }
-
-  /**
-   * Given the raw row key as bytes, returns the row key as an object.
-   *
-   * @param rowKey Byte representation of row key.
-   * @return A <cite>FlowActivityRowKey</cite> object.
-   */
-  public static FlowActivityRowKey parseRowKey(byte[] rowKey) {
-    return new FlowActivityRowKeyConverter().decode(rowKey);
-  }
-
-  /**
-   * Constructs a row key for the flow activity table as follows:
-   * {@code clusterId!dayTimestamp!user!flowName}.
-   * @return String representation of row key
-   */
-  public String getRowKeyAsString() {
-    return flowActivityRowKeyConverter.encodeAsString(this);
-  }
-
-  /**
-   * Given the raw row key as string, returns the row key as an object.
-   * @param encodedRowKey String representation of row key.
-   * @return A <cite>FlowActivityRowKey</cite> object.
-   */
-  public static FlowActivityRowKey parseRowKeyFromString(String encodedRowKey) {
-    return new FlowActivityRowKeyConverter().decodeFromString(encodedRowKey);
-  }
-
-  /**
-   * Encodes and decodes row key for flow activity table. The row key is of the
-   * form {@code clusterId!dayTimestamp!user!flowName}. dayTimestamp (the top
-   * of the day timestamp) is a long and the rest are strings.
-   */
-  final private static class FlowActivityRowKeyConverter
-      implements KeyConverter<FlowActivityRowKey>,
-      KeyConverterToString<FlowActivityRowKey> {
-
-    private FlowActivityRowKeyConverter() {
-    }
-
-    /**
-     * The flow activity row key is of the form
-     * clusterId!dayTimestamp!user!flowName with each segment separated by !.
-     * The sizes below indicate the sizes of each of these segments in
-     * sequence. clusterId, user and flowName are strings. Top of the day
-     * timestamp is a long hence 8 bytes in size. Strings are variable in size
-     * (i.e. they end whenever separator is encountered). This is used while
-     * decoding and helps in determining where to split.
-     */
-    private static final int[] SEGMENT_SIZES = {Separator.VARIABLE_SIZE,
-        Bytes.SIZEOF_LONG, Separator.VARIABLE_SIZE, Separator.VARIABLE_SIZE };
-
-    /*
-     * (non-Javadoc)
-     *
-     * Encodes FlowActivityRowKey object into a byte array with each
-     * component/field in FlowActivityRowKey separated by Separator#QUALIFIERS.
-     * This leads to a flow activity table row key of the form
-     * clusterId!dayTimestamp!user!flowName. If dayTimestamp in passed
-     * FlowActivityRowKey object is null and clusterId is not null, then this
-     * returns a row key prefix as clusterId! and if userId in
-     * FlowActivityRowKey is null (and the fields preceding it i.e. clusterId
-     * and dayTimestamp are not null), this returns a row key prefix as
-     * clusterId!dayTimeStamp!. The dayTimestamp is inverted while encoding,
-     * as this maintains a descending order for row keys in the flow
-     * activity table.
-     *
-     * @see org.apache.hadoop.yarn.server.timelineservice.storage.common
-     * .KeyConverter#encode(java.lang.Object)
-     */
-    @Override
-    public byte[] encode(FlowActivityRowKey rowKey) {
-      if (rowKey.getDayTimestamp() == null) {
-        return Separator.QUALIFIERS.join(Separator.encode(
-            rowKey.getClusterId(), Separator.SPACE, Separator.TAB,
-            Separator.QUALIFIERS), Separator.EMPTY_BYTES);
-      }
-      if (rowKey.getUserId() == null) {
-        return Separator.QUALIFIERS.join(Separator.encode(
-            rowKey.getClusterId(), Separator.SPACE, Separator.TAB,
-            Separator.QUALIFIERS), Bytes.toBytes(LongConverter
-            .invertLong(rowKey.getDayTimestamp())), Separator.EMPTY_BYTES);
-      }
-      return Separator.QUALIFIERS.join(Separator.encode(rowKey.getClusterId(),
-          Separator.SPACE, Separator.TAB, Separator.QUALIFIERS), Bytes
-          .toBytes(LongConverter.invertLong(rowKey.getDayTimestamp())),
-          Separator.encode(rowKey.getUserId(), Separator.SPACE, Separator.TAB,
-              Separator.QUALIFIERS), Separator.encode(rowKey.getFlowName(),
-              Separator.SPACE, Separator.TAB, Separator.QUALIFIERS));
-    }
-
-    /*
-     * (non-Javadoc)
-     *
-     * @see
-     * org.apache.hadoop.yarn.server.timelineservice.storage.common
-     * .KeyConverter#decode(byte[])
-     */
-    @Override
-    public FlowActivityRowKey decode(byte[] rowKey) {
-      byte[][] rowKeyComponents =
-          Separator.QUALIFIERS.split(rowKey, SEGMENT_SIZES);
-      if (rowKeyComponents.length != 4) {
-        throw new IllegalArgumentException("the row key is not valid for "
-            + "a flow activity");
-      }
-      String clusterId =
-          Separator.decode(Bytes.toString(rowKeyComponents[0]),
-              Separator.QUALIFIERS, Separator.TAB, Separator.SPACE);
-      Long dayTs = LongConverter.invertLong(Bytes.toLong(rowKeyComponents[1]));
-      String userId =
-          Separator.decode(Bytes.toString(rowKeyComponents[2]),
-              Separator.QUALIFIERS, Separator.TAB, Separator.SPACE);
-      String flowName =
-          Separator.decode(Bytes.toString(rowKeyComponents[3]),
-              Separator.QUALIFIERS, Separator.TAB, Separator.SPACE);
-      return new FlowActivityRowKey(clusterId, dayTs, userId, flowName);
-    }
-
-    @Override
-    public String encodeAsString(FlowActivityRowKey key) {
-      if (key.getDayTimestamp() == null) {
-        return TimelineReaderUtils
-            .joinAndEscapeStrings(new String[] {key.clusterId});
-      } else if (key.getUserId() == null) {
-        return TimelineReaderUtils.joinAndEscapeStrings(
-            new String[] {key.clusterId, key.dayTs.toString()});
-      } else if (key.getFlowName() == null) {
-        return TimelineReaderUtils.joinAndEscapeStrings(
-            new String[] {key.clusterId, key.dayTs.toString(), key.userId});
-      }
-      return TimelineReaderUtils.joinAndEscapeStrings(new String[] {
-          key.clusterId, key.dayTs.toString(), key.userId, key.flowName});
-    }
-
-    @Override
-    public FlowActivityRowKey decodeFromString(String encodedRowKey) {
-      List<String> split = TimelineReaderUtils.split(encodedRowKey);
-      if (split == null || split.size() != 4) {
-        throw new IllegalArgumentException(
-            "Invalid row key for flow activity.");
-      }
-      Long dayTs = Long.valueOf(split.get(1));
-      return new FlowActivityRowKey(split.get(0), dayTs, split.get(2),
-          split.get(3));
-    }
-  }
-}
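
A short worked example of the encode/decode round trip (illustrative; it
assumes LongConverter.invertLong is Long.MAX_VALUE minus the value, which is
what yields the descending day order):

  long ts = 1519344000000L;  // 2018-02-23T00:00Z, already top of the day
  FlowActivityRowKey key =
      new FlowActivityRowKey("cluster1", ts, "alice", "dailyETL");
  // Encoded form: cluster1!invertedDayTs!alice!dailyETL
  byte[] rowKey = key.getRowKey();
  FlowActivityRowKey parsed = FlowActivityRowKey.parseRowKey(rowKey);
  // Since ts is already midnight UTC, top-of-day rounding leaves it unchanged.
  assert parsed.getDayTimestamp() == ts;
  assert "dailyETL".equals(parsed.getFlowName());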

http://git-wip-us.apache.org/repos/asf/hadoop/blob/9af30d46/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/flow/FlowActivityRowKeyPrefix.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/flow/FlowActivityRowKeyPrefix.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/flow/FlowActivityRowKeyPrefix.java
deleted file mode 100644
index eb88e54..0000000
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/flow/FlowActivityRowKeyPrefix.java
+++ /dev/null
@@ -1,60 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.yarn.server.timelineservice.storage.flow;
-
-import org.apache.hadoop.yarn.server.timelineservice.storage.common.RowKeyPrefix;
-
-/**
- * A partial row key (prefix) for flow activities.
- */
-public class FlowActivityRowKeyPrefix extends FlowActivityRowKey implements
-    RowKeyPrefix<FlowActivityRowKey> {
-
-  /**
-   * Constructs a row key prefix for the flow activity table as follows:
-   * {@code clusterId!dayTimestamp!}.
-   *
-   * @param clusterId Cluster Id.
-   * @param dayTs Start of the day timestamp.
-   */
-  public FlowActivityRowKeyPrefix(String clusterId, Long dayTs) {
-    super(clusterId, dayTs, null, null, false);
-  }
-
-  /**
-   * Constructs a row key prefix for the flow activity table as follows:
-   * {@code clusterId!}.
-   *
-   * @param clusterId identifying the cluster
-   */
-  public FlowActivityRowKeyPrefix(String clusterId) {
-    super(clusterId, null, null, null, false);
-  }
-
-  /*
-   * (non-Javadoc)
-   *
-   * @see
-   * org.apache.hadoop.yarn.server.timelineservice.storage.application.
-   * RowKeyPrefix#getRowKeyPrefix()
-   */
-  public byte[] getRowKeyPrefix() {
-    return super.getRowKey();
-  }
-
-}
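
In practice the prefix seeds an HBase scan; a hedged sketch (the Scan call is
the stock HBase client API, and dayTs is assumed to be already known):

  // All flow activity rows for cluster1 on the given day.
  byte[] prefix =
      new FlowActivityRowKeyPrefix("cluster1", dayTs).getRowKeyPrefix();
  Scan scan = new Scan().setRowPrefixFilter(prefix);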

http://git-wip-us.apache.org/repos/asf/hadoop/blob/9af30d46/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/flow/FlowActivityTable.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/flow/FlowActivityTable.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/flow/FlowActivityTable.java
deleted file mode 100644
index e646eb2..0000000
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/flow/FlowActivityTable.java
+++ /dev/null
@@ -1,109 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.yarn.server.timelineservice.storage.flow;
-
-import java.io.IOException;
-
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hbase.HColumnDescriptor;
-import org.apache.hadoop.hbase.HTableDescriptor;
-import org.apache.hadoop.hbase.TableName;
-import org.apache.hadoop.hbase.client.Admin;
-import org.apache.hadoop.hbase.regionserver.BloomType;
-import org.apache.hadoop.yarn.conf.YarnConfiguration;
-import org.apache.hadoop.yarn.server.timelineservice.storage.common.BaseTable;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-/**
- * The flow activity table has a single column family, info. It stores the
- * daily activity records for flows and serves as a quick lookup of which
- * flows were running on a given day.
- *
- * Example flow activity table record:
- *
- * <pre>
- * |-------------------------------------------|
- * |  Row key   | Column Family                |
- * |            | info                         |
- * |-------------------------------------------|
- * | clusterId! | r!runid1:version1            |
- * | inv Top of |                              |
- * | Day!       | r!runid2:version7            |
- * | userName!  |                              |
- * | flowName   |                              |
- * |-------------------------------------------|
- * </pre>
- */
-public class FlowActivityTable extends BaseTable<FlowActivityTable> {
-  /** flow activity table prefix. */
-  private static final String PREFIX =
-      YarnConfiguration.TIMELINE_SERVICE_PREFIX + ".flowactivity";
-
-  /** config param name that specifies the flowactivity table name. */
-  public static final String TABLE_NAME_CONF_NAME = PREFIX + ".table.name";
-
-  /** default value for flowactivity table name. */
-  public static final String DEFAULT_TABLE_NAME =
-      "timelineservice.flowactivity";
-
-  private static final Logger LOG =
-      LoggerFactory.getLogger(FlowActivityTable.class);
-
-  /** default max number of versions. */
-  public static final int DEFAULT_METRICS_MAX_VERSIONS = Integer.MAX_VALUE;
-
-  public FlowActivityTable() {
-    super(TABLE_NAME_CONF_NAME, DEFAULT_TABLE_NAME);
-  }
-
-  /*
-   * (non-Javadoc)
-   *
-   * @see
-   * org.apache.hadoop.yarn.server.timelineservice.storage.BaseTable#createTable
-   * (org.apache.hadoop.hbase.client.Admin,
-   * org.apache.hadoop.conf.Configuration)
-   */
-  public void createTable(Admin admin, Configuration hbaseConf)
-      throws IOException {
-
-    TableName table = getTableName(hbaseConf);
-    if (admin.tableExists(table)) {
-      // do not disable / delete existing table
-      // similar to the approach taken by map-reduce jobs when
-      // output directory exists
-      throw new IOException("Table " + table.getNameAsString()
-          + " already exists.");
-    }
-
-    HTableDescriptor flowActivityTableDescp = new HTableDescriptor(table);
-    HColumnDescriptor infoCF =
-        new HColumnDescriptor(FlowActivityColumnFamily.INFO.getBytes());
-    infoCF.setBloomFilterType(BloomType.ROWCOL);
-    flowActivityTableDescp.addFamily(infoCF);
-    infoCF.setMinVersions(1);
-    infoCF.setMaxVersions(DEFAULT_METRICS_MAX_VERSIONS);
-
-    // TODO: figure the split policy before running in production
-    admin.createTable(flowActivityTableDescp);
-    LOG.info("Status of table creation for " + table.getNameAsString() + "="
-        + admin.tableExists(table));
-  }
-}
\ No newline at end of file
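
The table name is configurable through the key defined above; a minimal
sketch (admin is assumed to be an open HBase Admin handle):

  Configuration hbaseConf = HBaseConfiguration.create();
  // Override the default "timelineservice.flowactivity" name.
  hbaseConf.set(FlowActivityTable.TABLE_NAME_CONF_NAME,
      "prod.timelineservice.flowactivity");
  new FlowActivityTable().createTable(admin, hbaseConf);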

http://git-wip-us.apache.org/repos/asf/hadoop/blob/9af30d46/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/flow/FlowRunColumn.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/flow/FlowRunColumn.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/flow/FlowRunColumn.java
deleted file mode 100644
index 3797faf..0000000
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/flow/FlowRunColumn.java
+++ /dev/null
@@ -1,131 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.yarn.server.timelineservice.storage.flow;
-
-import java.io.IOException;
-
-import org.apache.hadoop.hbase.client.Result;
-import org.apache.hadoop.hbase.util.Bytes;
-import org.apache.hadoop.yarn.server.timelineservice.storage.common.Column;
-import org.apache.hadoop.yarn.server.timelineservice.storage.common.ColumnFamily;
-import org.apache.hadoop.yarn.server.timelineservice.storage.common.ColumnHelper;
-import org.apache.hadoop.yarn.server.timelineservice.storage.common.GenericConverter;
-import org.apache.hadoop.yarn.server.timelineservice.storage.common.HBaseTimelineStorageUtils;
-import org.apache.hadoop.yarn.server.timelineservice.storage.common.LongConverter;
-import org.apache.hadoop.yarn.server.timelineservice.storage.common.Separator;
-import org.apache.hadoop.yarn.server.timelineservice.storage.common.TypedBufferedMutator;
-import org.apache.hadoop.yarn.server.timelineservice.storage.common.ValueConverter;
-
-/**
- * Identifies fully qualified columns for the {@link FlowRunTable}.
- */
-public enum FlowRunColumn implements Column<FlowRunTable> {
-
-  /**
-   * When the flow was started. This is the minimum of currently known
-   * application start times.
-   */
-  MIN_START_TIME(FlowRunColumnFamily.INFO, "min_start_time",
-      AggregationOperation.GLOBAL_MIN, new LongConverter()),
-
-  /**
-   * When the flow ended. This is the maximum of currently known application end
-   * times.
-   */
-  MAX_END_TIME(FlowRunColumnFamily.INFO, "max_end_time",
-      AggregationOperation.GLOBAL_MAX, new LongConverter()),
-
-  /**
-   * The version of the flow that this run belongs to.
-   */
-  FLOW_VERSION(FlowRunColumnFamily.INFO, "flow_version", null);
-
-  private final ColumnHelper<FlowRunTable> column;
-  private final ColumnFamily<FlowRunTable> columnFamily;
-  private final String columnQualifier;
-  private final byte[] columnQualifierBytes;
-  private final AggregationOperation aggOp;
-
-  private FlowRunColumn(ColumnFamily<FlowRunTable> columnFamily,
-      String columnQualifier, AggregationOperation aggOp) {
-    this(columnFamily, columnQualifier, aggOp,
-        GenericConverter.getInstance());
-  }
-
-  private FlowRunColumn(ColumnFamily<FlowRunTable> columnFamily,
-      String columnQualifier, AggregationOperation aggOp,
-      ValueConverter converter) {
-    this.columnFamily = columnFamily;
-    this.columnQualifier = columnQualifier;
-    this.aggOp = aggOp;
-    // Future-proof by ensuring the right column prefix hygiene.
-    this.columnQualifierBytes = Bytes.toBytes(Separator.SPACE
-        .encode(columnQualifier));
-    this.column = new ColumnHelper<FlowRunTable>(columnFamily, converter, true);
-  }
-
-  /**
-   * @return the column name value
-   */
-  private String getColumnQualifier() {
-    return columnQualifier;
-  }
-
-  @Override
-  public byte[] getColumnQualifierBytes() {
-    return columnQualifierBytes.clone();
-  }
-
-  @Override
-  public byte[] getColumnFamilyBytes() {
-    return columnFamily.getBytes();
-  }
-
-  public AggregationOperation getAggregationOperation() {
-    return aggOp;
-  }
-
-  /*
-   * (non-Javadoc)
-   *
-   * @see
-   * org.apache.hadoop.yarn.server.timelineservice.storage.common.Column#store
-   * (byte[], org.apache.hadoop.yarn.server.timelineservice.storage.common.
-   * TypedBufferedMutator, java.lang.Long, java.lang.Object,
-   * org.apache.hadoop.yarn.server.timelineservice.storage.flow.Attribute[])
-   */
-  public void store(byte[] rowKey,
-      TypedBufferedMutator<FlowRunTable> tableMutator, Long timestamp,
-      Object inputValue, Attribute... attributes) throws IOException {
-
-    Attribute[] combinedAttributes =
-        HBaseTimelineStorageUtils.combineAttributes(attributes, aggOp);
-    column.store(rowKey, tableMutator, columnQualifierBytes, timestamp,
-        inputValue, combinedAttributes);
-  }
-
-  public Object readResult(Result result) throws IOException {
-    return column.readResult(result, columnQualifierBytes);
-  }
-
-  @Override
-  public ValueConverter getValueConverter() {
-    return column.getValueConverter();
-  }
-
-}
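
A reading-side sketch for these columns (illustrative; result is assumed to
be an HBase Result for a single flow-run row, and the flow-run coprocessor is
expected to have collapsed the per-application cells):

  // GLOBAL_MIN/GLOBAL_MAX cells aggregate across applications, so these
  // reads return the current min start and max end times for the run.
  Object minStart = FlowRunColumn.MIN_START_TIME.readResult(result);
  Object maxEnd = FlowRunColumn.MAX_END_TIME.readResult(result);
  Object version = FlowRunColumn.FLOW_VERSION.readResult(result);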

http://git-wip-us.apache.org/repos/asf/hadoop/blob/9af30d46/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/flow/FlowRunColumnFamily.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/flow/FlowRunColumnFamily.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/flow/FlowRunColumnFamily.java
deleted file mode 100644
index 8faf5f8..0000000
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/flow/FlowRunColumnFamily.java
+++ /dev/null
@@ -1,54 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.yarn.server.timelineservice.storage.flow;
-
-import org.apache.hadoop.hbase.util.Bytes;
-import org.apache.hadoop.yarn.server.timelineservice.storage.common.ColumnFamily;
-import org.apache.hadoop.yarn.server.timelineservice.storage.common.Separator;
-
-/**
- * Represents the flow run table column families.
- */
-public enum FlowRunColumnFamily implements ColumnFamily<FlowRunTable> {
-
-  /**
-   * Info column family houses known columns, specifically ones included in
-   * columnfamily filters.
-   */
-  INFO("i");
-
-  /**
-   * Byte representation of this column family.
-   */
-  private final byte[] bytes;
-
-  /**
-   * @param value
-   *          create a column family with this name. Must be lower case and
-   *          without spaces.
-   */
-  private FlowRunColumnFamily(String value) {
-    // column families should be lower case and not contain any spaces.
-    this.bytes = Bytes.toBytes(Separator.SPACE.encode(value));
-  }
-
-  public byte[] getBytes() {
-    return Bytes.copy(bytes);
-  }
-
-}




[44/50] [abbrv] hadoop git commit: YARN-7942. Add check for JAAS configuration for Yarn Service. Contributed by Billie Rinaldi

Posted by ha...@apache.org.
YARN-7942. Add check for JAAS configuration for Yarn Service.
           Contributed by Billie Rinaldi


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/95904f6b
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/95904f6b
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/95904f6b

Branch: refs/heads/HDFS-12996
Commit: 95904f6b3ccd1d167088086472eabdd85b2d148d
Parents: 1909690
Author: Eric Yang <ey...@apache.org>
Authored: Thu Feb 22 16:12:40 2018 -0500
Committer: Eric Yang <ey...@apache.org>
Committed: Thu Feb 22 16:12:40 2018 -0500

----------------------------------------------------------------------
 .../client/impl/zk/RegistrySecurity.java        | 44 +++++++++++++++-----
 1 file changed, 33 insertions(+), 11 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/95904f6b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/registry/client/impl/zk/RegistrySecurity.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/registry/client/impl/zk/RegistrySecurity.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/registry/client/impl/zk/RegistrySecurity.java
index bb829d8..5c6c983 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/registry/client/impl/zk/RegistrySecurity.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/registry/client/impl/zk/RegistrySecurity.java
@@ -736,8 +736,10 @@ public class RegistrySecurity extends AbstractService {
    * Apply the security environment to this curator instance. This
    * may include setting up the ZK system properties for SASL
    * @param builder curator builder
+   * @throws IOException if jaas configuration can't be generated or found
    */
-  public void applySecurityEnvironment(CuratorFrameworkFactory.Builder builder) {
+  public void applySecurityEnvironment(CuratorFrameworkFactory.Builder
+      builder) throws IOException {
 
     if (isSecureRegistry()) {
       switch (access) {
@@ -752,16 +754,36 @@ public class RegistrySecurity extends AbstractService {
           break;
 
         case sasl:
-          JaasConfiguration jconf =
-              new JaasConfiguration(jaasClientEntry, principal, keytab);
-          javax.security.auth.login.Configuration.setConfiguration(jconf);
-          setSystemPropertyIfUnset(ZooKeeperSaslClient.ENABLE_CLIENT_SASL_KEY,
-              "true");
-          setSystemPropertyIfUnset(ZooKeeperSaslClient.LOGIN_CONTEXT_NAME_KEY,
-              jaasClientEntry);
-          LOG.info(
-              "Enabling ZK sasl client: jaasClientEntry = " + jaasClientEntry
-                  + ", principal = " + principal + ", keytab = " + keytab);
+          String existingJaasConf = System.getProperty(
+              "java.security.auth.login.config");
+          if (existingJaasConf == null || existingJaasConf.isEmpty()) {
+            if (principal == null || keytab == null) {
+              throw new IOException("SASL is configured for registry, " +
+                  "but neither keytab/principal nor java.security.auth.login" +
+                  ".config system property are specified");
+            }
+            // in this case, keytab and principal are specified and no jaas
+            // config is specified, so we will create one
+            LOG.info(
+                "Enabling ZK sasl client: jaasClientEntry = " + jaasClientEntry
+                    + ", principal = " + principal + ", keytab = " + keytab);
+            JaasConfiguration jconf =
+                new JaasConfiguration(jaasClientEntry, principal, keytab);
+            javax.security.auth.login.Configuration.setConfiguration(jconf);
+            setSystemPropertyIfUnset(ZooKeeperSaslClient.ENABLE_CLIENT_SASL_KEY,
+                "true");
+            setSystemPropertyIfUnset(ZooKeeperSaslClient.LOGIN_CONTEXT_NAME_KEY,
+                jaasClientEntry);
+          } else {
+            // in this case, jaas config is specified so we will not change it
+            LOG.info("Using existing ZK sasl configuration: " +
+                "jaasClientEntry = " + System.getProperty(
+                    ZooKeeperSaslClient.LOGIN_CONTEXT_NAME_KEY, "Client") +
+                ", sasl client = " + System.getProperty(
+                    ZooKeeperSaslClient.ENABLE_CLIENT_SASL_KEY,
+                    ZooKeeperSaslClient.ENABLE_CLIENT_SASL_DEFAULT) +
+                ", jaas = " + existingJaasConf);
+          }
           break;
 
         default:
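
To make the new sasl branch concrete: when a JAAS configuration already
exists, the patch now leaves it untouched. A hedged example of that path (the
file path and entry contents are illustrative, not taken from the commit):

  // Set before the registry service starts; applySecurityEnvironment()
  // will then log and reuse this configuration instead of generating one.
  System.setProperty("java.security.auth.login.config",
      "/etc/hadoop/conf/zk-client-jaas.conf");

  /* where the file contains a stock Krb5 client entry, e.g.:
   * Client {
   *   com.sun.security.auth.module.Krb5LoginModule required
   *   useKeyTab=true
   *   keyTab="/etc/security/keytabs/yarn.service.keytab"
   *   principal="yarn/host.example.com@EXAMPLE.COM";
   * };
   */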




[48/50] [abbrv] hadoop git commit: YARN-7949. [UI2] ArtifactsId should not be a compulsory field for new service. Contributed by Yesha Vora.

Posted by ha...@apache.org.
YARN-7949. [UI2] ArtifactsId should not be a compulsory field for new service. Contributed by Yesha Vora.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/d1cd5736
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/d1cd5736
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/d1cd5736

Branch: refs/heads/HDFS-12996
Commit: d1cd573687fa3466a5ceb9a525141a8c3a8f686f
Parents: cc68395
Author: Sunil G <su...@apache.org>
Authored: Fri Feb 23 16:50:02 2018 +0530
Committer: Sunil G <su...@apache.org>
Committed: Fri Feb 23 16:50:02 2018 +0530

----------------------------------------------------------------------
 .../main/webapp/app/components/service-component-table.js |  2 +-
 .../src/main/webapp/app/models/yarn-servicedef.js         | 10 ++++++----
 .../app/templates/components/service-component-table.hbs  |  2 +-
 3 files changed, 8 insertions(+), 6 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/d1cd5736/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/components/service-component-table.js
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/components/service-component-table.js b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/components/service-component-table.js
index 5a9ae30..23c2cfb 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/components/service-component-table.js
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/components/service-component-table.js
@@ -52,5 +52,5 @@ export default Ember.Component.extend({
     return !Ember.isNone(item);
   },
 
-  isValidCurrentComponent: Ember.computed.and('currentComponent', 'currentComponent.name', 'currentComponent.cpus', 'currentComponent.memory', 'currentComponent.numOfContainers', 'currentComponent.artifactId', 'currentComponent.launchCommand')
+  isValidCurrentComponent: Ember.computed.and('currentComponent', 'currentComponent.name', 'currentComponent.cpus', 'currentComponent.memory', 'currentComponent.numOfContainers', 'currentComponent.launchCommand')
 });

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d1cd5736/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/models/yarn-servicedef.js
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/models/yarn-servicedef.js b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/models/yarn-servicedef.js
index 0439fb4..19c74e1 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/models/yarn-servicedef.js
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/models/yarn-servicedef.js
@@ -189,10 +189,12 @@ export default DS.Model.extend({
     json['number_of_containers'] = record.get('numOfContainers');
     json['launch_command'] = record.get('launchCommand');
     json['dependencies'] = [];
-    json['artifact'] = {
-      id: record.get('artifactId'),
-      type: record.get('artifactType')
-    };
+    if (!Ember.isEmpty(record.get('artifactId'))) {
+      json['artifact'] = {
+        id: record.get('artifactId'),
+        type: record.get('artifactType')
+      };
+    }
     json['resource'] = {
       cpus: record.get('cpus'),
       memory: record.get('memory')

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d1cd5736/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/templates/components/service-component-table.hbs
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/templates/components/service-component-table.hbs b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/templates/components/service-component-table.hbs
index 8f3904d..9d519ae 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/templates/components/service-component-table.hbs
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/templates/components/service-component-table.hbs
@@ -90,7 +90,7 @@
           {{input type="number" min="0" class="form-control" value=currentComponent.numOfContainers}}
         </div>
         <div class="form-group">
-          <label class="required">Artifact Id</label>
+          <label>Artifact Id</label>
           {{input type="text" class="form-control" value=currentComponent.artifactId}}
         </div>
         <div class="form-group">




[08/50] [abbrv] hadoop git commit: YARN-7919. Refactor timelineservice-hbase module into submodules. Contributed by Haibo Chen.

Posted by ha...@apache.org.
http://git-wip-us.apache.org/repos/asf/hadoop/blob/9af30d46/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/apptoflow/AppToFlowColumnFamily.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/apptoflow/AppToFlowColumnFamily.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/apptoflow/AppToFlowColumnFamily.java
deleted file mode 100644
index f3f045e..0000000
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/apptoflow/AppToFlowColumnFamily.java
+++ /dev/null
@@ -1,51 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.yarn.server.timelineservice.storage.apptoflow;
-
-import org.apache.hadoop.hbase.util.Bytes;
-import org.apache.hadoop.yarn.server.timelineservice.storage.common.ColumnFamily;
-import org.apache.hadoop.yarn.server.timelineservice.storage.common.Separator;
-
-/**
- * Represents the app_flow table column families.
- */
-public enum AppToFlowColumnFamily implements ColumnFamily<AppToFlowTable> {
-  /**
-   * Mapping column family houses known columns such as flowName and flowRunId.
-   */
-  MAPPING("m");
-
-  /**
-   * Byte representation of this column family.
-   */
-  private final byte[] bytes;
-
-  /**
-   * @param value create a column family with this name. Must be lower case and
-   *          without spaces.
-   */
-  AppToFlowColumnFamily(String value) {
-    // column families should be lower case and not contain any spaces.
-    this.bytes = Bytes.toBytes(Separator.SPACE.encode(value));
-  }
-
-  public byte[] getBytes() {
-    return Bytes.copy(bytes);
-  }
-
-}

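The enum just removed encodes its column family name once in the constructor and hands out a defensive copy from getBytes(). A minimal, dependency-free sketch of that pattern, using hypothetical names (DemoColumnFamily and its single MAPPING family are illustrative, not HBase API):

import java.nio.charset.StandardCharsets;
import java.util.Arrays;

/** Sketch of the enum-as-column-family pattern. */
enum DemoColumnFamily {
  MAPPING("m");

  private final byte[] bytes;

  DemoColumnFamily(String value) {
    // Encode once at class-load time; family names stay lower case.
    this.bytes = value.getBytes(StandardCharsets.UTF_8);
  }

  public byte[] getBytes() {
    // Defensive copy so callers cannot mutate the shared array.
    return Arrays.copyOf(bytes, bytes.length);
  }
}

The defensive copy mirrors Bytes.copy(bytes) in the original: a caller that scribbles on the returned array cannot corrupt the family name shared by every read and write path.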
http://git-wip-us.apache.org/repos/asf/hadoop/blob/9af30d46/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/apptoflow/AppToFlowColumnPrefix.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/apptoflow/AppToFlowColumnPrefix.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/apptoflow/AppToFlowColumnPrefix.java
deleted file mode 100644
index 752a380..0000000
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/apptoflow/AppToFlowColumnPrefix.java
+++ /dev/null
@@ -1,206 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.yarn.server.timelineservice.storage.apptoflow;
-
-import java.io.IOException;
-import java.util.Map;
-import java.util.NavigableMap;
-
-import org.apache.hadoop.hbase.client.Result;
-import org.apache.hadoop.hbase.util.Bytes;
-import org.apache.hadoop.yarn.server.timelineservice.storage.common.ColumnFamily;
-import org.apache.hadoop.yarn.server.timelineservice.storage.common.ColumnHelper;
-import org.apache.hadoop.yarn.server.timelineservice.storage.common.ColumnPrefix;
-import org.apache.hadoop.yarn.server.timelineservice.storage.common.KeyConverter;
-import org.apache.hadoop.yarn.server.timelineservice.storage.common.Separator;
-import org.apache.hadoop.yarn.server.timelineservice.storage.common.TypedBufferedMutator;
-import org.apache.hadoop.yarn.server.timelineservice.storage.common.ValueConverter;
-import org.apache.hadoop.yarn.server.timelineservice.storage.flow.Attribute;
-
-/**
- * Identifies partially qualified columns for the app-to-flow table.
- */
-public enum AppToFlowColumnPrefix implements ColumnPrefix<AppToFlowTable> {
-
-  /**
-   * The flow name.
-   */
-  FLOW_NAME(AppToFlowColumnFamily.MAPPING, "flow_name"),
-
-  /**
-   * The flow run ID.
-   */
-  FLOW_RUN_ID(AppToFlowColumnFamily.MAPPING, "flow_run_id"),
-
-  /**
-   * The user.
-   */
-  USER_ID(AppToFlowColumnFamily.MAPPING, "user_id");
-
-  private final ColumnHelper<AppToFlowTable> column;
-  private final ColumnFamily<AppToFlowTable> columnFamily;
-  private final String columnPrefix;
-  private final byte[] columnPrefixBytes;
-
-  AppToFlowColumnPrefix(ColumnFamily<AppToFlowTable> columnFamily,
-      String columnPrefix) {
-    this.columnFamily = columnFamily;
-    this.columnPrefix = columnPrefix;
-    if (columnPrefix == null) {
-      this.columnPrefixBytes = null;
-    } else {
-      // Future-proof by ensuring the right column prefix hygiene.
-      this.columnPrefixBytes =
-          Bytes.toBytes(Separator.SPACE.encode(columnPrefix));
-    }
-    this.column = new ColumnHelper<AppToFlowTable>(columnFamily);
-  }
-
-  @Override
-  public byte[] getColumnPrefixBytes(String qualifierPrefix) {
-    return ColumnHelper.getColumnQualifier(
-        columnPrefixBytes, qualifierPrefix);
-  }
-
-  @Override
-  public byte[] getColumnPrefixBytes(byte[] qualifierPrefix) {
-    return ColumnHelper.getColumnQualifier(
-        columnPrefixBytes, qualifierPrefix);
-  }
-
-  @Override
-  public byte[] getColumnFamilyBytes() {
-    return columnFamily.getBytes();
-  }
-
-  @Override
-  public void store(byte[] rowKey,
-      TypedBufferedMutator<AppToFlowTable> tableMutator, byte[] qualifier,
-      Long timestamp, Object inputValue, Attribute... attributes)
-      throws IOException {
-
-    // Null check
-    if (qualifier == null) {
-      throw new IOException("Cannot store column with null qualifier in "
-          + tableMutator.getName().getNameAsString());
-    }
-
-    byte[] columnQualifier = getColumnPrefixBytes(qualifier);
-
-    column.store(rowKey, tableMutator, columnQualifier, timestamp, inputValue,
-        attributes);
-  }
-
-  @Override
-  public void store(byte[] rowKey,
-      TypedBufferedMutator<AppToFlowTable> tableMutator, String qualifier,
-      Long timestamp, Object inputValue, Attribute... attributes)
-      throws IOException {
-
-    // Null check
-    if (qualifier == null) {
-      throw new IOException("Cannot store column with null qualifier in "
-          + tableMutator.getName().getNameAsString());
-    }
-
-    byte[] columnQualifier = getColumnPrefixBytes(qualifier);
-
-    column.store(rowKey, tableMutator, columnQualifier, timestamp, inputValue,
-        attributes);
-  }
-
-  @Override
-  public ValueConverter getValueConverter() {
-    return column.getValueConverter();
-  }
-
-  @Override
-  public Object readResult(Result result, String qualifier) throws IOException {
-    byte[] columnQualifier =
-        ColumnHelper.getColumnQualifier(columnPrefixBytes, qualifier);
-    return column.readResult(result, columnQualifier);
-  }
-
-  @Override
-  public <K> Map<K, Object> readResults(Result result,
-      KeyConverter<K> keyConverter)
-      throws IOException {
-    return column.readResults(result, columnPrefixBytes, keyConverter);
-  }
-
-  @Override
-  public <K, V> NavigableMap<K, NavigableMap<Long, V>>
-      readResultsWithTimestamps(Result result,
-      KeyConverter<K> keyConverter) throws IOException {
-    return column.readResultsWithTimestamps(result, columnPrefixBytes,
-        keyConverter);
-  }
-
-  /**
-   * Retrieve an {@link AppToFlowColumnPrefix} given a name, or null if there
-   * is no match. The following holds true: {@code columnFor(x) == columnFor(y)}
-   * if and only if {@code x.equals(y)} or {@code (x == y == null)}
-   *
-   * @param columnPrefix Name of the column to retrieve
-   * @return the corresponding {@link AppToFlowColumnPrefix} or null
-   */
-  public static final AppToFlowColumnPrefix columnFor(String columnPrefix) {
-
-    // Match column based on value, assume column family matches.
-    for (AppToFlowColumnPrefix afcp : AppToFlowColumnPrefix.values()) {
-      // Find a match based only on name.
-      if (afcp.columnPrefix.equals(columnPrefix)) {
-        return afcp;
-      }
-    }
-
-    // Default to null
-    return null;
-  }
-
-  /**
-   * Retrieve an {@link AppToFlowColumnPrefix} given a name, or null if there
-   * is no match. The following holds true:
-   * {@code columnFor(a,x) == columnFor(b,y)} if and only if
-   * {@code (x == y == null)} or {@code a.equals(b) & x.equals(y)}
-   *
-   * @param columnFamily The columnFamily for which to retrieve the column.
-   * @param columnPrefix Name of the column to retrieve
-   * @return the corresponding {@link AppToFlowColumnPrefix} or null if both
-   *         arguments don't match.
-   */
-  public static final AppToFlowColumnPrefix columnFor(
-      AppToFlowColumnFamily columnFamily, String columnPrefix) {
-
-    // TODO: needs unit test to confirm and need to update javadoc to explain
-    // null prefix case.
-
-    for (AppToFlowColumnPrefix afcp : AppToFlowColumnPrefix.values()) {
-      // Find a match based on column family and name.
-      if (afcp.columnFamily.equals(columnFamily)
-          && (((columnPrefix == null) && (afcp.columnPrefix == null)) ||
-          (afcp.columnPrefix.equals(columnPrefix)))) {
-        return afcp;
-      }
-    }
-
-    // Default to null
-    return null;
-  }
-}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/9af30d46/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/apptoflow/AppToFlowRowKey.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/apptoflow/AppToFlowRowKey.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/apptoflow/AppToFlowRowKey.java
deleted file mode 100644
index 146c475..0000000
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/apptoflow/AppToFlowRowKey.java
+++ /dev/null
@@ -1,58 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.yarn.server.timelineservice.storage.apptoflow;
-
-import org.apache.hadoop.yarn.server.timelineservice.storage.common.AppIdKeyConverter;
-import org.apache.hadoop.yarn.server.timelineservice.storage.common.KeyConverter;
-
-/**
- * Represents a row key for the app_flow table, which is the app id.
- */
-public class AppToFlowRowKey {
-  private final String appId;
-  private final KeyConverter<String> appIdKeyConverter =
-      new AppIdKeyConverter();
-
-  public AppToFlowRowKey(String appId) {
-    this.appId = appId;
-  }
-
-  public String getAppId() {
-    return appId;
-  }
-
-  /**
-   * Constructs a row key prefix for the app_flow table.
-   *
-   * @return byte array with the row key
-   */
-  public byte[] getRowKey() {
-    return appIdKeyConverter.encode(appId);
-  }
-
-  /**
-   * Given the raw row key as bytes, returns the row key as an object.
-   *
-   * @param rowKey a rowkey represented as a byte array.
-   * @return an <cite>AppToFlowRowKey</cite> object.
-   */
-  public static AppToFlowRowKey parseRowKey(byte[] rowKey) {
-    String appId = new AppIdKeyConverter().decode(rowKey);
-    return new AppToFlowRowKey(appId);
-  }
-}

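AppToFlowRowKey is a thin wrapper whose whole contract is an encode/decode round trip over the app id. A self-contained sketch of that contract, with plain UTF-8 standing in for AppIdKeyConverter and a hypothetical DemoRowKey class:

import java.nio.charset.StandardCharsets;

/** Sketch of a row key that round-trips an app id. */
final class DemoRowKey {
  private final String appId;

  DemoRowKey(String appId) { this.appId = appId; }

  String getAppId() { return appId; }

  byte[] getRowKey() {
    // The real class delegates to AppIdKeyConverter#encode here.
    return appId.getBytes(StandardCharsets.UTF_8);
  }

  static DemoRowKey parseRowKey(byte[] rowKey) {
    return new DemoRowKey(new String(rowKey, StandardCharsets.UTF_8));
  }

  public static void main(String[] args) {
    byte[] raw = new DemoRowKey("application_1452828720457_0001").getRowKey();
    // Decoding must recover exactly the app id that was encoded.
    System.out.println(DemoRowKey.parseRowKey(raw).getAppId());
  }
}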
http://git-wip-us.apache.org/repos/asf/hadoop/blob/9af30d46/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/apptoflow/AppToFlowTable.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/apptoflow/AppToFlowTable.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/apptoflow/AppToFlowTable.java
deleted file mode 100644
index 04da5c7..0000000
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/apptoflow/AppToFlowTable.java
+++ /dev/null
@@ -1,125 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.yarn.server.timelineservice.storage.apptoflow;
-
-
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hbase.HColumnDescriptor;
-import org.apache.hadoop.hbase.HTableDescriptor;
-import org.apache.hadoop.hbase.TableName;
-import org.apache.hadoop.hbase.client.Admin;
-import org.apache.hadoop.hbase.regionserver.BloomType;
-import org.apache.hadoop.yarn.conf.YarnConfiguration;
-import org.apache.hadoop.yarn.server.timelineservice.storage.common.BaseTable;
-import org.apache.hadoop.yarn.server.timelineservice.storage.common.TimelineHBaseSchemaConstants;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import java.io.IOException;
-
-/**
- * The app_flow table, which stores the appId to flowName and flowRunId
- * mapping in its single mapping column family.
- *
- * Example app_flow table record:
- *
- * <pre>
- * |--------------------------------------|
- * |  Row       | Column Family           |
- * |  key       | mapping                 |
- * |--------------------------------------|
- * | appId      | flow_name!cluster1:     |
- * |            | foo@daily_hive_report   |
- * |            |                         |
- * |            | flow_run_id!cluster1:   |
- * |            | 1452828720457           |
- * |            |                         |
- * |            | user_id!cluster1:       |
- * |            | admin                   |
- * |            |                         |
- * |            | flow_name!cluster2:     |
- * |            | bar@ad_hoc_query        |
- * |            |                         |
- * |            | flow_run_id!cluster2:   |
- * |            | 1452828498752           |
- * |            |                         |
- * |            | user_id!cluster2:       |
- * |            | joe                     |
- * |            |                         |
- * |--------------------------------------|
- * </pre>
- *
- * It is possible (although unlikely) in a multi-cluster environment that there
- * may be more than one application for a given app id. Different clusters are
- * recorded as different sets of columns.
- */
-public class AppToFlowTable extends BaseTable<AppToFlowTable> {
-  /** app_flow prefix. */
-  private static final String PREFIX =
-      YarnConfiguration.TIMELINE_SERVICE_PREFIX + "app-flow";
-
-  /** config param name that specifies the app_flow table name. */
-  public static final String TABLE_NAME_CONF_NAME = PREFIX + ".table.name";
-
-  /** default value for app_flow table name. */
-  private static final String DEFAULT_TABLE_NAME = "timelineservice.app_flow";
-
-  private static final Logger LOG =
-      LoggerFactory.getLogger(AppToFlowTable.class);
-
-  public AppToFlowTable() {
-    super(TABLE_NAME_CONF_NAME, DEFAULT_TABLE_NAME);
-  }
-
-  /*
-   * (non-Javadoc)
-   *
-   * @see
-   * org.apache.hadoop.yarn.server.timelineservice.storage.BaseTable#createTable
-   * (org.apache.hadoop.hbase.client.Admin,
-   * org.apache.hadoop.conf.Configuration)
-   */
-  public void createTable(Admin admin, Configuration hbaseConf)
-      throws IOException {
-
-    TableName table = getTableName(hbaseConf);
-    if (admin.tableExists(table)) {
-      // do not disable / delete existing table
-      // similar to the approach taken by map-reduce jobs when
-      // output directory exists
-      throw new IOException("Table " + table.getNameAsString()
-          + " already exists.");
-    }
-
-    HTableDescriptor appToFlowTableDescp = new HTableDescriptor(table);
-    HColumnDescriptor mappCF =
-        new HColumnDescriptor(AppToFlowColumnFamily.MAPPING.getBytes());
-    mappCF.setBloomFilterType(BloomType.ROWCOL);
-    appToFlowTableDescp.addFamily(mappCF);
-
-    appToFlowTableDescp
-        .setRegionSplitPolicyClassName(
-            "org.apache.hadoop.hbase.regionserver.KeyPrefixRegionSplitPolicy");
-    appToFlowTableDescp.setValue("KeyPrefixRegionSplitPolicy.prefix_length",
-        TimelineHBaseSchemaConstants.USERNAME_SPLIT_KEY_PREFIX_LENGTH);
-    admin.createTable(appToFlowTableDescp,
-        TimelineHBaseSchemaConstants.getUsernameSplits());
-    LOG.info("Status of table creation for " + table.getNameAsString() + "="
-        + admin.tableExists(table));
-  }
-}

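The layout documented above multiplexes several clusters into a single appId row by folding the cluster into the column qualifier (flow_name!cluster1, flow_run_id!cluster2, and so on). A small sketch of that idea over plain maps, with hypothetical names:

import java.util.HashMap;
import java.util.Map;

/** One row keyed by appId; column qualifier = prefix + "!" + cluster. */
final class DemoAppToFlowRow {
  private final Map<String, String> columns = new HashMap<>();

  void put(String prefix, String cluster, String value) {
    columns.put(prefix + "!" + cluster, value);
  }

  String get(String prefix, String cluster) {
    return columns.get(prefix + "!" + cluster);
  }

  public static void main(String[] args) {
    DemoAppToFlowRow row = new DemoAppToFlowRow();
    row.put("flow_name", "cluster1", "foo@daily_hive_report");
    row.put("flow_name", "cluster2", "bar@ad_hoc_query");
    // Same app id on two clusters, no collision: the qualifier disambiguates.
    System.out.println(row.get("flow_name", "cluster1"));
    System.out.println(row.get("flow_name", "cluster2"));
  }
}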
http://git-wip-us.apache.org/repos/asf/hadoop/blob/9af30d46/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/apptoflow/package-info.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/apptoflow/package-info.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/apptoflow/package-info.java
deleted file mode 100644
index f01d982..0000000
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/apptoflow/package-info.java
+++ /dev/null
@@ -1,28 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-/**
- * Package org.apache.hadoop.yarn.server.timelineservice.storage.apptoflow
- * contains classes related to implementation for app to flow table.
- */
-@InterfaceAudience.Private
-@InterfaceStability.Unstable
-package org.apache.hadoop.yarn.server.timelineservice.storage.apptoflow;
-
-import org.apache.hadoop.classification.InterfaceAudience;
-import org.apache.hadoop.classification.InterfaceStability;
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hadoop/blob/9af30d46/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/common/AppIdKeyConverter.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/common/AppIdKeyConverter.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/common/AppIdKeyConverter.java
deleted file mode 100644
index 51604f0..0000000
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/common/AppIdKeyConverter.java
+++ /dev/null
@@ -1,97 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.yarn.server.timelineservice.storage.common;
-
-import org.apache.hadoop.hbase.util.Bytes;
-import org.apache.hadoop.yarn.api.records.ApplicationId;
-
-/**
- * Encodes and decodes {@link ApplicationId} for row keys.
- * The app ID is stored in the row key as 12 bytes: the cluster timestamp
- * section (long, 8 bytes) followed by the sequence id section (int, 4 bytes).
- */
-public final class AppIdKeyConverter implements KeyConverter<String> {
-
-  public AppIdKeyConverter() {
-  }
-
-  /*
-   * (non-Javadoc)
-   *
-   * Converts/encodes a string app Id into a byte representation for (row) keys.
-   * For conversion, we extract cluster timestamp and sequence id from the
- * string app id (via ApplicationId#fromString(String)) and then store it
- * in a byte array of length 12 (8 bytes (long) for cluster timestamp
- * followed by 4 bytes (int) for sequence id). Both cluster
-   * timestamp and sequence id are inverted so that the most recent cluster
-   * timestamp and highest sequence id appears first in the table (i.e.
-   * application id appears in a descending order).
-   *
-   * @see
-   * org.apache.hadoop.yarn.server.timelineservice.storage.common.KeyConverter
-   * #encode(java.lang.Object)
-   */
-  @Override
-  public byte[] encode(String appIdStr) {
-    ApplicationId appId = ApplicationId.fromString(appIdStr);
-    byte[] appIdBytes = new byte[getKeySize()];
-    byte[] clusterTs = Bytes.toBytes(
-        LongConverter.invertLong(appId.getClusterTimestamp()));
-    System.arraycopy(clusterTs, 0, appIdBytes, 0, Bytes.SIZEOF_LONG);
-    byte[] seqId = Bytes.toBytes(
-        HBaseTimelineStorageUtils.invertInt(appId.getId()));
-    System.arraycopy(seqId, 0, appIdBytes, Bytes.SIZEOF_LONG, Bytes.SIZEOF_INT);
-    return appIdBytes;
-  }
-
-  /*
-   * (non-Javadoc)
-   *
-   * Converts/decodes a 12 byte representation of app id for (row) keys to an
-   * app id in string format which can be returned back to client.
-   * For decoding, 12 bytes are interpreted as 8 bytes of inverted cluster
-   * timestamp(long) followed by 4 bytes of inverted sequence id(int). Calls
-   * ApplicationId#toString to generate string representation of app id.
-   *
-   * @see
-   * org.apache.hadoop.yarn.server.timelineservice.storage.common.KeyConverter
-   * #decode(byte[])
-   */
-  @Override
-  public String decode(byte[] appIdBytes) {
-    if (appIdBytes.length != getKeySize()) {
-      throw new IllegalArgumentException("Invalid app id in byte format");
-    }
-    long clusterTs = LongConverter.invertLong(
-        Bytes.toLong(appIdBytes, 0, Bytes.SIZEOF_LONG));
-    int seqId = HBaseTimelineStorageUtils.invertInt(
-        Bytes.toInt(appIdBytes, Bytes.SIZEOF_LONG, Bytes.SIZEOF_INT));
-    return HBaseTimelineStorageUtils.convertApplicationIdToString(
-        ApplicationId.newInstance(clusterTs, seqId));
-  }
-
-  /**
-   * Returns the size of app id after encoding.
-   *
-   * @return size of app id after encoding.
-   */
-  public static int getKeySize() {
-    return Bytes.SIZEOF_LONG + Bytes.SIZEOF_INT;
-  }
-}

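The comments above state that both key sections are inverted so that newer applications sort first. Assuming invertLong/invertInt subtract from Long.MAX_VALUE and Integer.MAX_VALUE (which is how the helpers referenced above behave), the hypothetical sketch below builds the 12-byte key and checks the resulting order with an unsigned byte comparison, which is how HBase orders row keys:

import java.math.BigInteger;
import java.nio.ByteBuffer;

/** Sketch of the inverted 12-byte app id key (8-byte ts + 4-byte seq). */
final class DemoAppIdKey {
  static byte[] encode(long clusterTs, int seqId) {
    return ByteBuffer.allocate(12)
        .putLong(Long.MAX_VALUE - clusterTs)   // inverted timestamp
        .putInt(Integer.MAX_VALUE - seqId)     // inverted sequence id
        .array();
  }

  // Unsigned comparison; for equal-length keys this matches the
  // lexicographic byte order HBase uses for rows.
  static int compare(byte[] a, byte[] b) {
    return new BigInteger(1, a).compareTo(new BigInteger(1, b));
  }

  public static void main(String[] args) {
    byte[] older = encode(1452828498752L, 1);
    byte[] newer = encode(1452828720457L, 1);
    // The newer application yields the smaller key, so it scans first.
    System.out.println(compare(newer, older) < 0);  // true
  }
}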
http://git-wip-us.apache.org/repos/asf/hadoop/blob/9af30d46/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/common/BaseTable.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/common/BaseTable.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/common/BaseTable.java
deleted file mode 100644
index 93d809c..0000000
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/common/BaseTable.java
+++ /dev/null
@@ -1,167 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.yarn.server.timelineservice.storage.common;
-
-import java.io.IOException;
-
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hbase.TableName;
-import org.apache.hadoop.hbase.client.Admin;
-import org.apache.hadoop.hbase.client.BufferedMutator;
-import org.apache.hadoop.hbase.client.Connection;
-import org.apache.hadoop.hbase.client.Get;
-import org.apache.hadoop.hbase.client.Result;
-import org.apache.hadoop.hbase.client.ResultScanner;
-import org.apache.hadoop.hbase.client.Scan;
-import org.apache.hadoop.hbase.client.Table;
-import org.apache.hadoop.yarn.conf.YarnConfiguration;
-
-/**
- * Implements behavior common to tables used in the timeline service storage. It
- * is thread-safe, and can be used by multiple threads concurrently.
- *
- * @param <T> reference to the table instance class itself for type safety.
- */
-public abstract class BaseTable<T> {
-
-  /**
-   * Name of config variable that is used to point to this table.
-   */
-  private final String tableNameConfName;
-
-  /**
-   * Unless the configuration overrides, this will be the default name for the
-   * table when it is created.
-   */
-  private final String defaultTableName;
-
-  /**
-   * @param tableNameConfName name of config variable that is used to point to
-   *          this table.
-   * @param defaultTableName Default table name if table from config is not
-   *          found.
-   */
-  protected BaseTable(String tableNameConfName, String defaultTableName) {
-    this.tableNameConfName = tableNameConfName;
-    this.defaultTableName = defaultTableName;
-  }
-
-  /**
-   * Used to create a type-safe mutator for this table.
-   *
-   * @param hbaseConf used to read table name.
-   * @param conn used to create a table from.
-   * @return a type safe {@link BufferedMutator} for the entity table.
-   * @throws IOException if any exception occurs while creating mutator for the
-   *     table.
-   */
-  public TypedBufferedMutator<T> getTableMutator(Configuration hbaseConf,
-      Connection conn) throws IOException {
-
-    TableName tableName = this.getTableName(hbaseConf);
-
-    // Plain buffered mutator
-    BufferedMutator bufferedMutator = conn.getBufferedMutator(tableName);
-
-    // Now make this thing type safe.
-    // This is how service initialization should hang on to this variable, with
-    // the proper type
-    TypedBufferedMutator<T> table =
-        new BufferedMutatorDelegator<T>(bufferedMutator);
-
-    return table;
-  }
-
-  /**
-   * @param hbaseConf used to read settings that override defaults
-   * @param conn used to create table from
-   * @param scan that specifies what you want to read from this table.
-   * @return scanner for the table.
-   * @throws IOException if any exception occurs while getting the scanner.
-   */
-  public ResultScanner getResultScanner(Configuration hbaseConf,
-      Connection conn, Scan scan) throws IOException {
-    Table table = conn.getTable(getTableName(hbaseConf));
-    return table.getScanner(scan);
-  }
-
-  /**
-   *
-   * @param hbaseConf used to read settings that override defaults
-   * @param conn used to create table from
-   * @param get that specifies what single row you want to get from this table
-   * @return result of get operation
-   * @throws IOException if any exception occurs while getting the result.
-   */
-  public Result getResult(Configuration hbaseConf, Connection conn, Get get)
-      throws IOException {
-    Table table = conn.getTable(getTableName(hbaseConf));
-    return table.get(get);
-  }
-
-  /**
-   * Get the table name for the input table.
-   *
-   * @param conf HBase configuration from which table name will be fetched.
-   * @param tableName name of the table to be fetched
-   * @return A {@link TableName} object.
-   */
-  public static TableName getTableName(Configuration conf, String tableName) {
-    String tableSchemaPrefix =  conf.get(
-        YarnConfiguration.TIMELINE_SERVICE_HBASE_SCHEMA_PREFIX_NAME,
-        YarnConfiguration.DEFAULT_TIMELINE_SERVICE_HBASE_SCHEMA_PREFIX);
-    return TableName.valueOf(tableSchemaPrefix + tableName);
-  }
-
-  /**
-   * Get the table name for this table.
-   *
-   * @param conf HBase configuration from which table name will be fetched.
-   * @return A {@link TableName} object.
-   */
-  public TableName getTableName(Configuration conf) {
-    String tableName = conf.get(tableNameConfName, defaultTableName);
-    return getTableName(conf, tableName);
-  }
-
-  /**
-   * Get the table name based on the input config parameters.
-   *
-   * @param conf HBase configuration from which table name will be fetched.
-   * @param tableNameInConf the table name parameter in conf.
-   * @param defaultTableName the default table name.
-   * @return A {@link TableName} object.
-   */
-  public static TableName getTableName(Configuration conf,
-      String tableNameInConf, String defaultTableName) {
-    String tableName = conf.get(tableNameInConf, defaultTableName);
-    return getTableName(conf, tableName);
-  }
-
-  /**
-   * Used to create the table in HBase. Should be called only once (per HBase
-   * instance).
-   *
-   * @param admin Used for doing HBase table operations.
-   * @param hbaseConf Hbase configuration.
-   * @throws IOException if any exception occurs while creating the table.
-   */
-  public abstract void createTable(Admin admin, Configuration hbaseConf)
-      throws IOException;
-
-}

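BaseTable resolves a physical table name in two steps: a per-table config lookup that falls back to a built-in default, then a schema-wide prefix prepended in the static getTableName(conf, tableName). A dependency-free sketch of that resolution; the config key string and the "prod." default here are assumptions for illustration, not verified constants:

import java.util.Map;

/** Sketch of BaseTable's two-step table-name resolution. */
final class DemoTableNameResolver {
  static String resolve(Map<String, String> conf,
      String tableNameConfName, String defaultTableName) {
    // Step 1: per-table override, else the built-in default.
    String tableName = conf.getOrDefault(tableNameConfName, defaultTableName);
    // Step 2: schema-wide prefix shared by all timeline service tables
    // (key and default are assumed values for this sketch).
    String prefix = conf.getOrDefault(
        "yarn.timeline-service.hbase-schema.prefix", "prod.");
    return prefix + tableName;
  }

  public static void main(String[] args) {
    System.out.println(resolve(Map.of(),
        "yarn.timeline-service.app-flow.table.name",
        "timelineservice.app_flow"));
    // -> prod.timelineservice.app_flow
  }
}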
http://git-wip-us.apache.org/repos/asf/hadoop/blob/9af30d46/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/common/BufferedMutatorDelegator.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/common/BufferedMutatorDelegator.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/common/BufferedMutatorDelegator.java
deleted file mode 100644
index cf469a5..0000000
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/common/BufferedMutatorDelegator.java
+++ /dev/null
@@ -1,73 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.yarn.server.timelineservice.storage.common;
-
-import java.io.IOException;
-import java.util.List;
-
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hbase.TableName;
-import org.apache.hadoop.hbase.client.BufferedMutator;
-import org.apache.hadoop.hbase.client.Mutation;
-
-/**
- * To be used to wrap an actual {@link BufferedMutator} in a type safe manner.
- *
- * @param <T> The class referring to the table to be written to.
- */
-class BufferedMutatorDelegator<T> implements TypedBufferedMutator<T> {
-
-  private final BufferedMutator bufferedMutator;
-
-  /**
-   * @param bufferedMutator the mutator to be wrapped for delegation. Shall not
-   *          be null.
-   */
-  public BufferedMutatorDelegator(BufferedMutator bufferedMutator) {
-    this.bufferedMutator = bufferedMutator;
-  }
-
-  public TableName getName() {
-    return bufferedMutator.getName();
-  }
-
-  public Configuration getConfiguration() {
-    return bufferedMutator.getConfiguration();
-  }
-
-  public void mutate(Mutation mutation) throws IOException {
-    bufferedMutator.mutate(mutation);
-  }
-
-  public void mutate(List<? extends Mutation> mutations) throws IOException {
-    bufferedMutator.mutate(mutations);
-  }
-
-  public void close() throws IOException {
-    bufferedMutator.close();
-  }
-
-  public void flush() throws IOException {
-    bufferedMutator.flush();
-  }
-
-  public long getWriteBufferSize() {
-    return bufferedMutator.getWriteBufferSize();
-  }
-
-}

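The delegator above never uses its type parameter at runtime; T is purely a compile-time tag that stops a mutator created for one table from being passed to code that writes another. A sketch of that phantom-type trick with hypothetical tag types:

/** Phantom-typed writer: T only tags the target table at compile time. */
final class DemoTypedWriter<T> {
  private final StringBuilder buffer = new StringBuilder();

  void mutate(String row, String value) {
    // Buffered like BufferedMutator; sent as a batch on flush().
    buffer.append(row).append('=').append(value).append('\n');
  }

  String flush() {
    String batch = buffer.toString();
    buffer.setLength(0);
    return batch;
  }
}

final class AppFlowTableTag { }   // hypothetical table tag types
final class EntityTableTag { }

final class DemoTypedWriterUsage {
  static void writeAppFlow(DemoTypedWriter<AppFlowTableTag> w) {
    w.mutate("app_0001", "foo@daily_hive_report");
  }

  public static void main(String[] args) {
    DemoTypedWriter<AppFlowTableTag> ok = new DemoTypedWriter<>();
    writeAppFlow(ok);                        // compiles
    DemoTypedWriter<EntityTableTag> wrong = new DemoTypedWriter<>();
    // writeAppFlow(wrong);                  // would be a compile error
    System.out.print(ok.flush());
  }
}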
http://git-wip-us.apache.org/repos/asf/hadoop/blob/9af30d46/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/common/Column.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/common/Column.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/common/Column.java
deleted file mode 100644
index 90f2de4..0000000
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/common/Column.java
+++ /dev/null
@@ -1,80 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.yarn.server.timelineservice.storage.common;
-
-import java.io.IOException;
-
-import org.apache.hadoop.hbase.client.Result;
-import org.apache.hadoop.yarn.server.timelineservice.storage.flow.Attribute;
-
-/**
- * A Column represents the way to store a fully qualified column in a specific
- * table.
- */
-public interface Column<T> {
-
-  /**
-   * Sends a Mutation to the table. The mutations will be buffered and sent over
-   * the wire as part of a batch.
-   *
-   * @param rowKey identifying the row to write. Nothing gets written when null.
-   * @param tableMutator used to modify the underlying HBase table. Caller is
-   *          responsible to pass a mutator for the table that actually has this
-   *          column.
-   * @param timestamp version timestamp. When null the server timestamp will be
-   *          used.
-   * @param attributes Map of attributes for this mutation. used in the
-   *     coprocessor to set/read the cell tags. Can be null.
-   * @param inputValue the value to write to the rowKey and column qualifier.
-   *          Nothing gets written when null.
-   * @throws IOException if there is any exception encountered during store.
-   */
-  void store(byte[] rowKey, TypedBufferedMutator<T> tableMutator,
-      Long timestamp, Object inputValue, Attribute... attributes)
-      throws IOException;
-
-  /**
-   * Get the latest version of this specified column. Note: this call clones the
-   * value content of the hosting {@link org.apache.hadoop.hbase.Cell Cell}.
-   *
-   * @param result Cannot be null
-   * @return result object (can be cast to whatever object was written to), or
-   *         null when result doesn't contain this column.
-   * @throws IOException if there is any exception encountered while reading
-   *     result.
-   */
-  Object readResult(Result result) throws IOException;
-
-  /**
-   * Returns column family name(as bytes) associated with this column.
-   * @return a byte array encoding column family for this column qualifier.
-   */
-  byte[] getColumnFamilyBytes();
-
-  /**
-   * Get byte representation for this column qualifier.
-   * @return a byte array representing column qualifier.
-   */
-  byte[] getColumnQualifierBytes();
-
-  /**
-   * Returns value converter implementation associated with this column.
-   * @return a {@link ValueConverter} implementation.
-   */
-  ValueConverter getValueConverter();
-}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hadoop/blob/9af30d46/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/common/ColumnFamily.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/common/ColumnFamily.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/common/ColumnFamily.java
deleted file mode 100644
index 452adcd..0000000
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/common/ColumnFamily.java
+++ /dev/null
@@ -1,34 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.yarn.server.timelineservice.storage.common;
-
-/**
- * Type safe column family.
- *
- * @param <T> refers to the table for which this column family is used.
- */
-public interface ColumnFamily<T> {
-
-  /**
-   * Keep a local copy if you need to avoid overhead of repeated cloning.
-   *
-   * @return a clone of the byte representation of the column family.
-   */
-  byte[] getBytes();
-
-}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hadoop/blob/9af30d46/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/common/ColumnHelper.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/common/ColumnHelper.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/common/ColumnHelper.java
deleted file mode 100644
index 9f95d44..0000000
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/common/ColumnHelper.java
+++ /dev/null
@@ -1,414 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.yarn.server.timelineservice.storage.common;
-
-import java.io.IOException;
-import java.util.HashMap;
-import java.util.Map;
-import java.util.Map.Entry;
-import java.util.NavigableMap;
-import java.util.TreeMap;
-
-import org.apache.hadoop.hbase.client.Put;
-import org.apache.hadoop.hbase.client.Result;
-import org.apache.hadoop.hbase.util.Bytes;
-import org.apache.hadoop.yarn.server.timelineservice.storage.flow.AggregationCompactionDimension;
-import org.apache.hadoop.yarn.server.timelineservice.storage.flow.Attribute;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-/**
- * This class is meant to be used only by explicit Columns, and not to be
- * written to directly by clients.
- *
- * @param <T> refers to the table.
- */
-public class ColumnHelper<T> {
-  private static final Logger LOG =
-      LoggerFactory.getLogger(ColumnHelper.class);
-
-  private final ColumnFamily<T> columnFamily;
-
-  /**
-   * Local copy of bytes representation of columnFamily so that we can avoid
-   * cloning a new copy over and over.
-   */
-  private final byte[] columnFamilyBytes;
-
-  private final ValueConverter converter;
-
-  private final boolean supplementTs;
-
-  public ColumnHelper(ColumnFamily<T> columnFamily) {
-    this(columnFamily, GenericConverter.getInstance());
-  }
-
-  public ColumnHelper(ColumnFamily<T> columnFamily, ValueConverter converter) {
-    this(columnFamily, converter, false);
-  }
-
-  /**
-   * @param columnFamily column family implementation.
-   * @param converter converter use to encode/decode values stored in the column
-   *     or column prefix.
-   * @param needSupplementTs flag to indicate if cell timestamp needs to be
-   *     modified for this column by calling
-   *     {@link TimestampGenerator#getSupplementedTimestamp(long, String)}. This
- *     would be required for columns (such as metrics in the flow run table)
- *     where potential collisions can occur due to the same timestamp.
-   */
-  public ColumnHelper(ColumnFamily<T> columnFamily, ValueConverter converter,
-      boolean needSupplementTs) {
-    this.columnFamily = columnFamily;
-    columnFamilyBytes = columnFamily.getBytes();
-    if (converter == null) {
-      this.converter = GenericConverter.getInstance();
-    } else {
-      this.converter = converter;
-    }
-    this.supplementTs = needSupplementTs;
-  }
-
-  /**
-   * Sends a Mutation to the table. The mutations will be buffered and sent over
-   * the wire as part of a batch.
-   *
-   * @param rowKey
-   *          identifying the row to write. Nothing gets written when null.
-   * @param tableMutator
-   *          used to modify the underlying HBase table
-   * @param columnQualifier
-   *          column qualifier. Nothing gets written when null.
-   * @param timestamp
- *          version timestamp. When null, the current timestamp multiplied by
- *          TimestampGenerator.TS_MULTIPLIER and supplemented with the last 3
- *          digits of the app id will be used
-   * @param inputValue
-   *          the value to write to the rowKey and column qualifier. Nothing
-   *          gets written when null.
-   * @param attributes Attributes to be set for HBase Put.
-   * @throws IOException if any problem occurs during store operation(sending
-   *          mutation to table).
-   */
-  public void store(byte[] rowKey, TypedBufferedMutator<?> tableMutator,
-      byte[] columnQualifier, Long timestamp, Object inputValue,
-      Attribute... attributes) throws IOException {
-    if ((rowKey == null) || (columnQualifier == null) || (inputValue == null)) {
-      return;
-    }
-    Put p = new Put(rowKey);
-    timestamp = getPutTimestamp(timestamp, attributes);
-    p.addColumn(columnFamilyBytes, columnQualifier, timestamp,
-        converter.encodeValue(inputValue));
-    if ((attributes != null) && (attributes.length > 0)) {
-      for (Attribute attribute : attributes) {
-        p.setAttribute(attribute.getName(), attribute.getValue());
-      }
-    }
-    tableMutator.mutate(p);
-  }
-
-  /*
-   * Figures out the cell timestamp used in the Put for storing.
-   * Will supplement the timestamp if required. This is typically done for the
-   * flow run table. If we supplement the timestamp, we left shift it and
-   * supplement it with the app id so that there are no collisions in the flow
-   * run table's cells.
-   */
-  private long getPutTimestamp(Long timestamp, Attribute[] attributes) {
-    if (timestamp == null) {
-      timestamp = System.currentTimeMillis();
-    }
-    if (!this.supplementTs) {
-      return timestamp;
-    } else {
-      String appId = getAppIdFromAttributes(attributes);
-      long supplementedTS = TimestampGenerator.getSupplementedTimestamp(
-          timestamp, appId);
-      return supplementedTS;
-    }
-  }
-
-  private String getAppIdFromAttributes(Attribute[] attributes) {
-    if (attributes == null) {
-      return null;
-    }
-    String appId = null;
-    for (Attribute attribute : attributes) {
-      if (AggregationCompactionDimension.APPLICATION_ID.toString().equals(
-          attribute.getName())) {
-        appId = Bytes.toString(attribute.getValue());
-      }
-    }
-    return appId;
-  }
-
-  /**
-   * @return the column family for this column implementation.
-   */
-  public ColumnFamily<T> getColumnFamily() {
-    return columnFamily;
-  }
-
-  /**
-   * Get the latest version of this specified column. Note: this call clones the
-   * value content of the hosting {@link org.apache.hadoop.hbase.Cell Cell}.
-   *
-   * @param result from which to read the value. Cannot be null
-   * @param columnQualifierBytes referring to the column to be read.
-   * @return latest version of the specified column of whichever object was
-   *         written.
-   * @throws IOException if any problem occurs while reading result.
-   */
-  public Object readResult(Result result, byte[] columnQualifierBytes)
-      throws IOException {
-    if (result == null || columnQualifierBytes == null) {
-      return null;
-    }
-
-    // Would have preferred to be able to use getValueAsByteBuffer and get a
-    // ByteBuffer to avoid copy, but GenericObjectMapper doesn't seem to like
-    // that.
-    byte[] value = result.getValue(columnFamilyBytes, columnQualifierBytes);
-    return converter.decodeValue(value);
-  }
-
-  /**
-   * @param result from which to reads data with timestamps
-   * @param columnPrefixBytes optional prefix to limit columns. If null all
-   *          columns are returned.
-   * @param <K> identifies the type of column name(indicated by type of key
-   *     converter).
-   * @param <V> the type of the values. The values will be cast into that type.
-   * @param keyConverter used to convert column bytes to the appropriate key
-   *     type.
- * @return the cell values at each respective time in the form
-   *         {@literal {idA={timestamp1->value1}, idA={timestamp2->value2},
-   *         idB={timestamp3->value3}, idC={timestamp1->value4}}}
-   * @throws IOException if any problem occurs while reading results.
-   */
-  @SuppressWarnings("unchecked")
-  public <K, V> NavigableMap<K, NavigableMap<Long, V>>
-      readResultsWithTimestamps(Result result, byte[] columnPrefixBytes,
-          KeyConverter<K> keyConverter) throws IOException {
-
-    NavigableMap<K, NavigableMap<Long, V>> results = new TreeMap<>();
-
-    if (result != null) {
-      NavigableMap<
-          byte[], NavigableMap<byte[], NavigableMap<Long, byte[]>>> resultMap =
-              result.getMap();
-
-      NavigableMap<byte[], NavigableMap<Long, byte[]>> columnCellMap =
-          resultMap.get(columnFamilyBytes);
-      // could be that there is no such column family.
-      if (columnCellMap != null) {
-        for (Entry<byte[], NavigableMap<Long, byte[]>> entry : columnCellMap
-            .entrySet()) {
-          K converterColumnKey = null;
-          if (columnPrefixBytes == null) {
-            if (LOG.isDebugEnabled()) {
-              LOG.debug("null prefix was specified; returning all columns");
-            }
-            try {
-              converterColumnKey = keyConverter.decode(entry.getKey());
-            } catch (IllegalArgumentException iae) {
-              LOG.error("Illegal column found, skipping this column.", iae);
-              continue;
-            }
-          } else {
-            // A non-null prefix means columns are actually of the form
-            // prefix!columnNameRemainder
-            byte[][] columnNameParts =
-                Separator.QUALIFIERS.split(entry.getKey(), 2);
-            byte[] actualColumnPrefixBytes = columnNameParts[0];
-            if (Bytes.equals(columnPrefixBytes, actualColumnPrefixBytes)
-                && columnNameParts.length == 2) {
-              try {
-                // This is the prefix that we want
-                converterColumnKey = keyConverter.decode(columnNameParts[1]);
-              } catch (IllegalArgumentException iae) {
-                LOG.error("Illegal column found, skipping this column.", iae);
-                continue;
-              }
-            }
-          }
-
-          // If this column has the prefix we want
-          if (converterColumnKey != null) {
-            NavigableMap<Long, V> cellResults =
-                new TreeMap<Long, V>();
-            NavigableMap<Long, byte[]> cells = entry.getValue();
-            if (cells != null) {
-              for (Entry<Long, byte[]> cell : cells.entrySet()) {
-                V value =
-                    (V) converter.decodeValue(cell.getValue());
-                Long ts = supplementTs ? TimestampGenerator.
-                    getTruncatedTimestamp(cell.getKey()) : cell.getKey();
-                cellResults.put(ts, value);
-              }
-            }
-            results.put(converterColumnKey, cellResults);
-          }
-        } // for entry : columnCellMap
-      } // if columnCellMap != null
-    } // if result != null
-    return results;
-  }
-
-  /**
-   * @param <K> identifies the type of column name(indicated by type of key
-   *     converter).
-   * @param result from which to read columns
-   * @param columnPrefixBytes optional prefix to limit columns. If null all
-   *        columns are returned.
-   * @param keyConverter used to convert column bytes to the appropriate key
-   *          type.
-   * @return the latest values of columns in the column family. If the column
- *         prefix is null, the column qualifier is returned as Strings. For
- *         non-null column prefix bytes, the column qualifier is returned as
-   *         a list of parts, each part a byte[]. This is to facilitate
-   *         returning byte arrays of values that were not Strings.
-   * @throws IOException if any problem occurs while reading results.
-   */
-  public <K> Map<K, Object> readResults(Result result,
-      byte[] columnPrefixBytes, KeyConverter<K> keyConverter)
-      throws IOException {
-    Map<K, Object> results = new HashMap<K, Object>();
-
-    if (result != null) {
-      Map<byte[], byte[]> columns = result.getFamilyMap(columnFamilyBytes);
-      for (Entry<byte[], byte[]> entry : columns.entrySet()) {
-        byte[] columnKey = entry.getKey();
-        if (columnKey != null && columnKey.length > 0) {
-
-          K converterColumnKey = null;
-          if (columnPrefixBytes == null) {
-            try {
-              converterColumnKey = keyConverter.decode(columnKey);
-            } catch (IllegalArgumentException iae) {
-              LOG.error("Illegal column found, skipping this column.", iae);
-              continue;
-            }
-          } else {
-            // A non-null prefix means columns are actually of the form
-            // prefix!columnNameRemainder
-            byte[][] columnNameParts = Separator.QUALIFIERS.split(columnKey, 2);
-            if (columnNameParts.length > 0) {
-              byte[] actualColumnPrefixBytes = columnNameParts[0];
-              // If this is the prefix that we want
-              if (Bytes.equals(columnPrefixBytes, actualColumnPrefixBytes)
-                  && columnNameParts.length == 2) {
-                try {
-                  converterColumnKey = keyConverter.decode(columnNameParts[1]);
-                } catch (IllegalArgumentException iae) {
-                  LOG.error("Illegal column found, skipping this column.", iae);
-                  continue;
-                }
-              }
-            }
-          } // if-else
-
-          // If the columnPrefix is null (we want all columns), or the actual
-          // prefix matches the given prefix, we want this column
-          if (converterColumnKey != null) {
-            Object value = converter.decodeValue(entry.getValue());
-            // Store the decoded value keyed by the converted column name.
-            // (The key converter has already split/typed the qualifier.)
-            results.put(converterColumnKey, value);
-          }
-        }
-      } // for entry
-    }
-    return results;
-  }
-
-  /**
-   * @param columnPrefixBytes The byte representation for the column prefix.
-   *          Should not contain {@link Separator#QUALIFIERS}.
-   * @param qualifier for the remainder of the column.
-   *          {@link Separator#QUALIFIERS} is permissible in the qualifier
-   *          as it is joined only with the column prefix bytes.
-   * @return fully sanitized column qualifier that is a combination of prefix
-   *         and qualifier. If prefix is null, the result is simply the encoded
-   *         qualifier without any separator.
-   */
-  public static byte[] getColumnQualifier(byte[] columnPrefixBytes,
-      String qualifier) {
-
-    // We don't want column names to have spaces / tabs.
-    byte[] encodedQualifier =
-        Separator.encode(qualifier, Separator.SPACE, Separator.TAB);
-    if (columnPrefixBytes == null) {
-      return encodedQualifier;
-    }
-
-    // Convert qualifier to lower case, strip of separators and tag on column
-    // prefix.
-    byte[] columnQualifier =
-        Separator.QUALIFIERS.join(columnPrefixBytes, encodedQualifier);
-    return columnQualifier;
-  }
-
-  /**
-   * @param columnPrefixBytes The byte representation for the column prefix.
-   *          Should not contain {@link Separator#QUALIFIERS}.
-   * @param qualifier for the remainder of the column.
-   * @return fully sanitized column qualifier that is a combination of prefix
-   *         and qualifier. If prefix is null, the result is simply the encoded
-   *         qualifier without any separator.
-   */
-  public static byte[] getColumnQualifier(byte[] columnPrefixBytes,
-      long qualifier) {
-
-    if (columnPrefixBytes == null) {
-      return Bytes.toBytes(qualifier);
-    }
-
-    // Convert qualifier to lower case, strip of separators and tag on column
-    // prefix.
-    byte[] columnQualifier =
-        Separator.QUALIFIERS.join(columnPrefixBytes, Bytes.toBytes(qualifier));
-    return columnQualifier;
-  }
-
-  public ValueConverter getValueConverter() {
-    return converter;
-  }
-
-  /**
-   * @param columnPrefixBytes The byte representation for the column prefix.
-   *          Should not contain {@link Separator#QUALIFIERS}.
-   * @param qualifier the byte representation for the remainder of the column.
-   * @return fully sanitized column qualifier that is a combination of prefix
-   *         and qualifier. If prefix is null, the result is simply the encoded
-   *         qualifier without any separator.
-   */
-  public static byte[] getColumnQualifier(byte[] columnPrefixBytes,
-      byte[] qualifier) {
-
-    if (columnPrefixBytes == null) {
-      return qualifier;
-    }
-
-    byte[] columnQualifier =
-        Separator.QUALIFIERS.join(columnPrefixBytes, qualifier);
-    return columnQualifier;
-  }
-
-}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/9af30d46/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/common/ColumnPrefix.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/common/ColumnPrefix.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/common/ColumnPrefix.java
deleted file mode 100644
index 89aa013..0000000
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/common/ColumnPrefix.java
+++ /dev/null
@@ -1,145 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.yarn.server.timelineservice.storage.common;
-
-import java.io.IOException;
-import java.util.Map;
-import java.util.NavigableMap;
-
-import org.apache.hadoop.hbase.client.Result;
-import org.apache.hadoop.yarn.server.timelineservice.storage.flow.Attribute;
-
-/**
- * Used to represent a partially qualified column, where the actual column name
- * will be composed of a prefix and the remainder of the column qualifier. The
- * prefix can be null, in which case the column qualifier will be completely
- * determined when the values are stored.
- */
-public interface ColumnPrefix<T> {
-
-  /**
-   * Sends a Mutation to the table. The mutations will be buffered and sent over
-   * the wire as part of a batch.
-   *
-   * @param rowKey identifying the row to write. Nothing gets written when null.
-   * @param tableMutator used to modify the underlying HBase table. Caller is
-   *          responsible to pass a mutator for the table that actually has this
-   *          column.
-   * @param qualifier column qualifier. Nothing gets written when null.
-   * @param timestamp version timestamp. When null the server timestamp will be
-   *          used.
-   * @param attributes attributes for the mutation that are used by the
-   *          coprocessor to set/read the cell tags.
-   * @param inputValue the value to write to the rowKey and column qualifier.
-   *          Nothing gets written when null.
-   * @throws IOException if there is any exception encountered while doing
-   *     store operation(sending mutation to the table).
-   */
-  void store(byte[] rowKey, TypedBufferedMutator<T> tableMutator,
-      byte[] qualifier, Long timestamp, Object inputValue,
-      Attribute... attributes) throws IOException;
-
-  /**
-   * Sends a Mutation to the table. The mutations will be buffered and sent over
-   * the wire as part of a batch.
-   *
-   * @param rowKey identifying the row to write. Nothing gets written when null.
-   * @param tableMutator used to modify the underlying HBase table. Caller is
-   *          responsible to pass a mutator for the table that actually has this
-   *          column.
-   * @param qualifier column qualifier. Nothing gets written when null.
-   * @param timestamp version timestamp. When null the server timestamp will be
-   *          used.
-   * @param attributes attributes for the mutation that are used by the
-   *          coprocessor to set/read the cell tags.
-   * @param inputValue the value to write to the rowKey and column qualifier.
-   *          Nothing gets written when null.
-   * @throws IOException if there is any exception encountered while doing
-   *     store operation(sending mutation to the table).
-   */
-  void store(byte[] rowKey, TypedBufferedMutator<T> tableMutator,
-      String qualifier, Long timestamp, Object inputValue,
-      Attribute... attributes) throws IOException;
-
-  /**
-   * Get the latest version of this specified column. Note: this call clones the
-   * value content of the hosting {@link org.apache.hadoop.hbase.Cell Cell}.
-   *
-   * @param result Cannot be null
-   * @param qualifier column qualifier. Nothing gets read when null.
-   * @return result object (can be cast to whatever object was written to) or
-   *         null when specified column qualifier for this prefix doesn't exist
-   *         in the result.
-   * @throws IOException if there is any exception encountered while reading
-   *     result.
-   */
-  Object readResult(Result result, String qualifier) throws IOException;
-
-  /**
-   *
-   * @param <K> identifies the type of key converter.
-   * @param result from which to read columns.
-   * @param keyConverter used to convert column bytes to the appropriate key
-   *          type
-   * @return the latest values of columns in the column family with this prefix
-   *         (or all of them if the prefix value is null).
-   * @throws IOException if there is any exception encountered while reading
-   *           results.
-   */
-  <K> Map<K, Object> readResults(Result result, KeyConverter<K> keyConverter)
-      throws IOException;
-
-  /**
-   * @param result from which to reads data with timestamps.
-   * @param <K> identifies the type of key converter.
-   * @param <V> the type of the values. The values will be cast into that type.
-   * @param keyConverter used to convert column bytes to the appropriate key
-   *     type.
-   * @return the cell values at each respective time in for form
-   *         {@literal {idA={timestamp1->value1}, idA={timestamp2->value2},
-   *         idB={timestamp3->value3}, idC={timestamp1->value4}}}
-   * @throws IOException if there is any exception encountered while reading
-   *     result.
-   */
-  <K, V> NavigableMap<K, NavigableMap<Long, V>> readResultsWithTimestamps(
-      Result result, KeyConverter<K> keyConverter) throws IOException;
-
-  /**
-   * @param qualifierPrefix Column qualifier or prefix of qualifier.
-   * @return a byte array encoding column prefix and qualifier/prefix passed.
-   */
-  byte[] getColumnPrefixBytes(String qualifierPrefix);
-
-  /**
-   * @param qualifierPrefix Column qualifier or prefix of qualifier.
-   * @return a byte array encoding column prefix and qualifier/prefix passed.
-   */
-  byte[] getColumnPrefixBytes(byte[] qualifierPrefix);
-
-  /**
-   * Returns column family name(as bytes) associated with this column prefix.
-   * @return a byte array encoding column family for this prefix.
-   */
-  byte[] getColumnFamilyBytes();
-
-  /**
-   * Returns value converter implementation associated with this column prefix.
-   * @return a {@link ValueConverter} implementation.
-   */
-  ValueConverter getValueConverter();
-}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hadoop/blob/9af30d46/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/common/EventColumnName.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/common/EventColumnName.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/common/EventColumnName.java
deleted file mode 100644
index 8445575..0000000
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/common/EventColumnName.java
+++ /dev/null
@@ -1,63 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.yarn.server.timelineservice.storage.common;
-
-/**
- * Encapsulates information about Event column names for application and entity
- * tables. Used while encoding/decoding event column names.
- */
-public class EventColumnName {
-
-  private final String id;
-  private final Long timestamp;
-  private final String infoKey;
-  private final KeyConverter<EventColumnName> eventColumnNameConverter =
-      new EventColumnNameConverter();
-
-  public EventColumnName(String id, Long timestamp, String infoKey) {
-    this.id = id;
-    this.timestamp = timestamp;
-    this.infoKey = infoKey;
-  }
-
-  public String getId() {
-    return id;
-  }
-
-  public Long getTimestamp() {
-    return timestamp;
-  }
-
-  public String getInfoKey() {
-    return infoKey;
-  }
-
-  /**
-   * @return a byte array with each components/fields separated by
-   *         Separator#VALUES. This leads to an event column name of the form
-   *         eventId=timestamp=infokey. If both timestamp and infokey are null,
-   *         then a qualifier of the form eventId=timestamp= is returned. If
-   *         only infokey is null, then a qualifier of the form eventId= is
-   *         returned. These prefix forms are useful for queries that intend to
-   *         retrieve more than one specific column name.
-   */
-  public byte[] getColumnQualifier() {
-    return eventColumnNameConverter.encode(this);
-  }
-
-}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/9af30d46/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/common/EventColumnNameConverter.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/common/EventColumnNameConverter.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/common/EventColumnNameConverter.java
deleted file mode 100644
index d3ef897..0000000
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/common/EventColumnNameConverter.java
+++ /dev/null
@@ -1,99 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements. See the NOTICE file distributed with this
- * work for additional information regarding copyright ownership. The ASF
- * licenses this file to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations under
- * the License.
- */
-
-package org.apache.hadoop.yarn.server.timelineservice.storage.common;
-
-import org.apache.hadoop.hbase.util.Bytes;
-
-/**
- * Encodes and decodes event column names for application and entity tables.
- * The event column name is of the form : eventId=timestamp=infokey.
- * If info is not associated with the event, event column name is of the form :
- * eventId=timestamp=
- * Event timestamp is long and rest are strings.
- * Column prefixes are not part of the eventcolumn name passed for encoding. It
- * is added later, if required in the associated ColumnPrefix implementations.
- */
-public final class EventColumnNameConverter
-    implements KeyConverter<EventColumnName> {
-
-  public EventColumnNameConverter() {
-  }
-
-  // eventId=timestamp=infokey are of types String, Long String
-  // Strings are variable in size (i.e. end whenever separator is encountered).
-  // This is used while decoding and helps in determining where to split.
-  private static final int[] SEGMENT_SIZES = {
-      Separator.VARIABLE_SIZE, Bytes.SIZEOF_LONG, Separator.VARIABLE_SIZE };
-
-  /*
-   * (non-Javadoc)
-   *
-   * Encodes EventColumnName into a byte array with each component/field in
-   * EventColumnName separated by Separator#VALUES. This leads to an event
-   * column name of the form eventId=timestamp=infokey.
-   * If timestamp in passed EventColumnName object is null (eventId is not null)
-   * this returns a column prefix of the form eventId= and if infokey in
-   * EventColumnName is null (other 2 components are not null), this returns a
-   * column name of the form eventId=timestamp=
-   *
-   * @see
-   * org.apache.hadoop.yarn.server.timelineservice.storage.common.KeyConverter
-   * #encode(java.lang.Object)
-   */
-  @Override
-  public byte[] encode(EventColumnName key) {
-    byte[] first = Separator.encode(key.getId(), Separator.SPACE, Separator.TAB,
-        Separator.VALUES);
-    if (key.getTimestamp() == null) {
-      return Separator.VALUES.join(first, Separator.EMPTY_BYTES);
-    }
-    byte[] second = Bytes.toBytes(
-        LongConverter.invertLong(key.getTimestamp()));
-    if (key.getInfoKey() == null) {
-      return Separator.VALUES.join(first, second, Separator.EMPTY_BYTES);
-    }
-    return Separator.VALUES.join(first, second, Separator.encode(
-        key.getInfoKey(), Separator.SPACE, Separator.TAB, Separator.VALUES));
-  }
-
-  /*
-   * (non-Javadoc)
-   *
-   * Decodes an event column name of the form eventId=timestamp= or
-   * eventId=timestamp=infoKey represented in byte format and converts it into
-   * an EventColumnName object.
-   *
-   * @see
-   * org.apache.hadoop.yarn.server.timelineservice.storage.common.KeyConverter
-   * #decode(byte[])
-   */
-  @Override
-  public EventColumnName decode(byte[] bytes) {
-    byte[][] components = Separator.VALUES.split(bytes, SEGMENT_SIZES);
-    if (components.length != 3) {
-      throw new IllegalArgumentException("the column name is not valid");
-    }
-    String id = Separator.decode(Bytes.toString(components[0]),
-        Separator.VALUES, Separator.TAB, Separator.SPACE);
-    Long ts = LongConverter.invertLong(Bytes.toLong(components[1]));
-    String infoKey = components[2].length == 0 ? null :
-        Separator.decode(Bytes.toString(components[2]),
-            Separator.VALUES, Separator.TAB, Separator.SPACE);
-    return new EventColumnName(id, ts, infoKey);
-  }
-}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/9af30d46/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/common/GenericConverter.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/common/GenericConverter.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/common/GenericConverter.java
deleted file mode 100644
index c34bfcb..0000000
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/common/GenericConverter.java
+++ /dev/null
@@ -1,48 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.yarn.server.timelineservice.storage.common;
-
-import java.io.IOException;
-
-import org.apache.hadoop.yarn.server.timeline.GenericObjectMapper;
-
-/**
- * Uses GenericObjectMapper to encode objects as bytes and decode bytes as
- * objects.
- */
-public final class GenericConverter implements ValueConverter {
-  private static final GenericConverter INSTANCE = new GenericConverter();
-
-  private GenericConverter() {
-  }
-
-  public static GenericConverter getInstance() {
-    return INSTANCE;
-  }
-
-  @Override
-  public byte[] encodeValue(Object value) throws IOException {
-    return GenericObjectMapper.write(value);
-  }
-
-  @Override
-  public Object decodeValue(byte[] bytes) throws IOException {
-    return GenericObjectMapper.read(bytes);
-  }
-}
\ No newline at end of file




[39/50] [abbrv] hadoop git commit: YARN-5028. RMStateStore should trim down app state for completed applications. Contributed by Gergo Repas.

Posted by ha...@apache.org.
YARN-5028. RMStateStore should trim down app state for completed applications. Contributed by Gergo Repas.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/92cbbfe7
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/92cbbfe7
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/92cbbfe7

Branch: refs/heads/HDFS-12996
Commit: 92cbbfe79ec009a19a71a7f44329a4b2f9fa9be6
Parents: 004b722
Author: Yufei Gu <yu...@apache.org>
Authored: Wed Feb 21 11:42:26 2018 -0800
Committer: Yufei Gu <yu...@apache.org>
Committed: Wed Feb 21 11:42:51 2018 -0800

----------------------------------------------------------------------
 .../resourcemanager/recovery/RMStateStore.java  | 34 +++++++++-
 .../recovery/RMStateStoreTestBase.java          |  3 +
 .../recovery/TestZKRMStateStore.java            | 66 ++++++++++++++++++++
 3 files changed, 102 insertions(+), 1 deletion(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/92cbbfe7/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/RMStateStore.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/RMStateStore.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/RMStateStore.java
index f0ab324..bbe208d 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/RMStateStore.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/RMStateStore.java
@@ -47,6 +47,7 @@ import org.apache.hadoop.yarn.api.records.ApplicationId;
 import org.apache.hadoop.yarn.api.records.ApplicationSubmissionContext;
 import org.apache.hadoop.yarn.api.records.ReservationId;
 import org.apache.hadoop.yarn.api.records.impl.pb.ApplicationSubmissionContextPBImpl;
+import org.apache.hadoop.yarn.api.records.impl.pb.ContainerLaunchContextPBImpl;
 import org.apache.hadoop.yarn.conf.HAUtil;
 import org.apache.hadoop.yarn.conf.YarnConfiguration;
 import org.apache.hadoop.yarn.event.AsyncDispatcher;
@@ -65,6 +66,7 @@ import org.apache.hadoop.yarn.server.resourcemanager.recovery.records.Applicatio
 import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMApp;
 import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMAppEvent;
 import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMAppEventType;
+import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMAppState;
 import org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.AggregateAppResourceUsage;
 import org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.RMAppAttempt;
 import org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.RMAppAttemptEvent;
@@ -257,6 +259,9 @@ public abstract class RMStateStore extends AbstractService {
           appState.getApplicationSubmissionContext().getApplicationId();
       LOG.info("Updating info for app: " + appId);
       try {
+        if (isAppStateFinal(appState)) {
+          pruneAppState(appState);
+        }
         store.updateApplicationStateInternal(appId, appState);
         if (((RMStateUpdateAppEvent) event).isNotifyApplication()) {
           store.notifyApplication(new RMAppEvent(appId,
@@ -276,7 +281,34 @@ public abstract class RMStateStore extends AbstractService {
         }
       }
       return finalState(isFenced);
-    };
+    }
+
+    private boolean isAppStateFinal(ApplicationStateData appState) {
+      RMAppState state = appState.getState();
+      return state == RMAppState.FINISHED || state == RMAppState.FAILED ||
+          state == RMAppState.KILLED;
+    }
+
+    private void pruneAppState(ApplicationStateData appState) {
+      ApplicationSubmissionContext srcCtx =
+          appState.getApplicationSubmissionContext();
+      ApplicationSubmissionContextPBImpl context =
+          new ApplicationSubmissionContextPBImpl();
+      // most fields in the ApplicationSubmissionContext are not needed,
+      // but the following few need to be present for recovery to succeed
+      context.setApplicationId(srcCtx.getApplicationId());
+      context.setResource(srcCtx.getResource());
+      context.setQueue(srcCtx.getQueue());
+      context.setAMContainerResourceRequests(
+          srcCtx.getAMContainerResourceRequests());
+      context.setApplicationType(srcCtx.getApplicationType());
+      ContainerLaunchContextPBImpl amContainerSpec =
+              new ContainerLaunchContextPBImpl();
+      amContainerSpec.setApplicationACLs(
+              srcCtx.getAMContainerSpec().getApplicationACLs());
+      context.setAMContainerSpec(amContainerSpec);
+      appState.setApplicationSubmissionContext(context);
+    }
   }
 
   private static class RemoveAppTransition implements

http://git-wip-us.apache.org/repos/asf/hadoop/blob/92cbbfe7/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/RMStateStoreTestBase.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/RMStateStoreTestBase.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/RMStateStoreTestBase.java
index 453d805..dbb2148 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/RMStateStoreTestBase.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/RMStateStoreTestBase.java
@@ -53,6 +53,7 @@ import org.apache.hadoop.yarn.api.records.ReservationDefinition;
 import org.apache.hadoop.yarn.api.records.ReservationId;
 import org.apache.hadoop.yarn.api.records.Resource;
 import org.apache.hadoop.yarn.api.records.impl.pb.ApplicationSubmissionContextPBImpl;
+import org.apache.hadoop.yarn.api.records.impl.pb.ContainerLaunchContextPBImpl;
 import org.apache.hadoop.yarn.api.records.impl.pb.ContainerPBImpl;
 import org.apache.hadoop.yarn.conf.YarnConfiguration;
 import org.apache.hadoop.yarn.event.Dispatcher;
@@ -162,6 +163,7 @@ public class RMStateStoreTestBase {
     ApplicationSubmissionContext context =
         new ApplicationSubmissionContextPBImpl();
     context.setApplicationId(appId);
+    context.setAMContainerSpec(new ContainerLaunchContextPBImpl());
 
     RMApp mockApp = mock(RMApp.class);
     when(mockApp.getApplicationId()).thenReturn(appId);
@@ -378,6 +380,7 @@ public class RMStateStoreTestBase {
     ApplicationSubmissionContext dummyContext =
         new ApplicationSubmissionContextPBImpl();
     dummyContext.setApplicationId(dummyAppId);
+    dummyContext.setAMContainerSpec(new ContainerLaunchContextPBImpl());
     ApplicationStateData dummyApp =
         ApplicationStateData.newInstance(appState.getSubmitTime(),
             appState.getStartTime(), appState.getUser(), dummyContext,

http://git-wip-us.apache.org/repos/asf/hadoop/blob/92cbbfe7/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/TestZKRMStateStore.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/TestZKRMStateStore.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/TestZKRMStateStore.java
index 6a8f47d..0a1b152 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/TestZKRMStateStore.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/TestZKRMStateStore.java
@@ -35,7 +35,9 @@ import org.apache.hadoop.service.Service;
 import org.apache.hadoop.test.GenericTestUtils;
 import org.apache.hadoop.yarn.api.records.*;
 import org.apache.hadoop.yarn.api.records.impl.pb.ApplicationSubmissionContextPBImpl;
+import org.apache.hadoop.yarn.api.records.impl.pb.ContainerLaunchContextPBImpl;
 import org.apache.hadoop.yarn.api.records.impl.pb.ContainerPBImpl;
+import org.apache.hadoop.yarn.api.records.impl.pb.ResourcePBImpl;
 import org.apache.hadoop.yarn.conf.HAUtil;
 import org.apache.hadoop.yarn.conf.YarnConfiguration;
 import org.apache.hadoop.yarn.event.Event;
@@ -49,6 +51,7 @@ import org.apache.hadoop.yarn.server.resourcemanager.ResourceManager;
 import org.apache.hadoop.yarn.server.resourcemanager.recovery.RMStateStore.RMState;
 import org.apache.hadoop.yarn.server.resourcemanager.recovery.records.ApplicationAttemptStateData;
 import org.apache.hadoop.yarn.server.resourcemanager.recovery.records.ApplicationStateData;
+import org.apache.hadoop.yarn.server.resourcemanager.recovery.records.impl.pb.ApplicationStateDataPBImpl;
 import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMApp;
 import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMAppEvent;
 import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMAppEventType;
@@ -83,6 +86,7 @@ import static org.mockito.Mockito.spy;
 import static org.mockito.Mockito.when;
 
 import java.io.IOException;
+import java.util.Collections;
 import java.util.HashMap;
 import java.util.HashSet;
 import java.util.List;
@@ -845,6 +849,7 @@ public class TestZKRMStateStore extends RMStateStoreTestBase {
       ApplicationSubmissionContext context =
           new ApplicationSubmissionContextPBImpl();
       context.setApplicationId(appId);
+      context.setAMContainerSpec(new ContainerLaunchContextPBImpl());
       appStateNew = createAppState(context, submitTime, startTime, finishTime,
           true);
     } else {
@@ -1488,4 +1493,65 @@ public class TestZKRMStateStore extends RMStateStoreTestBase {
         tokensWithIndex, sequenceNumber, 3);
     store.close();
   }
+
+  @Test
+  public void testAppSubmissionContextIsPrunedInFinalApplicationState()
+      throws Exception {
+    TestZKRMStateStoreTester zkTester = new TestZKRMStateStoreTester();
+    ApplicationId appId = ApplicationId.fromString("application_1234_0010");
+
+    Configuration conf = createConfForDelegationTokenNodeSplit(1);
+    RMStateStore store = zkTester.getRMStateStore(conf);
+    ApplicationSubmissionContext ctx =
+        new ApplicationSubmissionContextPBImpl();
+    ctx.setApplicationId(appId);
+    ctx.setQueue("a_queue");
+    ContainerLaunchContextPBImpl containerLaunchCtx =
+        new ContainerLaunchContextPBImpl();
+    containerLaunchCtx.setCommands(Collections.singletonList("a_command"));
+    ctx.setAMContainerSpec(containerLaunchCtx);
+    Resource resource = new ResourcePBImpl();
+    resource.setMemorySize(17L);
+    ctx.setResource(resource);
+    Map<String, String> schedulingPropertiesMap =
+        Collections.singletonMap("a_key", "a_value");
+    ctx.setApplicationSchedulingPropertiesMap(schedulingPropertiesMap);
+    ApplicationStateDataPBImpl appState = new ApplicationStateDataPBImpl();
+    appState.setState(RMAppState.RUNNING);
+    appState.setApplicationSubmissionContext(ctx);
+    store.storeApplicationStateInternal(appId, appState);
+
+    RMState rmState = store.loadState();
+    assertEquals(1, rmState.getApplicationState().size());
+    ctx = rmState.getApplicationState().get(appId)
+        .getApplicationSubmissionContext();
+
+    appState.setState(RMAppState.RUNNING);
+    store.handleStoreEvent(new RMStateUpdateAppEvent(appState, false, null));
+
+    rmState = store.loadState();
+    ctx = rmState.getApplicationState().get(appId)
+        .getApplicationSubmissionContext();
+
+    assertEquals("ApplicationSchedulingPropertiesMap should not have been "
+        + "pruned from the application submission context before the "
+        + "FINISHED state",
+        schedulingPropertiesMap, ctx.getApplicationSchedulingPropertiesMap());
+
+    appState.setState(RMAppState.FINISHED);
+    store.handleStoreEvent(new RMStateUpdateAppEvent(appState, false, null));
+
+    rmState = store.loadState();
+    ctx = rmState.getApplicationState().get(appId)
+        .getApplicationSubmissionContext();
+
+    assertEquals(appId, ctx.getApplicationId());
+    assertEquals("a_queue", ctx.getQueue());
+    assertNotNull(ctx.getAMContainerSpec());
+    assertEquals(17L, ctx.getResource().getMemorySize());
+    assertEquals("ApplicationSchedulingPropertiesMap should have been pruned"
+        + " from the application submission context when in FINISHED STATE",
+        Collections.emptyMap(), ctx.getApplicationSchedulingPropertiesMap());
+    store.close();
+  }
 }




[36/50] [abbrv] hadoop git commit: YARN-7223. Document GPU isolation feature. Contributed by Wangda Tan.

Posted by ha...@apache.org.
YARN-7223. Document GPU isolation feature. Contributed by Wangda Tan.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/86b227a1
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/86b227a1
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/86b227a1

Branch: refs/heads/HDFS-12996
Commit: 86b227a1fbe26b992c5498cfdd3b1691b4362ee9
Parents: 121e1e1
Author: Sunil G <su...@apache.org>
Authored: Wed Feb 21 14:16:45 2018 +0530
Committer: Sunil G <su...@apache.org>
Committed: Wed Feb 21 14:16:45 2018 +0530

----------------------------------------------------------------------
 .../src/site/markdown/UsingGpus.md              | 230 +++++++++++++++++++
 1 file changed, 230 insertions(+)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/86b227a1/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/UsingGpus.md
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/UsingGpus.md b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/UsingGpus.md
new file mode 100644
index 0000000..f6000e7
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/UsingGpus.md
@@ -0,0 +1,230 @@
+<!---
+  Licensed under the Apache License, Version 2.0 (the "License");
+  you may not use this file except in compliance with the License.
+  You may obtain a copy of the License at
+
+   http://www.apache.org/licenses/LICENSE-2.0
+
+  Unless required by applicable law or agreed to in writing, software
+  distributed under the License is distributed on an "AS IS" BASIS,
+  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+  See the License for the specific language governing permissions and
+  limitations under the License. See accompanying LICENSE file.
+-->
+
+
+# Using GPU On YARN
+# Prerequisites
+
+- As of now, only Nvidia GPUs are supported by YARN.
+- YARN node managers have to be pre-installed with Nvidia drivers.
+- When Docker is used as the container runtime, nvidia-docker 1.0 needs to be installed (the version of nvidia-docker currently supported by YARN).
+
+# Configs
+
+## GPU scheduling
+
+In `resource-types.xml`
+
+Add the following properties:
+
+```
+<configuration>
+  <property>
+     <name>yarn.resource-types</name>
+     <value>yarn.io/gpu</value>
+  </property>
+</configuration>
+```
+
+In `yarn-site.xml`
+
+`DominantResourceCalculator` MUST be configured to enable GPU scheduling/isolation.
+
+For `Capacity Scheduler`, use the following property to configure `DominantResourceCalculator` (in `capacity-scheduler.xml`):
+
+| Property | Default value |
+| --- | --- |
+| yarn.scheduler.capacity.resource-calculator | org.apache.hadoop.yarn.util.resource.DominantResourceCalculator |
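+
+A minimal sketch of the corresponding `capacity-scheduler.xml` entry (property name from the table above; the value is the standard class name):
+
+```
+<property>
+  <name>yarn.scheduler.capacity.resource-calculator</name>
+  <value>org.apache.hadoop.yarn.util.resource.DominantResourceCalculator</value>
+</property>
+```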
+
+
+## GPU Isolation
+
+### In `yarn-site.xml`
+
+```
+  <property>
+    <name>yarn.nodemanager.resource-plugins</name>
+    <value>yarn.io/gpu</value>
+  </property>
+```
+
+This enables the GPU isolation module on the NodeManager side.
+
+By default, YARN will automatically detect and configure GPUs when the above config is set. The following configs need to be set in `yarn-site.xml` only if the admin has specialized requirements.
+
+**1) Allowed GPU Devices**
+
+| Property | Default value |
+| --- | --- |
+| yarn.nodemanager.resource-plugins.gpu.allowed-gpu-devices | auto |
+
+  Specifies the GPU devices which can be managed by the YARN NodeManager
+  (comma-separated). The number of GPU devices will be reported to the RM
+  to make scheduling decisions. Set to auto (default) to let YARN
+  automatically discover GPU resources from the system.
+
+  Manually specify GPU devices if auto detection of GPU devices fails or
+  the admin only wants a subset of the GPU devices to be managed by YARN.
+  A GPU device is identified by its minor device number and index. A common
+  approach to get the minor device numbers of GPUs is to run `nvidia-smi -q`
+  and search for the `Minor Number` output.
+
+  When minor numbers are specified manually, the admin needs to include the
+  indices of the GPUs as well, in the format
+  `index:minor_number[,index:minor_number...]`. An example of a manual
+  specification is `0:0,1:1,2:2,3:4` to allow the YARN NodeManager to
+  manage the GPU devices with indices `0/1/2/3` and minor numbers `0/1/2/4`.
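+
+  As a sketch (assuming the example device list above), a manual
+  specification would look like this in `yarn-site.xml`:
+
+```
+<property>
+  <name>yarn.nodemanager.resource-plugins.gpu.allowed-gpu-devices</name>
+  <value>0:0,1:1,2:2,3:4</value>
+</property>
+```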
+
+**2) Executable to discover GPUs**
+
+| Property | value |
+| --- | --- |
+| yarn.nodemanager.resource-plugins.gpu.path-to-discovery-executables | /absolute/path/to/nvidia-smi |
+
+When `yarn.nodemanager.resource-plugins.gpu.allowed-gpu-devices=auto` is
+specified, the YARN NodeManager needs to run a GPU discovery binary
+(currently only `nvidia-smi` is supported) to get GPU-related information.
+When the value is empty (default), the YARN NodeManager will try to locate
+the discovery executable itself.
+An example of the config value is: `/usr/local/bin/nvidia-smi`
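+
+A minimal `yarn-site.xml` sketch using the example value above:
+
+```
+<property>
+  <name>yarn.nodemanager.resource-plugins.gpu.path-to-discovery-executables</name>
+  <value>/usr/local/bin/nvidia-smi</value>
+</property>
+```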
+
+**3) Docker Plugin Related Configs**
+
+The following configs can be customized when the user needs to run GPU applications inside a Docker container. They're not required if the admin follows the default installation/configuration of `nvidia-docker`.
+
+| Property | Default value |
+| --- | --- |
+| yarn.nodemanager.resource-plugins.gpu.docker-plugin | nvidia-docker-v1 |
+
+Specifies the Docker command plugin for GPU. By default it uses Nvidia Docker v1.0.
+
+| Property | Default value |
+| --- | --- |
+| yarn.nodemanager.resource-plugins.gpu.docker-plugin.nvidia-docker-v1.endpoint | http://localhost:3476/v1.0/docker/cli |
+
+Specifies the endpoint of `nvidia-docker-plugin`. See the documentation at https://github.com/NVIDIA/nvidia-docker/wiki for more details.
+
+**4) CGroups mount**
+
+GPU isolation uses the CGroups [devices controller](https://www.kernel.org/doc/Documentation/cgroup-v1/devices.txt) to do per-GPU device isolation. The following config should be added to `yarn-site.xml` to automatically mount the CGroups devices subsystem; otherwise the admin has to manually create the devices subfolder in order to use this feature.
+
+| Property | Default value |
+| --- | --- |
+| yarn.nodemanager.linux-container-executor.cgroups.mount | true |
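+
+A sketch of the equivalent `yarn-site.xml` entry (property name and value
+from the table above):
+
+```
+<property>
+  <name>yarn.nodemanager.linux-container-executor.cgroups.mount</name>
+  <value>true</value>
+</property>
+```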
+
+
+### In `container-executor.cfg`
+
+In general, the following config needs to be added to `container-executor.cfg`:
+
+```
+[gpu]
+module.enabled=true
+```
+
+When the user needs to run GPU applications in a non-Docker environment:
+
+```
+[cgroups]
+# This should be the same as yarn.nodemanager.linux-container-executor.cgroups.mount-path inside yarn-site.xml
+root=/sys/fs/cgroup
+# This should be the same as yarn.nodemanager.linux-container-executor.cgroups.hierarchy inside yarn-site.xml
+yarn-hierarchy=yarn
+```
+
+When the user needs to run GPU applications in a Docker environment:
+
+**1) Add GPU related devices to docker section:**
+
+Values are separated by commas; you can get the list by running `ls /dev/nvidia*`.
+
+```
+[docker]
+docker.allowed.devices=/dev/nvidiactl,/dev/nvidia-uvm,/dev/nvidia-uvm-tools,/dev/nvidia1,/dev/nvidia0
+```
+
+**2) Add `nvidia-docker` to volume-driver whitelist.**
+
+```
+[docker]
+...
+docker.allowed.volume-drivers=nvidia-docker
+```
+
+**3) Add `nvidia_driver_<version>` to readonly mounts whitelist.**
+
+```
+[docker]
+...
+docker.allowed.ro-mounts=nvidia_driver_375.66
+```
+
+# Use it
+
+## Distributed-shell + GPU
+
+Distributed shell currently supports specifying additional resource types other than memory and vcores.
+
+### Distributed-shell + GPU without Docker
+
+Run distributed shell without using a Docker container (asks for 2 tasks, each with 3GB memory, 1 vcore, and 2 GPU devices):
+
+```
+yarn jar <path/to/hadoop-yarn-applications-distributedshell.jar> \
+  -jar <path/to/hadoop-yarn-applications-distributedshell.jar> \
+  -shell_command /usr/local/nvidia/bin/nvidia-smi \
+  -container_resources memory-mb=3072,vcores=1,yarn.io/gpu=2 \
+  -num_containers 2
+```
+
+You should be able to see output like the following for each launched container task:
+
+```
+Tue Dec  5 22:21:47 2017
++-----------------------------------------------------------------------------+
+| NVIDIA-SMI 375.66                 Driver Version: 375.66                    |
+|-------------------------------+----------------------+----------------------+
+| GPU  Name        Persistence-M| Bus-Id        Disp.A | Volatile Uncorr. ECC |
+| Fan  Temp  Perf  Pwr:Usage/Cap|         Memory-Usage | GPU-Util  Compute M. |
+|===============================+======================+======================|
+|   0  Tesla P100-PCIE...  Off  | 0000:04:00.0     Off |                    0 |
+| N/A   30C    P0    24W / 250W |      0MiB / 12193MiB |      0%      Default |
++-------------------------------+----------------------+----------------------+
+|   1  Tesla P100-PCIE...  Off  | 0000:82:00.0     Off |                    0 |
+| N/A   34C    P0    25W / 250W |      0MiB / 12193MiB |      0%      Default |
++-------------------------------+----------------------+----------------------+
+
++-----------------------------------------------------------------------------+
+| Processes:                                                       GPU Memory |
+|  GPU       PID  Type  Process name                               Usage      |
+|=============================================================================|
+|  No running processes found                                                 |
++-----------------------------------------------------------------------------+
+```
+
+### Distributed-shell + GPU with Docker
+
+You can also run distributed shell inside a Docker container. `YARN_CONTAINER_RUNTIME_TYPE` and `YARN_CONTAINER_RUNTIME_DOCKER_IMAGE` must be specified to use a Docker container.
+
+```
+yarn jar <path/to/hadoop-yarn-applications-distributedshell.jar> \
+       -jar <path/to/hadoop-yarn-applications-distributedshell.jar> \
+       -shell_env YARN_CONTAINER_RUNTIME_TYPE=docker \
+       -shell_env YARN_CONTAINER_RUNTIME_DOCKER_IMAGE=<docker-image-name> \
+       -shell_command nvidia-smi \
+       -container_resources memory-mb=3072,vcores=1,yarn.io/gpu=2 \
+       -num_containers 2
+```
\ No newline at end of file




[50/50] [abbrv] hadoop git commit: HADOOP-9747. Reduce unnecessary UGI synchronization. Contributed by Daryn Sharp.

Posted by ha...@apache.org.
HADOOP-9747. Reduce unnecessary UGI synchronization. Contributed by Daryn Sharp.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/59cf7588
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/59cf7588
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/59cf7588

Branch: refs/heads/HDFS-12996
Commit: 59cf7588779145ad5850ad63426743dfe03d8347
Parents: 3688e49
Author: Kihwal Lee <ki...@apache.org>
Authored: Fri Feb 23 13:10:56 2018 -0600
Committer: Kihwal Lee <ki...@apache.org>
Committed: Fri Feb 23 13:10:56 2018 -0600

----------------------------------------------------------------------
 .../hadoop/fs/CommonConfigurationKeys.java      |  11 -
 .../hadoop/security/UserGroupInformation.java   | 898 +++++++++----------
 .../src/main/resources/core-default.xml         |  13 -
 .../hadoop/security/TestUGILoginFromKeytab.java | 404 ++++++++-
 .../hadoop/security/TestUGIWithMiniKdc.java     |  54 +-
 .../security/TestUserGroupInformation.java      | 113 ++-
 6 files changed, 942 insertions(+), 551 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/59cf7588/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CommonConfigurationKeys.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CommonConfigurationKeys.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CommonConfigurationKeys.java
index ba6e4e2..043e52a 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CommonConfigurationKeys.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CommonConfigurationKeys.java
@@ -355,17 +355,6 @@ public class CommonConfigurationKeys extends CommonConfigurationKeysPublic {
   public static final String HADOOP_USER_GROUP_METRICS_PERCENTILES_INTERVALS =
     "hadoop.user.group.metrics.percentiles.intervals";
 
-  /* When creating UGI with UserGroupInformation(Subject), treat the passed
-   * subject external if set to true, and assume the owner of the subject
-   * should do the credential renewal.
-   *
-   * This is a temporary config to solve the compatibility issue with
-   * HADOOP-13558 and HADOOP-13805 fix, see the jiras for discussions.
-   */
-  public static final String HADOOP_TREAT_SUBJECT_EXTERNAL_KEY =
-      "hadoop.treat.subject.external";
-  public static final boolean HADOOP_TREAT_SUBJECT_EXTERNAL_DEFAULT = false;
-
   public static final String RPC_METRICS_QUANTILE_ENABLE =
       "rpc.metrics.quantile.enable";
   public static final boolean RPC_METRICS_QUANTILE_ENABLE_DEFAULT = false;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/59cf7588/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/UserGroupInformation.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/UserGroupInformation.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/UserGroupInformation.java
index 726e811..003a51c 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/UserGroupInformation.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/UserGroupInformation.java
@@ -18,8 +18,6 @@
 package org.apache.hadoop.security;
 
 import static org.apache.hadoop.fs.CommonConfigurationKeys.HADOOP_USER_GROUP_METRICS_PERCENTILES_INTERVALS;
-import static org.apache.hadoop.fs.CommonConfigurationKeys.HADOOP_TREAT_SUBJECT_EXTERNAL_KEY;
-import static org.apache.hadoop.fs.CommonConfigurationKeys.HADOOP_TREAT_SUBJECT_EXTERNAL_DEFAULT;
 import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.HADOOP_KERBEROS_MIN_SECONDS_BEFORE_RELOGIN;
 import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.HADOOP_KERBEROS_MIN_SECONDS_BEFORE_RELOGIN_DEFAULT;
 import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.HADOOP_TOKEN_FILES;
@@ -42,12 +40,14 @@ import java.util.ArrayList;
 import java.util.Arrays;
 import java.util.Collection;
 import java.util.Collections;
+import java.util.EnumMap;
 import java.util.HashMap;
 import java.util.Iterator;
 import java.util.List;
 import java.util.Map;
 import java.util.Set;
 import java.util.concurrent.TimeUnit;
+import java.util.concurrent.atomic.AtomicReference;
 
 import javax.security.auth.DestroyFailedException;
 import javax.security.auth.Subject;
@@ -56,6 +56,7 @@ import javax.security.auth.kerberos.KerberosPrincipal;
 import javax.security.auth.kerberos.KerberosTicket;
 import javax.security.auth.login.AppConfigurationEntry;
 import javax.security.auth.login.AppConfigurationEntry.LoginModuleControlFlag;
+import javax.security.auth.login.Configuration.Parameters;
 import javax.security.auth.login.LoginContext;
 import javax.security.auth.login.LoginException;
 import javax.security.auth.spi.LoginModule;
@@ -190,10 +191,8 @@ public class UserGroupInformation {
         }
         return true;
       }
-      Principal user = null;
-      // if we are using kerberos, try it out
-      if (isAuthenticationMethodEnabled(AuthenticationMethod.KERBEROS)) {
-        user = getCanonicalUser(KerberosPrincipal.class);
+      Principal user = getCanonicalUser(KerberosPrincipal.class);
+      if (user != null) {
         if (LOG.isDebugEnabled()) {
           LOG.debug("using kerberos user:"+user);
         }
@@ -222,7 +221,11 @@ public class UserGroupInformation {
 
         User userEntry = null;
         try {
-          userEntry = new User(user.getName());
+          // LoginContext will be attached later unless it's an external
+          // subject.
+          AuthenticationMethod authMethod = (user instanceof KerberosPrincipal)
+            ? AuthenticationMethod.KERBEROS : AuthenticationMethod.SIMPLE;
+          userEntry = new User(user.getName(), authMethod, null);
         } catch (Exception e) {
           throw (LoginException)(new LoginException(e.toString()).initCause(e));
         }
@@ -277,28 +280,6 @@ public class UserGroupInformation {
   private static long kerberosMinSecondsBeforeRelogin;
   /** The configuration to use */
 
-  /*
-   * This config is a temporary one for backward compatibility.
-   * It means whether to treat the subject passed to
-   * UserGroupInformation(Subject) as external. If true,
-   * -  no renewal thread will be created to do the renew credential
-   * -  reloginFromKeytab() and reloginFromTicketCache will not renew
-   *    credential.
-   * and it assumes that the owner of the subject to renew; if false, it means
-   * to retain the old behavior prior to fixing HADOOP-13558 and HADOOP-13805.
-   * The default is false.
-   */
-  private static boolean treatSubjectExternal = false;
-
-  /*
-   * Some test need the renewal thread to be created even if it does
-   *   UserGroupInformation.loginUserFromSubject(subject);
-   * The test code may set this variable to true via
-   *   setEnableRenewThreadCreationForTest(boolean)
-   * method.
-   */
-  private static boolean enableRenewThreadCreationForTest = false;
-
   private static Configuration conf;
 
   
@@ -364,15 +345,6 @@ public class UserGroupInformation {
         metrics.getGroupsQuantiles = getGroupsQuantiles;
       }
     }
-
-    treatSubjectExternal = conf.getBoolean(HADOOP_TREAT_SUBJECT_EXTERNAL_KEY,
-        HADOOP_TREAT_SUBJECT_EXTERNAL_DEFAULT);
-    if (treatSubjectExternal) {
-      LOG.info("Config " + HADOOP_TREAT_SUBJECT_EXTERNAL_KEY + " is set to "
-          + "true, the owner of the subject passed to "
-          + " UserGroupInformation(Subject) is supposed to renew the "
-          + "credential.");
-    }
   }
 
   /**
@@ -389,18 +361,6 @@ public class UserGroupInformation {
 
   @InterfaceAudience.Private
   @VisibleForTesting
-  static void setEnableRenewThreadCreationForTest(boolean b) {
-    enableRenewThreadCreationForTest = b;
-  }
-
-  @InterfaceAudience.Private
-  @VisibleForTesting
-  static boolean getEnableRenewThreadCreationForTest() {
-    return enableRenewThreadCreationForTest;
-  }
-
-  @InterfaceAudience.Private
-  @VisibleForTesting
   public static void reset() {
     authenticationMethod = null;
     conf = null;
@@ -408,7 +368,6 @@ public class UserGroupInformation {
     kerberosMinSecondsBeforeRelogin = 0;
     setLoginUser(null);
     HadoopKerberosName.setRules(null);
-    setEnableRenewThreadCreationForTest(false);
   }
   
   /**
@@ -431,17 +390,13 @@ public class UserGroupInformation {
   /**
    * Information about the logged in user.
    */
-  private static UserGroupInformation loginUser = null;
-  private static String keytabPrincipal = null;
-  private static String keytabFile = null;
+  private static final AtomicReference<UserGroupInformation> loginUserRef =
+    new AtomicReference<>();
 
   private final Subject subject;
   // All non-static fields must be read-only caches that come from the subject.
   private final User user;
-  private final boolean isKeytab;
-  private final boolean isKrbTkt;
-  private final boolean isLoginExternal;
-  
+
   private static String OS_LOGIN_MODULE_NAME;
   private static Class<? extends Principal> OS_PRINCIPAL_CLASS;
   
@@ -540,134 +495,10 @@ public class UserGroupInformation {
       return realUser.toString();
     }
   }
-  
-  /**
-   * A JAAS configuration that defines the login modules that we want
-   * to use for login.
-   */
-  private static class HadoopConfiguration 
-      extends javax.security.auth.login.Configuration {
-    private static final String SIMPLE_CONFIG_NAME = "hadoop-simple";
-    private static final String USER_KERBEROS_CONFIG_NAME = 
-      "hadoop-user-kerberos";
-    private static final String KEYTAB_KERBEROS_CONFIG_NAME = 
-      "hadoop-keytab-kerberos";
 
-    private static final Map<String, String> BASIC_JAAS_OPTIONS =
-      new HashMap<String,String>();
-    static {
-      String jaasEnvVar = System.getenv("HADOOP_JAAS_DEBUG");
-      if (jaasEnvVar != null && "true".equalsIgnoreCase(jaasEnvVar)) {
-        BASIC_JAAS_OPTIONS.put("debug", "true");
-      }
-    }
-    
-    private static final AppConfigurationEntry OS_SPECIFIC_LOGIN =
-      new AppConfigurationEntry(OS_LOGIN_MODULE_NAME,
-                                LoginModuleControlFlag.REQUIRED,
-                                BASIC_JAAS_OPTIONS);
-    private static final AppConfigurationEntry HADOOP_LOGIN =
-      new AppConfigurationEntry(HadoopLoginModule.class.getName(),
-                                LoginModuleControlFlag.REQUIRED,
-                                BASIC_JAAS_OPTIONS);
-    private static final Map<String,String> USER_KERBEROS_OPTIONS = 
-      new HashMap<String,String>();
-    static {
-      if (IBM_JAVA) {
-        USER_KERBEROS_OPTIONS.put("useDefaultCcache", "true");
-      } else {
-        USER_KERBEROS_OPTIONS.put("doNotPrompt", "true");
-        USER_KERBEROS_OPTIONS.put("useTicketCache", "true");
-      }
-      String ticketCache = System.getenv("KRB5CCNAME");
-      if (ticketCache != null) {
-        if (IBM_JAVA) {
-          // The first value searched when "useDefaultCcache" is used.
-          System.setProperty("KRB5CCNAME", ticketCache);
-        } else {
-          USER_KERBEROS_OPTIONS.put("ticketCache", ticketCache);
-        }
-      }
-      USER_KERBEROS_OPTIONS.put("renewTGT", "true");
-      USER_KERBEROS_OPTIONS.putAll(BASIC_JAAS_OPTIONS);
-    }
-    private static final AppConfigurationEntry USER_KERBEROS_LOGIN =
-      new AppConfigurationEntry(KerberosUtil.getKrb5LoginModuleName(),
-                                LoginModuleControlFlag.OPTIONAL,
-                                USER_KERBEROS_OPTIONS);
-    private static final Map<String,String> KEYTAB_KERBEROS_OPTIONS = 
-      new HashMap<String,String>();
-    static {
-      if (IBM_JAVA) {
-        KEYTAB_KERBEROS_OPTIONS.put("credsType", "both");
-      } else {
-        KEYTAB_KERBEROS_OPTIONS.put("doNotPrompt", "true");
-        KEYTAB_KERBEROS_OPTIONS.put("useKeyTab", "true");
-        KEYTAB_KERBEROS_OPTIONS.put("storeKey", "true");
-      }
-      KEYTAB_KERBEROS_OPTIONS.put("refreshKrb5Config", "true");
-      KEYTAB_KERBEROS_OPTIONS.putAll(BASIC_JAAS_OPTIONS);      
-    }
-    private static final AppConfigurationEntry KEYTAB_KERBEROS_LOGIN =
-      new AppConfigurationEntry(KerberosUtil.getKrb5LoginModuleName(),
-                                LoginModuleControlFlag.REQUIRED,
-                                KEYTAB_KERBEROS_OPTIONS);
-    
-    private static final AppConfigurationEntry[] SIMPLE_CONF = 
-      new AppConfigurationEntry[]{OS_SPECIFIC_LOGIN, HADOOP_LOGIN};
-    
-    private static final AppConfigurationEntry[] USER_KERBEROS_CONF =
-      new AppConfigurationEntry[]{OS_SPECIFIC_LOGIN, USER_KERBEROS_LOGIN,
-                                  HADOOP_LOGIN};
-
-    private static final AppConfigurationEntry[] KEYTAB_KERBEROS_CONF =
-      new AppConfigurationEntry[]{KEYTAB_KERBEROS_LOGIN, HADOOP_LOGIN};
-
-    @Override
-    public AppConfigurationEntry[] getAppConfigurationEntry(String appName) {
-      if (SIMPLE_CONFIG_NAME.equals(appName)) {
-        return SIMPLE_CONF;
-      } else if (USER_KERBEROS_CONFIG_NAME.equals(appName)) {
-        return USER_KERBEROS_CONF;
-      } else if (KEYTAB_KERBEROS_CONFIG_NAME.equals(appName)) {
-        if (IBM_JAVA) {
-          KEYTAB_KERBEROS_OPTIONS.put("useKeytab",
-              prependFileAuthority(keytabFile));
-        } else {
-          KEYTAB_KERBEROS_OPTIONS.put("keyTab", keytabFile);
-        }
-        KEYTAB_KERBEROS_OPTIONS.put("principal", keytabPrincipal);
-        return KEYTAB_KERBEROS_CONF;
-      }
-      return null;
-    }
-  }
-
-  private static String prependFileAuthority(String keytabPath) {
-    return keytabPath.startsWith("file://") ? keytabPath
-        : "file://" + keytabPath;
-  }
-
-  /**
-   * Represents a javax.security configuration that is created at runtime.
-   */
-  private static class DynamicConfiguration
-      extends javax.security.auth.login.Configuration {
-    private AppConfigurationEntry[] ace;
-    
-    DynamicConfiguration(AppConfigurationEntry[] ace) {
-      this.ace = ace;
-    }
-    
-    @Override
-    public AppConfigurationEntry[] getAppConfigurationEntry(String appName) {
-      return ace;
-    }
-  }
-
-  private static LoginContext
+  private static HadoopLoginContext
   newLoginContext(String appName, Subject subject,
-    javax.security.auth.login.Configuration loginConf)
+                  HadoopConfiguration loginConf)
       throws LoginException {
     // Temporarily switch the thread's ContextClassLoader to match this
     // class's classloader, so that we can properly load HadoopLoginModule
@@ -676,16 +507,20 @@ public class UserGroupInformation {
     ClassLoader oldCCL = t.getContextClassLoader();
     t.setContextClassLoader(HadoopLoginModule.class.getClassLoader());
     try {
-      return new LoginContext(appName, subject, null, loginConf);
+      return new HadoopLoginContext(appName, subject, loginConf);
     } finally {
       t.setContextClassLoader(oldCCL);
     }
   }
 
-  private LoginContext getLogin() {
-    return user.getLogin();
+  // return the LoginContext only if it's managed by the ugi.  externally
+  // managed login contexts will be ignored.
+  private HadoopLoginContext getLogin() {
+    LoginContext login = user.getLogin();
+    return (login instanceof HadoopLoginContext)
+      ? (HadoopLoginContext)login : null;
   }
-  
+
   private void setLogin(LoginContext login) {
     user.setLogin(login);
   }
@@ -698,31 +533,22 @@ public class UserGroupInformation {
    * @param subject the user's subject
    */
   UserGroupInformation(Subject subject) {
-    this(subject, treatSubjectExternal);
-  }
-
-  /**
-   * Create a UGI from the given subject.
-   * @param subject the subject
-   * @param isLoginExternal if the subject's keytab is managed by other UGI.
-   *                       Setting this to true will prevent UGI from attempting
-   *                       to login the keytab, or to renew it.
-   */
-  private UserGroupInformation(Subject subject, final boolean isLoginExternal) {
     this.subject = subject;
+    // do not access ANY private credentials since they are mutable
+    // during a relogin.  no principal locking necessary since
+    // relogin/logout does not remove User principal.
     this.user = subject.getPrincipals(User.class).iterator().next();
-
-    this.isKeytab = KerberosUtil.hasKerberosKeyTab(subject);
-    this.isKrbTkt = KerberosUtil.hasKerberosTicket(subject);
-    this.isLoginExternal = isLoginExternal;
+    if (user == null || user.getName() == null) {
+      throw new IllegalStateException("Subject does not contain a valid User");
+    }
   }
-  
+
   /**
    * checks if logged in using kerberos
    * @return true if the subject logged via keytab or has a Kerberos TGT
    */
   public boolean hasKerberosCredentials() {
-    return isKeytab || isKrbTkt;
+    return user.getAuthenticationMethod() == AuthenticationMethod.KERBEROS;
   }
 
   /**
@@ -732,8 +558,7 @@ public class UserGroupInformation {
    */
   @InterfaceAudience.Public
   @InterfaceStability.Evolving
-  public synchronized
-  static UserGroupInformation getCurrentUser() throws IOException {
+  public static UserGroupInformation getCurrentUser() throws IOException {
     AccessControlContext context = AccessController.getContext();
     Subject subject = Subject.getSubject(context);
     if (subject == null || subject.getPrincipals(User.class).isEmpty()) {
@@ -779,53 +604,10 @@ public class UserGroupInformation {
     if (!isAuthenticationMethodEnabled(AuthenticationMethod.KERBEROS)) {
       return getBestUGI(null, user);
     }
-    try {
-      Map<String,String> krbOptions = new HashMap<String,String>();
-      if (IBM_JAVA) {
-        krbOptions.put("useDefaultCcache", "true");
-        // The first value searched when "useDefaultCcache" is used.
-        System.setProperty("KRB5CCNAME", ticketCache);
-      } else {
-        krbOptions.put("doNotPrompt", "true");
-        krbOptions.put("useTicketCache", "true");
-        krbOptions.put("useKeyTab", "false");
-        krbOptions.put("ticketCache", ticketCache);
-      }
-      krbOptions.put("renewTGT", "false");
-      krbOptions.putAll(HadoopConfiguration.BASIC_JAAS_OPTIONS);
-      AppConfigurationEntry ace = new AppConfigurationEntry(
-          KerberosUtil.getKrb5LoginModuleName(),
-          LoginModuleControlFlag.REQUIRED,
-          krbOptions);
-      DynamicConfiguration dynConf =
-          new DynamicConfiguration(new AppConfigurationEntry[]{ ace });
-      LoginContext login = newLoginContext(
-          HadoopConfiguration.USER_KERBEROS_CONFIG_NAME, null, dynConf);
-      login.login();
-
-      Subject loginSubject = login.getSubject();
-      Set<Principal> loginPrincipals = loginSubject.getPrincipals();
-      if (loginPrincipals.isEmpty()) {
-        throw new RuntimeException("No login principals found!");
-      }
-      if (loginPrincipals.size() != 1) {
-        LOG.warn("found more than one principal in the ticket cache file " +
-          ticketCache);
-      }
-      User ugiUser = new User(loginPrincipals.iterator().next().getName(),
-          AuthenticationMethod.KERBEROS, login);
-      loginSubject.getPrincipals().add(ugiUser);
-      UserGroupInformation ugi = new UserGroupInformation(loginSubject, false);
-      ugi.setLogin(login);
-      ugi.setAuthenticationMethod(AuthenticationMethod.KERBEROS);
-      return ugi;
-    } catch (LoginException le) {
-      KerberosAuthException kae =
-          new KerberosAuthException(FAILURE_TO_LOGIN, le);
-      kae.setUser(user);
-      kae.setTicketCacheFile(ticketCache);
-      throw kae;
-    }
+    LoginParams params = new LoginParams();
+    params.put(LoginParam.PRINCIPAL, user);
+    params.put(LoginParam.CCACHE, ticketCache);
+    return doSubjectLogin(null, params);
   }
 
   /**
@@ -848,29 +630,40 @@ public class UserGroupInformation {
       throw new KerberosAuthException(SUBJECT_MUST_CONTAIN_PRINCIPAL);
     }
 
-    KerberosPrincipal principal =
-        subject.getPrincipals(KerberosPrincipal.class).iterator().next();
-
-    User ugiUser = new User(principal.getName(),
-        AuthenticationMethod.KERBEROS, null);
-    subject.getPrincipals().add(ugiUser);
-    UserGroupInformation ugi = new UserGroupInformation(subject);
-    ugi.setLogin(null);
-    ugi.setAuthenticationMethod(AuthenticationMethod.KERBEROS);
-    return ugi;
+    // null params indicate external subject login.  no login context will
+    // be attached.
+    return doSubjectLogin(subject, null);
   }
 
   /**
-   * Get the currently logged in user.
+   * Get the currently logged in user.  If no explicit login has occurred,
+   * the user will automatically be logged in with either kerberos credentials
+   * if available, or as the local OS user, based on security settings.
    * @return the logged in user
    * @throws IOException if login fails
    */
   @InterfaceAudience.Public
   @InterfaceStability.Evolving
-  public synchronized 
-  static UserGroupInformation getLoginUser() throws IOException {
+  public static UserGroupInformation getLoginUser() throws IOException {
+    UserGroupInformation loginUser = loginUserRef.get();
+    // a potential race condition exists only for the initial creation of
+    // the login user.  there's no need to penalize all subsequent calls
+    // with synchronization overhead so optimistically create a login user
+    // and discard if we lose the race.
     if (loginUser == null) {
-      loginUserFromSubject(null);
+      UserGroupInformation newLoginUser = createLoginUser(null);
+      do {
+        // it's extremely unlikely that the login user will be non-null
+        // (lost CAS race) yet be nulled again before the subsequent get,
+        // but loop for correctness.
+        if (loginUserRef.compareAndSet(null, newLoginUser)) {
+          loginUser = newLoginUser;
+          // only spawn renewal if this login user is the winner.
+          loginUser.spawnAutoRenewalThreadForUserCreds(false);
+        } else {
+          loginUser = loginUserRef.get();
+        }
+      } while (loginUser == null);
     }
     return loginUser;
   }
@@ -902,30 +695,15 @@ public class UserGroupInformation {
    */
   @InterfaceAudience.Public
   @InterfaceStability.Evolving
-  public synchronized 
-  static void loginUserFromSubject(Subject subject) throws IOException {
-    ensureInitialized();
-    boolean externalSubject = false;
-    try {
-      if (subject == null) {
-        subject = new Subject();
-      } else {
-        if (LOG.isDebugEnabled()) {
-          LOG.debug("Treat subject external: " + treatSubjectExternal
-              + ". When true, assuming keytab is managed extenally since "
-              + " logged in from subject");
-        }
-        externalSubject = treatSubjectExternal;
-      }
-      LoginContext login =
-          newLoginContext(authenticationMethod.getLoginAppName(), 
-                          subject, new HadoopConfiguration());
-      login.login();
+  public static void loginUserFromSubject(Subject subject) throws IOException {
+    setLoginUser(createLoginUser(subject));
+  }
 
-      UserGroupInformation realUser =
-          new UserGroupInformation(subject, externalSubject);
-      realUser.setLogin(login);
-      realUser.setAuthenticationMethod(authenticationMethod);
+  private static
+  UserGroupInformation createLoginUser(Subject subject) throws IOException {
+    UserGroupInformation realUser = doSubjectLogin(subject, null);
+    UserGroupInformation loginUser = null;
+    try {
       // If the HADOOP_PROXY_USER environment variable or property
       // is specified, create a proxy user as the logged in user.
       String proxyUser = System.getenv(HADOOP_PROXY_USER);
@@ -974,38 +752,64 @@ public class UserGroupInformation {
         LOG.debug("Loaded {} tokens", cred.numberOfTokens());
         loginUser.addCredentials(cred);
       }
-      loginUser.spawnAutoRenewalThreadForUserCreds();
-    } catch (LoginException le) {
-      LOG.debug("failure to login", le);
-      throw new KerberosAuthException(FAILURE_TO_LOGIN, le);
+    } catch (IOException ioe) {
+      LOG.debug("failure to load login credentials", ioe);
+      throw ioe;
     }
     if (LOG.isDebugEnabled()) {
       LOG.debug("UGI loginUser:"+loginUser);
-    } 
+    }
+    return loginUser;
   }
 
   @InterfaceAudience.Private
   @InterfaceStability.Unstable
   @VisibleForTesting
-  public synchronized static void setLoginUser(UserGroupInformation ugi) {
+  public static void setLoginUser(UserGroupInformation ugi) {
     // if this is to become stable, should probably logout the currently
     // logged in ugi if it's different
-    loginUser = ugi;
+    loginUserRef.set(ugi);
   }
   
+  private String getKeytab() {
+    HadoopLoginContext login = getLogin();
+    return (login != null)
+      ? login.getConfiguration().getParameters().get(LoginParam.KEYTAB)
+      : null;
+  }
+
+  /**
+   * Is the login managed by the UGI itself or by an external subject?
+   * @return true if managed by UGI.
+   */
+  private boolean isHadoopLogin() {
+    // checks if the private hadoop login context is managing the ugi.
+    return getLogin() != null;
+  }
+
   /**
-   * Is this user logged in from a keytab file?
+   * Is this user logged in from a keytab file managed by the UGI?
    * @return true if the credentials are from a keytab file.
    */
   public boolean isFromKeytab() {
-    return isKeytab;
+    // can't simply check if keytab is present since a relogin failure will
+    // have removed the keytab from priv creds.  instead, check login params.
+    return hasKerberosCredentials() && isHadoopLogin() && getKeytab() != null;
   }
   
   /**
+   * Is this user logged in from a ticket (but no keytab) managed by the UGI?
+   * @return true if the credentials are from a ticket cache.
+   */
+  private boolean isFromTicket() {
+    return hasKerberosCredentials() && isHadoopLogin() && getKeytab() == null;
+  }
+
+  /**
    * Get the Kerberos TGT
    * @return the user's TGT or null if none was found
    */
-  private synchronized KerberosTicket getTGT() {
+  private KerberosTicket getTGT() {
     Set<KerberosTicket> tickets = subject
         .getPrivateCredentials(KerberosTicket.class);
     for (KerberosTicket ticket : tickets) {
@@ -1022,23 +826,20 @@ public class UserGroupInformation {
     return start + (long) ((end - start) * TICKET_RENEW_WINDOW);
   }
 
-  /**
-   * Should relogin if security is enabled using Kerberos, and
-   * the Subject is not owned by another UGI.
-   * @return true if this UGI should relogin
-   */
   private boolean shouldRelogin() {
-    return isSecurityEnabled()
-        && user.getAuthenticationMethod() == AuthenticationMethod.KERBEROS
-        && !isLoginExternal;
+    return hasKerberosCredentials() && isHadoopLogin();
   }
 
-  /**Spawn a thread to do periodic renewals of kerberos credentials*/
-  private void spawnAutoRenewalThreadForUserCreds() {
-    if (getEnableRenewThreadCreationForTest()) {
-      LOG.warn("Spawning thread to auto renew user credential since " +
-          " enableRenewThreadCreationForTest was set to true.");
-    } else if (!shouldRelogin() || isKeytab) {
+  @InterfaceAudience.Private
+  @InterfaceStability.Unstable
+  @VisibleForTesting
+  /**
+   * Spawn a thread to do periodic renewals of kerberos credentials from
+   * a ticket cache.  NEVER directly call this method.
+   * @param force - used by tests to forcibly spawn thread
+   */
+  void spawnAutoRenewalThreadForUserCreds(boolean force) {
+    if (!force && (!shouldRelogin() || isFromKeytab())) {
       return;
     }
 
@@ -1149,38 +950,16 @@ public class UserGroupInformation {
    */
   @InterfaceAudience.Public
   @InterfaceStability.Evolving
-  public synchronized
+  public
   static void loginUserFromKeytab(String user,
                                   String path
                                   ) throws IOException {
     if (!isSecurityEnabled())
       return;
 
-    keytabFile = path;
-    keytabPrincipal = user;
-    Subject subject = new Subject();
-    LoginContext login; 
-    long start = 0;
-    try {
-      login = newLoginContext(HadoopConfiguration.KEYTAB_KERBEROS_CONFIG_NAME,
-            subject, new HadoopConfiguration());
-      start = Time.now();
-      login.login();
-      metrics.loginSuccess.add(Time.now() - start);
-      loginUser = new UserGroupInformation(subject, false);
-      loginUser.setLogin(login);
-      loginUser.setAuthenticationMethod(AuthenticationMethod.KERBEROS);
-    } catch (LoginException le) {
-      if (start > 0) {
-        metrics.loginFailure.add(Time.now() - start);
-      }
-      KerberosAuthException kae = new KerberosAuthException(LOGIN_FAILURE, le);
-      kae.setUser(user);
-      kae.setKeytabFile(path);
-      throw kae;
-    }
-    LOG.info("Login successful for user " + keytabPrincipal
-        + " using keytab file " + keytabFile);
+    setLoginUser(loginUserFromKeytabAndReturnUGI(user, path));
+    LOG.info("Login successful for user " + user
+        + " using keytab file " + path);
   }
 
   /**
@@ -1195,11 +974,11 @@ public class UserGroupInformation {
   @InterfaceAudience.Public
   @InterfaceStability.Evolving
   public void logoutUserFromKeytab() throws IOException {
-    if (!isSecurityEnabled() ||
-        user.getAuthenticationMethod() != AuthenticationMethod.KERBEROS) {
+    if (!hasKerberosCredentials()) {
       return;
     }
-    LoginContext login = getLogin();
+    HadoopLoginContext login = getLogin();
+    String keytabFile = getKeytab();
     if (login == null || keytabFile == null) {
       throw new KerberosAuthException(MUST_FIRST_LOGIN_FROM_KEYTAB);
     }
@@ -1208,9 +987,8 @@ public class UserGroupInformation {
       if (LOG.isDebugEnabled()) {
         LOG.debug("Initiating logout for " + getUserName());
       }
-      synchronized (UserGroupInformation.class) {
-        login.logout();
-      }
+      // hadoop login context internally locks credentials.
+      login.logout();
     } catch (LoginException le) {
       KerberosAuthException kae = new KerberosAuthException(LOGOUT_FAILURE, le);
       kae.setUser(user.toString());
@@ -1218,7 +996,7 @@ public class UserGroupInformation {
       throw kae;
     }
 
-    LOG.info("Logout successful for user " + keytabPrincipal
+    LOG.info("Logout successful for user " + getUserName()
         + " using keytab file " + keytabFile);
   }
   
@@ -1228,18 +1006,8 @@ public class UserGroupInformation {
    * @throws IOException
    * @throws KerberosAuthException if it's a kerberos login exception.
    */
-  public synchronized void checkTGTAndReloginFromKeytab() throws IOException {
-    if (!isSecurityEnabled()
-        || user.getAuthenticationMethod() != AuthenticationMethod.KERBEROS
-        || !isKeytab) {
-      return;
-    }
-    KerberosTicket tgt = getTGT();
-    if (tgt != null && !shouldRenewImmediatelyForTests &&
-        Time.now() < getRefreshTime(tgt)) {
-      return;
-    }
-    reloginFromKeytab();
+  public void checkTGTAndReloginFromKeytab() throws IOException {
+    reloginFromKeytab(true);
   }
 
   // if the first kerberos ticket is not TGT, then remove and destroy it since
@@ -1288,63 +1056,26 @@ public class UserGroupInformation {
    */
   @InterfaceAudience.Public
   @InterfaceStability.Evolving
-  public synchronized void reloginFromKeytab() throws IOException {
-    if (!shouldRelogin() || !isKeytab) {
-      return;
-    }
-
-    long now = Time.now();
-    if (!shouldRenewImmediatelyForTests && !hasSufficientTimeElapsed(now)) {
-      return;
-    }
+  public void reloginFromKeytab() throws IOException {
+    reloginFromKeytab(false);
+  }
 
-    KerberosTicket tgt = getTGT();
-    //Return if TGT is valid and is not going to expire soon.
-    if (tgt != null && !shouldRenewImmediatelyForTests &&
-        now < getRefreshTime(tgt)) {
+  private void reloginFromKeytab(boolean checkTGT) throws IOException {
+    if (!shouldRelogin() || !isFromKeytab()) {
       return;
     }
-
-    LoginContext login = getLogin();
-    if (login == null || keytabFile == null) {
+    HadoopLoginContext login = getLogin();
+    if (login == null) {
       throw new KerberosAuthException(MUST_FIRST_LOGIN_FROM_KEYTAB);
     }
-
-    long start = 0;
-    // register most recent relogin attempt
-    user.setLastLogin(now);
-    try {
-      if (LOG.isDebugEnabled()) {
-        LOG.debug("Initiating logout for " + getUserName());
-      }
-      synchronized (UserGroupInformation.class) {
-        // clear up the kerberos state. But the tokens are not cleared! As per
-        // the Java kerberos login module code, only the kerberos credentials
-        // are cleared
-        login.logout();
-        // login and also update the subject field of this instance to
-        // have the new credentials (pass it to the LoginContext constructor)
-        login = newLoginContext(
-            HadoopConfiguration.KEYTAB_KERBEROS_CONFIG_NAME, getSubject(),
-            new HadoopConfiguration());
-        if (LOG.isDebugEnabled()) {
-          LOG.debug("Initiating re-login for " + keytabPrincipal);
-        }
-        start = Time.now();
-        login.login();
-        fixKerberosTicketOrder();
-        metrics.loginSuccess.add(Time.now() - start);
-        setLogin(login);
-      }
-    } catch (LoginException le) {
-      if (start > 0) {
-        metrics.loginFailure.add(Time.now() - start);
+    if (checkTGT) {
+      KerberosTicket tgt = getTGT();
+      if (tgt != null && !shouldRenewImmediatelyForTests &&
+        Time.now() < getRefreshTime(tgt)) {
+        return;
       }
-      KerberosAuthException kae = new KerberosAuthException(LOGIN_FAILURE, le);
-      kae.setPrincipal(keytabPrincipal);
-      kae.setKeytabFile(keytabFile);
-      throw kae;
     }
+    relogin(login);
   }
 
   /**
@@ -1357,14 +1088,31 @@ public class UserGroupInformation {
    */
   @InterfaceAudience.Public
   @InterfaceStability.Evolving
-  public synchronized void reloginFromTicketCache() throws IOException {
-    if (!shouldRelogin() || !isKrbTkt) {
+  public void reloginFromTicketCache() throws IOException {
+    if (!shouldRelogin() || !isFromTicket()) {
       return;
     }
-    LoginContext login = getLogin();
+    HadoopLoginContext login = getLogin();
     if (login == null) {
       throw new KerberosAuthException(MUST_FIRST_LOGIN);
     }
+    relogin(login);
+  }
+
+  private void relogin(HadoopLoginContext login) throws IOException {
+    // ensure the relogin is atomic to avoid leaving credentials in an
+    // inconsistent state.  prevents other ugi instances, SASL, and SPNEGO
+    // from accessing or altering credentials during the relogin.
+    synchronized(login.getSubjectLock()) {
+      // another racing thread may have beat us to the relogin.
+      if (login == getLogin()) {
+        unprotectedRelogin(login);
+      }
+    }
+  }
+
+  private void unprotectedRelogin(HadoopLoginContext login) throws IOException {
+    assert Thread.holdsLock(login.getSubjectLock());
     long now = Time.now();
     if (!hasSufficientTimeElapsed(now)) {
       return;
@@ -1381,13 +1129,14 @@ public class UserGroupInformation {
       login.logout();
       //login and also update the subject field of this instance to 
       //have the new credentials (pass it to the LoginContext constructor)
-      login = 
-        newLoginContext(HadoopConfiguration.USER_KERBEROS_CONFIG_NAME, 
-            getSubject(), new HadoopConfiguration());
+      login = newLoginContext(
+        login.getAppName(), login.getSubject(), login.getConfiguration());
       if (LOG.isDebugEnabled()) {
         LOG.debug("Initiating re-login for " + getUserName());
       }
       login.login();
+      // this should be unnecessary.  originally added due to improper locking
+      // of the subject during relogin.
       fixKerberosTicketOrder();
       setLogin(login);
     } catch (LoginException le) {
@@ -1405,52 +1154,22 @@ public class UserGroupInformation {
    * @param path the path to the keytab file
    * @throws IOException if the keytab file can't be read
    */
-  public synchronized
+  public
   static UserGroupInformation loginUserFromKeytabAndReturnUGI(String user,
                                   String path
                                   ) throws IOException {
     if (!isSecurityEnabled())
       return UserGroupInformation.getCurrentUser();
-    String oldKeytabFile = null;
-    String oldKeytabPrincipal = null;
 
-    long start = 0;
-    try {
-      oldKeytabFile = keytabFile;
-      oldKeytabPrincipal = keytabPrincipal;
-      keytabFile = path;
-      keytabPrincipal = user;
-      Subject subject = new Subject();
-      
-      LoginContext login = newLoginContext(
-          HadoopConfiguration.KEYTAB_KERBEROS_CONFIG_NAME, subject,
-          new HadoopConfiguration());
-       
-      start = Time.now();
-      login.login();
-      metrics.loginSuccess.add(Time.now() - start);
-      UserGroupInformation newLoginUser =
-          new UserGroupInformation(subject, false);
-      newLoginUser.setLogin(login);
-      newLoginUser.setAuthenticationMethod(AuthenticationMethod.KERBEROS);
-      
-      return newLoginUser;
-    } catch (LoginException le) {
-      if (start > 0) {
-        metrics.loginFailure.add(Time.now() - start);
-      }
-      KerberosAuthException kae = new KerberosAuthException(LOGIN_FAILURE, le);
-      kae.setUser(user);
-      kae.setKeytabFile(path);
-      throw kae;
-    } finally {
-      if(oldKeytabFile != null) keytabFile = oldKeytabFile;
-      if(oldKeytabPrincipal != null) keytabPrincipal = oldKeytabPrincipal;
-    }
+    LoginParams params = new LoginParams();
+    params.put(LoginParam.PRINCIPAL, user);
+    params.put(LoginParam.KEYTAB, path);
+    return doSubjectLogin(null, params);
   }
 
   private boolean hasSufficientTimeElapsed(long now) {
-    if (now - user.getLastLogin() < kerberosMinSecondsBeforeRelogin ) {
+    if (!shouldRenewImmediatelyForTests &&
+        now - user.getLastLogin() < kerberosMinSecondsBeforeRelogin ) {
       LOG.warn("Not attempting to re-login since the last re-login was " +
           "attempted less than " + (kerberosMinSecondsBeforeRelogin/1000) +
           " seconds before. Last Login=" + user.getLastLogin());
@@ -1465,8 +1184,8 @@ public class UserGroupInformation {
    */
   @InterfaceAudience.Public
   @InterfaceStability.Evolving
-  public synchronized static boolean isLoginKeytabBased() throws IOException {
-    return getLoginUser().isKeytab;
+  public static boolean isLoginKeytabBased() throws IOException {
+    return getLoginUser().isFromKeytab();
   }
 
   /**
@@ -1474,7 +1193,7 @@ public class UserGroupInformation {
    * @return true or false
    */
   public static boolean isLoginTicketBased()  throws IOException {
-    return getLoginUser().isKrbTkt;
+    return getLoginUser().isFromTicket();
   }
 
   /**
@@ -1503,7 +1222,7 @@ public class UserGroupInformation {
     }
     Subject subject = new Subject();
     subject.getPrincipals().add(new User(user));
-    UserGroupInformation result = new UserGroupInformation(subject, false);
+    UserGroupInformation result = new UserGroupInformation(subject);
     result.setAuthenticationMethod(authMethod);
     return result;
   }
@@ -1519,7 +1238,7 @@ public class UserGroupInformation {
     SIMPLE(AuthMethod.SIMPLE,
         HadoopConfiguration.SIMPLE_CONFIG_NAME),
     KERBEROS(AuthMethod.KERBEROS,
-        HadoopConfiguration.USER_KERBEROS_CONFIG_NAME),
+        HadoopConfiguration.KERBEROS_CONFIG_NAME),
     TOKEN(AuthMethod.TOKEN),
     CERTIFICATE(null),
     KERBEROS_SSL(null),
@@ -1578,11 +1297,9 @@ public class UserGroupInformation {
     }
     Subject subject = new Subject();
     Set<Principal> principals = subject.getPrincipals();
-    principals.add(new User(user));
+    principals.add(new User(user, AuthenticationMethod.PROXY, null));
     principals.add(new RealUser(realUser));
-    UserGroupInformation result =new UserGroupInformation(subject, false);
-    result.setAuthenticationMethod(AuthenticationMethod.PROXY);
-    return result;
+    return new UserGroupInformation(subject);
   }
 
   /**
@@ -2051,6 +1768,256 @@ public class UserGroupInformation {
   }
 
   /**
+   * Login a subject with the given parameters.  If the subject is null,
+   * the login context used to create the subject will be attached.
+   * @param subject to login, null for new subject.
+   * @param params for login, null for externally managed ugi.
+   * @return UserGroupInformation for subject
+   * @throws IOException
+   */
+  private static UserGroupInformation doSubjectLogin(
+      Subject subject, LoginParams params) throws IOException {
+    ensureInitialized();
+    // initial default login.
+    if (subject == null && params == null) {
+      params = LoginParams.getDefaults();
+    }
+    HadoopConfiguration loginConf = new HadoopConfiguration(params);
+    try {
+      HadoopLoginContext login = newLoginContext(
+        authenticationMethod.getLoginAppName(), subject, loginConf);
+      login.login();
+      UserGroupInformation ugi = new UserGroupInformation(login.getSubject());
+      // attach login context for relogin unless this was a pre-existing
+      // subject.
+      if (subject == null) {
+        params.put(LoginParam.PRINCIPAL, ugi.getUserName());
+        ugi.setLogin(login);
+      }
+      return ugi;
+    } catch (LoginException le) {
+      KerberosAuthException kae =
+        new KerberosAuthException(FAILURE_TO_LOGIN, le);
+      if (params != null) {
+        kae.setPrincipal(params.get(LoginParam.PRINCIPAL));
+        kae.setKeytabFile(params.get(LoginParam.KEYTAB));
+        kae.setTicketCacheFile(params.get(LoginParam.CCACHE));
+      }
+      throw kae;
+    }
+  }
+
+  // parameters associated with kerberos logins.  may be extended to support
+  // additional authentication methods.
+  enum LoginParam {
+    PRINCIPAL,
+    KEYTAB,
+    CCACHE,
+  }
+
+  // explicitly private to prevent external tampering.
+  private static class LoginParams extends EnumMap<LoginParam,String>
+      implements Parameters {
+    LoginParams() {
+      super(LoginParam.class);
+    }
+
+    // do not add null values, nor allow existing values to be overridden.
+    @Override
+    public String put(LoginParam param, String val) {
+      boolean add = val != null && !containsKey(param);
+      return add ? super.put(param, val) : null;
+    }
+
+    static LoginParams getDefaults() {
+      LoginParams params = new LoginParams();
+      params.put(LoginParam.PRINCIPAL, System.getenv("KRB5PRINCIPAL"));
+      params.put(LoginParam.KEYTAB, System.getenv("KRB5KEYTAB"));
+      params.put(LoginParam.CCACHE, System.getenv("KRB5CCNAME"));
+      return params;
+    }
+  }
+
+  // wrapper to allow access to fields necessary to recreate the same login
+  // context for relogin.  explicitly private to prevent external tampering.
+  private static class HadoopLoginContext extends LoginContext {
+    private final String appName;
+    private final HadoopConfiguration conf;
+
+    HadoopLoginContext(String appName, Subject subject,
+                       HadoopConfiguration conf) throws LoginException {
+      super(appName, subject, null, conf);
+      this.appName = appName;
+      this.conf = conf;
+    }
+
+    String getAppName() {
+      return appName;
+    }
+
+    HadoopConfiguration getConfiguration() {
+      return conf;
+    }
+
+    // the locking model for logins cannot rely on ugi instance synchronization
+    // since a subject will be referenced by multiple ugi instances.
+    Object getSubjectLock() {
+      Subject subject = getSubject();
+      // if subject is null, the login context will create the subject
+      // so just lock on this context.
+      return (subject == null) ? this : subject.getPrivateCredentials();
+    }
+
+    @Override
+    public void login() throws LoginException {
+      synchronized(getSubjectLock()) {
+        MutableRate metric = metrics.loginFailure;
+        long start = Time.monotonicNow();
+        try {
+          super.login();
+          metric = metrics.loginSuccess;
+        } finally {
+          metric.add(Time.monotonicNow() - start);
+        }
+      }
+    }
+
+    @Override
+    public void logout() throws LoginException {
+      synchronized(getSubjectLock()) {
+        super.logout();
+      }
+    }
+  }
+
+  /**
+   * A JAAS configuration that defines the login modules that we want
+   * to use for login.
+   */
+  @InterfaceAudience.Private
+  @InterfaceStability.Unstable
+  private static class HadoopConfiguration
+      extends javax.security.auth.login.Configuration {
+    static final String KRB5_LOGIN_MODULE =
+        KerberosUtil.getKrb5LoginModuleName();
+    static final String SIMPLE_CONFIG_NAME = "hadoop-simple";
+    static final String KERBEROS_CONFIG_NAME = "hadoop-kerberos";
+
+    private static final Map<String, String> BASIC_JAAS_OPTIONS =
+        new HashMap<String,String>();
+    static {
+      if ("true".equalsIgnoreCase(System.getenv("HADOOP_JAAS_DEBUG"))) {
+        BASIC_JAAS_OPTIONS.put("debug", "true");
+      }
+    }
+
+    static final AppConfigurationEntry OS_SPECIFIC_LOGIN =
+        new AppConfigurationEntry(
+            OS_LOGIN_MODULE_NAME,
+            LoginModuleControlFlag.REQUIRED,
+            BASIC_JAAS_OPTIONS);
+
+    static final AppConfigurationEntry HADOOP_LOGIN =
+        new AppConfigurationEntry(
+            HadoopLoginModule.class.getName(),
+            LoginModuleControlFlag.REQUIRED,
+            BASIC_JAAS_OPTIONS);
+
+    private final LoginParams params;
+
+    HadoopConfiguration(LoginParams params) {
+      this.params = params;
+    }
+
+    @Override
+    public LoginParams getParameters() {
+      return params;
+    }
+
+    @Override
+    public AppConfigurationEntry[] getAppConfigurationEntry(String appName) {
+      ArrayList<AppConfigurationEntry> entries = new ArrayList<>();
+      // login of external subject passes no params.  technically only
+      // existing credentials should be used but other components expect
+      // the login to succeed with local user fallback if no principal.
+      if (params == null || appName.equals(SIMPLE_CONFIG_NAME)) {
+        entries.add(OS_SPECIFIC_LOGIN);
+      } else if (appName.equals(KERBEROS_CONFIG_NAME)) {
+        // existing semantics are the initial default login allows local user
+        // fallback. this is not allowed when a principal is explicitly
+        // specified or during a relogin.
+        if (!params.containsKey(LoginParam.PRINCIPAL)) {
+          entries.add(OS_SPECIFIC_LOGIN);
+        }
+        entries.add(getKerberosEntry());
+      }
+      entries.add(HADOOP_LOGIN);
+      return entries.toArray(new AppConfigurationEntry[0]);
+    }
+
+    private AppConfigurationEntry getKerberosEntry() {
+      final Map<String,String> options = new HashMap<>(BASIC_JAAS_OPTIONS);
+      LoginModuleControlFlag controlFlag = LoginModuleControlFlag.OPTIONAL;
+      // kerberos login is mandatory if principal is specified.  principal
+      // will not be set for initial default login, but will always be set
+      // for relogins.
+      final String principal = params.get(LoginParam.PRINCIPAL);
+      if (principal != null) {
+        options.put("principal", principal);
+        controlFlag = LoginModuleControlFlag.REQUIRED;
+      }
+
+      // use keytab if given, else fall back to the ticket cache.
+      if (IBM_JAVA) {
+        if (params.containsKey(LoginParam.KEYTAB)) {
+          final String keytab = params.get(LoginParam.KEYTAB);
+          if (keytab != null) {
+            options.put("useKeytab", prependFileAuthority(keytab));
+          } else {
+            options.put("useDefaultKeytab", "true");
+          }
+          options.put("credsType", "both");
+        } else {
+          String ticketCache = params.get(LoginParam.CCACHE);
+          if (ticketCache != null) {
+            options.put("useCcache", prependFileAuthority(ticketCache));
+          } else {
+            options.put("useDefaultCcache", "true");
+          }
+          options.put("renewTGT", "true");
+        }
+      } else {
+        if (params.containsKey(LoginParam.KEYTAB)) {
+          options.put("useKeyTab", "true");
+          final String keytab = params.get(LoginParam.KEYTAB);
+          if (keytab != null) {
+            options.put("keyTab", keytab);
+          }
+          options.put("storeKey", "true");
+        } else {
+          options.put("useTicketCache", "true");
+          String ticketCache = params.get(LoginParam.CCACHE);
+          if (ticketCache != null) {
+            options.put("ticketCache", ticketCache);
+          }
+          options.put("renewTGT", "true");
+        }
+        options.put("doNotPrompt", "true");
+      }
+      options.put("refreshKrb5Config", "true");
+
+      return new AppConfigurationEntry(
+          KRB5_LOGIN_MODULE, controlFlag, options);
+    }
+
+    private static String prependFileAuthority(String keytabPath) {
+      return keytabPath.startsWith("file://")
+          ? keytabPath
+          : "file://" + keytabPath;
+    }
+  }
+
+  /**
    * A test method to print out the current user's UGI.
    * @param args if there are two arguments, read the user from the keytab
    * and print it out.
@@ -2062,7 +2029,7 @@ public class UserGroupInformation {
     ugi.print();
     System.out.println("UGI: " + ugi);
     System.out.println("Auth method " + ugi.user.getAuthenticationMethod());
-    System.out.println("Keytab " + ugi.isKeytab);
+    System.out.println("Keytab " + ugi.isFromKeytab());
     System.out.println("============================================================");
     
     if (args.length == 2) {
@@ -2070,8 +2037,9 @@ public class UserGroupInformation {
       loginUserFromKeytab(args[0], args[1]);
       getCurrentUser().print();
       System.out.println("Keytab: " + ugi);
-      System.out.println("Auth method " + loginUser.user.getAuthenticationMethod());
-      System.out.println("Keytab " + loginUser.isKeytab);
+      UserGroupInformation loginUgi = getLoginUser();
+      System.out.println("Auth method " + loginUgi.getAuthenticationMethod());
+      System.out.println("Keytab " + loginUgi.isFromKeytab());
     }
   }
 }
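
For readers following the UserGroupInformation diff above: the heart of the
getLoginUser() change is replacing class-wide synchronization with an
optimistic compare-and-set publish on an AtomicReference. Below is a minimal,
self-contained sketch of that idiom under stated assumptions; LazyLoginSketch,
expensiveLogin(), and the printed "initialized" side effect are hypothetical
stand-ins (createLoginUser() and spawnAutoRenewalThreadForUserCreds() play
those roles in the patch), not Hadoop APIs.

import java.util.concurrent.atomic.AtomicReference;

public class LazyLoginSketch {
  // Published at most once; stands in for UGI's loginUserRef.
  private static final AtomicReference<String> LOGIN = new AtomicReference<>();

  // Stands in for createLoginUser(): expensive, safe to run twice,
  // but only one result may ever be published.
  private static String expensiveLogin() {
    return "user-" + System.nanoTime();
  }

  static String getLogin() {
    String user = LOGIN.get();
    if (user == null) {
      // Optimistically build a candidate without holding any lock.
      String candidate = expensiveLogin();
      do {
        if (LOGIN.compareAndSet(null, candidate)) {
          user = candidate;
          // Winner-only side effect, like spawning the renewal thread:
          // losers must never run this, or it would run more than once.
          System.out.println("initialized login for " + user);
        } else {
          user = LOGIN.get();  // lost the race: adopt the winner's value.
        }
      } while (user == null);  // loop in case the ref was reset to null.
    }
    return user;
  }

  public static void main(String[] args) {
    System.out.println(getLogin());
    System.out.println(getLogin());  // same value; no second login.
  }
}

The design point, per the comments in the patch itself: only the thread whose
compareAndSet wins runs the one-time side effect, so at most one renewal
thread is ever spawned, and every subsequent caller pays only a volatile read
instead of taking a lock.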

http://git-wip-us.apache.org/repos/asf/hadoop/blob/59cf7588/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml b/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml
index b5163a1..ea26d06 100644
--- a/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml
+++ b/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml
@@ -2947,19 +2947,6 @@
     </description>
   </property>
   <property>
-    <name>hadoop.treat.subject.external</name>
-    <value>false</value>
-    <description>
-      When creating UGI with UserGroupInformation(Subject), treat the passed
-      subject external if set to true, and assume the owner of the subject
-      should do the credential renewal.
-
-      When true this property will introduce an incompatible change which
-      may require changes in client code. For more details, see the jiras:
-      HADOOP-13805,HADOOP-13558.
-    </description>
-  </property>
-  <property>
     <name>hadoop.system.tags</name>
     <value>YARN,HDFS,NAMENODE,DATANODE,REQUIRED,SECURITY,KERBEROS,PERFORMANCE,CLIENT
       ,SERVER,DEBUG,DEPRICATED,COMMON,OPTIONAL</value>

http://git-wip-us.apache.org/repos/asf/hadoop/blob/59cf7588/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/TestUGILoginFromKeytab.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/TestUGILoginFromKeytab.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/TestUGILoginFromKeytab.java
index 61fbf89..826e4b2 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/TestUGILoginFromKeytab.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/TestUGILoginFromKeytab.java
@@ -21,14 +21,41 @@ package org.apache.hadoop.security;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.CommonConfigurationKeys;
 import org.apache.hadoop.minikdc.MiniKdc;
+import org.apache.hadoop.security.UserGroupInformation.AuthenticationMethod;
 import org.junit.After;
 import org.junit.Assert;
 import org.junit.Before;
 import org.junit.Rule;
 import org.junit.Test;
 import org.junit.rules.TemporaryFolder;
+import org.mockito.Mockito;
+import org.mockito.invocation.InvocationOnMock;
+import org.mockito.stubbing.Answer;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertTrue;
+import static org.junit.Assert.fail;
 
 import java.io.File;
+import java.io.IOException;
+import java.security.Principal;
+import java.security.PrivilegedExceptionAction;
+import java.util.Iterator;
+import java.util.Set;
+import java.util.concurrent.Callable;
+import java.util.concurrent.CountDownLatch;
+import java.util.concurrent.CyclicBarrier;
+import java.util.concurrent.ExecutorService;
+import java.util.concurrent.Executors;
+import java.util.concurrent.Future;
+import java.util.concurrent.TimeUnit;
+import java.util.concurrent.TimeoutException;
+
+import javax.security.auth.Subject;
+import javax.security.auth.kerberos.KerberosPrincipal;
+import javax.security.auth.kerberos.KerberosTicket;
+import javax.security.auth.login.LoginContext;
 
 /**
  * Verify UGI login from keytab. Check that the UGI is
@@ -39,6 +66,7 @@ public class TestUGILoginFromKeytab {
 
   private MiniKdc kdc;
   private File workDir;
+  private ExecutorService executor;
 
   @Rule
   public final TemporaryFolder folder = new TemporaryFolder();
@@ -51,9 +79,12 @@ public class TestUGILoginFromKeytab {
     conf.set(CommonConfigurationKeys.HADOOP_SECURITY_AUTHENTICATION,
         "kerberos");
     UserGroupInformation.setConfiguration(conf);
+    UserGroupInformation.setShouldRenewImmediatelyForTests(true);
     workDir = folder.getRoot();
     kdc = new MiniKdc(MiniKdc.createConf(), workDir);
     kdc.start();
+    executor = Executors.newCachedThreadPool();
+
   }
 
   @After
@@ -61,6 +92,9 @@ public class TestUGILoginFromKeytab {
     if (kdc != null) {
       kdc.stop();
     }
+    if (executor != null) {
+      executor.shutdownNow();
+    }
   }
 
   /**
@@ -69,7 +103,6 @@ public class TestUGILoginFromKeytab {
    */
   @Test
   public void testUGILoginFromKeytab() throws Exception {
-    UserGroupInformation.setShouldRenewImmediatelyForTests(true);
     String principal = "foo";
     File keytab = new File(workDir, "foo.keytab");
     kdc.createPrincipal(keytab, principal);
@@ -80,12 +113,379 @@ public class TestUGILoginFromKeytab {
         ugi.isFromKeytab());
 
     // Verify relogin from keytab.
-    User user = ugi.getSubject().getPrincipals(User.class).iterator().next();
+    User user = getUser(ugi.getSubject());
     final long firstLogin = user.getLastLogin();
+    final LoginContext login1 = user.getLogin();
+    Assert.assertNotNull(login1);
+
     ugi.reloginFromKeytab();
     final long secondLogin = user.getLastLogin();
+    final LoginContext login2 = user.getLogin();
     Assert.assertTrue("User should have been able to relogin from keytab",
         secondLogin > firstLogin);
+    Assert.assertNotNull(login2);
+    Assert.assertNotSame(login1, login2);
+  }
+
+  @Test
+  public void testGetUGIFromKnownSubject() throws Exception {
+    KerberosPrincipal principal = new KerberosPrincipal("user");
+    File keytab = new File(workDir, "user.keytab");
+    kdc.createPrincipal(keytab, principal.getName());
+
+    UserGroupInformation ugi1 =
+      UserGroupInformation.loginUserFromKeytabAndReturnUGI(
+        principal.getName(), keytab.getPath());
+    Subject subject = ugi1.getSubject();
+    User user = getUser(subject);
+    Assert.assertNotNull(user);
+    LoginContext login = user.getLogin();
+    Assert.assertNotNull(login);
+
+    // User instance and/or login context should not change.
+    UserGroupInformation ugi2 = UserGroupInformation.getUGIFromSubject(subject);
+    Assert.assertSame(user, getUser(ugi2.getSubject()));
+    Assert.assertSame(login, user.getLogin());
+  }
+
+  @Test
+  public void testGetUGIFromExternalSubject() throws Exception {
+    KerberosPrincipal principal = new KerberosPrincipal("user");
+    File keytab = new File(workDir, "user.keytab");
+    kdc.createPrincipal(keytab, principal.getName());
+
+    UserGroupInformation ugi =
+      UserGroupInformation.loginUserFromKeytabAndReturnUGI(
+        principal.getName(), keytab.getPath());
+    Subject subject = ugi.getSubject();
+    removeUser(subject);
+
+    // first call to get the ugi should add the User instance w/o a login
+    // context.
+    UserGroupInformation ugi1 = UserGroupInformation.getUGIFromSubject(subject);
+    Assert.assertSame(subject, ugi1.getSubject());
+    User user = getUser(subject);
+    Assert.assertNotNull(user);
+    Assert.assertEquals(principal.getName(), user.getName());
+    Assert.assertNull(user.getLogin());
+
+    // subsequent call should not change the existing User instance.
+    UserGroupInformation ugi2 = UserGroupInformation.getUGIFromSubject(subject);
+    Assert.assertSame(subject, ugi2.getSubject());
+    Assert.assertSame(user, getUser(ugi2.getSubject()));
+    Assert.assertNull(user.getLogin());
+  }
+
+  @Test
+  public void testGetUGIFromExternalSubjectWithLogin() throws Exception {
+    KerberosPrincipal principal = new KerberosPrincipal("user");
+    File keytab = new File(workDir, "user.keytab");
+    kdc.createPrincipal(keytab, principal.getName());
+
+    // alter the User's login context to appear to be a foreign and
+    // unmanageable context.
+    UserGroupInformation ugi =
+      UserGroupInformation.loginUserFromKeytabAndReturnUGI(
+        principal.getName(), keytab.getPath());
+    Subject subject = ugi.getSubject();
+    User user = getUser(subject);
+    final LoginContext dummyLogin = Mockito.mock(LoginContext.class);
+    user.setLogin(dummyLogin);
+
+    // nothing should change.
+    UserGroupInformation ugi2 = UserGroupInformation.getUGIFromSubject(subject);
+    Assert.assertSame(subject, ugi2.getSubject());
+    Assert.assertSame(user, getUser(ugi2.getSubject()));
+    Assert.assertSame(dummyLogin, user.getLogin());
+  }
+
+
+  private static KerberosTicket getTicket(UserGroupInformation ugi) {
+    Set<KerberosTicket> tickets =
+        ugi.getSubject().getPrivateCredentials(KerberosTicket.class);
+    return tickets.isEmpty() ? null : tickets.iterator().next();
   }
 
+  // verify ugi has expected principal, a keytab, and has a ticket for
+  // the expected principal.
+  private static KerberosTicket checkTicketAndKeytab(UserGroupInformation ugi,
+      KerberosPrincipal principal, boolean expectIsKeytab) {
+    Assert.assertEquals("wrong principal",
+      principal.getName(), ugi.getUserName());
+    Assert.assertEquals("is not keytab",
+      expectIsKeytab, ugi.isFromKeytab());
+    KerberosTicket ticket = getTicket(ugi);
+    Assert.assertNotNull("no ticket", ticket);
+    Assert.assertEquals("wrong principal", principal, ticket.getClient());
+    return ticket;
+  }
+
+  @Test
+  public void testReloginForUGIFromSubject() throws Exception {
+    KerberosPrincipal principal1 = new KerberosPrincipal("user1");
+    File keytab1 = new File(workDir, "user1.keytab");
+    kdc.createPrincipal(keytab1, principal1.getName());
+
+    KerberosPrincipal principal2 = new KerberosPrincipal("user2");
+    File keytab2 = new File(workDir, "user2.keytab");
+    kdc.createPrincipal(keytab2, principal2.getName());
+
+    // Login a user and remove the User instance so it looks like an
+    // "external" subject.
+    final Subject extSubject =
+      UserGroupInformation.loginUserFromKeytabAndReturnUGI(
+        principal2.getName(), keytab2.getPath()).getSubject();
+    removeUser(extSubject);
+
+    // Login another user.
+    UserGroupInformation.loginUserFromKeytab(
+        principal1.getName(), keytab1.getPath());
+    final UserGroupInformation loginUser = UserGroupInformation.getLoginUser();
+
+    loginUser.doAs(new PrivilegedExceptionAction<Void>() {
+      @Override
+      public Void run() throws IOException {
+        KerberosTicket loginTicket =
+            checkTicketAndKeytab(loginUser, principal1, true);
+
+        // get the ugi for the previously logged in subject.
+        UserGroupInformation extSubjectUser =
+            UserGroupInformation.getUGIFromSubject(extSubject);
+        KerberosTicket ticket =
+          checkTicketAndKeytab(extSubjectUser, principal2, false);
+
+        // verify login user got a new ticket.
+        loginUser.reloginFromKeytab();
+        KerberosTicket newLoginTicket =
+            checkTicketAndKeytab(loginUser, principal1, true);
+        Assert.assertNotEquals(loginTicket.getAuthTime(),
+            newLoginTicket.getAuthTime());
+
+        // verify an "external" subject ticket does not change.
+        extSubjectUser.reloginFromKeytab();
+        Assert.assertSame(ticket,
+            checkTicketAndKeytab(extSubjectUser, principal2, false));
+
+        // verify subject ugi relogin did not affect the login user.
+        Assert.assertSame(newLoginTicket,
+            checkTicketAndKeytab(loginUser, principal1, true));
+
+        return null;
+      }
+    });
+  }
+
+  @Test
+  public void testReloginForLoginFromSubject() throws Exception {
+    KerberosPrincipal principal1 = new KerberosPrincipal("user1");
+    File keytab1 = new File(workDir, "user1.keytab");
+    kdc.createPrincipal(keytab1, principal1.getName());
+
+    KerberosPrincipal principal2 = new KerberosPrincipal("user2");
+    File keytab2 = new File(workDir, "user2.keytab");
+    kdc.createPrincipal(keytab2, principal2.getName());
+
+    // login principal1 with a keytab.
+    UserGroupInformation.loginUserFromKeytab(
+        principal1.getName(), keytab1.getPath());
+    final UserGroupInformation originalLoginUser =
+        UserGroupInformation.getLoginUser();
+    Assert.assertNotNull(getUser(originalLoginUser.getSubject()).getLogin());
+
+    originalLoginUser.doAs(new PrivilegedExceptionAction<Void>() {
+      @Override
+      public Void run() throws IOException {
+        KerberosTicket originalLoginUserTicket =
+            checkTicketAndKeytab(originalLoginUser, principal1, true);
+
+        // login principal2 from a subject with keytab.  it's external so
+        // no login context should be attached to the user.
+        final Subject subject =
+          UserGroupInformation.loginUserFromKeytabAndReturnUGI(
+            principal2.getName(), keytab2.getPath()).getSubject();
+        removeUser(subject);
+
+        // verify the new login user is external.
+        UserGroupInformation.loginUserFromSubject(subject);
+        Assert.assertNull(getUser(subject).getLogin());
+        UserGroupInformation extLoginUser =
+          UserGroupInformation.getLoginUser();
+        KerberosTicket extLoginUserTicket =
+            checkTicketAndKeytab(extLoginUser, principal2, false);
+
+        // verify subject-based login user does not get a new ticket, and
+        // original login user not affected.
+        extLoginUser.reloginFromKeytab();
+        Assert.assertSame(extLoginUserTicket,
+          checkTicketAndKeytab(extLoginUser, principal2, false));
+        Assert.assertSame(originalLoginUserTicket,
+          checkTicketAndKeytab(originalLoginUser, principal1, true));
+
+        // verify original login user gets a new ticket, new login user
+        // not affected.
+        originalLoginUser.reloginFromKeytab();
+        Assert.assertNotSame(originalLoginUserTicket,
+            checkTicketAndKeytab(originalLoginUser, principal1, true));
+        Assert.assertSame(extLoginUserTicket,
+            checkTicketAndKeytab(extLoginUser, principal2, false));
+        return null;
+      }
+    });
+  }
+
+  @Test
+  public void testReloginAfterFailedRelogin() throws Exception {
+    KerberosPrincipal principal = new KerberosPrincipal("user1");
+    File keytab = new File(workDir, "user1.keytab");
+    File keytabBackup = new File(keytab + ".backup");
+    kdc.createPrincipal(keytab, principal.getName());
+
+    UserGroupInformation.loginUserFromKeytab(
+        principal.getName(), keytab.getPath());
+    final UserGroupInformation loginUser = UserGroupInformation.getLoginUser();
+    checkTicketAndKeytab(loginUser, principal, true);
+
+    // move the keytab to induce a relogin failure.
+    Assert.assertTrue(keytab.renameTo(keytabBackup));
+    try {
+      loginUser.reloginFromKeytab();
+      Assert.fail("relogin should fail");
+    } catch (KerberosAuthException kae) {
+      // expected.
+    }
+
+    // even though no KeyTab object, ugi should know it's keytab based.
+    Assert.assertTrue(loginUser.isFromKeytab());
+    Assert.assertNull(getTicket(loginUser));
+
+    // move keytab back to enable relogin to succeed.
+    Assert.assertTrue(keytabBackup.renameTo(keytab));
+    loginUser.reloginFromKeytab();
+    checkTicketAndKeytab(loginUser, principal, true);
+  }
+
+  // verify concurrent relogins block to avoid indeterminate credential
+  // corruption, but getting a ugi for the subject does not block.
+  @Test(timeout=180000)
+  public void testConcurrentRelogin() throws Exception {
+    final CyclicBarrier barrier = new CyclicBarrier(2);
+    final CountDownLatch latch = new CountDownLatch(1);
+    assertTrue(UserGroupInformation.isSecurityEnabled());
+
+    KerberosPrincipal principal = new KerberosPrincipal("testUser");
+    File keytab = new File(workDir, "user1.keytab");
+    kdc.createPrincipal(keytab, principal.getName());
+
+    // create a keytab ugi.
+    final UserGroupInformation loginUgi =
+        UserGroupInformation.loginUserFromKeytabAndReturnUGI(
+          principal.getName(), keytab.getPath());
+    assertEquals(AuthenticationMethod.KERBEROS,
+        loginUgi.getAuthenticationMethod());
+    assertTrue(loginUgi.isFromKeytab());
+
+    // create a new ugi instance based on subject from the logged in user.
+    final UserGroupInformation clonedUgi =
+        UserGroupInformation.getUGIFromSubject(loginUgi.getSubject());
+    assertEquals(AuthenticationMethod.KERBEROS,
+        clonedUgi.getAuthenticationMethod());
+    assertTrue(clonedUgi.isFromKeytab());
+
+    // cause first relogin to block on a barrier in logout to verify relogins
+    // are atomic.
+    User user = getUser(loginUgi.getSubject());
+    final LoginContext spyLogin = Mockito.spy(user.getLogin());
+    user.setLogin(spyLogin);
+    Mockito.doAnswer(new Answer<Void>(){
+      @Override
+      public Void answer(InvocationOnMock invocation)
+          throws Throwable {
+        invocation.callRealMethod();
+        latch.countDown();
+        barrier.await();
+        return null;
+      }
+    }).when(spyLogin).logout();
+
+    Future<Void> relogin = executor.submit(
+        new Callable<Void>(){
+          @Override
+          public Void call() throws Exception {
+            Thread.currentThread().setName("relogin");
+            loginUgi.reloginFromKeytab();
+            return null;
+          }
+        });
+    // wait for the thread to block on the barrier in the logout of the
+    // relogin.
+    assertTrue("first relogin didn't block",
+      latch.await(2, TimeUnit.SECONDS));
+
+    // although the logout removed the keytab instance, verify the ugi
+    // knows from its login params that it is supposed to be from a keytab.
+    assertTrue(clonedUgi.isFromKeytab());
+
+    // another concurrent re-login should block.
+    Mockito.doNothing().when(spyLogin).logout();
+    Mockito.doNothing().when(spyLogin).login();
+    Future<UserGroupInformation> clonedRelogin = executor.submit(
+        new Callable<UserGroupInformation>(){
+          @Override
+          public UserGroupInformation call() throws Exception {
+            Thread.currentThread().setName("clonedRelogin");
+            clonedUgi.reloginFromKeytab();
+            return clonedUgi;
+          }
+        });
+
+    try {
+      clonedRelogin.get(2, TimeUnit.SECONDS);
+      fail("second relogin didn't block!");
+    } catch (TimeoutException te) {
+      // expected
+    }
+
+    // concurrent UGI instantiation should not block and again should
+    // know it's supposed to be from a keytab.
+    loginUgi.doAs(new PrivilegedExceptionAction<Void>(){
+      @Override
+      public Void run() throws Exception {
+        UserGroupInformation ugi = UserGroupInformation.getCurrentUser();
+        assertEquals(principal.getName(), ugi.getUserName());
+        assertTrue(ugi.isFromKeytab());
+        return null;
+      }
+    });
+    clonedUgi.doAs(new PrivilegedExceptionAction<Void>(){
+      @Override
+      public Void run() throws Exception {
+        UserGroupInformation ugi = UserGroupInformation.getCurrentUser();
+        assertEquals(principal.getName(), ugi.getUserName());
+        assertTrue(ugi.isFromKeytab());
+        return null;
+      }
+    });
+
+    // second relogin should still be blocked until the original relogin
+    // is unblocked.
+    assertFalse(clonedRelogin.isDone());
+    barrier.await();
+    relogin.get();
+    clonedRelogin.get();
+  }
+
+  private User getUser(Subject subject) {
+    Iterator<User> iter = subject.getPrincipals(User.class).iterator();
+    return iter.hasNext() ? iter.next() : null;
+  }
+
+  private void removeUser(Subject subject) {
+    // remove User instance so it appears to not be logged in.
+    for (Iterator<Principal> iter = subject.getPrincipals().iterator();
+         iter.hasNext(); ) {
+      if (iter.next() instanceof User) {
+        iter.remove();
+      }
+    }
+  }
 }
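
For reference, a minimal sketch of the keytab login/relogin pattern these
tests exercise, using only public UserGroupInformation APIs (the principal
and keytab path below are illustrative; error handling is omitted):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.security.SecurityUtil;
    import org.apache.hadoop.security.UserGroupInformation;

    public final class KeytabLoginSketch {
      public static void main(String[] args) throws Exception {
        // Enable kerberos before any login happens.
        Configuration conf = new Configuration();
        SecurityUtil.setAuthenticationMethod(
            UserGroupInformation.AuthenticationMethod.KERBEROS, conf);
        UserGroupInformation.setConfiguration(conf);

        // Log the process in from a keytab; this becomes the login user.
        UserGroupInformation.loginUserFromKeytab(
            "user1@EXAMPLE.COM", "/etc/security/keytabs/user1.keytab");
        UserGroupInformation ugi = UserGroupInformation.getLoginUser();

        // Safe to call periodically: a no-op while the TGT is still fresh,
        // otherwise it re-logs in from the keytab. A failed attempt can be
        // retried later, which is what testReloginAfterFailedRelogin checks.
        ugi.checkTGTAndReloginFromKeytab();
      }
    }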

http://git-wip-us.apache.org/repos/asf/hadoop/blob/59cf7588/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/TestUGIWithMiniKdc.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/TestUGIWithMiniKdc.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/TestUGIWithMiniKdc.java
index 6c94b1d..de74d17 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/TestUGIWithMiniKdc.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/TestUGIWithMiniKdc.java
@@ -58,9 +58,11 @@ public class TestUGIWithMiniKdc {
 
   private void setupKdc() throws Exception {
     Properties kdcConf = MiniKdc.createConf();
-    // tgt expire time = 30 seconds
-    kdcConf.setProperty(MiniKdc.MAX_TICKET_LIFETIME, "30");
-    kdcConf.setProperty(MiniKdc.MIN_TICKET_LIFETIME, "30");
+    // tgt expire time = 2 seconds.  just testing that renewal thread retries
+    // for expiring tickets, so no need to waste time waiting for expiry to
+    // arrive.
+    kdcConf.setProperty(MiniKdc.MAX_TICKET_LIFETIME, "2");
+    kdcConf.setProperty(MiniKdc.MIN_TICKET_LIFETIME, "2");
     File kdcDir = new File(System.getProperty("test.dir", "target"));
     kdc = new MiniKdc(kdcConf, kdcDir);
     kdc.start();
@@ -70,12 +72,14 @@ public class TestUGIWithMiniKdc {
   public void testAutoRenewalThreadRetryWithKdc() throws Exception {
     GenericTestUtils.setLogLevel(UserGroupInformation.LOG, Level.DEBUG);
     final Configuration conf = new Configuration();
+    // can't rely on standard kinit, else test fails when user running
+    // the test is kinit'ed because the test renews _their TGT_.
+    conf.set("hadoop.kerberos.kinit.command", "bogus-kinit-cmd");
     // Relogin every 1 second
     conf.setLong(HADOOP_KERBEROS_MIN_SECONDS_BEFORE_RELOGIN, 1);
     SecurityUtil.setAuthenticationMethod(
         UserGroupInformation.AuthenticationMethod.KERBEROS, conf);
     UserGroupInformation.setConfiguration(conf);
-    UserGroupInformation.setEnableRenewThreadCreationForTest(true);
 
     LoginContext loginContext = null;
     try {
@@ -87,44 +91,10 @@ public class TestUGIWithMiniKdc {
       setupKdc();
       kdc.createPrincipal(keytab, principal);
 
-      // client login
-      final Subject subject =
-          new Subject(false, principals, new HashSet<>(), new HashSet<>());
-
-      loginContext = new LoginContext("", subject, null,
-          new javax.security.auth.login.Configuration() {
-            @Override
-            public AppConfigurationEntry[] getAppConfigurationEntry(
-                String name) {
-              Map<String, String> options = new HashMap<>();
-              options.put("principal", principal);
-              options.put("refreshKrb5Config", "true");
-              if (PlatformName.IBM_JAVA) {
-                options.put("useKeytab", keytab.getPath());
-                options.put("credsType", "both");
-              } else {
-                options.put("keyTab", keytab.getPath());
-                options.put("useKeyTab", "true");
-                options.put("storeKey", "true");
-                options.put("doNotPrompt", "true");
-                options.put("useTicketCache", "true");
-                options.put("renewTGT", "true");
-                options.put("isInitiator", Boolean.toString(true));
-              }
-              String ticketCache = System.getenv("KRB5CCNAME");
-              if (ticketCache != null) {
-                options.put("ticketCache", ticketCache);
-              }
-              options.put("debug", "true");
-              return new AppConfigurationEntry[] {new AppConfigurationEntry(
-                  KerberosUtil.getKrb5LoginModuleName(),
-                  AppConfigurationEntry.LoginModuleControlFlag.REQUIRED,
-                  options)};
-            }
-          });
-      loginContext.login();
-      final Subject loginSubject = loginContext.getSubject();
-      UserGroupInformation.loginUserFromSubject(loginSubject);
+      UserGroupInformation.loginUserFromKeytab(principal, keytab.getPath());
+      UserGroupInformation ugi = UserGroupInformation.getLoginUser();
+      // no ticket cache, so force the thread to test for failures.
+      ugi.spawnAutoRenewalThreadForUserCreds(true);
 
       // Verify retry happens. Do not verify retry count to reduce flakiness.
       // Detailed back-off logic is tested separately in
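
For context, the MiniKdc harness driving this test can be stood up in a few
lines; a minimal sketch mirroring setupKdc() above (directory and principal
names are illustrative):

    import java.io.File;
    import java.util.Properties;
    import org.apache.hadoop.minikdc.MiniKdc;

    Properties kdcConf = MiniKdc.createConf();
    // Very short ticket lifetime so renewal kicks in almost immediately.
    kdcConf.setProperty(MiniKdc.MAX_TICKET_LIFETIME, "2");
    kdcConf.setProperty(MiniKdc.MIN_TICKET_LIFETIME, "2");
    MiniKdc kdc = new MiniKdc(kdcConf, new File("target"));
    kdc.start();

    // Materialize a keytab for the test principal against the mini KDC.
    File keytab = new File("target", "foo.keytab");
    kdc.createPrincipal(keytab, "foo");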

http://git-wip-us.apache.org/repos/asf/hadoop/blob/59cf7588/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/TestUserGroupInformation.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/TestUserGroupInformation.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/TestUserGroupInformation.java
index bcb2126..9477990 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/TestUserGroupInformation.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/TestUserGroupInformation.java
@@ -38,6 +38,9 @@ import org.junit.Assert;
 import org.junit.Before;
 import org.junit.BeforeClass;
 import org.junit.Test;
+import org.mockito.Mockito;
+import org.mockito.invocation.InvocationOnMock;
+import org.mockito.stubbing.Answer;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 import org.slf4j.event.Level;
@@ -53,15 +56,19 @@ import java.io.File;
 import java.io.IOException;
 import java.io.InputStreamReader;
 import java.lang.reflect.Method;
+import java.security.Principal;
 import java.security.PrivilegedExceptionAction;
 import java.util.Collection;
 import java.util.ConcurrentModificationException;
 import java.util.Date;
 import java.util.LinkedHashSet;
 import java.util.Set;
+import java.util.concurrent.Callable;
+import java.util.concurrent.CountDownLatch;
+import java.util.concurrent.CyclicBarrier;
+import java.util.concurrent.Executors;
+import java.util.concurrent.Future;
 import java.util.concurrent.TimeUnit;
-
-import static org.apache.hadoop.fs.CommonConfigurationKeys.HADOOP_TREAT_SUBJECT_EXTERNAL_KEY;
 import static org.apache.hadoop.fs.CommonConfigurationKeys.HADOOP_USER_GROUP_METRICS_PERCENTILES_INTERVALS;
 import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.HADOOP_KERBEROS_MIN_SECONDS_BEFORE_RELOGIN;
 import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.HADOOP_SECURITY_AUTH_TO_LOCAL;
@@ -112,7 +119,14 @@ public class TestUserGroupInformation {
       throw new RuntimeException("UGI is not using its own security conf!");
     } 
   }
-  
+
+  // must be set immediately to avoid inconsistent testing issues.
+  static {
+    // fake the realm, in case kerberos is enabled
+    System.setProperty("java.security.krb5.kdc", "");
+    System.setProperty("java.security.krb5.realm", "DEFAULT.REALM");
+  }
+
   /** configure ugi */
   @BeforeClass
   public static void setup() {
@@ -123,9 +137,6 @@ public class TestUserGroupInformation {
     // that finds winutils.exe
     String home = System.getenv("HADOOP_HOME");
     System.setProperty("hadoop.home.dir", (home != null ? home : "."));
-    // fake the realm is kerberos is enabled
-    System.setProperty("java.security.krb5.kdc", "");
-    System.setProperty("java.security.krb5.realm", "DEFAULT.REALM");
   }
   
   @Before
@@ -1021,7 +1032,8 @@ public class TestUserGroupInformation {
     assertTrue(credsugiTokens.contains(token2));
   }
 
-  private void testCheckTGTAfterLoginFromSubjectHelper() throws Exception {
+  @Test
+  public void testCheckTGTAfterLoginFromSubject() throws Exception {
     // security on, default is remove default realm
     SecurityUtil.setAuthenticationMethod(AuthenticationMethod.KERBEROS, conf);
     UserGroupInformation.setConfiguration(conf);
@@ -1043,17 +1055,6 @@ public class TestUserGroupInformation {
     });
   }
 
-  @Test(expected = KerberosAuthException.class)
-  public void testCheckTGTAfterLoginFromSubject() throws Exception {
-    testCheckTGTAfterLoginFromSubjectHelper();
-  }
-
-  @Test
-  public void testCheckTGTAfterLoginFromSubjectFix() throws Exception {
-    conf.setBoolean(HADOOP_TREAT_SUBJECT_EXTERNAL_KEY, true);
-    testCheckTGTAfterLoginFromSubjectHelper();
-  }
-
   @Test
   public void testGetNextRetryTime() throws Exception {
     GenericTestUtils.setLogLevel(UserGroupInformation.LOG, Level.DEBUG);
@@ -1134,4 +1135,80 @@ public class TestUserGroupInformation {
     LOG.info(str);
     assertTrue(str, lower <= lastRetry && lastRetry < upper);
   }
+
+  // verify that getCurrentUser on the same and different subjects can be
+  // concurrent, i.e. no synchronization.
+  @Test(timeout=8000)
+  public void testConcurrentGetCurrentUser() throws Exception {
+    final CyclicBarrier barrier = new CyclicBarrier(2);
+    final CountDownLatch latch = new CountDownLatch(1);
+
+    final UserGroupInformation testUgi1 =
+        UserGroupInformation.createRemoteUser("testUgi1");
+
+    final UserGroupInformation testUgi2 =
+        UserGroupInformation.createRemoteUser("testUgi2");
+
+    // swap the User with a spy to allow getCurrentUser to block when the
+    // spy is called for the user name.
+    Set<Principal> principals = testUgi1.getSubject().getPrincipals();
+    User user =
+        testUgi1.getSubject().getPrincipals(User.class).iterator().next();
+    final User spyUser = Mockito.spy(user);
+    principals.remove(user);
+    principals.add(spyUser);
+    when(spyUser.getName()).thenAnswer(new Answer<String>(){
+      @Override
+      public String answer(InvocationOnMock invocation) throws Throwable {
+        latch.countDown();
+        barrier.await();
+        return (String)invocation.callRealMethod();
+      }
+    });
+    // wait for the thread to block on the barrier in getCurrentUser.
+    Future<UserGroupInformation> blockingLookup =
+        Executors.newSingleThreadExecutor().submit(
+            new Callable<UserGroupInformation>(){
+              @Override
+              public UserGroupInformation call() throws Exception {
+                return testUgi1.doAs(
+                    new PrivilegedExceptionAction<UserGroupInformation>() {
+                      @Override
+                      public UserGroupInformation run() throws Exception {
+                        return UserGroupInformation.getCurrentUser();
+                      }
+                    });
+              }
+            });
+    latch.await();
+
+    // old versions of mockito synchronize on returning mocked answers so
+    // the blocked getCurrentUser will block all other calls to getName.
+    // work around this by swapping out the spy with the original User.
+    principals.remove(spyUser);
+    principals.add(user);
+    // concurrent getCurrentUser on ugi1 should not be blocked.
+    UserGroupInformation ugi;
+    ugi = testUgi1.doAs(
+        new PrivilegedExceptionAction<UserGroupInformation>() {
+          @Override
+          public UserGroupInformation run() throws Exception {
+            return UserGroupInformation.getCurrentUser();
+          }
+        });
+    assertSame(testUgi1.getSubject(), ugi.getSubject());
+    // concurrent getCurrentUser on ugi2 should not be blocked.
+    ugi = testUgi2.doAs(
+        new PrivilegedExceptionAction<UserGroupInformation>() {
+          @Override
+          public UserGroupInformation run() throws Exception {
+            return UserGroupInformation.getCurrentUser();
+          }
+        });
+    assertSame(testUgi2.getSubject(), ugi.getSubject());
+
+    // unblock the original call.
+    barrier.await();
+    assertSame(testUgi1.getSubject(), blockingLookup.get().getSubject());
+  }
 }


---------------------------------------------------------------------
To unsubscribe, e-mail: common-commits-unsubscribe@hadoop.apache.org
For additional commands, e-mail: common-commits-help@hadoop.apache.org


[45/50] [abbrv] hadoop git commit: YARN-7934. [GQ] Refactor preemption calculators to allow overriding for Federation Global Algos. (Contributed by curino)

Posted by ha...@apache.org.
YARN-7934. [GQ] Refactor preemption calculators to allow overriding for Federation Global Algos. (Contributed by curino)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/514794e1
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/514794e1
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/514794e1

Branch: refs/heads/HDFS-12996
Commit: 514794e1a5a39ca61de3981d53a05547ae17f5e4
Parents: 95904f6
Author: Carlo Curino <cu...@apache.org>
Authored: Thu Feb 22 18:12:12 2018 -0800
Committer: Carlo Curino <cu...@apache.org>
Committed: Thu Feb 22 18:12:12 2018 -0800

----------------------------------------------------------------------
 .../AbstractPreemptableResourceCalculator.java  |  38 +++++--
 .../capacity/AbstractPreemptionEntity.java      |   4 +
 .../CapacitySchedulerPreemptionContext.java     |   6 +-
 .../capacity/PreemptableResourceCalculator.java |  21 ++--
 .../monitor/capacity/TempQueuePerPartition.java | 106 +++++++++++++++----
 .../webapp/dao/ResourceInfo.java                |   5 +-
 6 files changed, 139 insertions(+), 41 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/514794e1/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/AbstractPreemptableResourceCalculator.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/AbstractPreemptableResourceCalculator.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/AbstractPreemptableResourceCalculator.java
index 5196831..2589970 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/AbstractPreemptableResourceCalculator.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/AbstractPreemptableResourceCalculator.java
@@ -18,6 +18,12 @@
 
 package org.apache.hadoop.yarn.server.resourcemanager.monitor.capacity;
 
+import java.util.ArrayList;
+import java.util.Collection;
+import java.util.Comparator;
+import java.util.Iterator;
+import java.util.PriorityQueue;
+
 import org.apache.hadoop.yarn.api.records.Resource;
 import org.apache.hadoop.yarn.api.records.ResourceInformation;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.policy.PriorityUtilizationQueueOrderingPolicy;
@@ -26,12 +32,6 @@ import org.apache.hadoop.yarn.util.resource.ResourceCalculator;
 import org.apache.hadoop.yarn.util.resource.ResourceUtils;
 import org.apache.hadoop.yarn.util.resource.Resources;
 
-import java.util.ArrayList;
-import java.util.Collection;
-import java.util.Comparator;
-import java.util.Iterator;
-import java.util.PriorityQueue;
-
 /**
  * Calculate how much resources need to be preempted for each queue,
  * will be used by {@link PreemptionCandidatesSelector}.
@@ -126,11 +126,18 @@ public class AbstractPreemptableResourceCalculator {
       TempQueuePerPartition q = i.next();
       Resource used = q.getUsed();
 
+      Resource initIdealAssigned;
       if (Resources.greaterThan(rc, totGuarant, used, q.getGuaranteed())) {
-        q.idealAssigned = Resources.add(q.getGuaranteed(), q.untouchableExtra);
+        initIdealAssigned =
+            Resources.add(q.getGuaranteed(), q.untouchableExtra);
       } else {
-        q.idealAssigned = Resources.clone(used);
+        initIdealAssigned = Resources.clone(used);
       }
+
+      // perform initial assignment
+      initIdealAssignment(totGuarant, q, initIdealAssigned);
+
+
       Resources.subtractFrom(unassigned, q.idealAssigned);
       // If idealAssigned < (allocated + used + pending), q needs more
       // resources, so
@@ -188,6 +195,21 @@ public class AbstractPreemptableResourceCalculator {
     }
   }
 
+
+  /**
+   * This method is visible to allow sub-classes to override the initialization
+   * behavior.
+   *
+   * @param totGuarant total resources (useful for {@code ResourceCalculator}
+   *          operations)
+   * @param q the {@code TempQueuePerPartition} being initialized
+   * @param initIdealAssigned the proposed initialization value.
+   */
+  protected void initIdealAssignment(Resource totGuarant,
+      TempQueuePerPartition q, Resource initIdealAssigned) {
+    q.idealAssigned = initIdealAssigned;
+  }
+
   /**
    * Computes a normalizedGuaranteed capacity based on active queues.
    *
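
The initIdealAssignment hook introduced above is the seam a federation-aware
calculator can use to adjust the proposed seed value before it is committed.
A hypothetical sketch of such an override (the subclass name and the capping
policy are illustrative, not part of this patch; it also assumes the subclass
lives in the same ...monitor.capacity package, since idealAssigned is
package-visible):

    public class GlobalPreemptableResourceCalculator
        extends AbstractPreemptableResourceCalculator {

      // Constructors omitted; they would simply delegate to super.

      @Override
      protected void initIdealAssignment(Resource totGuarant,
          TempQueuePerPartition q, Resource initIdealAssigned) {
        // Example policy: never seed a queue above its guarantee.
        q.idealAssigned = Resources.componentwiseMin(
            initIdealAssigned, q.getGuaranteed());
      }
    }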

http://git-wip-us.apache.org/repos/asf/hadoop/blob/514794e1/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/AbstractPreemptionEntity.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/AbstractPreemptionEntity.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/AbstractPreemptionEntity.java
index dbd1f0a..cb4d7af 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/AbstractPreemptionEntity.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/AbstractPreemptionEntity.java
@@ -59,6 +59,10 @@ public class AbstractPreemptionEntity {
     this.selected = Resource.newInstance(0, 0);
   }
 
+  public String getQueueName() {
+    return queueName;
+  }
+
   public Resource getUsed() {
     return current;
   }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/514794e1/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/CapacitySchedulerPreemptionContext.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/CapacitySchedulerPreemptionContext.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/CapacitySchedulerPreemptionContext.java
index d6f3f6c..098acdd 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/CapacitySchedulerPreemptionContext.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/CapacitySchedulerPreemptionContext.java
@@ -30,7 +30,11 @@ import java.util.Collection;
 import java.util.LinkedHashSet;
 import java.util.Set;
 
-interface CapacitySchedulerPreemptionContext {
+/**
+ * This interface provides context for the calculation of ideal allocation
+ * and preemption for the {@code CapacityScheduler}.
+ */
+public interface CapacitySchedulerPreemptionContext {
   CapacityScheduler getScheduler();
 
   TempQueuePerPartition getQueueByPartition(String queueName,

http://git-wip-us.apache.org/repos/asf/hadoop/blob/514794e1/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/PreemptableResourceCalculator.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/PreemptableResourceCalculator.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/PreemptableResourceCalculator.java
index 907785e..2d2cdf6 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/PreemptableResourceCalculator.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/PreemptableResourceCalculator.java
@@ -18,6 +18,11 @@
 
 package org.apache.hadoop.yarn.server.resourcemanager.monitor.capacity;
 
+import java.util.ArrayList;
+import java.util.HashSet;
+import java.util.List;
+import java.util.Set;
+
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.yarn.api.records.Resource;
@@ -26,11 +31,6 @@ import org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.Capacity
 import org.apache.hadoop.yarn.util.resource.ResourceCalculator;
 import org.apache.hadoop.yarn.util.resource.Resources;
 
-import java.util.ArrayList;
-import java.util.HashSet;
-import java.util.List;
-import java.util.Set;
-
 /**
  * Calculate how much resources need to be preempted for each queue,
  * will be used by {@link PreemptionCandidatesSelector}
@@ -70,7 +70,7 @@ public class PreemptableResourceCalculator
    * @param totalPreemptionAllowed total amount of preemption we allow
    * @param tot_guarant the amount of capacity assigned to this pool of queues
    */
-  private void computeIdealResourceDistribution(ResourceCalculator rc,
+  protected void computeIdealResourceDistribution(ResourceCalculator rc,
       List<TempQueuePerPartition> queues, Resource totalPreemptionAllowed,
       Resource tot_guarant) {
 
@@ -138,14 +138,13 @@ public class PreemptableResourceCalculator
   /**
    * This method recursively computes the ideal assignment of resources to each
    * level of the hierarchy. This ensures that leafs that are over-capacity but
-   * with parents within capacity will not be preemptionCandidates. Preemptions are allowed
-   * within each subtree according to local over/under capacity.
+   * with parents within capacity will not be preemptionCandidates. Preemptions
+   * are allowed within each subtree according to local over/under capacity.
    *
    * @param root the root of the cloned queue hierachy
    * @param totalPreemptionAllowed maximum amount of preemption allowed
-   * @return a list of leaf queues updated with preemption targets
    */
-  private void recursivelyComputeIdealAssignment(
+  protected void recursivelyComputeIdealAssignment(
       TempQueuePerPartition root, Resource totalPreemptionAllowed) {
     if (root.getChildren() != null &&
         root.getChildren().size() > 0) {
@@ -242,7 +241,7 @@ public class PreemptableResourceCalculator
 
       // compute the ideal distribution of resources among queues
       // updates cloned queues state accordingly
-      tRoot.idealAssigned = tRoot.getGuaranteed();
+      tRoot.initializeRootIdealWithGuarangeed();
       recursivelyComputeIdealAssignment(tRoot, totalPreemptionAllowed);
     }
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/514794e1/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/TempQueuePerPartition.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/TempQueuePerPartition.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/TempQueuePerPartition.java
index fdeee52..9d8297d 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/TempQueuePerPartition.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/TempQueuePerPartition.java
@@ -18,22 +18,20 @@
 
 package org.apache.hadoop.yarn.server.resourcemanager.monitor.capacity;
 
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.Collection;
+import java.util.LinkedHashMap;
+import java.util.Map;
+
 import org.apache.hadoop.yarn.api.records.Resource;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.CSQueue;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.LeafQueue;
-
-import org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity
-    .ParentQueue;
+import org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.ParentQueue;
 import org.apache.hadoop.yarn.util.resource.ResourceCalculator;
 import org.apache.hadoop.yarn.util.resource.ResourceUtils;
 import org.apache.hadoop.yarn.util.resource.Resources;
 
-import java.util.ArrayList;
-import java.util.Arrays;
-import java.util.Collection;
-import java.util.LinkedHashMap;
-import java.util.Map;
-
 /**
  * Temporary data-structure tracking resource availability, pending resource
  * need, current utilization. This is per-queue-per-partition data structure
@@ -74,7 +72,8 @@ public class TempQueuePerPartition extends AbstractPreemptionEntity {
   // idealAssigned, used etc.
   Map<String, TempUserPerPartition> usersPerPartition = new LinkedHashMap<>();
 
-  TempQueuePerPartition(String queueName, Resource current,
+  @SuppressWarnings("checkstyle:parameternumber")
+  public TempQueuePerPartition(String queueName, Resource current,
       boolean preemptionDisabled, String partition, Resource killable,
       float absCapacity, float absMaxCapacity, Resource totalPartitionResource,
       Resource reserved, CSQueue queue, Resource effMinRes,
@@ -94,7 +93,7 @@ public class TempQueuePerPartition extends AbstractPreemptionEntity {
       pendingDeductReserved = Resources.createResource(0);
     }
 
-    if (ParentQueue.class.isAssignableFrom(queue.getClass())) {
+    if (queue != null && ParentQueue.class.isAssignableFrom(queue.getClass())) {
       parentQueue = (ParentQueue) queue;
     }
 
@@ -179,15 +178,14 @@ public class TempQueuePerPartition extends AbstractPreemptionEntity {
     // Because for a satisfied parent queue, it could have some under-utilized
     // leaf queues. Such an under-utilized leaf queue could preempt resources
     // from an over-utilized leaf queue located in another hierarchy.
-    if (null == children || children.isEmpty()) {
-      Resource maxOfGuranteedAndUsedDeductAssigned = Resources.subtract(
-          Resources.max(rc, clusterResource, getUsed(), getGuaranteed()),
-          idealAssigned);
-      maxOfGuranteedAndUsedDeductAssigned = Resources.max(rc, clusterResource,
-          maxOfGuranteedAndUsedDeductAssigned, Resources.none());
-      accepted = Resources.min(rc, clusterResource, accepted,
-          maxOfGuranteedAndUsedDeductAssigned);
-    }
+
+    accepted = filterByMaxDeductAssigned(rc, clusterResource, accepted);
+
+    // accepted so far contains the "quota acceptable" amount, we now filter by
+    // locality acceptable
+
+    accepted = acceptedByLocality(rc, accepted);
+
     Resource remain = Resources.subtract(avail, accepted);
     Resources.addTo(idealAssigned, accepted);
     return remain;
@@ -329,4 +327,72 @@ public class TempQueuePerPartition extends AbstractPreemptionEntity {
   public Map<String, TempUserPerPartition> getUsersPerPartition() {
     return usersPerPartition;
   }
+
+  public void setPending(Resource pending) {
+    this.pending = pending;
+  }
+
+  public Resource getIdealAssigned() {
+    return idealAssigned;
+  }
+
+  public String toGlobalString() {
+    StringBuilder sb = new StringBuilder();
+    sb.append("\n").append(toString());
+    for (TempQueuePerPartition c : children) {
+      sb.append(c.toGlobalString());
+    }
+    return sb.toString();
+  }
+
+  /**
+   * This method is visible to allow sub-classes to override the behavior,
+   * specifically to take into account locality-based limitations of how much
+   * the queue can consumed.
+   *
+   * @param rc the ResourceCalculator to be used.
+   * @param offered the input amount of Resource offered to this queue.
+   *
+   * @return  the subset of Resource(s) that the queue can consume after
+   *          accounting for locality effects.
+   */
+  protected Resource acceptedByLocality(ResourceCalculator rc,
+      Resource offered) {
+    return offered;
+  }
+
+  /**
+   * This method is visible to allow sub-classes to override the behavior,
+   * specifically because for federation purposes we do not want to cap
+   * resources as is done here.
+   *
+   * @param rc the {@code ResourceCalculator} to be used
+   * @param clusterResource the total cluster resources
+   * @param offered the resources offered to this queue
+   * @return the amount of resources accepted after considering max and
+   *         deducting assigned.
+   */
+  protected Resource filterByMaxDeductAssigned(ResourceCalculator rc,
+      Resource clusterResource, Resource offered) {
+    if (null == children || children.isEmpty()) {
+      Resource maxOfGuranteedAndUsedDeductAssigned = Resources.subtract(
+          Resources.max(rc, clusterResource, getUsed(), getGuaranteed()),
+          idealAssigned);
+      maxOfGuranteedAndUsedDeductAssigned = Resources.max(rc, clusterResource,
+          maxOfGuranteedAndUsedDeductAssigned, Resources.none());
+      offered = Resources.min(rc, clusterResource, offered,
+          maxOfGuranteedAndUsedDeductAssigned);
+    }
+    return offered;
+  }
+
+  /**
+   * This method is visible to allow sub-classes to override the behavior,
+   * specifically for federation purposes we need to initialize per-sub-cluster
+   * roots as well as the global one.
+   */
+  protected void initializeRootIdealWithGuarangeed() {
+    idealAssigned = Resources.clone(getGuaranteed());
+  }
+
 }
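
Similarly, the new hooks on TempQueuePerPartition give a federation subclass
a place to bound offers by what a single sub-cluster can actually host. A
hypothetical sketch (the class name and the locality bound are illustrative,
not part of this patch):

    public class FederationTempQueuePerPartition
        extends TempQueuePerPartition {

      // Per-sub-cluster capacity this queue may use; how it gets populated
      // is assumed here, not shown in this patch.
      private Resource localCapacity;

      // Constructor omitted; it would delegate to the (now public)
      // TempQueuePerPartition constructor and set localCapacity.

      @Override
      protected Resource acceptedByLocality(ResourceCalculator rc,
          Resource offered) {
        // Never accept more than the local sub-cluster can host.
        return Resources.componentwiseMin(offered, localCapacity);
      }
    }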

http://git-wip-us.apache.org/repos/asf/hadoop/blob/514794e1/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/ResourceInfo.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/ResourceInfo.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/ResourceInfo.java
index 5bed936..9a335e9 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/ResourceInfo.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/ResourceInfo.java
@@ -70,7 +70,7 @@ public class ResourceInfo {
 
   @Override
   public String toString() {
-    return resources.toString();
+    return getResource().toString();
   }
 
   public void setMemory(int memory) {
@@ -90,6 +90,9 @@ public class ResourceInfo {
   }
 
   public Resource getResource() {
+    if (resources == null) {
+      resources = Resource.newInstance(memory, vCores);
+    }
     return Resource.newInstance(resources);
   }
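
The null guard matters because this DAO can be populated field-by-field (for
example when unmarshalled from a REST response) without the backing Resource
ever being set. A sketch of the failure mode the guard closes (the no-arg
constructor and setvCores are assumed to exist alongside the setMemory shown
above):

    ResourceInfo info = new ResourceInfo();  // assumed JAXB-style no-arg ctor
    info.setMemory(4096);
    info.setvCores(4);                       // assumed setter
    // Without the guard, toString()/getResource() dereferenced the null
    // 'resources' field; now it is rebuilt from the primitive fields.
    Resource r = info.getResource();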
 


---------------------------------------------------------------------
To unsubscribe, e-mail: common-commits-unsubscribe@hadoop.apache.org
For additional commands, e-mail: common-commits-help@hadoop.apache.org


[17/50] [abbrv] hadoop git commit: YARN-7919. Refactor timelineservice-hbase module into submodules. Contributed by Haibo Chen.

Posted by ha...@apache.org.
http://git-wip-us.apache.org/repos/asf/hadoop/blob/9af30d46/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-client/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/common/ColumnRWHelper.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-client/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/common/ColumnRWHelper.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-client/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/common/ColumnRWHelper.java
new file mode 100644
index 0000000..a8e5149
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-client/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/common/ColumnRWHelper.java
@@ -0,0 +1,487 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.server.timelineservice.storage.common;
+
+import org.apache.hadoop.hbase.client.Put;
+import org.apache.hadoop.hbase.client.Result;
+import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.yarn.server.timelineservice.storage.flow.AggregationCompactionDimension;
+import org.apache.hadoop.yarn.server.timelineservice.storage.flow.Attribute;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.io.IOException;
+import java.util.HashMap;
+import java.util.Map;
+import java.util.NavigableMap;
+import java.util.TreeMap;
+
+/**
+ * A set of utility functions that read from or write to a column.
+ * This class is meant to be used only by explicit Columns,
+ * and not by clients writing directly.
+ */
+public final class ColumnRWHelper {
+  private static final Logger LOG =
+      LoggerFactory.getLogger(ColumnRWHelper.class);
+
+  private ColumnRWHelper() {
+  }
+
+  /**
+   * Figures out the cell timestamp used in the Put for storing.
+   * Will supplement the timestamp if required, typically for the flow run
+   * table. If we supplement the timestamp, we left-shift it and append the
+   * app id so that there are no collisions in the flow run table's cells.
+   */
+  private static long getPutTimestamp(
+      Long timestamp, boolean supplementTs, Attribute[] attributes) {
+    if (timestamp == null) {
+      timestamp = System.currentTimeMillis();
+    }
+    if (!supplementTs) {
+      return timestamp;
+    } else {
+      String appId = getAppIdFromAttributes(attributes);
+      long supplementedTS = TimestampGenerator.getSupplementedTimestamp(
+          timestamp, appId);
+      return supplementedTS;
+    }
+  }
+
+  private static String getAppIdFromAttributes(Attribute[] attributes) {
+    if (attributes == null) {
+      return null;
+    }
+    String appId = null;
+    for (Attribute attribute : attributes) {
+      if (AggregationCompactionDimension.APPLICATION_ID.toString().equals(
+          attribute.getName())) {
+        appId = Bytes.toString(attribute.getValue());
+      }
+    }
+    return appId;
+  }
+
+  /**
+   * Sends a Mutation to the table. The mutations will be buffered and sent over
+   * the wire as part of a batch.
+   *
+   * @param rowKey
+   *          identifying the row to write. Nothing gets written when null.
+   * @param tableMutator
+   *          used to modify the underlying HBase table
+   * @param column the column that is to be modified
+   * @param timestamp
+   *          version timestamp. When null the current timestamp multiplied with
+   *          TimestampGenerator.TS_MULTIPLIER and added with last 3 digits of
+   *          app id will be used
+   * @param inputValue
+   *          the value to write to the rowKey and column qualifier. Nothing
+   *          gets written when null.
+   * @param attributes Attributes to be set for HBase Put.
+   * @throws IOException if any problem occurs during store operation (sending
+   *          mutation to table).
+   */
+  public static void store(byte[] rowKey, TypedBufferedMutator<?> tableMutator,
+                           Column<?> column, Long timestamp,
+                           Object inputValue, Attribute... attributes)
+      throws IOException {
+    store(rowKey, tableMutator, column.getColumnFamilyBytes(),
+        column.getColumnQualifierBytes(), timestamp,
+        column.supplementCellTimestamp(), inputValue,
+        column.getValueConverter(),
+        column.getCombinedAttrsWithAggr(attributes));
+  }
+
+  /**
+   * Sends a Mutation to the table. The mutations will be buffered and sent over
+   * the wire as part of a batch.
+   *
+   * @param rowKey
+   *          identifying the row to write. Nothing gets written when null.
+   * @param tableMutator
+   *          used to modify the underlying HBase table
+   * @param columnFamilyBytes
+   *          column family to which the value is written
+   * @param columnQualifier
+   *          column qualifier. Nothing gets written when null.
+   * @param timestamp
+   *          version timestamp. When null the current timestamp multiplied with
+   *          TimestampGenerator.TS_MULTIPLIER and added with last 3 digits of
+   *          app id will be used
+   * @param inputValue
+   *          the value to write to the rowKey and column qualifier. Nothing
+   *          gets written when null.
+   * @param supplementTs
+   *          whether the cell timestamp should be supplemented with the app
+   *          id to avoid collisions
+   * @param converter
+   *          used to encode the value before it is written
+   * @param attributes Attributes to be set for HBase Put.
+   * @throws IOException if any problem occurs during store operation (sending
+   *          mutation to table).
+   */
+  public static void store(byte[] rowKey, TypedBufferedMutator<?> tableMutator,
+      byte[] columnFamilyBytes, byte[] columnQualifier, Long timestamp,
+      boolean supplementTs, Object inputValue, ValueConverter converter,
+      Attribute... attributes) throws IOException {
+    if ((rowKey == null) || (columnQualifier == null) || (inputValue == null)) {
+      return;
+    }
+    Put p = new Put(rowKey);
+    timestamp = getPutTimestamp(timestamp, supplementTs, attributes);
+    p.addColumn(columnFamilyBytes, columnQualifier, timestamp,
+        converter.encodeValue(inputValue));
+    if ((attributes != null) && (attributes.length > 0)) {
+      for (Attribute attribute : attributes) {
+        p.setAttribute(attribute.getName(), attribute.getValue());
+      }
+    }
+    tableMutator.mutate(p);
+  }
+
+  /**
+   * Get the latest version of this specified column. Note: this call clones the
+   * value content of the hosting {@link org.apache.hadoop.hbase.Cell Cell}.
+   *
+   * @param result from which to read the value. Cannot be null
+   * @param columnFamilyBytes column family from which to read.
+   * @param columnQualifierBytes referring to the column to be read.
+   * @param converter used to decode the stored value bytes.
+   * @return latest version of the specified column of whichever object was
+   *         written.
+   * @throws IOException if any problem occurs while reading result.
+   */
+  public static Object readResult(Result result, byte[] columnFamilyBytes,
+      byte[] columnQualifierBytes, ValueConverter converter)
+      throws IOException {
+    if (result == null || columnQualifierBytes == null) {
+      return null;
+    }
+
+    // Would have preferred to be able to use getValueAsByteBuffer and get a
+    // ByteBuffer to avoid copy, but GenericObjectMapper doesn't seem to like
+    // that.
+    byte[] value = result.getValue(columnFamilyBytes, columnQualifierBytes);
+    return converter.decodeValue(value);
+  }
+
+  /**
+   * Get the latest version of this specified column. Note: this call clones the
+   * value content of the hosting {@link org.apache.hadoop.hbase.Cell Cell}.
+   *
+   * @param result from which to read the value. Cannot be null
+   * @param column the column that the result can be parsed to
+   * @return latest version of the specified column of whichever object was
+   *         written.
+   * @throws IOException if any problem occurs while reading result.
+   */
+  public static Object readResult(Result result, Column<?> column)
+      throws IOException {
+    return readResult(result, column.getColumnFamilyBytes(),
+        column.getColumnQualifierBytes(), column.getValueConverter());
+  }
+
+  /**
+   * Get the latest version of this specified column. Note: this call clones the
+   * value content of the hosting {@link org.apache.hadoop.hbase.Cell Cell}.
+   *
+   * @param result Cannot be null
+   * @param columnPrefix column prefix to read from
+   * @param qualifier column qualifier. Nothing gets read when null.
+   * @return result object (can be cast to whatever object was written to) or
+   *         null when specified column qualifier for this prefix doesn't exist
+   *         in the result.
+   * @throws IOException if there is any exception encountered while reading
+   *     result.
+   */
+  public static Object readResult(Result result, ColumnPrefix<?> columnPrefix,
+                                  String qualifier) throws IOException {
+    byte[] columnQualifier = ColumnHelper.getColumnQualifier(
+        columnPrefix.getColumnPrefixInBytes(), qualifier);
+
+    return readResult(
+        result, columnPrefix.getColumnFamilyBytes(),
+        columnQualifier, columnPrefix.getValueConverter());
+  }
+
+  /**
+   * Reads the latest values of all columns with the given prefix.
+   *
+   * @param <K> identifies the type of key converter.
+   * @param result from which to read columns.
+   * @param columnPrefix column prefix to read from.
+   * @param keyConverter used to convert column bytes to the appropriate key
+   *          type
+   * @return the latest values of columns in the column family with this prefix
+   *         (or all of them if the prefix value is null).
+   * @throws IOException if there is any exception encountered while reading
+   *           results.
+   */
+  public static <K> Map<K, Object> readResults(Result result,
+      ColumnPrefix<?> columnPrefix, KeyConverter<K> keyConverter)
+      throws IOException {
+    return readResults(result,
+        columnPrefix.getColumnFamilyBytes(),
+        columnPrefix.getColumnPrefixInBytes(),
+        keyConverter, columnPrefix.getValueConverter());
+  }
+
+  /**
+   * @param result from which to read data with timestamps.
+   * @param columnPrefix column prefix to read from.
+   * @param <K> identifies the type of key converter.
+   * @param <V> the type of the values. The values will be cast into that type.
+   * @param keyConverter used to convert column bytes to the appropriate key
+   *     type.
+   * @return the cell values at each respective time in for form
+   *         {@literal {idA={timestamp1->value1}, idA={timestamp2->value2},
+   *         idB={timestamp3->value3}, idC={timestamp1->value4}}}
+   * @throws IOException if there is any exception encountered while reading
+   *     result.
+   */
+  public static <K, V> NavigableMap<K, NavigableMap<Long, V>>
+      readResultsWithTimestamps(Result result, ColumnPrefix<?> columnPrefix,
+      KeyConverter<K> keyConverter) throws IOException {
+    return readResultsWithTimestamps(result,
+        columnPrefix.getColumnFamilyBytes(),
+        columnPrefix.getColumnPrefixInBytes(),
+        keyConverter, columnPrefix.getValueConverter(),
+        columnPrefix.supplementCellTimeStamp());
+  }
+
+  /**
+   * @param result from which to read data with timestamps.
+   * @param columnFamilyBytes column family from which to read.
+   * @param columnPrefixBytes optional prefix to limit columns. If null all
+   *          columns are returned.
+   * @param <K> identifies the type of column name (indicated by type of key
+   *     converter).
+   * @param <V> the type of the values. The values will be cast into that type.
+   * @param keyConverter used to convert column bytes to the appropriate key
+   *     type.
+   * @return the cell values at each respective time in for form
+   *         {@literal {idA={timestamp1->value1}, idA={timestamp2->value2},
+   *         idB={timestamp3->value3}, idC={timestamp1->value4}}}
+   * @throws IOException if any problem occurs while reading results.
+   */
+  @SuppressWarnings("unchecked")
+  public static <K, V> NavigableMap<K, NavigableMap<Long, V>>
+      readResultsWithTimestamps(Result result, byte[] columnFamilyBytes,
+          byte[] columnPrefixBytes, KeyConverter<K> keyConverter,
+          ValueConverter valueConverter, boolean supplementTs)
+      throws IOException {
+
+    NavigableMap<K, NavigableMap<Long, V>> results = new TreeMap<>();
+
+    if (result != null) {
+      NavigableMap<
+          byte[], NavigableMap<byte[], NavigableMap<Long, byte[]>>> resultMap =
+          result.getMap();
+
+      NavigableMap<byte[], NavigableMap<Long, byte[]>> columnCellMap =
+          resultMap.get(columnFamilyBytes);
+      // could be that there is no such column family.
+      if (columnCellMap != null) {
+        for (Map.Entry<byte[], NavigableMap<Long, byte[]>> entry : columnCellMap
+            .entrySet()) {
+          K converterColumnKey = null;
+          if (columnPrefixBytes == null) {
+            if (LOG.isDebugEnabled()) {
+              LOG.debug("null prefix was specified; returning all columns");
+            }
+            try {
+              converterColumnKey = keyConverter.decode(entry.getKey());
+            } catch (IllegalArgumentException iae) {
+              LOG.error("Illegal column found, skipping this column.", iae);
+              continue;
+            }
+          } else {
+            // A non-null prefix means columns are actually of the form
+            // prefix!columnNameRemainder
+            byte[][] columnNameParts =
+                Separator.QUALIFIERS.split(entry.getKey(), 2);
+            byte[] actualColumnPrefixBytes = columnNameParts[0];
+            if (Bytes.equals(columnPrefixBytes, actualColumnPrefixBytes)
+                && columnNameParts.length == 2) {
+              try {
+                // This is the prefix that we want
+                converterColumnKey = keyConverter.decode(columnNameParts[1]);
+              } catch (IllegalArgumentException iae) {
+                LOG.error("Illegal column found, skipping this column.", iae);
+                continue;
+              }
+            }
+          }
+
+          // If this column has the prefix we want
+          if (converterColumnKey != null) {
+            NavigableMap<Long, V> cellResults =
+                new TreeMap<Long, V>();
+            NavigableMap<Long, byte[]> cells = entry.getValue();
+            if (cells != null) {
+              for (Map.Entry<Long, byte[]> cell : cells.entrySet()) {
+                V value =
+                    (V) valueConverter.decodeValue(cell.getValue());
+                Long ts = supplementTs ? TimestampGenerator.
+                    getTruncatedTimestamp(cell.getKey()) : cell.getKey();
+                cellResults.put(ts, value);
+              }
+            }
+            results.put(converterColumnKey, cellResults);
+          }
+        } // for entry : columnCellMap
+      } // if columnCellMap != null
+    } // if result != null
+    return results;
+  }
+
+  /**
+   * @param <K> identifies the type of column name (indicated by type of key
+   *     converter).
+   * @param result from which to read columns
+   * @param columnPrefixBytes optional prefix to limit columns. If null all
+   *        columns are returned.
+   * @param keyConverter used to convert column bytes to the appropriate key
+   *          type.
+   * @return the latest values of columns in the column family. If the column
+   *         prefix is null, the column qualifier is returned as Strings. For
+   *         non-null column prefix bytes, the column qualifier is returned as
+   *         a list of parts, each part a byte[]. This is to facilitate
+   *         returning byte arrays of values that were not Strings.
+   * @throws IOException if any problem occurs while reading results.
+   */
+  public static <K> Map<K, Object> readResults(Result result,
+      byte[] columnFamilyBytes, byte[] columnPrefixBytes,
+      KeyConverter<K> keyConverter, ValueConverter valueConverter)
+      throws IOException {
+    Map<K, Object> results = new HashMap<K, Object>();
+
+    if (result != null) {
+      Map<byte[], byte[]> columns = result.getFamilyMap(columnFamilyBytes);
+      for (Map.Entry<byte[], byte[]> entry : columns.entrySet()) {
+        byte[] columnKey = entry.getKey();
+        if (columnKey != null && columnKey.length > 0) {
+
+          K converterColumnKey = null;
+          if (columnPrefixBytes == null) {
+            try {
+              converterColumnKey = keyConverter.decode(columnKey);
+            } catch (IllegalArgumentException iae) {
+              LOG.error("Illegal column found, skipping this column.", iae);
+              continue;
+            }
+          } else {
+            // A non-null prefix means columns are actually of the form
+            // prefix!columnNameRemainder
+            byte[][] columnNameParts = Separator.QUALIFIERS.split(columnKey, 2);
+            if (columnNameParts.length > 0) {
+              byte[] actualColumnPrefixBytes = columnNameParts[0];
+              // If this is the prefix that we want
+              if (Bytes.equals(columnPrefixBytes, actualColumnPrefixBytes)
+                  && columnNameParts.length == 2) {
+                try {
+                  converterColumnKey = keyConverter.decode(columnNameParts[1]);
+                } catch (IllegalArgumentException iae) {
+                  LOG.error("Illegal column found, skipping this column.", iae);
+                  continue;
+                }
+              }
+            }
+          } // if-else
+
+          // If the columnPrefix is null (we want all columns), or the actual
+          // prefix matches the given prefix we want this column
+          if (converterColumnKey != null) {
+            Object value = valueConverter.decodeValue(entry.getValue());
+            // we return the columnQualifier in parts since we don't know
+            // which part is of which data type.
+            results.put(converterColumnKey, value);
+          }
+        }
+      } // for entry
+    }
+    return results;
+  }
+
+  /**
+   * Sends a Mutation to the table. The mutations will be buffered and sent over
+   * the wire as part of a batch.
+   *
+   * @param rowKey identifying the row to write. Nothing gets written when null.
+   * @param tableMutator used to modify the underlying HBase table. Caller is
+   *          responsible to pass a mutator for the table that actually has this
+   *          column.
+   * @param qualifier column qualifier. Nothing gets written when null.
+   * @param timestamp version timestamp. When null the server timestamp will be
+   *          used.
+   * @param attributes attributes for the mutation that are used by the
+   *          coprocessor to set/read the cell tags.
+   * @param inputValue the value to write to the rowKey and column qualifier.
+   *          Nothing gets written when null.
+   * @throws IOException if there is any exception encountered while doing
+   *     store operation(sending mutation to the table).
+   */
+  public static void store(byte[] rowKey, TypedBufferedMutator<?> tableMutator,
+             ColumnPrefix<?> columnPrefix, byte[] qualifier, Long timestamp,
+             Object inputValue, Attribute... attributes) throws IOException {
+    // Null check
+    if (qualifier == null) {
+      throw new IOException("Cannot store column with null qualifier in "
+          + tableMutator.getName().getNameAsString());
+    }
+
+    byte[] columnQualifier = columnPrefix.getColumnPrefixBytes(qualifier);
+    Attribute[] combinedAttributes =
+        columnPrefix.getCombinedAttrsWithAggr(attributes);
+
+    store(rowKey, tableMutator, columnPrefix.getColumnFamilyBytes(),
+        columnQualifier, timestamp, columnPrefix.supplementCellTimeStamp(),
+        inputValue, columnPrefix.getValueConverter(), combinedAttributes);
+  }
+
+  /**
+   * Sends a Mutation to the table. The mutations will be buffered and sent over
+   * the wire as part of a batch.
+   *
+   * @param rowKey identifying the row to write. Nothing gets written when null.
+   * @param tableMutator used to modify the underlying HBase table. Caller is
+   *          responsible to pass a mutator for the table that actually has this
+   *          column.
+   * @param columnPrefix column prefix that determines the column family and
+   *          the prefix under which the qualifier is stored.
+   * @param qualifier column qualifier. Nothing gets written when null.
+   * @param timestamp version timestamp. When null the server timestamp will be
+   *          used.
+   * @param inputValue the value to write to the rowKey and column qualifier.
+   *          Nothing gets written when null.
+   * @param attributes attributes for the mutation that are used by the
+   *          coprocessor to set/read the cell tags.
+   * @throws IOException if there is any exception encountered while doing the
+   *     store operation (sending the mutation to the table).
+   */
+  public static void store(byte[] rowKey, TypedBufferedMutator<?> tableMutator,
+             ColumnPrefix<?> columnPrefix, String qualifier, Long timestamp,
+             Object inputValue, Attribute... attributes) throws IOException {
+    // Null check
+    if (qualifier == null) {
+      throw new IOException("Cannot store column with null qualifier in "
+          + tableMutator.getName().getNameAsString());
+    }
+
+    byte[] columnQualifier = columnPrefix.getColumnPrefixBytes(qualifier);
+    Attribute[] combinedAttributes =
+        columnPrefix.getCombinedAttrsWithAggr(attributes);
+
+    store(rowKey, tableMutator, columnPrefix.getColumnFamilyBytes(),
+        columnQualifier, timestamp, columnPrefix.supplementCellTimeStamp(),
+        inputValue, columnPrefix.getValueConverter(), combinedAttributes);
+  }
+}
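An aside on the write path: both overloads above delegate to a lower-level store() that builds the actual Mutation. The sketch below shows how a writer might invoke the String-qualifier overload. It is a minimal illustration only; it assumes (as the reader code later in this patch suggests) that these statics live in ColumnRWHelper, the import paths follow the storage.common package used throughout this patch, and the method parameters stand in for state a real writer would already hold.

    import java.io.IOException;

    import org.apache.hadoop.yarn.server.timelineservice.storage.common.ColumnPrefix;
    import org.apache.hadoop.yarn.server.timelineservice.storage.common.ColumnRWHelper;
    import org.apache.hadoop.yarn.server.timelineservice.storage.common.TypedBufferedMutator;

    final class StoreSketch {
      // Buffers one cell write; nothing hits the wire until the mutator
      // flushes (explicitly or when its write buffer fills up).
      static void writeStartTime(byte[] rowKey,
          TypedBufferedMutator<?> mutator, ColumnPrefix<?> prefix)
          throws IOException {
        ColumnRWHelper.store(rowKey, mutator, prefix,
            "startTime",       // qualifier; null here raises an IOException
            null,              // timestamp: null -> server-side timestamp
            1518800000000L);   // value, encoded by the prefix's ValueConverter
      }
    }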

http://git-wip-us.apache.org/repos/asf/hadoop/blob/9af30d46/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-client/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/common/HBaseTimelineStorageUtils.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-client/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/common/HBaseTimelineStorageUtils.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-client/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/common/HBaseTimelineStorageUtils.java
new file mode 100644
index 0000000..f4cd6fb
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-client/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/common/HBaseTimelineStorageUtils.java
@@ -0,0 +1,121 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with this
+ * work for additional information regarding copyright ownership. The ASF
+ * licenses this file to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations under
+ * the License.
+ */
+
+package org.apache.hadoop.yarn.server.timelineservice.storage.common;
+
+import java.net.MalformedURLException;
+import java.net.URL;
+import java.util.Arrays;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hbase.HBaseConfiguration;
+import org.apache.hadoop.hbase.HConstants;
+import org.apache.hadoop.hbase.client.Query;
+import org.apache.hadoop.yarn.conf.YarnConfiguration;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+
+/**
+ * Utility functions used by the HBase timeline service backend.
+ */
+public final class HBaseTimelineStorageUtils {
+  private static final Logger LOG =
+      LoggerFactory.getLogger(HBaseTimelineStorageUtils.class);
+
+  private HBaseTimelineStorageUtils() {
+  }
+
+
+  /**
+   * @param conf YARN configuration. Used to see if there is an explicit config
+   *          pointing to the HBase config file to read. It should not be null
+   *          or a NullPointerException will be thrown.
+   * @return a configuration with the HBase configuration from the classpath,
+   *         optionally overwritten by the timeline service configuration URL if
+   *         specified.
+   * @throws MalformedURLException if a timeline service HBase configuration URL
+   *           is specified but is a malformed URL.
+   */
+  public static Configuration getTimelineServiceHBaseConf(Configuration conf)
+      throws MalformedURLException {
+    if (conf == null) {
+      throw new NullPointerException("the YARN configuration must not be null");
+    }
+
+    Configuration hbaseConf;
+    String timelineServiceHBaseConfFileURL =
+        conf.get(YarnConfiguration.TIMELINE_SERVICE_HBASE_CONFIGURATION_FILE);
+    if (timelineServiceHBaseConfFileURL != null
+        && timelineServiceHBaseConfFileURL.length() > 0) {
+      LOG.info("Using hbase configuration at " +
+          timelineServiceHBaseConfFileURL);
+      // create a clone so that we don't mess with our input one
+      hbaseConf = new Configuration(conf);
+      Configuration plainHBaseConf = new Configuration(false);
+      URL hbaseSiteXML = new URL(timelineServiceHBaseConfFileURL);
+      plainHBaseConf.addResource(hbaseSiteXML);
+      HBaseConfiguration.merge(hbaseConf, plainHBaseConf);
+    } else {
+      // default to what is on the classpath
+      hbaseConf = HBaseConfiguration.create(conf);
+    }
+    return hbaseConf;
+  }
+
+  /**
+   * Given a row key prefix stored in a byte array, return a byte array for its
+   * immediate next row key.
+   *
+   * @param rowKeyPrefix The provided row key prefix, represented in an array.
+   * @return the closest next row key of the provided row key.
+   */
+  public static byte[] calculateTheClosestNextRowKeyForPrefix(
+      byte[] rowKeyPrefix) {
+    // Essentially we are treating it like an 'unsigned very very long' and
+    // doing +1 manually.
+    // Search for the place where the trailing 0xFFs start
+    int offset = rowKeyPrefix.length;
+    while (offset > 0) {
+      if (rowKeyPrefix[offset - 1] != (byte) 0xFF) {
+        break;
+      }
+      offset--;
+    }
+
+    if (offset == 0) {
+      // We got a prefix of all 0xFF bytes, which is the last possible
+      // prefix before the end of the table. So set the stop row to the
+      // 'end of the table'.
+      return HConstants.EMPTY_END_ROW;
+    }
+
+    // Copy the right length of the original
+    byte[] newStopRow = Arrays.copyOfRange(rowKeyPrefix, 0, offset);
+    // And increment the last one
+    newStopRow[newStopRow.length - 1]++;
+    return newStopRow;
+  }
+
+  public static void setMetricsTimeRange(Query query, byte[] metricsCf,
+      long tsBegin, long tsEnd) {
+    if (tsBegin != 0 || tsEnd != Long.MAX_VALUE) {
+      query.setColumnFamilyTimeRange(metricsCf,
+          tsBegin, ((tsEnd == Long.MAX_VALUE) ? Long.MAX_VALUE : (tsEnd + 1)));
+    }
+  }
+}
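To make the increment-with-carry behaviour of calculateTheClosestNextRowKeyForPrefix concrete, here is a small self-contained check; the expected values follow directly from the code above, and nothing beyond it is assumed.

    import java.util.Arrays;

    import org.apache.hadoop.hbase.HConstants;
    import org.apache.hadoop.yarn.server.timelineservice.storage.common.HBaseTimelineStorageUtils;

    final class NextRowKeySketch {
      public static void main(String[] args) {
        // Simple case: the last byte is not 0xFF, so it is just incremented.
        byte[] a = {0x01, 0x02};
        // prints [1, 3]
        System.out.println(Arrays.toString(
            HBaseTimelineStorageUtils.calculateTheClosestNextRowKeyForPrefix(a)));

        // Carry case: trailing 0xFFs are dropped and the byte before them
        // is incremented: {0x01, 0xFF, 0xFF} -> {0x02}.
        byte[] b = {0x01, (byte) 0xFF, (byte) 0xFF};
        // prints [2]
        System.out.println(Arrays.toString(
            HBaseTimelineStorageUtils.calculateTheClosestNextRowKeyForPrefix(b)));

        // All-0xFF case: no next prefix exists, so the scan should run to
        // the end of the table (HConstants.EMPTY_END_ROW, an empty array).
        byte[] c = {(byte) 0xFF};
        // prints true
        System.out.println(Arrays.equals(HConstants.EMPTY_END_ROW,
            HBaseTimelineStorageUtils.calculateTheClosestNextRowKeyForPrefix(c)));
      }
    }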

http://git-wip-us.apache.org/repos/asf/hadoop/blob/9af30d46/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-client/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/common/TimelineHBaseSchemaConstants.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-client/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/common/TimelineHBaseSchemaConstants.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-client/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/common/TimelineHBaseSchemaConstants.java
new file mode 100644
index 0000000..8e6c259
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-client/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/common/TimelineHBaseSchemaConstants.java
@@ -0,0 +1,71 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.yarn.server.timelineservice.storage.common;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
+import org.apache.hadoop.hbase.util.Bytes;
+
+/**
+ * contains the constants used in the context of schema accesses for
+ * {@link org.apache.hadoop.yarn.api.records.timelineservice.TimelineEntity}
+ * information.
+ */
+@InterfaceAudience.Private
+@InterfaceStability.Unstable
+public final class TimelineHBaseSchemaConstants {
+  private TimelineHBaseSchemaConstants() {
+  }
+
+  /**
+   * Used to create a pre-split for tables starting with a username in the
+   * prefix. TODO: this may have to become a config variable (string with
+   * separators) so that different installations can presplit based on their own
+   * commonly occurring names.
+   */
+  private final static byte[][] USERNAME_SPLITS = {
+      Bytes.toBytes("a"), Bytes.toBytes("ad"), Bytes.toBytes("an"),
+      Bytes.toBytes("b"), Bytes.toBytes("ca"), Bytes.toBytes("cl"),
+      Bytes.toBytes("d"), Bytes.toBytes("e"), Bytes.toBytes("f"),
+      Bytes.toBytes("g"), Bytes.toBytes("h"), Bytes.toBytes("i"),
+      Bytes.toBytes("j"), Bytes.toBytes("k"), Bytes.toBytes("l"),
+      Bytes.toBytes("m"), Bytes.toBytes("n"), Bytes.toBytes("o"),
+      Bytes.toBytes("q"), Bytes.toBytes("r"), Bytes.toBytes("s"),
+      Bytes.toBytes("se"), Bytes.toBytes("t"), Bytes.toBytes("u"),
+      Bytes.toBytes("v"), Bytes.toBytes("w"), Bytes.toBytes("x"),
+      Bytes.toBytes("y"), Bytes.toBytes("z")
+  };
+
+  /**
+   * The length at which keys auto-split.
+   */
+  public static final String USERNAME_SPLIT_KEY_PREFIX_LENGTH = "4";
+
+  /**
+   * @return splits for tables where the username is the row key prefix.
+   */
+  public static byte[][] getUsernameSplits() {
+    // clone() only copies the outer array; copy each row as well to get a
+    // deep copy, so callers cannot mutate the shared split keys.
+    byte[][] kloon = USERNAME_SPLITS.clone();
+    for (int row = 0; row < USERNAME_SPLITS.length; row++) {
+      kloon[row] = Bytes.copy(USERNAME_SPLITS[row]);
+    }
+    return kloon;
+  }
+
+}
\ No newline at end of file
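A note on why getUsernameSplits() copies each row: byte[][].clone() copies only the outer array, so without the per-row Bytes.copy a caller could still mutate the shared split keys. A tiny demonstration of the difference, using only the HBase Bytes utility already imported above:

    import org.apache.hadoop.hbase.util.Bytes;

    final class CloneSketch {
      public static void main(String[] args) {
        byte[][] original = {Bytes.toBytes("a"), Bytes.toBytes("b")};

        // Shallow: clone() shares the inner byte arrays.
        byte[][] shallow = original.clone();
        shallow[0][0] = 'z';
        System.out.println(Bytes.toString(original[0]));  // prints "z" - leaked!

        // Deep, as getUsernameSplits() does: copy each row too.
        byte[][] original2 = {Bytes.toBytes("a"), Bytes.toBytes("b")};
        byte[][] deep = original2.clone();
        for (int row = 0; row < deep.length; row++) {
          deep[row] = Bytes.copy(original2[row]);
        }
        deep[0][0] = 'z';
        System.out.println(Bytes.toString(original2[0]));  // still "a"
      }
    }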

http://git-wip-us.apache.org/repos/asf/hadoop/blob/9af30d46/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-client/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/common/TypedBufferedMutator.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-client/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/common/TypedBufferedMutator.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-client/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/common/TypedBufferedMutator.java
new file mode 100644
index 0000000..29a07e4
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-client/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/common/TypedBufferedMutator.java
@@ -0,0 +1,73 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.yarn.server.timelineservice.storage.common;
+
+import java.io.IOException;
+import java.util.List;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.client.BufferedMutator;
+import org.apache.hadoop.hbase.client.Mutation;
+
+/**
+ * To be used to wrap an actual {@link BufferedMutator} in a type safe manner.
+ *
+ * @param <T> The class referring to the table to be written to.
+ */
+public class TypedBufferedMutator<T extends BaseTable<T>> {
+
+  private final BufferedMutator bufferedMutator;
+
+  /**
+   * @param bufferedMutator the mutator to be wrapped for delegation. Shall not
+   *          be null.
+   */
+  public TypedBufferedMutator(BufferedMutator bufferedMutator) {
+    this.bufferedMutator = bufferedMutator;
+  }
+
+  public TableName getName() {
+    return bufferedMutator.getName();
+  }
+
+  public Configuration getConfiguration() {
+    return bufferedMutator.getConfiguration();
+  }
+
+  public void mutate(Mutation mutation) throws IOException {
+    bufferedMutator.mutate(mutation);
+  }
+
+  public void mutate(List<? extends Mutation> mutations) throws IOException {
+    bufferedMutator.mutate(mutations);
+  }
+
+  public void close() throws IOException {
+    bufferedMutator.close();
+  }
+
+  public void flush() throws IOException {
+    bufferedMutator.flush();
+  }
+
+  public long getWriteBufferSize() {
+    return bufferedMutator.getWriteBufferSize();
+  }
+
+}
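The wrapper above adds no behaviour of its own; its value is purely compile-time, tying a mutator to one table type. A hypothetical usage sketch follows: the getBufferedMutator call is standard HBase client API, while the EntityTable import path (the common submodule's entity package) is an assumption for illustration.

    import java.io.IOException;

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.BufferedMutator;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.yarn.server.timelineservice.storage.common.TypedBufferedMutator;
    import org.apache.hadoop.yarn.server.timelineservice.storage.entity.EntityTable;

    final class MutatorSketch {
      // Wraps a raw BufferedMutator so the compiler can reject, say, an
      // entity-table mutator where a flow-run-table mutator is expected.
      static void writeOnePut(Connection conn, TableName table, Put put)
          throws IOException {
        BufferedMutator raw = conn.getBufferedMutator(table);
        TypedBufferedMutator<EntityTable> typed =
            new TypedBufferedMutator<EntityTable>(raw);
        try {
          typed.mutate(put);   // buffered; sent as part of a batch
          typed.flush();       // push buffered mutations onto the wire now
        } finally {
          typed.close();
        }
      }
    }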

http://git-wip-us.apache.org/repos/asf/hadoop/blob/9af30d46/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-client/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/common/package-info.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-client/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/common/package-info.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-client/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/common/package-info.java
new file mode 100644
index 0000000..0df5b8a
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-client/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/common/package-info.java
@@ -0,0 +1,28 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/**
+ * Package org.apache.hadoop.yarn.server.timelineservice.storage.common contains
+ * a set of utility classes used across backend storage reader and writer.
+ */
+@InterfaceAudience.Private
+@InterfaceStability.Unstable
+package org.apache.hadoop.yarn.server.timelineservice.storage.common;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/9af30d46/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-client/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/entity/EntityTableRW.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-client/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/entity/EntityTableRW.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-client/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/entity/EntityTableRW.java
new file mode 100644
index 0000000..111ae71
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-client/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/entity/EntityTableRW.java
@@ -0,0 +1,136 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.yarn.server.timelineservice.storage.entity;
+
+import java.io.IOException;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hbase.HColumnDescriptor;
+import org.apache.hadoop.hbase.HTableDescriptor;
+import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.client.Admin;
+import org.apache.hadoop.hbase.regionserver.BloomType;
+import org.apache.hadoop.yarn.conf.YarnConfiguration;
+import org.apache.hadoop.yarn.server.timelineservice.storage.common.BaseTableRW;
+import org.apache.hadoop.yarn.server.timelineservice.storage.common.TimelineHBaseSchemaConstants;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+/**
+ * Create, read and write to the Entity Table.
+ */
+public class EntityTableRW extends BaseTableRW<EntityTable> {
+  /** entity prefix. */
+  private static final String PREFIX =
+      YarnConfiguration.TIMELINE_SERVICE_PREFIX + "entity";
+
+  /** config param name that specifies the entity table name. */
+  public static final String TABLE_NAME_CONF_NAME = PREFIX + ".table.name";
+
+  /**
+   * config param name that specifies the TTL for metrics column family in
+   * entity table.
+   */
+  private static final String METRICS_TTL_CONF_NAME = PREFIX
+      + ".table.metrics.ttl";
+
+  /**
+   * config param name that specifies max-versions for metrics column family in
+   * entity table.
+   */
+  private static final String METRICS_MAX_VERSIONS =
+      PREFIX + ".table.metrics.max-versions";
+
+  /** default value for entity table name. */
+  public static final String DEFAULT_TABLE_NAME = "timelineservice.entity";
+
+  /** default TTL is 30 days for metrics timeseries. */
+  private static final int DEFAULT_METRICS_TTL = 2592000;
+
+  /** default max number of versions. */
+  private static final int DEFAULT_METRICS_MAX_VERSIONS = 10000;
+
+  private static final Logger LOG =
+      LoggerFactory.getLogger(EntityTableRW.class);
+
+  public EntityTableRW() {
+    super(TABLE_NAME_CONF_NAME, DEFAULT_TABLE_NAME);
+  }
+
+  /*
+   * (non-Javadoc)
+   *
+   * @see
+   * org.apache.hadoop.yarn.server.timelineservice.storage.BaseTableRW#
+   * createTable(org.apache.hadoop.hbase.client.Admin,
+   * org.apache.hadoop.conf.Configuration)
+   */
+  public void createTable(Admin admin, Configuration hbaseConf)
+      throws IOException {
+
+    TableName table = getTableName(hbaseConf);
+    if (admin.tableExists(table)) {
+      // do not disable / delete existing table
+      // similar to the approach taken by map-reduce jobs when
+      // output directory exists
+      throw new IOException("Table " + table.getNameAsString()
+          + " already exists.");
+    }
+
+    HTableDescriptor entityTableDescp = new HTableDescriptor(table);
+    HColumnDescriptor infoCF =
+        new HColumnDescriptor(EntityColumnFamily.INFO.getBytes());
+    infoCF.setBloomFilterType(BloomType.ROWCOL);
+    entityTableDescp.addFamily(infoCF);
+
+    HColumnDescriptor configCF =
+        new HColumnDescriptor(EntityColumnFamily.CONFIGS.getBytes());
+    configCF.setBloomFilterType(BloomType.ROWCOL);
+    configCF.setBlockCacheEnabled(true);
+    entityTableDescp.addFamily(configCF);
+
+    HColumnDescriptor metricsCF =
+        new HColumnDescriptor(EntityColumnFamily.METRICS.getBytes());
+    entityTableDescp.addFamily(metricsCF);
+    metricsCF.setBlockCacheEnabled(true);
+    // always keep 1 version (the latest)
+    metricsCF.setMinVersions(1);
+    metricsCF.setMaxVersions(
+        hbaseConf.getInt(METRICS_MAX_VERSIONS, DEFAULT_METRICS_MAX_VERSIONS));
+    metricsCF.setTimeToLive(hbaseConf.getInt(METRICS_TTL_CONF_NAME,
+        DEFAULT_METRICS_TTL));
+    entityTableDescp.setRegionSplitPolicyClassName(
+        "org.apache.hadoop.hbase.regionserver.KeyPrefixRegionSplitPolicy");
+    entityTableDescp.setValue("KeyPrefixRegionSplitPolicy.prefix_length",
+        TimelineHBaseSchemaConstants.USERNAME_SPLIT_KEY_PREFIX_LENGTH);
+    admin.createTable(entityTableDescp,
+        TimelineHBaseSchemaConstants.getUsernameSplits());
+    LOG.info("Status of table creation for " + table.getNameAsString() + "="
+        + admin.tableExists(table));
+  }
+
+  /**
+   * @param metricsTTL time to live parameter for the metrics in this table.
+   * @param hbaseConf configuration in which to set the metrics TTL config
+   *          variable.
+   */
+  public void setMetricsTTL(int metricsTTL, Configuration hbaseConf) {
+    hbaseConf.setInt(METRICS_TTL_CONF_NAME, metricsTTL);
+  }
+
+}
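A short usage sketch for the table-creation path above, assuming the caller already holds an open HBase Admin handle (connection setup elided); only methods shown in this file are used.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.client.Admin;

    final class CreateEntityTableSketch {
      static void create(Admin admin, Configuration hbaseConf)
          throws Exception {
        EntityTableRW entityTable = new EntityTableRW();
        // Optionally override the 30-day default TTL for the metrics
        // column family before the table is created.
        entityTable.setMetricsTTL(7 * 24 * 60 * 60, hbaseConf);  // 7 days
        // Throws IOException if timelineservice.entity already exists.
        entityTable.createTable(admin, hbaseConf);
      }
    }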

http://git-wip-us.apache.org/repos/asf/hadoop/blob/9af30d46/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-client/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/entity/package-info.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-client/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/entity/package-info.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-client/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/entity/package-info.java
new file mode 100644
index 0000000..bb0e331
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-client/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/entity/package-info.java
@@ -0,0 +1,28 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/**
+ * Package org.apache.hadoop.yarn.server.timelineservice.storage.entity
+ * contains classes related to implementation for entity table.
+ */
+@InterfaceAudience.Private
+@InterfaceStability.Unstable
+package org.apache.hadoop.yarn.server.timelineservice.storage.entity;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/9af30d46/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-client/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/flow/FlowActivityTableRW.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-client/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/flow/FlowActivityTableRW.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-client/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/flow/FlowActivityTableRW.java
new file mode 100644
index 0000000..5b9fe13
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-client/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/flow/FlowActivityTableRW.java
@@ -0,0 +1,91 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.yarn.server.timelineservice.storage.flow;
+
+import java.io.IOException;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hbase.HColumnDescriptor;
+import org.apache.hadoop.hbase.HTableDescriptor;
+import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.client.Admin;
+import org.apache.hadoop.hbase.regionserver.BloomType;
+import org.apache.hadoop.yarn.conf.YarnConfiguration;
+import org.apache.hadoop.yarn.server.timelineservice.storage.common.BaseTableRW;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+/**
+ * Create, read and write to the FlowActivity Table.
+ */
+public class FlowActivityTableRW extends BaseTableRW<FlowActivityTable> {
+  /** flow activity table prefix. */
+  private static final String PREFIX =
+      YarnConfiguration.TIMELINE_SERVICE_PREFIX + ".flowactivity";
+
+  /** config param name that specifies the flowactivity table name. */
+  public static final String TABLE_NAME_CONF_NAME = PREFIX + ".table.name";
+
+  /** default value for flowactivity table name. */
+  public static final String DEFAULT_TABLE_NAME =
+      "timelineservice.flowactivity";
+
+  private static final Logger LOG =
+      LoggerFactory.getLogger(FlowActivityTableRW.class);
+
+  /** default max number of versions. */
+  public static final int DEFAULT_METRICS_MAX_VERSIONS = Integer.MAX_VALUE;
+
+  public FlowActivityTableRW() {
+    super(TABLE_NAME_CONF_NAME, DEFAULT_TABLE_NAME);
+  }
+
+  /*
+   * (non-Javadoc)
+   *
+   * @see
+   * org.apache.hadoop.yarn.server.timelineservice.storage.BaseTableRW#
+   * createTable(org.apache.hadoop.hbase.client.Admin,
+   * org.apache.hadoop.conf.Configuration)
+   */
+  public void createTable(Admin admin, Configuration hbaseConf)
+      throws IOException {
+
+    TableName table = getTableName(hbaseConf);
+    if (admin.tableExists(table)) {
+      // do not disable / delete existing table
+      // similar to the approach taken by map-reduce jobs when
+      // output directory exists
+      throw new IOException("Table " + table.getNameAsString()
+          + " already exists.");
+    }
+
+    HTableDescriptor flowActivityTableDescp = new HTableDescriptor(table);
+    HColumnDescriptor infoCF =
+        new HColumnDescriptor(FlowActivityColumnFamily.INFO.getBytes());
+    infoCF.setBloomFilterType(BloomType.ROWCOL);
+    flowActivityTableDescp.addFamily(infoCF);
+    infoCF.setMinVersions(1);
+    infoCF.setMaxVersions(DEFAULT_METRICS_MAX_VERSIONS);
+
+    // TODO: figure the split policy before running in production
+    admin.createTable(flowActivityTableDescp);
+    LOG.info("Status of table creation for " + table.getNameAsString() + "="
+        + admin.tableExists(table));
+  }
+}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hadoop/blob/9af30d46/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-client/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/flow/FlowRunTableRW.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-client/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/flow/FlowRunTableRW.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-client/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/flow/FlowRunTableRW.java
new file mode 100644
index 0000000..61c0734
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-client/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/flow/FlowRunTableRW.java
@@ -0,0 +1,102 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.yarn.server.timelineservice.storage.flow;
+
+import java.io.IOException;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hbase.HColumnDescriptor;
+import org.apache.hadoop.hbase.HTableDescriptor;
+import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.client.Admin;
+import org.apache.hadoop.hbase.regionserver.BloomType;
+import org.apache.hadoop.yarn.conf.YarnConfiguration;
+import org.apache.hadoop.yarn.server.timelineservice.storage.common.BaseTableRW;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hbase.Coprocessor;
+
+/**
+ * Create, read and write to the FlowRun table.
+ */
+public class FlowRunTableRW extends BaseTableRW<FlowRunTable> {
+  /** entity prefix. */
+  private static final String PREFIX =
+      YarnConfiguration.TIMELINE_SERVICE_PREFIX + ".flowrun";
+
+  /** config param name that specifies the flowrun table name. */
+  public static final String TABLE_NAME_CONF_NAME = PREFIX + ".table.name";
+
+  /** default value for flowrun table name. */
+  public static final String DEFAULT_TABLE_NAME = "timelineservice.flowrun";
+
+  private static final Logger LOG =
+      LoggerFactory.getLogger(FlowRunTableRW.class);
+
+  /** default max number of versions. */
+  public static final int DEFAULT_METRICS_MAX_VERSIONS = Integer.MAX_VALUE;
+
+  public FlowRunTableRW() {
+    super(TABLE_NAME_CONF_NAME, DEFAULT_TABLE_NAME);
+  }
+
+  /*
+   * (non-Javadoc)
+   *
+   * @see
+   * org.apache.hadoop.yarn.server.timelineservice.storage.BaseTableRW#
+   * createTable(org.apache.hadoop.hbase.client.Admin,
+   * org.apache.hadoop.conf.Configuration)
+   */
+  public void createTable(Admin admin, Configuration hbaseConf)
+      throws IOException {
+
+    TableName table = getTableName(hbaseConf);
+    if (admin.tableExists(table)) {
+      // do not disable / delete existing table
+      // similar to the approach taken by map-reduce jobs when
+      // output directory exists
+      throw new IOException("Table " + table.getNameAsString()
+          + " already exists.");
+    }
+
+    HTableDescriptor flowRunTableDescp = new HTableDescriptor(table);
+    HColumnDescriptor infoCF =
+        new HColumnDescriptor(FlowRunColumnFamily.INFO.getBytes());
+    infoCF.setBloomFilterType(BloomType.ROWCOL);
+    flowRunTableDescp.addFamily(infoCF);
+    infoCF.setMinVersions(1);
+    infoCF.setMaxVersions(DEFAULT_METRICS_MAX_VERSIONS);
+
+    // TODO: figure the split policy
+    String coprocessorJarPathStr = hbaseConf.get(
+        YarnConfiguration.FLOW_RUN_COPROCESSOR_JAR_HDFS_LOCATION,
+        YarnConfiguration.DEFAULT_HDFS_LOCATION_FLOW_RUN_COPROCESSOR_JAR);
+
+    Path coprocessorJarPath = new Path(coprocessorJarPathStr);
+    LOG.info("CoprocessorJarPath=" + coprocessorJarPath.toString());
+    flowRunTableDescp.addCoprocessor(
+        "org.apache.hadoop.yarn.server.timelineservice.storage." +
+            "flow.FlowRunCoprocessor", coprocessorJarPath,
+        Coprocessor.PRIORITY_USER, null);
+    admin.createTable(flowRunTableDescp);
+    LOG.info("Status of table creation for " + table.getNameAsString() + "="
+        + admin.tableExists(table));
+  }
+}
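One operational wrinkle worth calling out: unlike the other tables, the flow run table attaches FlowRunCoprocessor from a jar at an HDFS path, so that path must be set (or the default must hold) before createTable runs. A sketch using the YarnConfiguration key shown above; the jar location itself is a made-up example path.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.yarn.conf.YarnConfiguration;

    final class CreateFlowRunTableSketch {
      static void create(Admin admin, Configuration hbaseConf)
          throws Exception {
        // Point the table at the deployed coprocessor jar; region servers
        // load FlowRunCoprocessor from this location at table creation.
        hbaseConf.set(YarnConfiguration.FLOW_RUN_COPROCESSOR_JAR_HDFS_LOCATION,
            "hdfs:///hbase/coprocessor/timelineservice-coprocessor.jar");
        new FlowRunTableRW().createTable(admin, hbaseConf);
      }
    }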

http://git-wip-us.apache.org/repos/asf/hadoop/blob/9af30d46/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-client/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/flow/package-info.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-client/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/flow/package-info.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-client/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/flow/package-info.java
new file mode 100644
index 0000000..04963f3
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-client/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/flow/package-info.java
@@ -0,0 +1,29 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/**
+ * Package org.apache.hadoop.yarn.server.timelineservice.storage.flow
+ * contains classes related to implementation for flow related tables, viz. flow
+ * run table and flow activity table.
+ */
+@InterfaceAudience.Private
+@InterfaceStability.Unstable
+package org.apache.hadoop.yarn.server.timelineservice.storage.flow;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/9af30d46/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-client/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/package-info.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-client/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/package-info.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-client/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/package-info.java
new file mode 100644
index 0000000..e78db2a
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-client/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/package-info.java
@@ -0,0 +1,28 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/**
+ * Package org.apache.hadoop.yarn.server.timelineservice.storage contains
+ * classes which define and implement reading and writing to backend storage.
+ */
+@InterfaceAudience.Private
+@InterfaceStability.Unstable
+package org.apache.hadoop.yarn.server.timelineservice.storage;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/9af30d46/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-client/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/reader/AbstractTimelineStorageReader.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-client/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/reader/AbstractTimelineStorageReader.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-client/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/reader/AbstractTimelineStorageReader.java
new file mode 100644
index 0000000..0956f1e
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-client/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/reader/AbstractTimelineStorageReader.java
@@ -0,0 +1,159 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.yarn.server.timelineservice.storage.reader;
+
+import java.io.IOException;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hbase.client.Connection;
+import org.apache.hadoop.hbase.client.Get;
+import org.apache.hadoop.hbase.client.Result;
+import org.apache.hadoop.yarn.server.timelineservice.reader.TimelineReaderContext;
+import org.apache.hadoop.yarn.server.timelineservice.storage.apptoflow.AppToFlowColumnPrefix;
+import org.apache.hadoop.yarn.server.timelineservice.storage.apptoflow.AppToFlowRowKey;
+import org.apache.hadoop.yarn.server.timelineservice.storage.apptoflow.AppToFlowTableRW;
+import org.apache.hadoop.yarn.server.timelineservice.storage.common.ColumnRWHelper;
+import org.apache.hadoop.yarn.webapp.NotFoundException;
+
+/**
+ * The base class for reading timeline data from the HBase storage. This class
+ * provides basic support to validate and augment reader context.
+ */
+public abstract class AbstractTimelineStorageReader {
+
+  private final TimelineReaderContext context;
+  /**
+   * Used to look up the flow context.
+   */
+  private final AppToFlowTableRW appToFlowTable = new AppToFlowTableRW();
+
+  public AbstractTimelineStorageReader(TimelineReaderContext ctxt) {
+    context = ctxt;
+  }
+
+  protected TimelineReaderContext getContext() {
+    return context;
+  }
+
+  /**
+   * Looks up flow context from AppToFlow table.
+   *
+   * @param appToFlowRowKey to identify Cluster and App Ids.
+   * @param clusterId the cluster id.
+   * @param hbaseConf HBase configuration.
+   * @param conn HBase Connection.
+   * @return flow context information.
+   * @throws IOException if any problem occurs while fetching flow information.
+   */
+  protected FlowContext lookupFlowContext(AppToFlowRowKey appToFlowRowKey,
+      String clusterId, Configuration hbaseConf, Connection conn)
+      throws IOException {
+    byte[] rowKey = appToFlowRowKey.getRowKey();
+    Get get = new Get(rowKey);
+    Result result = appToFlowTable.getResult(hbaseConf, conn, get);
+    if (result != null && !result.isEmpty()) {
+      Object flowName = ColumnRWHelper.readResult(
+          result, AppToFlowColumnPrefix.FLOW_NAME, clusterId);
+      Object flowRunId = ColumnRWHelper.readResult(
+          result, AppToFlowColumnPrefix.FLOW_RUN_ID, clusterId);
+      Object userId = ColumnRWHelper.readResult(
+          result, AppToFlowColumnPrefix.USER_ID, clusterId);
+      if (flowName == null || userId == null || flowRunId == null) {
+        throw new NotFoundException(
+            "Unable to find the flow context (flow name, flow run id and "
+            + "user id) for clusterId=" + clusterId
+            + ", appId=" + appToFlowRowKey.getAppId());
+      }
+      return new FlowContext((String)userId, (String)flowName,
+          ((Number)flowRunId).longValue());
+    } else {
+      throw new NotFoundException(
+          "Unable to find the flow context (flow name, flow run id and "
+          + "user id) for clusterId=" + clusterId
+          + ", appId=" + appToFlowRowKey.getAppId());
+    }
+  }
+
+  /**
+   * Sets certain parameters to defaults if the values are not provided.
+   *
+   * @param hbaseConf HBase Configuration.
+   * @param conn HBase Connection.
+   * @throws IOException if any exception is encountered while setting params.
+   */
+  protected void augmentParams(Configuration hbaseConf, Connection conn)
+      throws IOException {
+    defaultAugmentParams(hbaseConf, conn);
+  }
+
+  /**
+   * Default behavior for all timeline readers to augment parameters.
+   *
+   * @param hbaseConf HBase Configuration.
+   * @param conn HBase Connection.
+   * @throws IOException if any exception is encountered while setting params.
+   */
+  protected final void defaultAugmentParams(Configuration hbaseConf,
+      Connection conn) throws IOException {
+    // In reality, either all three are null or none of them is.
+    if (context.getFlowName() == null || context.getFlowRunId() == null
+        || context.getUserId() == null) {
+      // Get flow context information from AppToFlow table.
+      AppToFlowRowKey appToFlowRowKey =
+          new AppToFlowRowKey(context.getAppId());
+      FlowContext flowContext =
+          lookupFlowContext(appToFlowRowKey, context.getClusterId(), hbaseConf,
+          conn);
+      context.setFlowName(flowContext.flowName);
+      context.setFlowRunId(flowContext.flowRunId);
+      context.setUserId(flowContext.userId);
+    }
+  }
+
+  /**
+   * Validates the required parameters to read the entities.
+   */
+  protected abstract void validateParams();
+
+  /**
+   * Encapsulates flow context information.
+   */
+  protected static class FlowContext {
+    private final String userId;
+    private final String flowName;
+    private final Long flowRunId;
+
+    public FlowContext(String user, String flowName, Long flowRunId) {
+      this.userId = user;
+      this.flowName = flowName;
+      this.flowRunId = flowRunId;
+    }
+
+    protected String getUserId() {
+      return userId;
+    }
+
+    protected String getFlowName() {
+      return flowName;
+    }
+
+    protected Long getFlowRunId() {
+      return flowRunId;
+    }
+  }
+}
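To see how the template above is meant to be filled in, here is a deliberately minimal, hypothetical subclass; the real readers in this patch carry far more state, so treat this only as an illustration of the validateParams/augmentParams contract (the subclass must live in the same package to reach the protected members).

    import java.io.IOException;

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.yarn.server.timelineservice.reader.TimelineReaderContext;

    final class MinimalStorageReader extends AbstractTimelineStorageReader {

      MinimalStorageReader(TimelineReaderContext ctxt) {
        super(ctxt);
      }

      // Fail fast before any HBase round trip is attempted.
      @Override
      protected void validateParams() {
        if (getContext() == null || getContext().getAppId() == null) {
          throw new IllegalArgumentException("context and appId are required");
        }
      }

      // Fill in flow name / flow run id / user id from the AppToFlow table
      // when the caller did not supply them; defaultAugmentParams is final
      // and shared by all readers.
      @Override
      protected void augmentParams(Configuration hbaseConf, Connection conn)
          throws IOException {
        defaultAugmentParams(hbaseConf, conn);
      }
    }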


---------------------------------------------------------------------
To unsubscribe, e-mail: common-commits-unsubscribe@hadoop.apache.org
For additional commands, e-mail: common-commits-help@hadoop.apache.org


[11/50] [abbrv] hadoop git commit: YARN-7919. Refactor timelineservice-hbase module into submodules. Contributed by Haibo Chen.

Posted by ha...@apache.org.
http://git-wip-us.apache.org/repos/asf/hadoop/blob/9af30d46/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-common/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/subapplication/SubApplicationRowKey.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-common/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/subapplication/SubApplicationRowKey.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-common/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/subapplication/SubApplicationRowKey.java
new file mode 100644
index 0000000..fb1f774
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-common/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/subapplication/SubApplicationRowKey.java
@@ -0,0 +1,290 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.yarn.server.timelineservice.storage.subapplication;
+
+import java.util.List;
+
+import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.yarn.server.timelineservice.reader.TimelineReaderUtils;
+import org.apache.hadoop.yarn.server.timelineservice.storage.common.KeyConverter;
+import org.apache.hadoop.yarn.server.timelineservice.storage.common.KeyConverterToString;
+import org.apache.hadoop.yarn.server.timelineservice.storage.common.Separator;
+
+/**
+ * Represents a rowkey for the sub app table.
+ */
+public class SubApplicationRowKey {
+  private final String subAppUserId;
+  private final String clusterId;
+  private final String entityType;
+  private final Long entityIdPrefix;
+  private final String entityId;
+  private final String userId;
+  private final SubApplicationRowKeyConverter subAppRowKeyConverter =
+      new SubApplicationRowKeyConverter();
+
+  public SubApplicationRowKey(String subAppUserId, String clusterId,
+      String entityType, Long entityIdPrefix, String entityId, String userId) {
+    this.subAppUserId = subAppUserId;
+    this.clusterId = clusterId;
+    this.entityType = entityType;
+    this.entityIdPrefix = entityIdPrefix;
+    this.entityId = entityId;
+    this.userId = userId;
+  }
+
+  public String getClusterId() {
+    return clusterId;
+  }
+
+  public String getSubAppUserId() {
+    return subAppUserId;
+  }
+
+  public String getEntityType() {
+    return entityType;
+  }
+
+  public String getEntityId() {
+    return entityId;
+  }
+
+  public Long getEntityIdPrefix() {
+    return entityIdPrefix;
+  }
+
+  public String getUserId() {
+    return userId;
+  }
+
+  /**
+   * Constructs a row key for the sub app table as follows:
+   * {@code subAppUserId!clusterId!entityType
+   * !entityPrefix!entityId!userId}.
+   * Typically used while querying a specific sub app.
+   *
+   * subAppUserId is usually the doAsUser.
+   * userId is the yarn user that the AM runs as.
+   *
+   * @return byte array with the row key.
+   */
+  public byte[] getRowKey() {
+    return subAppRowKeyConverter.encode(this);
+  }
+
+  /**
+   * Given the raw row key as bytes, returns the row key as an object.
+   *
+   * @param rowKey byte representation of row key.
+   * @return A <cite>SubApplicationRowKey</cite> object.
+   */
+  public static SubApplicationRowKey parseRowKey(byte[] rowKey) {
+    return new SubApplicationRowKeyConverter().decode(rowKey);
+  }
+
+  /**
+   * Constructs a row key for the sub app table as follows:
+   * <p>
+   * {@code subAppUserId!clusterId!
+   * entityType!entityIdPrefix!entityId!userId}.
+   *
+   * subAppUserId is usually the doAsUser.
+   * userId is the yarn user that the AM runs as.
+   *
+   * </p>
+   *
+   * @return String representation of row key.
+   */
+  public String getRowKeyAsString() {
+    return subAppRowKeyConverter.encodeAsString(this);
+  }
+
+  /**
+   * Given the encoded row key as string, returns the row key as an object.
+   *
+   * @param encodedRowKey String representation of row key.
+   * @return A <cite>SubApplicationRowKey</cite> object.
+   */
+  public static SubApplicationRowKey parseRowKeyFromString(
+      String encodedRowKey) {
+    return new SubApplicationRowKeyConverter().decodeFromString(encodedRowKey);
+  }
+
+  /**
+   * Encodes and decodes the row key for the sub app table.
+   * The row key is of the form:
+   * subAppUserId!clusterId!entityType!entityIdPrefix!entityId!userId
+   *
+   * subAppUserId is usually the doAsUser.
+   * userId is the yarn user that the AM runs as.
+   */
+  private static final class SubApplicationRowKeyConverter
+      implements KeyConverter<SubApplicationRowKey>,
+      KeyConverterToString<SubApplicationRowKey> {
+
+    private SubApplicationRowKeyConverter() {
+    }
+
+    /**
+     * sub app row key is of the form
+     * subAppUserId!clusterId!entityType!entityPrefix!entityId!userId
+     * with each segment separated by !.
+     *
+     * subAppUserId is usually the doAsUser.
+     * userId is the yarn user that the AM runs as.
+     *
+     * The sizes below indicate sizes of each one of these
+     * segments in sequence. clusterId, subAppUserId, entityType,
+     * entityId and userId are strings.
+     * entity prefix is a long hence 8 bytes in size. Strings are
+     * variable in size (i.e. end whenever separator is encountered).
+     * This is used while decoding and helps in determining where to split.
+     */
+    private static final int[] SEGMENT_SIZES = {Separator.VARIABLE_SIZE,
+        Separator.VARIABLE_SIZE, Separator.VARIABLE_SIZE, Bytes.SIZEOF_LONG,
+        Separator.VARIABLE_SIZE, Separator.VARIABLE_SIZE};
+
+    /*
+     * (non-Javadoc)
+     *
+     * Encodes SubApplicationRowKey object into a byte array with each
+     * component/field in SubApplicationRowKey separated by
+     * Separator#QUALIFIERS.
+     * This leads to a sub app table row key of the form
+     * subAppUserId!clusterId!entityType!entityPrefix!entityId!userId
+     *
+     * subAppUserId is usually the doAsUser.
+     * userId is the yarn user that the AM runs as.
+     *
+     * If entityType in passed SubApplicationRowKey object is null (and the
+     * fields preceding it are not null i.e. clusterId, subAppUserId), this
+     * returns a row key prefix of the form subAppUserId!clusterId!
+     * If entityId in SubApplicationRowKey is null
+     * (other components are not null), this returns a row key prefix
+     * of the form subAppUserId!clusterId!entityType!
+     *
+     * @see org.apache.hadoop.yarn.server.timelineservice.storage.common
+     * .KeyConverter#encode(java.lang.Object)
+     */
+    @Override
+    public byte[] encode(SubApplicationRowKey rowKey) {
+      byte[] subAppUser = Separator.encode(rowKey.getSubAppUserId(),
+          Separator.SPACE, Separator.TAB, Separator.QUALIFIERS);
+      byte[] cluster = Separator.encode(rowKey.getClusterId(), Separator.SPACE,
+          Separator.TAB, Separator.QUALIFIERS);
+      byte[] first = Separator.QUALIFIERS.join(subAppUser, cluster);
+      if (rowKey.getEntityType() == null) {
+        return first;
+      }
+      byte[] entityType = Separator.encode(rowKey.getEntityType(),
+          Separator.SPACE, Separator.TAB, Separator.QUALIFIERS);
+
+      if (rowKey.getEntityIdPrefix() == null) {
+        return Separator.QUALIFIERS.join(first, entityType,
+            Separator.EMPTY_BYTES);
+      }
+
+      byte[] entityIdPrefix = Bytes.toBytes(rowKey.getEntityIdPrefix());
+
+      if (rowKey.getEntityId() == null) {
+        return Separator.QUALIFIERS.join(first, entityType, entityIdPrefix,
+            Separator.EMPTY_BYTES);
+      }
+
+      byte[] entityId = Separator.encode(rowKey.getEntityId(), Separator.SPACE,
+          Separator.TAB, Separator.QUALIFIERS);
+
+      byte[] userId = Separator.encode(rowKey.getUserId(),
+          Separator.SPACE, Separator.TAB, Separator.QUALIFIERS);
+
+      byte[] second = Separator.QUALIFIERS.join(entityType, entityIdPrefix,
+          entityId, userId);
+
+      return Separator.QUALIFIERS.join(first, second);
+    }
+
+    /*
+     * (non-Javadoc)
+     *
+     * Decodes a sub application row key, represented in byte format and of
+     * the form
+     * subAppUserId!clusterId!entityType!entityIdPrefix!entityId!userId,
+     * into a SubApplicationRowKey object.
+     *
+     * subAppUserId is usually the doAsUser.
+     * userId is the yarn user that the AM runs as.
+     *
+     * @see org.apache.hadoop.yarn.server.timelineservice.storage.common
+     * .KeyConverter#decode(byte[])
+     */
+    @Override
+    public SubApplicationRowKey decode(byte[] rowKey) {
+      byte[][] rowKeyComponents =
+          Separator.QUALIFIERS.split(rowKey, SEGMENT_SIZES);
+      if (rowKeyComponents.length != 6) {
+        throw new IllegalArgumentException(
+            "the row key is not valid for a sub app");
+      }
+      String subAppUserId =
+          Separator.decode(Bytes.toString(rowKeyComponents[0]),
+              Separator.QUALIFIERS, Separator.TAB, Separator.SPACE);
+      String clusterId = Separator.decode(Bytes.toString(rowKeyComponents[1]),
+          Separator.QUALIFIERS, Separator.TAB, Separator.SPACE);
+      String entityType = Separator.decode(Bytes.toString(rowKeyComponents[2]),
+          Separator.QUALIFIERS, Separator.TAB, Separator.SPACE);
+
+      Long entityPrefixId = Bytes.toLong(rowKeyComponents[3]);
+
+      String entityId = Separator.decode(Bytes.toString(rowKeyComponents[4]),
+          Separator.QUALIFIERS, Separator.TAB, Separator.SPACE);
+      String userId =
+          Separator.decode(Bytes.toString(rowKeyComponents[5]),
+              Separator.QUALIFIERS, Separator.TAB, Separator.SPACE);
+
+      return new SubApplicationRowKey(subAppUserId, clusterId, entityType,
+          entityPrefixId, entityId, userId);
+    }
+
+    @Override
+    public String encodeAsString(SubApplicationRowKey key) {
+      if (key.subAppUserId == null || key.clusterId == null
+          || key.entityType == null || key.entityIdPrefix == null
+          || key.entityId == null || key.userId == null) {
+        throw new IllegalArgumentException(
+            "Row key components must not be null.");
+      }
+      return TimelineReaderUtils.joinAndEscapeStrings(
+          new String[] {key.subAppUserId, key.clusterId, key.entityType,
+              key.entityIdPrefix.toString(), key.entityId, key.userId});
+    }
+
+    @Override
+    public SubApplicationRowKey decodeFromString(String encodedRowKey) {
+      List<String> split = TimelineReaderUtils.split(encodedRowKey);
+      if (split == null || split.size() != 6) {
+        throw new IllegalArgumentException(
+            "Invalid row key for sub app table.");
+      }
+      Long entityIdPrefix = Long.valueOf(split.get(3));
+      return new SubApplicationRowKey(split.get(0), split.get(1),
+          split.get(2), entityIdPrefix, split.get(4), split.get(5));
+    }
+  }
+}
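
A minimal round-trip sketch for the two encodings above (hypothetical ids;
assumes the six-argument constructor and the getRowKey()/getRowKeyAsString()
accessors defined in this class):

    SubApplicationRowKey key = new SubApplicationRowKey(
        "doAsUser", "yarn-cluster", "YARN_CONTAINER",
        1L, "container_e01_000001", "amUser");

    // Byte form, used as the HBase row key.
    byte[] bytes = key.getRowKey();
    SubApplicationRowKey fromBytes = SubApplicationRowKey.parseRowKey(bytes);

    // Escaped string form, suitable for passing through REST URLs.
    String str = key.getRowKeyAsString();
    SubApplicationRowKey fromString =
        SubApplicationRowKey.parseRowKeyFromString(str);

Both parsed keys carry the same six components as the original.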

http://git-wip-us.apache.org/repos/asf/hadoop/blob/9af30d46/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-common/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/subapplication/SubApplicationRowKeyPrefix.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-common/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/subapplication/SubApplicationRowKeyPrefix.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-common/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/subapplication/SubApplicationRowKeyPrefix.java
new file mode 100644
index 0000000..0c04959
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-common/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/subapplication/SubApplicationRowKeyPrefix.java
@@ -0,0 +1,69 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.yarn.server.timelineservice.storage.subapplication;
+
+import org.apache.hadoop.yarn.server.timelineservice.storage.common.RowKeyPrefix;
+
+/**
+ * Represents a partial row key for the sub application table: one without
+ * the entityId, or one without both the entityType and the entityId.
+ */
+public class SubApplicationRowKeyPrefix extends SubApplicationRowKey
+    implements RowKeyPrefix<SubApplicationRowKey> {
+
+  /**
+   * Creates a prefix which generates row key prefixes for the sub
+   * application table of the form
+   * {@code subAppUserId!clusterId!entityType!entityIdPrefix!entityId!userId},
+   * with trailing null components omitted.
+   *
+   * @param subAppUserId
+   *          identifying the subApp User
+   * @param clusterId
+   *          identifying the cluster
+   * @param entityType
+   *          which entity type
+   * @param entityIdPrefix
+   *          for entityId
+   * @param entityId
+   *          for an entity
+   * @param userId
+   *          for the user who runs the AM
+   *
+   * subAppUserId is usually the doAsUser.
+   * userId is the yarn user that the AM runs as.
+   *
+   */
+  public SubApplicationRowKeyPrefix(String subAppUserId, String clusterId,
+      String entityType, Long entityIdPrefix, String entityId,
+      String userId) {
+    super(subAppUserId, clusterId, entityType, entityIdPrefix, entityId,
+        userId);
+  }
+
+  /*
+   * (non-Javadoc)
+   *
+   * @see org.apache.hadoop.yarn.server.timelineservice.storage.subapplication.
+   * RowKeyPrefix#getRowKeyPrefix()
+   */
+  @Override
+  public byte[] getRowKeyPrefix() {
+    return super.getRowKey();
+  }
+
+}
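
A sketch of how such a prefix might drive a scan (hypothetical ids; assumes
an HBase client Scan that supports setRowPrefixFilter). Passing null for the
trailing components stops the encoding at the last non-null segment:

    byte[] prefix = new SubApplicationRowKeyPrefix(
        "doAsUser", "yarn-cluster", "YARN_CONTAINER",
        null, null, null).getRowKeyPrefix();
    Scan scan = new Scan().setRowPrefixFilter(prefix);
    // Matches every entity of type YARN_CONTAINER written for doAsUser
    // on yarn-cluster.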

http://git-wip-us.apache.org/repos/asf/hadoop/blob/9af30d46/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-common/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/subapplication/SubApplicationTable.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-common/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/subapplication/SubApplicationTable.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-common/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/subapplication/SubApplicationTable.java
new file mode 100644
index 0000000..de7dd4d
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-common/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/subapplication/SubApplicationTable.java
@@ -0,0 +1,64 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.server.timelineservice.storage.subapplication;
+
+import org.apache.hadoop.yarn.server.timelineservice.storage.common.BaseTable;
+
+/**
+ * The sub application table has three column families:
+ * info, config and metrics.
+ * The info family stores information about a timeline entity object,
+ * the config family stores the configuration data of a timeline entity
+ * object, and the metrics family stores the metrics of a timeline entity
+ * object.
+ *
+ * Example sub application table record:
+ *
+ * <pre>
+ * |-------------------------------------------------------------------------|
+ * |  Row          | Column Family             | Column Family| Column Family|
+ * |  key          | info                      | metrics      | config       |
+ * |-------------------------------------------------------------------------|
+ * | subAppUserId! | id:entityId               | metricId1:   | configKey1:  |
+ * | clusterId!    | type:entityType           | metricValue1 | configValue1 |
+ * | entityType!   |                           | @timestamp1  |              |
+ * | idPrefix!     |                           |              | configKey2:  |
+ * | entityId!     | created_time:             | metricId1:   | configValue2 |
+ * | userId        | 1392993084018             | metricValue2 |              |
+ * |               |                           | @timestamp2  |              |
+ * |               | i!infoKey:                |              |              |
+ * |               | infoValue                 | metricId1:   |              |
+ * |               |                           | metricValue1 |              |
+ * |               |                           | @timestamp2  |              |
+ * |               | e!eventId=timestamp=      |              |              |
+ * |               | infoKey:                  |              |              |
+ * |               | eventInfoValue            |              |              |
+ * |               |                           |              |              |
+ * |               | r!relatesToKey:           |              |              |
+ * |               | id3=id4=id5               |              |              |
+ * |               |                           |              |              |
+ * |               | s!isRelatedToKey          |              |              |
+ * |               | id7=id9=id6               |              |              |
+ * |               |                           |              |              |
+ * |               | flowVersion:              |              |              |
+ * |               | versionValue              |              |              |
+ * |-------------------------------------------------------------------------|
+ * </pre>
+ */
+public final class SubApplicationTable extends BaseTable<SubApplicationTable> {
+}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/9af30d46/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-common/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/subapplication/package-info.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-common/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/subapplication/package-info.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-common/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/subapplication/package-info.java
new file mode 100644
index 0000000..52cc399
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-common/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/subapplication/package-info.java
@@ -0,0 +1,28 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/**
+ * Package org.apache.hadoop.yarn.server.timelineservice.storage.subapplication
+ * contains classes related to the implementation of the sub application
+ * table.
+ */
+@InterfaceAudience.Private
+@InterfaceStability.Unstable
+package org.apache.hadoop.yarn.server.timelineservice.storage.subapplication;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/9af30d46/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-common/src/test/java/org/apache/hadoop/yarn/server/timelineservice/storage/common/TestCustomApplicationIdConversion.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-common/src/test/java/org/apache/hadoop/yarn/server/timelineservice/storage/common/TestCustomApplicationIdConversion.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-common/src/test/java/org/apache/hadoop/yarn/server/timelineservice/storage/common/TestCustomApplicationIdConversion.java
new file mode 100644
index 0000000..0dc344f
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-common/src/test/java/org/apache/hadoop/yarn/server/timelineservice/storage/common/TestCustomApplicationIdConversion.java
@@ -0,0 +1,39 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.yarn.server.timelineservice.storage.common;
+
+import org.apache.hadoop.yarn.api.records.ApplicationId;
+import org.junit.Assert;
+import org.junit.Test;
+
+/**
+ * Test for HBaseTimelineStorageUtils.convertApplicationIdToString(),
+ * a custom conversion from ApplicationId to String that avoids the
+ * incompatibility issue caused by mixing hadoop-common 2.5.1 and
+ * hadoop-yarn-api 3.0. See YARN-6905.
+ */
+public class TestCustomApplicationIdConversion {
+  @Test
+  public void testConvertApplicationIdToString() {
+    ApplicationId applicationId = ApplicationId.newInstance(0, 1);
+    String applicationIdStr =
+        HBaseTimelineSchemaUtils.convertApplicationIdToString(applicationId);
+    Assert.assertEquals(applicationId,
+        ApplicationId.fromString(applicationIdStr));
+  }
+}
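
Based on the round-trip assertion above, the custom conversion is expected to
produce the standard textual form of an ApplicationId, for example:

    ApplicationId appId = ApplicationId.newInstance(0, 1);
    // appId.toString() and the custom conversion both yield
    // "application_0_0001": "application_" + clusterTimestamp + "_" +
    // the sequence id zero-padded to four digits.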

http://git-wip-us.apache.org/repos/asf/hadoop/blob/9af30d46/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-common/src/test/java/org/apache/hadoop/yarn/server/timelineservice/storage/common/TestKeyConverters.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-common/src/test/java/org/apache/hadoop/yarn/server/timelineservice/storage/common/TestKeyConverters.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-common/src/test/java/org/apache/hadoop/yarn/server/timelineservice/storage/common/TestKeyConverters.java
new file mode 100644
index 0000000..1bd363f
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-common/src/test/java/org/apache/hadoop/yarn/server/timelineservice/storage/common/TestKeyConverters.java
@@ -0,0 +1,134 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.server.timelineservice.storage.common;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertNull;
+import static org.junit.Assert.assertTrue;
+
+import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.yarn.api.records.ApplicationId;
+import org.junit.Test;
+
+/**
+ * Unit tests for key converters for various tables' row keys.
+ *
+ */
+public class TestKeyConverters {
+
+  @Test
+  public void testAppIdKeyConverter() {
+    AppIdKeyConverter appIdKeyConverter = new AppIdKeyConverter();
+    long currentTs = System.currentTimeMillis();
+    ApplicationId appId1 = ApplicationId.newInstance(currentTs, 1);
+    ApplicationId appId2 = ApplicationId.newInstance(currentTs, 2);
+    ApplicationId appId3 = ApplicationId.newInstance(currentTs + 300, 1);
+    String appIdStr1 = appId1.toString();
+    String appIdStr2 = appId2.toString();
+    String appIdStr3 = appId3.toString();
+    byte[] appIdBytes1 = appIdKeyConverter.encode(appIdStr1);
+    byte[] appIdBytes2 = appIdKeyConverter.encode(appIdStr2);
+    byte[] appIdBytes3 = appIdKeyConverter.encode(appIdStr3);
+    // App ids should be encoded in a manner wherein descending order
+    // is maintained.
+    assertTrue(
+        "Ordering of app ids is incorrect",
+        Bytes.compareTo(appIdBytes1, appIdBytes2) > 0
+            && Bytes.compareTo(appIdBytes1, appIdBytes3) > 0
+            && Bytes.compareTo(appIdBytes2, appIdBytes3) > 0);
+    String decodedAppId1 = appIdKeyConverter.decode(appIdBytes1);
+    String decodedAppId2 = appIdKeyConverter.decode(appIdBytes2);
+    String decodedAppId3 = appIdKeyConverter.decode(appIdBytes3);
+    assertTrue("Decoded app id is not same as the app id encoded",
+        appIdStr1.equals(decodedAppId1));
+    assertTrue("Decoded app id is not same as the app id encoded",
+        appIdStr2.equals(decodedAppId2));
+    assertTrue("Decoded app id is not same as the app id encoded",
+        appIdStr3.equals(decodedAppId3));
+  }
+
+  @Test
+  public void testEventColumnNameConverter() {
+    String eventId = "=foo_=eve=nt=";
+    byte[] valSepBytes = Bytes.toBytes(Separator.VALUES.getValue());
+    byte[] maxByteArr =
+        Bytes.createMaxByteArray(Bytes.SIZEOF_LONG - valSepBytes.length);
+    byte[] ts = Bytes.add(valSepBytes, maxByteArr);
+    Long eventTs = Bytes.toLong(ts);
+    byte[] byteEventColName =
+        new EventColumnName(eventId, eventTs, null).getColumnQualifier();
+    KeyConverter<EventColumnName> eventColumnNameConverter =
+        new EventColumnNameConverter();
+    EventColumnName eventColName =
+        eventColumnNameConverter.decode(byteEventColName);
+    assertEquals(eventId, eventColName.getId());
+    assertEquals(eventTs, eventColName.getTimestamp());
+    assertNull(eventColName.getInfoKey());
+
+    String infoKey = "f=oo_event_in=fo=_key";
+    byteEventColName =
+        new EventColumnName(eventId, eventTs, infoKey).getColumnQualifier();
+    eventColName = eventColumnNameConverter.decode(byteEventColName);
+    assertEquals(eventId, eventColName.getId());
+    assertEquals(eventTs, eventColName.getTimestamp());
+    assertEquals(infoKey, eventColName.getInfoKey());
+  }
+
+  @Test
+  public void testLongKeyConverter() {
+    LongKeyConverter longKeyConverter = new LongKeyConverter();
+    confirmLongKeyConverter(longKeyConverter, Long.MIN_VALUE);
+    confirmLongKeyConverter(longKeyConverter, -1234567890L);
+    confirmLongKeyConverter(longKeyConverter, -128L);
+    confirmLongKeyConverter(longKeyConverter, -127L);
+    confirmLongKeyConverter(longKeyConverter, -1L);
+    confirmLongKeyConverter(longKeyConverter, 0L);
+    confirmLongKeyConverter(longKeyConverter, 1L);
+    confirmLongKeyConverter(longKeyConverter, 127L);
+    confirmLongKeyConverter(longKeyConverter, 128L);
+    confirmLongKeyConverter(longKeyConverter, 1234567890L);
+    confirmLongKeyConverter(longKeyConverter, Long.MAX_VALUE);
+  }
+
+  private void confirmLongKeyConverter(LongKeyConverter longKeyConverter,
+      Long testValue) {
+    Long decoded = longKeyConverter.decode(longKeyConverter.encode(testValue));
+    assertEquals(testValue, decoded);
+  }
+
+  @Test
+  public void testStringKeyConverter() {
+    StringKeyConverter stringKeyConverter = new StringKeyConverter();
+    String phrase = "QuackAttack now!";
+
+    for (int i = 0; i < phrase.length(); i++) {
+      String sub = phrase.substring(i, phrase.length());
+      confirmStringKeyConverter(stringKeyConverter, sub);
+      confirmStringKeyConverter(stringKeyConverter, sub + sub);
+    }
+  }
+
+  private void confirmStringKeyConverter(StringKeyConverter stringKeyConverter,
+      String testValue) {
+    String decoded =
+        stringKeyConverter.decode(stringKeyConverter.encode(testValue));
+    assertEquals(testValue, decoded);
+  }
+
+}
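
The descending byte ordering asserted in testAppIdKeyConverter is the usual
inverted-value trick: store Long.MAX_VALUE minus the value, so that larger
timestamps and sequence numbers encode to byte-wise smaller keys. A sketch of
the idea (assumed to mirror LongConverter.invertLong used elsewhere in this
module):

    static long invertLong(long key) {
      return Long.MAX_VALUE - key;
    }
    // For t2 > t1, Bytes.compareTo(Bytes.toBytes(invertLong(t2)),
    //                              Bytes.toBytes(invertLong(t1))) < 0,
    // so newer apps sort first in HBase's ascending row-key order.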

http://git-wip-us.apache.org/repos/asf/hadoop/blob/9af30d46/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-common/src/test/java/org/apache/hadoop/yarn/server/timelineservice/storage/common/TestRowKeys.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-common/src/test/java/org/apache/hadoop/yarn/server/timelineservice/storage/common/TestRowKeys.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-common/src/test/java/org/apache/hadoop/yarn/server/timelineservice/storage/common/TestRowKeys.java
new file mode 100644
index 0000000..d05cbad
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-common/src/test/java/org/apache/hadoop/yarn/server/timelineservice/storage/common/TestRowKeys.java
@@ -0,0 +1,276 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.yarn.server.timelineservice.storage.common;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertTrue;
+
+import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.yarn.api.records.ApplicationId;
+import org.apache.hadoop.yarn.api.records.timelineservice.TimelineEntity;
+import org.apache.hadoop.yarn.server.timelineservice.storage.application.ApplicationRowKey;
+import org.apache.hadoop.yarn.server.timelineservice.storage.application.ApplicationRowKeyPrefix;
+import org.apache.hadoop.yarn.server.timelineservice.storage.apptoflow.AppToFlowRowKey;
+import org.apache.hadoop.yarn.server.timelineservice.storage.entity.EntityRowKey;
+import org.apache.hadoop.yarn.server.timelineservice.storage.entity.EntityRowKeyPrefix;
+import org.apache.hadoop.yarn.server.timelineservice.storage.flow.FlowActivityRowKey;
+import org.apache.hadoop.yarn.server.timelineservice.storage.flow.FlowActivityRowKeyPrefix;
+import org.apache.hadoop.yarn.server.timelineservice.storage.flow.FlowRunRowKey;
+import org.apache.hadoop.yarn.server.timelineservice.storage.subapplication.SubApplicationRowKey;
+import org.junit.Test;
+
+
+/**
+ * Class to test the row key structures for various tables.
+ *
+ */
+public class TestRowKeys {
+
+  private final static String QUALIFIER_SEP = Separator.QUALIFIERS.getValue();
+  private final static byte[] QUALIFIER_SEP_BYTES = Bytes
+      .toBytes(QUALIFIER_SEP);
+  private final static String CLUSTER = "cl" + QUALIFIER_SEP + "uster";
+  private final static String USER = QUALIFIER_SEP + "user";
+  private final static String SUB_APP_USER = QUALIFIER_SEP + "subAppUser";
+  private final static String FLOW_NAME = "dummy_" + QUALIFIER_SEP + "flow"
+      + QUALIFIER_SEP;
+  private final static Long FLOW_RUN_ID;
+  private final static String APPLICATION_ID;
+  static {
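+    // The byte surgery below keeps the qualifier separator bytes out of the
+    // encoded run id (its leading bytes) and the cluster timestamp (its
+    // trailing bytes), so the fixed-size long segments cannot be mistaken
+    // for separators when row keys are split.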
+    long runid = Long.MAX_VALUE - 900L;
+    byte[] longMaxByteArr = Bytes.toBytes(Long.MAX_VALUE);
+    byte[] byteArr = Bytes.toBytes(runid);
+    int sepByteLen = QUALIFIER_SEP_BYTES.length;
+    if (sepByteLen <= byteArr.length) {
+      for (int i = 0; i < sepByteLen; i++) {
+        byteArr[i] = (byte) (longMaxByteArr[i] - QUALIFIER_SEP_BYTES[i]);
+      }
+    }
+    FLOW_RUN_ID = Bytes.toLong(byteArr);
+    long clusterTs = System.currentTimeMillis();
+    byteArr = Bytes.toBytes(clusterTs);
+    if (sepByteLen <= byteArr.length) {
+      for (int i = 0; i < sepByteLen; i++) {
+        byteArr[byteArr.length - sepByteLen + i] =
+            (byte) (longMaxByteArr[byteArr.length - sepByteLen + i] -
+                QUALIFIER_SEP_BYTES[i]);
+      }
+    }
+    clusterTs = Bytes.toLong(byteArr);
+    int seqId = 222;
+    APPLICATION_ID = ApplicationId.newInstance(clusterTs, seqId).toString();
+  }
+
+  private static void verifyRowPrefixBytes(byte[] byteRowKeyPrefix) {
+    int sepLen = QUALIFIER_SEP_BYTES.length;
+    for (int i = 0; i < sepLen; i++) {
+      assertTrue(
+          "Row key prefix not encoded properly.",
+          byteRowKeyPrefix[byteRowKeyPrefix.length - sepLen + i] ==
+              QUALIFIER_SEP_BYTES[i]);
+    }
+  }
+
+  @Test
+  public void testApplicationRowKey() {
+    byte[] byteRowKey =
+        new ApplicationRowKey(CLUSTER, USER, FLOW_NAME, FLOW_RUN_ID,
+            APPLICATION_ID).getRowKey();
+    ApplicationRowKey rowKey = ApplicationRowKey.parseRowKey(byteRowKey);
+    assertEquals(CLUSTER, rowKey.getClusterId());
+    assertEquals(USER, rowKey.getUserId());
+    assertEquals(FLOW_NAME, rowKey.getFlowName());
+    assertEquals(FLOW_RUN_ID, rowKey.getFlowRunId());
+    assertEquals(APPLICATION_ID, rowKey.getAppId());
+
+    byte[] byteRowKeyPrefix =
+        new ApplicationRowKeyPrefix(CLUSTER, USER, FLOW_NAME, FLOW_RUN_ID)
+            .getRowKeyPrefix();
+    byte[][] splits =
+        Separator.QUALIFIERS.split(byteRowKeyPrefix,
+            new int[] {Separator.VARIABLE_SIZE, Separator.VARIABLE_SIZE,
+                Separator.VARIABLE_SIZE, Bytes.SIZEOF_LONG,
+                Separator.VARIABLE_SIZE});
+    assertEquals(5, splits.length);
+    assertEquals(0, splits[4].length);
+    assertEquals(FLOW_NAME,
+        Separator.QUALIFIERS.decode(Bytes.toString(splits[2])));
+    assertEquals(FLOW_RUN_ID,
+        (Long) LongConverter.invertLong(Bytes.toLong(splits[3])));
+    verifyRowPrefixBytes(byteRowKeyPrefix);
+
+    byteRowKeyPrefix =
+        new ApplicationRowKeyPrefix(CLUSTER, USER, FLOW_NAME).getRowKeyPrefix();
+    splits =
+        Separator.QUALIFIERS.split(byteRowKeyPrefix, new int[] {
+            Separator.VARIABLE_SIZE, Separator.VARIABLE_SIZE,
+            Separator.VARIABLE_SIZE, Separator.VARIABLE_SIZE });
+    assertEquals(4, splits.length);
+    assertEquals(0, splits[3].length);
+    assertEquals(FLOW_NAME,
+        Separator.QUALIFIERS.decode(Bytes.toString(splits[2])));
+    verifyRowPrefixBytes(byteRowKeyPrefix);
+  }
+
+  /**
+   * Tests the converters indirectly through the public methods of the
+   * corresponding rowkey.
+   */
+  @Test
+  public void testAppToFlowRowKey() {
+    byte[] byteRowKey = new AppToFlowRowKey(APPLICATION_ID).getRowKey();
+    AppToFlowRowKey rowKey = AppToFlowRowKey.parseRowKey(byteRowKey);
+    assertEquals(APPLICATION_ID, rowKey.getAppId());
+  }
+
+  @Test
+  public void testEntityRowKey() {
+    TimelineEntity entity = new TimelineEntity();
+    entity.setId("!ent!ity!!id!");
+    entity.setType("entity!Type");
+    entity.setIdPrefix(54321);
+
+    byte[] byteRowKey =
+        new EntityRowKey(CLUSTER, USER, FLOW_NAME, FLOW_RUN_ID, APPLICATION_ID,
+            entity.getType(), entity.getIdPrefix(),
+            entity.getId()).getRowKey();
+    EntityRowKey rowKey = EntityRowKey.parseRowKey(byteRowKey);
+    assertEquals(CLUSTER, rowKey.getClusterId());
+    assertEquals(USER, rowKey.getUserId());
+    assertEquals(FLOW_NAME, rowKey.getFlowName());
+    assertEquals(FLOW_RUN_ID, rowKey.getFlowRunId());
+    assertEquals(APPLICATION_ID, rowKey.getAppId());
+    assertEquals(entity.getType(), rowKey.getEntityType());
+    assertEquals(entity.getIdPrefix(), rowKey.getEntityIdPrefix().longValue());
+    assertEquals(entity.getId(), rowKey.getEntityId());
+
+    byte[] byteRowKeyPrefix =
+        new EntityRowKeyPrefix(CLUSTER, USER, FLOW_NAME, FLOW_RUN_ID,
+            APPLICATION_ID, entity.getType(), null, null)
+                .getRowKeyPrefix();
+    byte[][] splits =
+        Separator.QUALIFIERS.split(
+            byteRowKeyPrefix,
+            new int[] {Separator.VARIABLE_SIZE, Separator.VARIABLE_SIZE,
+                Separator.VARIABLE_SIZE, Bytes.SIZEOF_LONG,
+                AppIdKeyConverter.getKeySize(), Separator.VARIABLE_SIZE,
+                Bytes.SIZEOF_LONG, Separator.VARIABLE_SIZE });
+    assertEquals(7, splits.length);
+    assertEquals(APPLICATION_ID, new AppIdKeyConverter().decode(splits[4]));
+    assertEquals(entity.getType(),
+        Separator.QUALIFIERS.decode(Bytes.toString(splits[5])));
+    verifyRowPrefixBytes(byteRowKeyPrefix);
+
+    byteRowKeyPrefix =
+        new EntityRowKeyPrefix(CLUSTER, USER, FLOW_NAME, FLOW_RUN_ID,
+            APPLICATION_ID).getRowKeyPrefix();
+    splits =
+        Separator.QUALIFIERS.split(
+            byteRowKeyPrefix,
+            new int[] {Separator.VARIABLE_SIZE, Separator.VARIABLE_SIZE,
+                Separator.VARIABLE_SIZE, Bytes.SIZEOF_LONG,
+                AppIdKeyConverter.getKeySize(), Separator.VARIABLE_SIZE});
+    assertEquals(6, splits.length);
+    assertEquals(0, splits[5].length);
+    AppIdKeyConverter appIdKeyConverter = new AppIdKeyConverter();
+    assertEquals(APPLICATION_ID, appIdKeyConverter.decode(splits[4]));
+    verifyRowPrefixBytes(byteRowKeyPrefix);
+  }
+
+  @Test
+  public void testFlowActivityRowKey() {
+    Long ts = 1459900830000L;
+    Long dayTimestamp = HBaseTimelineSchemaUtils.getTopOfTheDayTimestamp(ts);
+    byte[] byteRowKey =
+        new FlowActivityRowKey(CLUSTER, ts, USER, FLOW_NAME).getRowKey();
+    FlowActivityRowKey rowKey = FlowActivityRowKey.parseRowKey(byteRowKey);
+    assertEquals(CLUSTER, rowKey.getClusterId());
+    assertEquals(dayTimestamp, rowKey.getDayTimestamp());
+    assertEquals(USER, rowKey.getUserId());
+    assertEquals(FLOW_NAME, rowKey.getFlowName());
+
+    byte[] byteRowKeyPrefix =
+        new FlowActivityRowKeyPrefix(CLUSTER).getRowKeyPrefix();
+    byte[][] splits =
+        Separator.QUALIFIERS.split(byteRowKeyPrefix, new int[] {
+            Separator.VARIABLE_SIZE, Separator.VARIABLE_SIZE });
+    assertEquals(2, splits.length);
+    assertEquals(0, splits[1].length);
+    assertEquals(CLUSTER,
+        Separator.QUALIFIERS.decode(Bytes.toString(splits[0])));
+    verifyRowPrefixBytes(byteRowKeyPrefix);
+
+    byteRowKeyPrefix =
+        new FlowActivityRowKeyPrefix(CLUSTER, ts).getRowKeyPrefix();
+    splits =
+        Separator.QUALIFIERS.split(byteRowKeyPrefix,
+            new int[] {Separator.VARIABLE_SIZE, Bytes.SIZEOF_LONG,
+                Separator.VARIABLE_SIZE});
+    assertEquals(3, splits.length);
+    assertEquals(0, splits[2].length);
+    assertEquals(CLUSTER,
+        Separator.QUALIFIERS.decode(Bytes.toString(splits[0])));
+    assertEquals(ts,
+        (Long) LongConverter.invertLong(Bytes.toLong(splits[1])));
+    verifyRowPrefixBytes(byteRowKeyPrefix);
+  }
+
+  @Test
+  public void testFlowRunRowKey() {
+    byte[] byteRowKey =
+        new FlowRunRowKey(CLUSTER, USER, FLOW_NAME, FLOW_RUN_ID).getRowKey();
+    FlowRunRowKey rowKey = FlowRunRowKey.parseRowKey(byteRowKey);
+    assertEquals(CLUSTER, rowKey.getClusterId());
+    assertEquals(USER, rowKey.getUserId());
+    assertEquals(FLOW_NAME, rowKey.getFlowName());
+    assertEquals(FLOW_RUN_ID, rowKey.getFlowRunId());
+
+    byte[] byteRowKeyPrefix =
+        new FlowRunRowKey(CLUSTER, USER, FLOW_NAME, null).getRowKey();
+    byte[][] splits =
+        Separator.QUALIFIERS.split(byteRowKeyPrefix, new int[] {
+            Separator.VARIABLE_SIZE, Separator.VARIABLE_SIZE,
+            Separator.VARIABLE_SIZE, Separator.VARIABLE_SIZE });
+    assertEquals(4, splits.length);
+    assertEquals(0, splits[3].length);
+    assertEquals(FLOW_NAME,
+        Separator.QUALIFIERS.decode(Bytes.toString(splits[2])));
+    verifyRowPrefixBytes(byteRowKeyPrefix);
+  }
+
+  @Test
+  public void testSubAppRowKey() {
+    TimelineEntity entity = new TimelineEntity();
+    entity.setId("entity1");
+    entity.setType("DAG");
+    entity.setIdPrefix(54321);
+
+    byte[] byteRowKey =
+        new SubApplicationRowKey(SUB_APP_USER, CLUSTER,
+            entity.getType(), entity.getIdPrefix(),
+            entity.getId(), USER).getRowKey();
+    SubApplicationRowKey rowKey = SubApplicationRowKey.parseRowKey(byteRowKey);
+    assertEquals(CLUSTER, rowKey.getClusterId());
+    assertEquals(SUB_APP_USER, rowKey.getSubAppUserId());
+    assertEquals(entity.getType(), rowKey.getEntityType());
+    assertEquals(entity.getIdPrefix(), rowKey.getEntityIdPrefix().longValue());
+    assertEquals(entity.getId(), rowKey.getEntityId());
+    assertEquals(USER, rowKey.getUserId());
+  }
+
+}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/9af30d46/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-common/src/test/java/org/apache/hadoop/yarn/server/timelineservice/storage/common/TestRowKeysAsString.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-common/src/test/java/org/apache/hadoop/yarn/server/timelineservice/storage/common/TestRowKeysAsString.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-common/src/test/java/org/apache/hadoop/yarn/server/timelineservice/storage/common/TestRowKeysAsString.java
new file mode 100644
index 0000000..c4d07c7
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-common/src/test/java/org/apache/hadoop/yarn/server/timelineservice/storage/common/TestRowKeysAsString.java
@@ -0,0 +1,144 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.yarn.server.timelineservice.storage.common;
+
+import static org.junit.Assert.assertEquals;
+
+import org.apache.hadoop.yarn.api.records.ApplicationId;
+import org.apache.hadoop.yarn.api.records.timelineservice.TimelineEntity;
+import org.apache.hadoop.yarn.server.timelineservice.reader.TimelineReaderUtils;
+import org.apache.hadoop.yarn.server.timelineservice.storage.application.ApplicationRowKey;
+import org.apache.hadoop.yarn.server.timelineservice.storage.entity.EntityRowKey;
+import org.apache.hadoop.yarn.server.timelineservice.storage.flow.FlowActivityRowKey;
+import org.apache.hadoop.yarn.server.timelineservice.storage.flow.FlowRunRowKey;
+import org.apache.hadoop.yarn.server.timelineservice.storage.subapplication.SubApplicationRowKey;
+import org.junit.Test;
+
+/**
+ * Test for row key as string.
+ */
+public class TestRowKeysAsString {
+
+  private final static String CLUSTER =
+      "cl" + TimelineReaderUtils.DEFAULT_DELIMITER_CHAR + "uster"
+          + TimelineReaderUtils.DEFAULT_ESCAPE_CHAR;
+  private final static String USER =
+      TimelineReaderUtils.DEFAULT_ESCAPE_CHAR + "user";
+  private final static String SUB_APP_USER =
+      TimelineReaderUtils.DEFAULT_ESCAPE_CHAR + "subAppUser";
+
+  private final static String FLOW_NAME =
+      "dummy_" + TimelineReaderUtils.DEFAULT_DELIMITER_CHAR
+          + TimelineReaderUtils.DEFAULT_ESCAPE_CHAR + "flow"
+          + TimelineReaderUtils.DEFAULT_DELIMITER_CHAR;
+  private final static Long FLOW_RUN_ID = System.currentTimeMillis();
+  private final static String APPLICATION_ID =
+      ApplicationId.newInstance(System.currentTimeMillis(), 1).toString();
+
+  @Test(timeout = 10000)
+  public void testApplicationRow() {
+    String rowKeyAsString = new ApplicationRowKey(CLUSTER, USER, FLOW_NAME,
+        FLOW_RUN_ID, APPLICATION_ID).getRowKeyAsString();
+    ApplicationRowKey rowKey =
+        ApplicationRowKey.parseRowKeyFromString(rowKeyAsString);
+    assertEquals(CLUSTER, rowKey.getClusterId());
+    assertEquals(USER, rowKey.getUserId());
+    assertEquals(FLOW_NAME, rowKey.getFlowName());
+    assertEquals(FLOW_RUN_ID, rowKey.getFlowRunId());
+    assertEquals(APPLICATION_ID, rowKey.getAppId());
+  }
+
+  @Test(timeout = 10000)
+  public void testEntityRowKey() {
+    char del = TimelineReaderUtils.DEFAULT_DELIMITER_CHAR;
+    char esc = TimelineReaderUtils.DEFAULT_ESCAPE_CHAR;
+    String id = del + esc + "ent" + esc + del + "ity" + esc + del + esc + "id"
+        + esc + del + esc;
+    String type = "entity" + esc + del + esc + "Type";
+    TimelineEntity entity = new TimelineEntity();
+    entity.setId(id);
+    entity.setType(type);
+    entity.setIdPrefix(54321);
+
+    String rowKeyAsString =
+        new EntityRowKey(CLUSTER, USER, FLOW_NAME, FLOW_RUN_ID, APPLICATION_ID,
+            entity.getType(), entity.getIdPrefix(), entity.getId())
+                .getRowKeyAsString();
+    EntityRowKey rowKey = EntityRowKey.parseRowKeyFromString(rowKeyAsString);
+    assertEquals(CLUSTER, rowKey.getClusterId());
+    assertEquals(USER, rowKey.getUserId());
+    assertEquals(FLOW_NAME, rowKey.getFlowName());
+    assertEquals(FLOW_RUN_ID, rowKey.getFlowRunId());
+    assertEquals(APPLICATION_ID, rowKey.getAppId());
+    assertEquals(entity.getType(), rowKey.getEntityType());
+    assertEquals(entity.getIdPrefix(), rowKey.getEntityIdPrefix().longValue());
+    assertEquals(entity.getId(), rowKey.getEntityId());
+
+  }
+
+  @Test(timeout = 10000)
+  public void testFlowActivityRowKey() {
+    Long ts = 1459900830000L;
+    Long dayTimestamp = HBaseTimelineSchemaUtils.getTopOfTheDayTimestamp(ts);
+    String rowKeyAsString = new FlowActivityRowKey(CLUSTER, ts, USER, FLOW_NAME)
+        .getRowKeyAsString();
+    FlowActivityRowKey rowKey =
+        FlowActivityRowKey.parseRowKeyFromString(rowKeyAsString);
+    assertEquals(CLUSTER, rowKey.getClusterId());
+    assertEquals(dayTimestamp, rowKey.getDayTimestamp());
+    assertEquals(USER, rowKey.getUserId());
+    assertEquals(FLOW_NAME, rowKey.getFlowName());
+  }
+
+  @Test(timeout = 10000)
+  public void testFlowRunRowKey() {
+    String rowKeyAsString =
+        new FlowRunRowKey(CLUSTER, USER, FLOW_NAME, FLOW_RUN_ID)
+            .getRowKeyAsString();
+    FlowRunRowKey rowKey = FlowRunRowKey.parseRowKeyFromString(rowKeyAsString);
+    assertEquals(CLUSTER, rowKey.getClusterId());
+    assertEquals(USER, rowKey.getUserId());
+    assertEquals(FLOW_NAME, rowKey.getFlowName());
+    assertEquals(FLOW_RUN_ID, rowKey.getFlowRunId());
+  }
+
+  @Test(timeout = 10000)
+  public void testSubApplicationRowKey() {
+    char del = TimelineReaderUtils.DEFAULT_DELIMITER_CHAR;
+    char esc = TimelineReaderUtils.DEFAULT_ESCAPE_CHAR;
+    String id = del + esc + "ent" + esc + del + "ity" + esc + del + esc + "id"
+        + esc + del + esc;
+    String type = "entity" + esc + del + esc + "Type";
+    TimelineEntity entity = new TimelineEntity();
+    entity.setId(id);
+    entity.setType(type);
+    entity.setIdPrefix(54321);
+
+    String rowKeyAsString = new SubApplicationRowKey(SUB_APP_USER, CLUSTER,
+        entity.getType(), entity.getIdPrefix(), entity.getId(), USER)
+            .getRowKeyAsString();
+    SubApplicationRowKey rowKey = SubApplicationRowKey
+        .parseRowKeyFromString(rowKeyAsString);
+    assertEquals(SUB_APP_USER, rowKey.getSubAppUserId());
+    assertEquals(CLUSTER, rowKey.getClusterId());
+    assertEquals(entity.getType(), rowKey.getEntityType());
+    assertEquals(entity.getIdPrefix(), rowKey.getEntityIdPrefix().longValue());
+    assertEquals(entity.getId(), rowKey.getEntityId());
+    assertEquals(USER, rowKey.getUserId());
+  }
+}
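
A sketch of the escaping behavior these tests rely on (hypothetical values):
components containing the delimiter or escape character survive the
join/split round trip because each occurrence is escaped before joining.

    String tricky = "cl" + TimelineReaderUtils.DEFAULT_DELIMITER_CHAR
        + "uster" + TimelineReaderUtils.DEFAULT_ESCAPE_CHAR;
    String joined = TimelineReaderUtils.joinAndEscapeStrings(
        new String[] {"user", tricky});
    List<String> parts = TimelineReaderUtils.split(joined);
    // parts.get(1) equals tricky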

http://git-wip-us.apache.org/repos/asf/hadoop/blob/9af30d46/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-common/src/test/java/org/apache/hadoop/yarn/server/timelineservice/storage/common/TestSeparator.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-common/src/test/java/org/apache/hadoop/yarn/server/timelineservice/storage/common/TestSeparator.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-common/src/test/java/org/apache/hadoop/yarn/server/timelineservice/storage/common/TestSeparator.java
new file mode 100644
index 0000000..7d37206
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-common/src/test/java/org/apache/hadoop/yarn/server/timelineservice/storage/common/TestSeparator.java
@@ -0,0 +1,215 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with this
+ * work for additional information regarding copyright ownership. The ASF
+ * licenses this file to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations under
+ * the License.
+ */
+package org.apache.hadoop.yarn.server.timelineservice.storage.common;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertNull;
+import static org.junit.Assert.assertTrue;
+import static org.junit.Assert.fail;
+
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.List;
+
+import org.apache.hadoop.hbase.util.Bytes;
+import org.junit.Test;
+
+import com.google.common.collect.Iterables;
+
+public class TestSeparator {
+
+  private static String villain = "Dr. Heinz Doofenshmirtz";
+  private static String special =
+      ".   *   |   ?   +   \t   (   )   [   ]   {   }   ^   $  \\ \"  %";
+
+  /**
+   * Confirms that string values, including special characters, survive an
+   * encode/decode round trip with every separator.
+   */
+  @Test
+  public void testEncodeDecodeString() {
+
+    for (Separator separator : Separator.values()) {
+      testEncodeDecode(separator, "");
+      testEncodeDecode(separator, " ");
+      testEncodeDecode(separator, "!");
+      testEncodeDecode(separator, "?");
+      testEncodeDecode(separator, "&");
+      testEncodeDecode(separator, "+");
+      testEncodeDecode(separator, "\t");
+      testEncodeDecode(separator, "Dr.");
+      testEncodeDecode(separator, "Heinz");
+      testEncodeDecode(separator, "Doofenshmirtz");
+      testEncodeDecode(separator, villain);
+      testEncodeDecode(separator, special);
+
+      assertNull(separator.encode(null));
+
+    }
+  }
+
+  private void testEncodeDecode(Separator separator, String token) {
+    String encoded = separator.encode(token);
+    String decoded = separator.decode(encoded);
+    String msg = "token:" + token + " separator:" + separator + ".";
+    assertEquals(msg, token, decoded);
+  }
+
+  @Test
+  public void testEncodeDecode() {
+    testEncodeDecode("Dr.", Separator.QUALIFIERS);
+    testEncodeDecode("Heinz", Separator.QUALIFIERS, Separator.QUALIFIERS);
+    testEncodeDecode("Doofenshmirtz", Separator.QUALIFIERS, null,
+        Separator.QUALIFIERS);
+    testEncodeDecode("&Perry", Separator.QUALIFIERS, Separator.VALUES, null);
+    testEncodeDecode("the ", Separator.QUALIFIERS, Separator.SPACE);
+    testEncodeDecode("Platypus...", (Separator) null);
+    testEncodeDecode("The what now ?!?", Separator.QUALIFIERS,
+        Separator.VALUES, Separator.SPACE);
+
+  }
+  @Test
+  public void testEncodedValues() {
+    testEncodeDecode("Double-escape %2$ and %9$ or %%2$ or %%3$, nor  %%%2$" +
+        "= no problem!",
+        Separator.QUALIFIERS, Separator.VALUES, Separator.SPACE, Separator.TAB);
+  }
+
+  @Test
+  public void testSplits() {
+    byte[] maxLongBytes = Bytes.toBytes(Long.MAX_VALUE);
+    byte[] maxIntBytes = Bytes.toBytes(Integer.MAX_VALUE);
+    for (Separator separator : Separator.values()) {
+      String str1 = "cl" + separator.getValue() + "us";
+      String str2 = separator.getValue() + "rst";
+      byte[] sepByteArr = Bytes.toBytes(separator.getValue());
+      byte[] longVal1Arr = Bytes.add(sepByteArr, Bytes.copy(maxLongBytes,
+          sepByteArr.length, Bytes.SIZEOF_LONG - sepByteArr.length));
+      byte[] intVal1Arr = Bytes.add(sepByteArr, Bytes.copy(maxIntBytes,
+          sepByteArr.length, Bytes.SIZEOF_INT - sepByteArr.length));
+      byte[] arr = separator.join(
+          Bytes.toBytes(separator.encode(str1)), longVal1Arr,
+          Bytes.toBytes(separator.encode(str2)), intVal1Arr);
+      int[] sizes = {Separator.VARIABLE_SIZE, Bytes.SIZEOF_LONG,
+          Separator.VARIABLE_SIZE, Bytes.SIZEOF_INT};
+      byte[][] splits = separator.split(arr, sizes);
+      assertEquals(4, splits.length);
+      assertEquals(str1, separator.decode(Bytes.toString(splits[0])));
+      assertEquals(Bytes.toLong(longVal1Arr), Bytes.toLong(splits[1]));
+      assertEquals(str2, separator.decode(Bytes.toString(splits[2])));
+      assertEquals(Bytes.toInt(intVal1Arr), Bytes.toInt(splits[3]));
+
+      longVal1Arr = Bytes.add(Bytes.copy(maxLongBytes, 0, Bytes.SIZEOF_LONG -
+          sepByteArr.length), sepByteArr);
+      intVal1Arr = Bytes.add(Bytes.copy(maxIntBytes, 0, Bytes.SIZEOF_INT -
+          sepByteArr.length), sepByteArr);
+      arr = separator.join(Bytes.toBytes(separator.encode(str1)), longVal1Arr,
+          Bytes.toBytes(separator.encode(str2)), intVal1Arr);
+      splits = separator.split(arr, sizes);
+      assertEquals(4, splits.length);
+      assertEquals(str1, separator.decode(Bytes.toString(splits[0])));
+      assertEquals(Bytes.toLong(longVal1Arr), Bytes.toLong(splits[1]));
+      assertEquals(str2, separator.decode(Bytes.toString(splits[2])));
+      assertEquals(Bytes.toInt(intVal1Arr), Bytes.toInt(splits[3]));
+
+      longVal1Arr = Bytes.add(sepByteArr, Bytes.copy(maxLongBytes,
+          sepByteArr.length, 4 - sepByteArr.length), sepByteArr);
+      longVal1Arr = Bytes.add(longVal1Arr, Bytes.copy(maxLongBytes, 4, 3 -
+              sepByteArr.length), sepByteArr);
+      arr = separator.join(Bytes.toBytes(separator.encode(str1)), longVal1Arr,
+          Bytes.toBytes(separator.encode(str2)), intVal1Arr);
+      splits = separator.split(arr, sizes);
+      assertEquals(4, splits.length);
+      assertEquals(str1, separator.decode(Bytes.toString(splits[0])));
+      assertEquals(Bytes.toLong(longVal1Arr), Bytes.toLong(splits[1]));
+      assertEquals(str2, separator.decode(Bytes.toString(splits[2])));
+      assertEquals(Bytes.toInt(intVal1Arr), Bytes.toInt(splits[3]));
+
+      arr = separator.join(Bytes.toBytes(separator.encode(str1)),
+          Bytes.toBytes(separator.encode(str2)), intVal1Arr, longVal1Arr);
+      int[] sizes1 = {Separator.VARIABLE_SIZE, Separator.VARIABLE_SIZE,
+          Bytes.SIZEOF_INT, Bytes.SIZEOF_LONG};
+      splits = separator.split(arr, sizes1);
+      assertEquals(4, splits.length);
+      assertEquals(str1, separator.decode(Bytes.toString(splits[0])));
+      assertEquals(str2, separator.decode(Bytes.toString(splits[1])));
+      assertEquals(Bytes.toInt(intVal1Arr), Bytes.toInt(splits[2]));
+      assertEquals(Bytes.toLong(longVal1Arr), Bytes.toLong(splits[3]));
+
+      try {
+        int[] sizes2 = {Separator.VARIABLE_SIZE, Separator.VARIABLE_SIZE,
+            Bytes.SIZEOF_INT, 7};
+        splits = separator.split(arr, sizes2);
+        fail("Exception should have been thrown.");
+      } catch (IllegalArgumentException e) {}
+
+      try {
+        int[] sizes2 = {Separator.VARIABLE_SIZE, Separator.VARIABLE_SIZE, 2,
+            Bytes.SIZEOF_LONG};
+        splits = separator.split(arr, sizes2);
+        fail("Exception should have been thrown.");
+      } catch (IllegalArgumentException e) {}
+    }
+  }
+
+  /**
+   * Encodes and decodes the given token using the given separators and
+   * confirms that we end up with the same value we started with.
+   *
+   * @param token the string to round-trip.
+   * @param separators the separators to encode and decode with.
+   */
+  private static void testEncodeDecode(String token, Separator... separators) {
+    byte[] encoded = Separator.encode(token, separators);
+    String decoded = Separator.decode(encoded, separators);
+    assertEquals(token, decoded);
+  }
+
+  @Test
+  public void testJoinStripped() {
+    List<String> stringList = new ArrayList<String>(0);
+    stringList.add("nothing");
+
+    String joined = Separator.VALUES.joinEncoded(stringList);
+    Iterable<String> split = Separator.VALUES.splitEncoded(joined);
+    assertTrue(Iterables.elementsEqual(stringList, split));
+
+    stringList = new ArrayList<String>(3);
+    stringList.add("a");
+    stringList.add("b?");
+    stringList.add("c");
+
+    joined = Separator.VALUES.joinEncoded(stringList);
+    split = Separator.VALUES.splitEncoded(joined);
+    assertTrue(Iterables.elementsEqual(stringList, split));
+
+    String[] stringArray1 = {"else"};
+    joined = Separator.VALUES.joinEncoded(stringArray1);
+    split = Separator.VALUES.splitEncoded(joined);
+    assertTrue(Iterables.elementsEqual(Arrays.asList(stringArray1), split));
+
+    String[] stringArray2 = {"d", "e?", "f"};
+    joined = Separator.VALUES.joinEncoded(stringArray2);
+    split = Separator.VALUES.splitEncoded(joined);
+    assertTrue(Iterables.elementsEqual(Arrays.asList(stringArray2), split));
+
+    List<String> empty = new ArrayList<String>(0);
+    split = Separator.VALUES.splitEncoded(null);
+    assertTrue(Iterables.elementsEqual(empty, split));
+
+  }
+
+}
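
For reference, the multi-separator encode/decode exercised above can be used
directly; each listed separator is escaped in turn, so the token may safely
contain any of them:

    String token = "a!b?c d";
    byte[] encoded = Separator.encode(token,
        Separator.QUALIFIERS, Separator.VALUES, Separator.SPACE);
    String decoded = Separator.decode(encoded,
        Separator.QUALIFIERS, Separator.VALUES, Separator.SPACE);
    // decoded equals token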

http://git-wip-us.apache.org/repos/asf/hadoop/blob/9af30d46/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-server/pom.xml
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-server/pom.xml b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-server/pom.xml
new file mode 100644
index 0000000..d06907d
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-server/pom.xml
@@ -0,0 +1,161 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!--
+   Licensed to the Apache Software Foundation (ASF) under one or more
+   contributor license agreements.  See the NOTICE file distributed with
+   this work for additional information regarding copyright ownership.
+   The ASF licenses this file to You under the Apache License, Version 2.0
+   (the "License"); you may not use this file except in compliance with
+   the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+-->
+<project xmlns="http://maven.apache.org/POM/4.0.0"
+         xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
+         xsi:schemaLocation="http://maven.apache.org/POM/4.0.0
+                             http://maven.apache.org/xsd/maven-4.0.0.xsd">
+  <parent>
+    <artifactId>hadoop-yarn-server-timelineservice-hbase</artifactId>
+    <groupId>org.apache.hadoop</groupId>
+    <version>3.2.0-SNAPSHOT</version>
+  </parent>
+
+  <modelVersion>4.0.0</modelVersion>
+  <artifactId>hadoop-yarn-server-timelineservice-hbase-server</artifactId>
+  <name>Apache Hadoop YARN TimelineService HBase Server</name>
+  <version>3.2.0-SNAPSHOT</version>
+
+  <properties>
+    <!-- Needed for generating FindBugs warnings using parent pom -->
+    <yarn.basedir>${project.parent.parent.parent.basedir}</yarn.basedir>
+  </properties>
+
+  <dependencies>
+    <dependency>
+      <groupId>commons-logging</groupId>
+      <artifactId>commons-logging</artifactId>
+    </dependency>
+
+    <dependency>
+      <groupId>com.google.guava</groupId>
+      <artifactId>guava</artifactId>
+    </dependency>
+
+    <dependency>
+      <groupId>org.apache.hadoop</groupId>
+      <artifactId>hadoop-common</artifactId>
+      <scope>provided</scope>
+    </dependency>
+
+    <dependency>
+      <groupId>org.apache.hadoop</groupId>
+      <artifactId>hadoop-yarn-api</artifactId>
+      <scope>provided</scope>
+    </dependency>
+
+    <dependency>
+      <groupId>org.apache.hadoop</groupId>
+      <artifactId>hadoop-yarn-server-timelineservice-hbase-common</artifactId>
+    </dependency>
+
+    <dependency>
+      <groupId>org.apache.hbase</groupId>
+      <artifactId>hbase-common</artifactId>
+      <scope>provided</scope>
+      <exclusions>
+        <exclusion>
+          <groupId>org.apache.hadoop</groupId>
+          <artifactId>hadoop-mapreduce-client-core</artifactId>
+        </exclusion>
+        <exclusion>
+          <groupId>org.mortbay.jetty</groupId>
+          <artifactId>jetty-util</artifactId>
+        </exclusion>
+      </exclusions>
+    </dependency>
+
+    <dependency>
+      <groupId>org.apache.hbase</groupId>
+      <artifactId>hbase-client</artifactId>
+      <scope>provided</scope>
+      <exclusions>
+        <exclusion>
+          <groupId>org.apache.hadoop</groupId>
+          <artifactId>hadoop-mapreduce-client-core</artifactId>
+        </exclusion>
+      </exclusions>
+    </dependency>
+
+    <dependency>
+      <groupId>org.apache.hbase</groupId>
+      <artifactId>hbase-server</artifactId>
+      <scope>provided</scope>
+      <exclusions>
+        <exclusion>
+          <groupId>org.apache.hadoop</groupId>
+          <artifactId>hadoop-hdfs</artifactId>
+        </exclusion>
+        <exclusion>
+          <groupId>org.apache.hadoop</groupId>
+          <artifactId>hadoop-hdfs-client</artifactId>
+        </exclusion>
+        <exclusion>
+          <groupId>org.apache.hadoop</groupId>
+          <artifactId>hadoop-client</artifactId>
+        </exclusion>
+        <exclusion>
+          <groupId>org.apache.hadoop</groupId>
+          <artifactId>hadoop-mapreduce-client-core</artifactId>
+        </exclusion>
+        <exclusion>
+          <groupId>org.mortbay.jetty</groupId>
+          <artifactId>jetty</artifactId>
+        </exclusion>
+        <exclusion>
+          <groupId>org.mortbay.jetty</groupId>
+          <artifactId>jetty-util</artifactId>
+        </exclusion>
+        <exclusion>
+          <groupId>org.mortbay.jetty</groupId>
+          <artifactId>jetty-sslengine</artifactId>
+        </exclusion>
+      </exclusions>
+    </dependency>
+  </dependencies>
+
+  <build>
+    <plugins>
+      <plugin>
+        <artifactId>maven-assembly-plugin</artifactId>
+        <configuration>
+          <descriptor>src/assembly/coprocessor.xml</descriptor>
+          <attach>true</attach>
+        </configuration>
+        <executions>
+          <execution>
+            <id>create-coprocessor-jar</id>
+            <phase>prepare-package</phase>
+            <goals>
+              <goal>single</goal>
+            </goals>
+          </execution>
+        </executions>
+      </plugin>
+      <plugin>
+        <artifactId>maven-jar-plugin</artifactId>
+        <executions>
+          <execution>
+            <goals>
+              <goal>test-jar</goal>
+            </goals>
+          </execution>
+        </executions>
+      </plugin>
+    </plugins>
+  </build>
+</project>
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hadoop/blob/9af30d46/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-server/src/assembly/coprocessor.xml
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-server/src/assembly/coprocessor.xml b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-server/src/assembly/coprocessor.xml
new file mode 100644
index 0000000..01ff0dd
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-server/src/assembly/coprocessor.xml
@@ -0,0 +1,37 @@
+<!--
+   Licensed to the Apache Software Foundation (ASF) under one or more
+   contributor license agreements.  See the NOTICE file distributed with
+   this work for additional information regarding copyright ownership.
+   The ASF licenses this file to You under the Apache License, Version 2.0
+   (the "License"); you may not use this file except in compliance with
+   the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+-->
+<assembly xmlns="http://maven.apache.org/plugins/maven-assembly-plugin/assembly/1.1.3"
+          xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
+          xsi:schemaLocation="http://maven.apache.org/plugins/maven-assembly-plugin/assembly/1.1.3 http://maven.apache.org/xsd/assembly-1.1.3.xsd">
+  <id>coprocessor</id>
+  <formats>
+    <format>jar</format>
+  </formats>
+  <includeBaseDirectory>false</includeBaseDirectory>
+  <dependencySets>
+    <dependencySet>
+      <outputDirectory>/</outputDirectory>
+      <useProjectArtifact>true</useProjectArtifact>
+      <unpack>true</unpack>
+      <scope>runtime</scope>
+      <includes>
+        <include>org.apache.hadoop:hadoop-yarn-server-timelineservice-hbase-common</include>
+        <include>org.apache.hadoop:hadoop-yarn-server-timelineservice-hbase-server</include>
+      </includes>
+    </dependencySet>
+  </dependencySets>
+</assembly>
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hadoop/blob/9af30d46/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-server/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/common/HBaseTimelineServerUtils.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-server/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/common/HBaseTimelineServerUtils.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-server/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/common/HBaseTimelineServerUtils.java
new file mode 100644
index 0000000..5c07670
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-server/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/common/HBaseTimelineServerUtils.java
@@ -0,0 +1,135 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with this
+ * work for additional information regarding copyright ownership. The ASF
+ * licenses this file to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations under
+ * the License.
+ */
+
+package org.apache.hadoop.yarn.server.timelineservice.storage.common;
+
+import org.apache.hadoop.hbase.Cell;
+import org.apache.hadoop.hbase.CellUtil;
+import org.apache.hadoop.hbase.KeyValue;
+import org.apache.hadoop.hbase.Tag;
+import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.yarn.server.timelineservice.storage.flow.AggregationCompactionDimension;
+import org.apache.hadoop.yarn.server.timelineservice.storage.flow.AggregationOperation;
+
+import java.io.IOException;
+import java.util.List;
+import java.util.Map;
+
+/**
+ * A utility class used by the hbase-server module.
+ */
+public final class HBaseTimelineServerUtils {
+  private HBaseTimelineServerUtils() {
+  }
+
+  /**
+   * Creates a {@link Tag} from the input attribute.
+   *
+   * @param attribute Attribute from which tag has to be fetched.
+   * @return an HBase Tag.
+   */
+  public static Tag getTagFromAttribute(Map.Entry<String, byte[]> attribute) {
+    // attribute could be either an Aggregation Operation or
+    // an Aggregation Dimension
+    // Get the Tag type from either
+    AggregationOperation aggOp = AggregationOperation
+        .getAggregationOperation(attribute.getKey());
+    if (aggOp != null) {
+      Tag t = new Tag(aggOp.getTagType(), attribute.getValue());
+      return t;
+    }
+
+    AggregationCompactionDimension aggCompactDim =
+        AggregationCompactionDimension.getAggregationCompactionDimension(
+            attribute.getKey());
+    if (aggCompactDim != null) {
+      Tag t = new Tag(aggCompactDim.getTagType(), attribute.getValue());
+      return t;
+    }
+    return null;
+  }
+
+  /**
+   * Creates a new cell based on the input cell but with the new value.
+   *
+   * @param origCell Original cell
+   * @param newValue new cell value
+   * @return cell
+   * @throws IOException while creating new cell.
+   */
+  public static Cell createNewCell(Cell origCell, byte[] newValue)
+      throws IOException {
+    return CellUtil.createCell(CellUtil.cloneRow(origCell),
+        CellUtil.cloneFamily(origCell), CellUtil.cloneQualifier(origCell),
+        origCell.getTimestamp(), KeyValue.Type.Put.getCode(), newValue);
+  }
+
+  /**
+   * Creates a cell with the given inputs.
+   *
+   * @param row row of the cell to be created
+   * @param family column family name of the new cell
+   * @param qualifier qualifier for the new cell
+   * @param ts timestamp of the new cell
+   * @param newValue value of the new cell
+   * @param tags tags in the new cell
+   * @return cell
+   * @throws IOException while creating the cell.
+   */
+  public static Cell createNewCell(byte[] row, byte[] family, byte[] qualifier,
+      long ts, byte[] newValue, byte[] tags) throws IOException {
+    return CellUtil.createCell(row, family, qualifier, ts, KeyValue.Type.Put,
+        newValue, tags);
+  }
+
+  /**
+   * Returns the app id from the list of tags.
+   *
+   * @param tags cell tags to be looked into
+   * @return App Id as the AggregationCompactionDimension
+   */
+  public static String getAggregationCompactionDimension(List<Tag> tags) {
+    String appId = null;
+    for (Tag t : tags) {
+      if (AggregationCompactionDimension.APPLICATION_ID.getTagType() == t
+          .getType()) {
+        appId = Bytes.toString(t.getValue());
+        return appId;
+      }
+    }
+    return appId;
+  }
+
+  /**
+   * Returns the first {@link AggregationOperation}, in declaration order,
+   * that appears in the given list of tags, or null if none matches.
+   *
+   * @param tags list of HBase tags.
+   * @return AggregationOperation
+   */
+  public static AggregationOperation getAggregationOperationFromTagsList(
+      List<Tag> tags) {
+    for (AggregationOperation aggOp : AggregationOperation.values()) {
+      for (Tag tag : tags) {
+        if (tag.getType() == aggOp.getTagType()) {
+          return aggOp;
+        }
+      }
+    }
+    return null;
+  }
+}
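
A minimal sketch of how a compaction coprocessor might use the createNewCell
helper above to swap in a freshly computed aggregate while keeping the cell
coordinates intact. The class, method, and variable names are illustrative, and
an HBase 1.x client dependency is assumed.

    import java.io.IOException;
    import org.apache.hadoop.hbase.Cell;
    import org.apache.hadoop.hbase.util.Bytes;
    import org.apache.hadoop.yarn.server.timelineservice.storage.common.HBaseTimelineServerUtils;

    public class AggregateRewriteSketch {
      // Replace a cell's value with a new sum; row, family, qualifier and
      // timestamp are all copied from the original cell by createNewCell.
      static Cell replaceValue(Cell origCell, long newSum) throws IOException {
        return HBaseTimelineServerUtils.createNewCell(
            origCell, Bytes.toBytes(newSum));
      }
    }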

http://git-wip-us.apache.org/repos/asf/hadoop/blob/9af30d46/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-server/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/common/package-info.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-server/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/common/package-info.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-server/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/common/package-info.java
new file mode 100644
index 0000000..0df5b8a
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-server/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/common/package-info.java
@@ -0,0 +1,28 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/**
+ * Package org.apache.hadoop.yarn.server.timelineservice.storage.common contains
+ * a set of utility classes used across backend storage reader and writer.
+ */
+@InterfaceAudience.Private
+@InterfaceStability.Unstable
+package org.apache.hadoop.yarn.server.timelineservice.storage.common;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;




[29/50] [abbrv] hadoop git commit: HDFS-13168. XmlImageVisitor - Prefer Array over LinkedList. Contributed by BELUGA BEHR.

Posted by ha...@apache.org.
HDFS-13168. XmlImageVisitor - Prefer Array over LinkedList. Contributed by BELUGA BEHR.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/17c592e6
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/17c592e6
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/17c592e6

Branch: refs/heads/HDFS-12996
Commit: 17c592e6cfd1ea3dbe9671c4703caabd095d87cf
Parents: 9028cca
Author: Inigo Goiri <in...@apache.org>
Authored: Tue Feb 20 15:16:01 2018 -0800
Committer: Inigo Goiri <in...@apache.org>
Committed: Tue Feb 20 15:16:01 2018 -0800

----------------------------------------------------------------------
 .../tools/offlineImageViewer/XmlImageVisitor.java | 18 ++++++++++--------
 1 file changed, 10 insertions(+), 8 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/17c592e6/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/XmlImageVisitor.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/XmlImageVisitor.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/XmlImageVisitor.java
index 44593a3..a326049 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/XmlImageVisitor.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/XmlImageVisitor.java
@@ -18,16 +18,17 @@
 package org.apache.hadoop.hdfs.tools.offlineImageViewer;
 
 import java.io.IOException;
-import java.util.LinkedList;
+import java.util.ArrayDeque;
+import java.util.Deque;
 
 import org.apache.hadoop.hdfs.util.XMLUtils;
+
 /**
  * An XmlImageVisitor walks over an fsimage structure and writes out
  * an equivalent XML document that contains the fsimage's components.
  */
 public class XmlImageVisitor extends TextWriterImageVisitor {
-  final private LinkedList<ImageElement> tagQ =
-                                          new LinkedList<ImageElement>();
+  final private Deque<ImageElement> tagQ = new ArrayDeque<>();
 
   public XmlImageVisitor(String filename) throws IOException {
     super(filename, false);
@@ -51,9 +52,10 @@ public class XmlImageVisitor extends TextWriterImageVisitor {
 
   @Override
   void leaveEnclosingElement() throws IOException {
-    if(tagQ.size() == 0)
+    if (tagQ.isEmpty()) {
       throw new IOException("Tried to exit non-existent enclosing element " +
-                "in FSImage file");
+          "in FSImage file");
+    }
 
     ImageElement element = tagQ.pop();
     write("</" + element.toString() + ">\n");
@@ -71,7 +73,7 @@ public class XmlImageVisitor extends TextWriterImageVisitor {
 
   @Override
   void visitEnclosingElement(ImageElement element) throws IOException {
-    write("<" + element.toString() + ">\n");
+    write('<' + element.toString() + ">\n");
     tagQ.push(element);
   }
 
@@ -79,12 +81,12 @@ public class XmlImageVisitor extends TextWriterImageVisitor {
   void visitEnclosingElement(ImageElement element,
       ImageElement key, String value)
        throws IOException {
-    write("<" + element.toString() + " " + key + "=\"" + value +"\">\n");
+    write('<' + element.toString() + ' ' + key + "=\"" + value +"\">\n");
     tagQ.push(element);
   }
 
   private void writeTag(String tag, String value) throws IOException {
-    write("<" + tag + ">" +
+    write('<' + tag + '>' +
         XMLUtils.mangleXmlString(value, true) + "</" + tag + ">\n");
   }
 }
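
The gist of the change, as a standalone sketch (names below are illustrative):
ArrayDeque provides the same push/pop stack discipline as LinkedList without
per-element node allocations, which is why it is generally the preferred JDK
deque implementation.

    import java.util.ArrayDeque;
    import java.util.Deque;

    public class TagStackSketch {
      public static void main(String[] args) {
        Deque<String> tagQ = new ArrayDeque<>();
        tagQ.push("fsimage");               // visitEnclosingElement
        tagQ.push("inode");
        System.out.println(tagQ.pop());     // inode -- innermost closes first
        System.out.println(tagQ.pop());     // fsimage
        System.out.println(tagQ.isEmpty()); // true -- a further pop is an error
      }
    }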




[46/50] [abbrv] hadoop git commit: HADOOP-15236. Fix typo in RequestHedgingProxyProvider and RequestHedgingRMFailoverProxyProvider

Posted by ha...@apache.org.
HADOOP-15236. Fix typo in RequestHedgingProxyProvider and RequestHedgingRMFailoverProxyProvider

Signed-off-by: Akira Ajisaka <aa...@apache.org>


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/c36b4aa3
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/c36b4aa3
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/c36b4aa3

Branch: refs/heads/HDFS-12996
Commit: c36b4aa31ce25fbe5fa173bce36da2950d74a475
Parents: 514794e
Author: Gabor Bota <ga...@cloudera.com>
Authored: Fri Feb 23 13:55:18 2018 +0900
Committer: Akira Ajisaka <aa...@apache.org>
Committed: Fri Feb 23 13:55:18 2018 +0900

----------------------------------------------------------------------
 .../hdfs/server/namenode/ha/RequestHedgingProxyProvider.java       | 2 +-
 .../hadoop/yarn/client/RequestHedgingRMFailoverProxyProvider.java  | 2 +-
 2 files changed, 2 insertions(+), 2 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/c36b4aa3/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/RequestHedgingProxyProvider.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/RequestHedgingProxyProvider.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/RequestHedgingProxyProvider.java
index 08edfe2..010e9e5 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/RequestHedgingProxyProvider.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/RequestHedgingProxyProvider.java
@@ -45,7 +45,7 @@ import org.slf4j.LoggerFactory;
  * per-se. It constructs a wrapper proxy that sends the request to ALL
  * underlying proxies simultaneously. It assumes the in an HA setup, there will
  * be only one Active, and the active should respond faster than any configured
- * standbys. Once it receive a response from any one of the configred proxies,
+ * standbys. Once it receive a response from any one of the configured proxies,
  * outstanding requests to other proxies are immediately cancelled.
  */
 public class RequestHedgingProxyProvider<T> extends

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c36b4aa3/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/client/RequestHedgingRMFailoverProxyProvider.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/client/RequestHedgingRMFailoverProxyProvider.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/client/RequestHedgingRMFailoverProxyProvider.java
index 4c16225..c1e9da1 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/client/RequestHedgingRMFailoverProxyProvider.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/client/RequestHedgingRMFailoverProxyProvider.java
@@ -49,7 +49,7 @@ import org.apache.hadoop.yarn.conf.YarnConfiguration;
  * underlying proxies simultaneously. Each proxy inside the wrapper proxy will
  * retry the corresponding target. It assumes the in an HA setup, there will be
  * only one Active, and the active should respond faster than any configured
- * standbys. Once it receives a response from any one of the configred proxies,
+ * standbys. Once it receives a response from any one of the configured proxies,
  * outstanding requests to other proxies are immediately cancelled.
  */
 public class RequestHedgingRMFailoverProxyProvider<T>




[04/50] [abbrv] hadoop git commit: YARN-7919. Refactor timelineservice-hbase module into submodules. Contributed by Haibo Chen.

Posted by ha...@apache.org.
http://git-wip-us.apache.org/repos/asf/hadoop/blob/9af30d46/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/reader/ApplicationEntityReader.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/reader/ApplicationEntityReader.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/reader/ApplicationEntityReader.java
deleted file mode 100644
index 0edd6a5..0000000
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/reader/ApplicationEntityReader.java
+++ /dev/null
@@ -1,520 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.yarn.server.timelineservice.storage.reader;
-
-import java.io.IOException;
-import java.util.EnumSet;
-import java.util.Set;
-
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hbase.client.Connection;
-import org.apache.hadoop.hbase.client.Get;
-import org.apache.hadoop.hbase.client.Query;
-import org.apache.hadoop.hbase.client.Result;
-import org.apache.hadoop.hbase.client.ResultScanner;
-import org.apache.hadoop.hbase.client.Scan;
-import org.apache.hadoop.hbase.filter.BinaryComparator;
-import org.apache.hadoop.hbase.filter.CompareFilter.CompareOp;
-import org.apache.hadoop.hbase.filter.FamilyFilter;
-import org.apache.hadoop.hbase.filter.FilterList;
-import org.apache.hadoop.hbase.filter.FilterList.Operator;
-import org.apache.hadoop.hbase.filter.PageFilter;
-import org.apache.hadoop.hbase.filter.QualifierFilter;
-import org.apache.hadoop.yarn.api.records.timelineservice.TimelineEntity;
-import org.apache.hadoop.yarn.api.records.timelineservice.TimelineEntityType;
-import org.apache.hadoop.yarn.server.timelineservice.reader.TimelineDataToRetrieve;
-import org.apache.hadoop.yarn.server.timelineservice.reader.TimelineEntityFilters;
-import org.apache.hadoop.yarn.server.timelineservice.reader.TimelineReaderContext;
-import org.apache.hadoop.yarn.server.timelineservice.reader.TimelineReaderUtils;
-import org.apache.hadoop.yarn.server.timelineservice.reader.filter.TimelineFilterList;
-import org.apache.hadoop.yarn.server.timelineservice.reader.filter.TimelineFilterUtils;
-import org.apache.hadoop.yarn.server.timelineservice.storage.TimelineReader.Field;
-import org.apache.hadoop.yarn.server.timelineservice.storage.application.ApplicationColumn;
-import org.apache.hadoop.yarn.server.timelineservice.storage.application.ApplicationColumnFamily;
-import org.apache.hadoop.yarn.server.timelineservice.storage.application.ApplicationColumnPrefix;
-import org.apache.hadoop.yarn.server.timelineservice.storage.application.ApplicationRowKey;
-import org.apache.hadoop.yarn.server.timelineservice.storage.application.ApplicationRowKeyPrefix;
-import org.apache.hadoop.yarn.server.timelineservice.storage.application.ApplicationTable;
-import org.apache.hadoop.yarn.server.timelineservice.storage.common.BaseTable;
-import org.apache.hadoop.yarn.server.timelineservice.storage.common.HBaseTimelineStorageUtils;
-import org.apache.hadoop.yarn.server.timelineservice.storage.common.RowKeyPrefix;
-import org.apache.hadoop.yarn.server.timelineservice.storage.common.TimelineStorageUtils;
-import org.apache.hadoop.yarn.webapp.BadRequestException;
-
-import com.google.common.base.Preconditions;
-
-/**
- * Timeline entity reader for application entities that are stored in the
- * application table.
- */
-class ApplicationEntityReader extends GenericEntityReader {
-  private static final ApplicationTable APPLICATION_TABLE =
-      new ApplicationTable();
-
-  public ApplicationEntityReader(TimelineReaderContext ctxt,
-      TimelineEntityFilters entityFilters, TimelineDataToRetrieve toRetrieve) {
-    super(ctxt, entityFilters, toRetrieve);
-  }
-
-  public ApplicationEntityReader(TimelineReaderContext ctxt,
-      TimelineDataToRetrieve toRetrieve) {
-    super(ctxt, toRetrieve);
-  }
-
-  /**
-   * Uses the {@link ApplicationTable}.
-   */
-  protected BaseTable<?> getTable() {
-    return APPLICATION_TABLE;
-  }
-
-  /**
-   * This method is called only for multiple entity reads.
-   */
-  @Override
-  protected FilterList constructFilterListBasedOnFilters() throws IOException {
-    // Filters here cannot be null for multiple entity reads as they are set in
-    // augmentParams if null.
-    TimelineEntityFilters filters = getFilters();
-    FilterList listBasedOnFilters = new FilterList();
-    // Create filter list based on created time range and add it to
-    // listBasedOnFilters.
-    long createdTimeBegin = filters.getCreatedTimeBegin();
-    long createdTimeEnd = filters.getCreatedTimeEnd();
-    if (createdTimeBegin != 0 || createdTimeEnd != Long.MAX_VALUE) {
-      listBasedOnFilters.addFilter(
-          TimelineFilterUtils.createSingleColValueFiltersByRange(
-          ApplicationColumn.CREATED_TIME, createdTimeBegin, createdTimeEnd));
-    }
-    // Create filter list based on metric filters and add it to
-    // listBasedOnFilters.
-    TimelineFilterList metricFilters = filters.getMetricFilters();
-    if (metricFilters != null && !metricFilters.getFilterList().isEmpty()) {
-      listBasedOnFilters.addFilter(
-          TimelineFilterUtils.createHBaseFilterList(
-              ApplicationColumnPrefix.METRIC, metricFilters));
-    }
-    // Create filter list based on config filters and add it to
-    // listBasedOnFilters.
-    TimelineFilterList configFilters = filters.getConfigFilters();
-    if (configFilters != null && !configFilters.getFilterList().isEmpty()) {
-      listBasedOnFilters.addFilter(
-          TimelineFilterUtils.createHBaseFilterList(
-              ApplicationColumnPrefix.CONFIG, configFilters));
-    }
-    // Create filter list based on info filters and add it to listBasedOnFilters
-    TimelineFilterList infoFilters = filters.getInfoFilters();
-    if (infoFilters != null && !infoFilters.getFilterList().isEmpty()) {
-      listBasedOnFilters.addFilter(
-          TimelineFilterUtils.createHBaseFilterList(
-              ApplicationColumnPrefix.INFO, infoFilters));
-    }
-    return listBasedOnFilters;
-  }
-
-  /**
-   * Add {@link QualifierFilter} filters to filter list for each column of
-   * application table.
-   *
-   * @param list filter list to which qualifier filters have to be added.
-   */
-  @Override
-  protected void updateFixedColumns(FilterList list) {
-    for (ApplicationColumn column : ApplicationColumn.values()) {
-      list.addFilter(new QualifierFilter(CompareOp.EQUAL,
-          new BinaryComparator(column.getColumnQualifierBytes())));
-    }
-  }
-
-  /**
-   * Creates a filter list which indicates that only some of the column
-   * qualifiers in the info column family will be returned in result.
-   *
-   * @return filter list.
-   * @throws IOException if any problem occurs while creating filter list.
-   */
-  private FilterList createFilterListForColsOfInfoFamily()
-      throws IOException {
-    FilterList infoFamilyColsFilter = new FilterList(Operator.MUST_PASS_ONE);
-    // Add filters for each column in entity table.
-    updateFixedColumns(infoFamilyColsFilter);
-    EnumSet<Field> fieldsToRetrieve = getDataToRetrieve().getFieldsToRetrieve();
-    // If INFO field has to be retrieved, add a filter for fetching columns
-    // with INFO column prefix.
-    if (hasField(fieldsToRetrieve, Field.INFO)) {
-      infoFamilyColsFilter.addFilter(
-          TimelineFilterUtils.createHBaseQualifierFilter(
-              CompareOp.EQUAL, ApplicationColumnPrefix.INFO));
-    }
-    TimelineFilterList relatesTo = getFilters().getRelatesTo();
-    if (hasField(fieldsToRetrieve, Field.RELATES_TO)) {
-      // If RELATES_TO field has to be retrieved, add a filter for fetching
-      // columns with RELATES_TO column prefix.
-      infoFamilyColsFilter.addFilter(
-          TimelineFilterUtils.createHBaseQualifierFilter(
-              CompareOp.EQUAL, ApplicationColumnPrefix.RELATES_TO));
-    } else if (relatesTo != null && !relatesTo.getFilterList().isEmpty()) {
-      // Even if fields to retrieve does not contain RELATES_TO, we still
-      // need to have a filter to fetch some of the column qualifiers if
-      // relatesTo filters are specified. relatesTo filters will then be
-      // matched after fetching rows from HBase.
-      Set<String> relatesToCols =
-          TimelineFilterUtils.fetchColumnsFromFilterList(relatesTo);
-      infoFamilyColsFilter.addFilter(createFiltersFromColumnQualifiers(
-          ApplicationColumnPrefix.RELATES_TO, relatesToCols));
-    }
-    TimelineFilterList isRelatedTo = getFilters().getIsRelatedTo();
-    if (hasField(fieldsToRetrieve, Field.IS_RELATED_TO)) {
-      // If IS_RELATED_TO field has to be retrieved, add a filter for fetching
-      // columns with IS_RELATED_TO column prefix.
-      infoFamilyColsFilter.addFilter(
-          TimelineFilterUtils.createHBaseQualifierFilter(
-              CompareOp.EQUAL, ApplicationColumnPrefix.IS_RELATED_TO));
-    } else if (isRelatedTo != null && !isRelatedTo.getFilterList().isEmpty()) {
-      // Even if fields to retrieve does not contain IS_RELATED_TO, we still
-      // need to have a filter to fetch some of the column qualifiers if
-      // isRelatedTo filters are specified. isRelatedTo filters will then be
-      // matched after fetching rows from HBase.
-      Set<String> isRelatedToCols =
-          TimelineFilterUtils.fetchColumnsFromFilterList(isRelatedTo);
-      infoFamilyColsFilter.addFilter(createFiltersFromColumnQualifiers(
-          ApplicationColumnPrefix.IS_RELATED_TO, isRelatedToCols));
-    }
-    TimelineFilterList eventFilters = getFilters().getEventFilters();
-    if (hasField(fieldsToRetrieve, Field.EVENTS)) {
-      // If EVENTS field has to be retrieved, add a filter for fetching columns
-      // with EVENT column prefix.
-      infoFamilyColsFilter.addFilter(
-          TimelineFilterUtils.createHBaseQualifierFilter(
-              CompareOp.EQUAL, ApplicationColumnPrefix.EVENT));
-    } else if (eventFilters != null && !eventFilters.getFilterList().isEmpty()){
-      // Even if fields to retrieve does not contain EVENTS, we still need to
-      // have a filter to fetch some of the column qualifiers on the basis of
-      // event filters specified. Event filters will then be matched after
-      // fetching rows from HBase.
-      Set<String> eventCols =
-          TimelineFilterUtils.fetchColumnsFromFilterList(eventFilters);
-      infoFamilyColsFilter.addFilter(createFiltersFromColumnQualifiers(
-          ApplicationColumnPrefix.EVENT, eventCols));
-    }
-    return infoFamilyColsFilter;
-  }
-
-  /**
-   * Exclude column prefixes via filters which are not required(based on fields
-   * to retrieve) from info column family. These filters are added to filter
-   * list which contains a filter for getting info column family.
-   *
-   * @param infoColFamilyList filter list for info column family.
-   */
-  private void excludeFieldsFromInfoColFamily(FilterList infoColFamilyList) {
-    EnumSet<Field> fieldsToRetrieve = getDataToRetrieve().getFieldsToRetrieve();
-    // Events not required.
-    if (!hasField(fieldsToRetrieve, Field.EVENTS)) {
-      infoColFamilyList.addFilter(
-          TimelineFilterUtils.createHBaseQualifierFilter(
-              CompareOp.NOT_EQUAL, ApplicationColumnPrefix.EVENT));
-    }
-    // info not required.
-    if (!hasField(fieldsToRetrieve, Field.INFO)) {
-      infoColFamilyList.addFilter(
-          TimelineFilterUtils.createHBaseQualifierFilter(
-              CompareOp.NOT_EQUAL, ApplicationColumnPrefix.INFO));
-    }
-    // is related to not required.
-    if (!hasField(fieldsToRetrieve, Field.IS_RELATED_TO)) {
-      infoColFamilyList.addFilter(
-          TimelineFilterUtils.createHBaseQualifierFilter(
-              CompareOp.NOT_EQUAL, ApplicationColumnPrefix.IS_RELATED_TO));
-    }
-    // relates to not required.
-    if (!hasField(fieldsToRetrieve, Field.RELATES_TO)) {
-      infoColFamilyList.addFilter(
-          TimelineFilterUtils.createHBaseQualifierFilter(
-              CompareOp.NOT_EQUAL, ApplicationColumnPrefix.RELATES_TO));
-    }
-  }
-
-  /**
-   * Updates filter list based on fields for confs and metrics to retrieve.
-   *
-   * @param listBasedOnFields filter list based on fields.
-   * @throws IOException if any problem occurs while updating filter list.
-   */
-  private void updateFilterForConfsAndMetricsToRetrieve(
-      FilterList listBasedOnFields) throws IOException {
-    TimelineDataToRetrieve dataToRetrieve = getDataToRetrieve();
-    // Please note that if confsToRetrieve is specified, we would have added
-    // CONFS to fields to retrieve in augmentParams() even if not specified.
-    if (dataToRetrieve.getFieldsToRetrieve().contains(Field.CONFIGS)) {
-      // Create a filter list for configs.
-      listBasedOnFields.addFilter(TimelineFilterUtils.
-          createFilterForConfsOrMetricsToRetrieve(
-              dataToRetrieve.getConfsToRetrieve(),
-              ApplicationColumnFamily.CONFIGS, ApplicationColumnPrefix.CONFIG));
-    }
-
-    // Please note that if metricsToRetrieve is specified, we would have added
-    // METRICS to fields to retrieve in augmentParams() even if not specified.
-    if (dataToRetrieve.getFieldsToRetrieve().contains(Field.METRICS)) {
-      // Create a filter list for metrics.
-      listBasedOnFields.addFilter(TimelineFilterUtils.
-          createFilterForConfsOrMetricsToRetrieve(
-              dataToRetrieve.getMetricsToRetrieve(),
-              ApplicationColumnFamily.METRICS, ApplicationColumnPrefix.METRIC));
-    }
-  }
-
-  @Override
-  protected FilterList constructFilterListBasedOnFields() throws IOException {
-    if (!needCreateFilterListBasedOnFields()) {
-      // Fetch all the columns. No need of a filter.
-      return null;
-    }
-    FilterList listBasedOnFields = new FilterList(Operator.MUST_PASS_ONE);
-    FilterList infoColFamilyList = new FilterList();
-    // By default fetch everything in INFO column family.
-    FamilyFilter infoColumnFamily =
-        new FamilyFilter(CompareOp.EQUAL,
-            new BinaryComparator(ApplicationColumnFamily.INFO.getBytes()));
-    infoColFamilyList.addFilter(infoColumnFamily);
-    if (!isSingleEntityRead() && fetchPartialColsFromInfoFamily()) {
-      // We can fetch only some of the columns from info family.
-      infoColFamilyList.addFilter(createFilterListForColsOfInfoFamily());
-    } else {
-      // Exclude column prefixes in info column family which are not required
-      // based on fields to retrieve.
-      excludeFieldsFromInfoColFamily(infoColFamilyList);
-    }
-    listBasedOnFields.addFilter(infoColFamilyList);
-
-    updateFilterForConfsAndMetricsToRetrieve(listBasedOnFields);
-    return listBasedOnFields;
-  }
-
-  @Override
-  protected Result getResult(Configuration hbaseConf, Connection conn,
-      FilterList filterList) throws IOException {
-    TimelineReaderContext context = getContext();
-    ApplicationRowKey applicationRowKey =
-        new ApplicationRowKey(context.getClusterId(), context.getUserId(),
-            context.getFlowName(), context.getFlowRunId(), context.getAppId());
-    byte[] rowKey = applicationRowKey.getRowKey();
-    Get get = new Get(rowKey);
-    // Set time range for metric values.
-    setMetricsTimeRange(get);
-    get.setMaxVersions(getDataToRetrieve().getMetricsLimit());
-    if (filterList != null && !filterList.getFilters().isEmpty()) {
-      get.setFilter(filterList);
-    }
-    return getTable().getResult(hbaseConf, conn, get);
-  }
-
-  @Override
-  protected void validateParams() {
-    Preconditions.checkNotNull(getContext(), "context shouldn't be null");
-    Preconditions.checkNotNull(
-        getDataToRetrieve(), "data to retrieve shouldn't be null");
-    Preconditions.checkNotNull(getContext().getClusterId(),
-        "clusterId shouldn't be null");
-    Preconditions.checkNotNull(getContext().getEntityType(),
-        "entityType shouldn't be null");
-    if (isSingleEntityRead()) {
-      Preconditions.checkNotNull(getContext().getAppId(),
-          "appId shouldn't be null");
-    } else {
-      Preconditions.checkNotNull(getContext().getUserId(),
-          "userId shouldn't be null");
-      Preconditions.checkNotNull(getContext().getFlowName(),
-          "flowName shouldn't be null");
-    }
-  }
-
-  @Override
-  protected void augmentParams(Configuration hbaseConf, Connection conn)
-      throws IOException {
-    if (isSingleEntityRead()) {
-      // Get flow context information from AppToFlow table.
-      defaultAugmentParams(hbaseConf, conn);
-    }
-    // Add configs/metrics to fields to retrieve if confsToRetrieve and/or
-    // metricsToRetrieve are specified.
-    getDataToRetrieve().addFieldsBasedOnConfsAndMetricsToRetrieve();
-    if (!isSingleEntityRead()) {
-      createFiltersIfNull();
-    }
-  }
-
-  private void setMetricsTimeRange(Query query) {
-    // Set time range for metric values.
-    HBaseTimelineStorageUtils.setMetricsTimeRange(
-        query, ApplicationColumnFamily.METRICS.getBytes(),
-        getDataToRetrieve().getMetricsTimeBegin(),
-        getDataToRetrieve().getMetricsTimeEnd());
-  }
-
-  @Override
-  protected ResultScanner getResults(Configuration hbaseConf,
-      Connection conn, FilterList filterList) throws IOException {
-    Scan scan = new Scan();
-    TimelineReaderContext context = getContext();
-    RowKeyPrefix<ApplicationRowKey> applicationRowKeyPrefix = null;
-
-    // Whether or not flowRunID is null doesn't matter, the
-    // ApplicationRowKeyPrefix will do the right thing.
-    // default mode, will always scans from beginning of entity type.
-    if (getFilters().getFromId() == null) {
-      applicationRowKeyPrefix = new ApplicationRowKeyPrefix(
-          context.getClusterId(), context.getUserId(), context.getFlowName(),
-          context.getFlowRunId());
-      scan.setRowPrefixFilter(applicationRowKeyPrefix.getRowKeyPrefix());
-    } else {
-      ApplicationRowKey applicationRowKey = null;
-      try {
-        applicationRowKey =
-            ApplicationRowKey.parseRowKeyFromString(getFilters().getFromId());
-      } catch (IllegalArgumentException e) {
-        throw new BadRequestException("Invalid filter fromid is provided.");
-      }
-      if (!context.getClusterId().equals(applicationRowKey.getClusterId())) {
-        throw new BadRequestException(
-            "fromid doesn't belong to clusterId=" + context.getClusterId());
-      }
-
-      // set start row
-      scan.setStartRow(applicationRowKey.getRowKey());
-
-      // get the bytes for stop row
-      applicationRowKeyPrefix = new ApplicationRowKeyPrefix(
-          context.getClusterId(), context.getUserId(), context.getFlowName(),
-          context.getFlowRunId());
-
-      // set stop row
-      scan.setStopRow(
-          HBaseTimelineStorageUtils.calculateTheClosestNextRowKeyForPrefix(
-              applicationRowKeyPrefix.getRowKeyPrefix()));
-    }
-
-    FilterList newList = new FilterList();
-    newList.addFilter(new PageFilter(getFilters().getLimit()));
-    if (filterList != null && !filterList.getFilters().isEmpty()) {
-      newList.addFilter(filterList);
-    }
-    scan.setFilter(newList);
-
-    // Set time range for metric values.
-    setMetricsTimeRange(scan);
-    scan.setMaxVersions(getDataToRetrieve().getMetricsLimit());
-    return getTable().getResultScanner(hbaseConf, conn, scan);
-  }
-
-  @Override
-  protected TimelineEntity parseEntity(Result result) throws IOException {
-    if (result == null || result.isEmpty()) {
-      return null;
-    }
-    TimelineEntity entity = new TimelineEntity();
-    entity.setType(TimelineEntityType.YARN_APPLICATION.toString());
-    String entityId = ApplicationColumn.ID.readResult(result).toString();
-    entity.setId(entityId);
-
-    TimelineEntityFilters filters = getFilters();
-    // fetch created time
-    Long createdTime = (Long) ApplicationColumn.CREATED_TIME.readResult(result);
-    entity.setCreatedTime(createdTime);
-
-    EnumSet<Field> fieldsToRetrieve = getDataToRetrieve().getFieldsToRetrieve();
-    // fetch is related to entities and match isRelatedTo filter. If isRelatedTo
-    // filters do not match, entity would be dropped. We have to match filters
-    // locally as relevant HBase filters to filter out rows on the basis of
-    // isRelatedTo are not set in HBase scan.
-    boolean checkIsRelatedTo =
-        !isSingleEntityRead() && filters.getIsRelatedTo() != null &&
-        filters.getIsRelatedTo().getFilterList().size() > 0;
-    if (hasField(fieldsToRetrieve, Field.IS_RELATED_TO) || checkIsRelatedTo) {
-      readRelationship(entity, result, ApplicationColumnPrefix.IS_RELATED_TO,
-          true);
-      if (checkIsRelatedTo && !TimelineStorageUtils.matchIsRelatedTo(entity,
-          filters.getIsRelatedTo())) {
-        return null;
-      }
-      if (!hasField(fieldsToRetrieve,
-          Field.IS_RELATED_TO)) {
-        entity.getIsRelatedToEntities().clear();
-      }
-    }
-
-    // fetch relates to entities and match relatesTo filter. If relatesTo
-    // filters do not match, entity would be dropped. We have to match filters
-    // locally as relevant HBase filters to filter out rows on the basis of
-    // relatesTo are not set in HBase scan.
-    boolean checkRelatesTo =
-        !isSingleEntityRead() && filters.getRelatesTo() != null &&
-        filters.getRelatesTo().getFilterList().size() > 0;
-    if (hasField(fieldsToRetrieve, Field.RELATES_TO) ||
-        checkRelatesTo) {
-      readRelationship(entity, result, ApplicationColumnPrefix.RELATES_TO,
-          false);
-      if (checkRelatesTo && !TimelineStorageUtils.matchRelatesTo(entity,
-          filters.getRelatesTo())) {
-        return null;
-      }
-      if (!hasField(fieldsToRetrieve, Field.RELATES_TO)) {
-        entity.getRelatesToEntities().clear();
-      }
-    }
-
-    // fetch info if fieldsToRetrieve contains INFO or ALL.
-    if (hasField(fieldsToRetrieve, Field.INFO)) {
-      readKeyValuePairs(entity, result, ApplicationColumnPrefix.INFO, false);
-    }
-
-    // fetch configs if fieldsToRetrieve contains CONFIGS or ALL.
-    if (hasField(fieldsToRetrieve, Field.CONFIGS)) {
-      readKeyValuePairs(entity, result, ApplicationColumnPrefix.CONFIG, true);
-    }
-
-    // fetch events and match event filters if they exist. If event filters do
-    // not match, entity would be dropped. We have to match filters locally
-    // as relevant HBase filters to filter out rows on the basis of events
-    // are not set in HBase scan.
-    boolean checkEvents =
-        !isSingleEntityRead() && filters.getEventFilters() != null &&
-        filters.getEventFilters().getFilterList().size() > 0;
-    if (hasField(fieldsToRetrieve, Field.EVENTS) || checkEvents) {
-      readEvents(entity, result, ApplicationColumnPrefix.EVENT);
-      if (checkEvents && !TimelineStorageUtils.matchEventFilters(entity,
-          filters.getEventFilters())) {
-        return null;
-      }
-      if (!hasField(fieldsToRetrieve, Field.EVENTS)) {
-        entity.getEvents().clear();
-      }
-    }
-
-    // fetch metrics if fieldsToRetrieve contains METRICS or ALL.
-    if (hasField(fieldsToRetrieve, Field.METRICS)) {
-      readMetrics(entity, result, ApplicationColumnPrefix.METRIC);
-    }
-
-    ApplicationRowKey rowKey = ApplicationRowKey.parseRowKey(result.getRow());
-    entity.getInfo().put(TimelineReaderUtils.FROMID_KEY,
-        rowKey.getRowKeyAsString());
-    return entity;
-  }
-}
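
Although the file moves wholesale to a new submodule, the filter-composition
idiom its getResults() relies on is worth a standalone sketch (HBase 1.x client
API; the class name and limit handling are illustrative):

    import org.apache.hadoop.hbase.client.Scan;
    import org.apache.hadoop.hbase.filter.FilterList;
    import org.apache.hadoop.hbase.filter.PageFilter;

    public class ScanFilterSketch {
      // Combine a row-count cap with optional field filters. FilterList
      // defaults to MUST_PASS_ALL (logical AND), so a row is returned only
      // if it survives both the PageFilter and the nested field filters.
      static Scan buildScan(FilterList fieldFilters, long limit) {
        Scan scan = new Scan();
        FilterList combined = new FilterList();
        combined.addFilter(new PageFilter(limit));
        if (fieldFilters != null && !fieldFilters.getFilters().isEmpty()) {
          combined.addFilter(fieldFilters);
        }
        scan.setFilter(combined);
        return scan;
      }
    }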

http://git-wip-us.apache.org/repos/asf/hadoop/blob/9af30d46/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/reader/EntityTypeReader.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/reader/EntityTypeReader.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/reader/EntityTypeReader.java
deleted file mode 100644
index 3f72334..0000000
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/reader/EntityTypeReader.java
+++ /dev/null
@@ -1,175 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.yarn.server.timelineservice.storage.reader;
-
-import com.google.common.base.Preconditions;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hbase.client.Connection;
-import org.apache.hadoop.hbase.client.Result;
-import org.apache.hadoop.hbase.client.ResultScanner;
-import org.apache.hadoop.hbase.client.Scan;
-import org.apache.hadoop.hbase.filter.FilterList;
-import org.apache.hadoop.hbase.filter.FirstKeyOnlyFilter;
-import org.apache.hadoop.hbase.filter.KeyOnlyFilter;
-import org.apache.hadoop.hbase.filter.PageFilter;
-import org.apache.hadoop.yarn.api.records.timelineservice.TimelineEntity;
-import org.apache.hadoop.yarn.server.timelineservice.reader.TimelineReaderContext;
-import org.apache.hadoop.yarn.server.timelineservice.storage.common.HBaseTimelineStorageUtils;
-import org.apache.hadoop.yarn.server.timelineservice.storage.common.Separator;
-import org.apache.hadoop.yarn.server.timelineservice.storage.entity.EntityRowKey;
-import org.apache.hadoop.yarn.server.timelineservice.storage.entity.EntityRowKeyPrefix;
-import org.apache.hadoop.yarn.server.timelineservice.storage.entity.EntityTable;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import java.io.IOException;
-import java.util.Arrays;
-import java.util.Set;
-import java.util.TreeSet;
-
-/**
- * Timeline entity reader for listing all available entity types given one
- * reader context. Right now only supports listing all entity types within one
- * YARN application.
- */
-public final class EntityTypeReader extends AbstractTimelineStorageReader {
-
-  private static final Logger LOG =
-      LoggerFactory.getLogger(EntityTypeReader.class);
-  private static final EntityTable ENTITY_TABLE = new EntityTable();
-
-  public EntityTypeReader(TimelineReaderContext context) {
-    super(context);
-  }
-
-  /**
-   * Reads a set of timeline entity types from the HBase storage for the given
-   * context.
-   *
-   * @param hbaseConf HBase Configuration.
-   * @param conn HBase Connection.
-   * @return a set of <cite>TimelineEntity</cite> objects, with only type field
-   *         set.
-   * @throws IOException if any exception is encountered while reading entities.
-   */
-  public Set<String> readEntityTypes(Configuration hbaseConf,
-      Connection conn) throws IOException {
-
-    validateParams();
-    augmentParams(hbaseConf, conn);
-
-    Set<String> types = new TreeSet<>();
-    TimelineReaderContext context = getContext();
-    EntityRowKeyPrefix prefix = new EntityRowKeyPrefix(context.getClusterId(),
-        context.getUserId(), context.getFlowName(), context.getFlowRunId(),
-        context.getAppId());
-    byte[] currRowKey = prefix.getRowKeyPrefix();
-    byte[] nextRowKey = prefix.getRowKeyPrefix();
-    nextRowKey[nextRowKey.length - 1]++;
-
-    FilterList typeFilterList = new FilterList();
-    typeFilterList.addFilter(new FirstKeyOnlyFilter());
-    typeFilterList.addFilter(new KeyOnlyFilter());
-    typeFilterList.addFilter(new PageFilter(1));
-    LOG.debug("FilterList created for scan is - {}", typeFilterList);
-
-    int counter = 0;
-    while (true) {
-      try (ResultScanner results =
-          getResult(hbaseConf, conn, typeFilterList, currRowKey, nextRowKey)) {
-        TimelineEntity entity = parseEntityForType(results.next());
-        if (entity == null) {
-          break;
-        }
-        ++counter;
-        if (!types.add(entity.getType())) {
-          LOG.warn("Failed to add type " + entity.getType()
-              + " to the result set because there is a duplicated copy. ");
-        }
-        String currType = entity.getType();
-        if (LOG.isDebugEnabled()) {
-          LOG.debug("Current row key: " + Arrays.toString(currRowKey));
-          LOG.debug("New entity type discovered: " + currType);
-        }
-        currRowKey = getNextRowKey(prefix.getRowKeyPrefix(), currType);
-      }
-    }
-    LOG.debug("Scanned {} records for {} types", counter, types.size());
-    return types;
-  }
-
-  @Override
-  protected void validateParams() {
-    Preconditions.checkNotNull(getContext(), "context shouldn't be null");
-    Preconditions.checkNotNull(getContext().getClusterId(),
-        "clusterId shouldn't be null");
-    Preconditions.checkNotNull(getContext().getAppId(),
-        "appId shouldn't be null");
-  }
-
-  /**
-   * Gets the possibly next row key prefix given current prefix and type.
-   *
-   * @param currRowKeyPrefix The current prefix that contains user, cluster,
-   *                         flow, run, and application id.
-   * @param entityType Current entity type.
-   * @return A new prefix for the possibly immediately next row key.
-   */
-  private static byte[] getNextRowKey(byte[] currRowKeyPrefix,
-      String entityType) {
-    if (currRowKeyPrefix == null || entityType == null) {
-      return null;
-    }
-
-    byte[] entityTypeEncoded = Separator.QUALIFIERS.join(
-        Separator.encode(entityType, Separator.SPACE, Separator.TAB,
-            Separator.QUALIFIERS),
-        Separator.EMPTY_BYTES);
-
-    byte[] currRowKey
-        = new byte[currRowKeyPrefix.length + entityTypeEncoded.length];
-    System.arraycopy(currRowKeyPrefix, 0, currRowKey, 0,
-        currRowKeyPrefix.length);
-    System.arraycopy(entityTypeEncoded, 0, currRowKey, currRowKeyPrefix.length,
-        entityTypeEncoded.length);
-
-    return HBaseTimelineStorageUtils.calculateTheClosestNextRowKeyForPrefix(
-        currRowKey);
-  }
-
-  private ResultScanner getResult(Configuration hbaseConf, Connection conn,
-      FilterList filterList, byte[] startPrefix, byte[] endPrefix)
-      throws IOException {
-    Scan scan = new Scan(startPrefix, endPrefix);
-    scan.setFilter(filterList);
-    scan.setSmall(true);
-    return ENTITY_TABLE.getResultScanner(hbaseConf, conn, scan);
-  }
-
-  private TimelineEntity parseEntityForType(Result result)
-      throws IOException {
-    if (result == null || result.isEmpty()) {
-      return null;
-    }
-    TimelineEntity entity = new TimelineEntity();
-    EntityRowKey newRowKey = EntityRowKey.parseRowKey(result.getRow());
-    entity.setType(newRowKey.getEntityType());
-    return entity;
-  }
-
-}
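
The deleted EntityTypeReader above implements a skip scan: key-only filters plus PageFilter(1) fetch one row per entity type, and the next scan then starts just past every remaining row of that type. The jump relies on finding the closest row key that sorts strictly after a given prefix. A minimal sketch of that computation, using a hypothetical helper name in place of HBaseTimelineStorageUtils.calculateTheClosestNextRowKeyForPrefix and assuming the usual increment-and-truncate semantics:

    import java.util.Arrays;

    // Smallest byte[] that sorts strictly after every key starting with
    // 'prefix': bump the last byte that is not 0xFF and drop the tail.
    static byte[] closestRowAfterPrefix(byte[] prefix) {
      byte[] next = Arrays.copyOf(prefix, prefix.length);
      for (int i = next.length - 1; i >= 0; i--) {
        if (next[i] != (byte) 0xFF) {
          next[i]++;
          return Arrays.copyOf(next, i + 1);
        }
      }
      return null; // prefix was all 0xFF; scan to the end of the table
    }

The simpler in-place increment used in readEntityTypes() above wraps to 0x00 when the last byte is 0xFF, a corner case the prefix-aware variant avoids.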

http://git-wip-us.apache.org/repos/asf/hadoop/blob/9af30d46/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/reader/FlowActivityEntityReader.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/reader/FlowActivityEntityReader.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/reader/FlowActivityEntityReader.java
deleted file mode 100644
index a1cdb29..0000000
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/reader/FlowActivityEntityReader.java
+++ /dev/null
@@ -1,185 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.yarn.server.timelineservice.storage.reader;
-
-import java.io.IOException;
-import java.util.Map;
-
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hbase.client.Connection;
-import org.apache.hadoop.hbase.client.Result;
-import org.apache.hadoop.hbase.client.ResultScanner;
-import org.apache.hadoop.hbase.client.Scan;
-import org.apache.hadoop.hbase.filter.FilterList;
-import org.apache.hadoop.hbase.filter.PageFilter;
-import org.apache.hadoop.yarn.api.records.timelineservice.FlowActivityEntity;
-import org.apache.hadoop.yarn.api.records.timelineservice.FlowRunEntity;
-import org.apache.hadoop.yarn.api.records.timelineservice.TimelineEntity;
-import org.apache.hadoop.yarn.server.timelineservice.reader.TimelineDataToRetrieve;
-import org.apache.hadoop.yarn.server.timelineservice.reader.TimelineEntityFilters;
-import org.apache.hadoop.yarn.server.timelineservice.reader.TimelineReaderContext;
-import org.apache.hadoop.yarn.server.timelineservice.reader.TimelineReaderUtils;
-import org.apache.hadoop.yarn.server.timelineservice.storage.common.BaseTable;
-import org.apache.hadoop.yarn.server.timelineservice.storage.common.KeyConverter;
-import org.apache.hadoop.yarn.server.timelineservice.storage.common.LongKeyConverter;
-import org.apache.hadoop.yarn.server.timelineservice.storage.flow.FlowActivityColumnPrefix;
-import org.apache.hadoop.yarn.server.timelineservice.storage.flow.FlowActivityRowKey;
-import org.apache.hadoop.yarn.server.timelineservice.storage.flow.FlowActivityRowKeyPrefix;
-import org.apache.hadoop.yarn.server.timelineservice.storage.flow.FlowActivityTable;
-import org.apache.hadoop.yarn.webapp.BadRequestException;
-
-import com.google.common.base.Preconditions;
-
-/**
- * Timeline entity reader for flow activity entities that are stored in the
- * flow activity table.
- */
-class FlowActivityEntityReader extends TimelineEntityReader {
-  private static final FlowActivityTable FLOW_ACTIVITY_TABLE =
-      new FlowActivityTable();
-
-  /**
-   * Used to convert Long key components to and from storage format.
-   */
-  private final KeyConverter<Long> longKeyConverter = new LongKeyConverter();
-
-
-  public FlowActivityEntityReader(TimelineReaderContext ctxt,
-      TimelineEntityFilters entityFilters, TimelineDataToRetrieve toRetrieve) {
-    super(ctxt, entityFilters, toRetrieve);
-  }
-
-  public FlowActivityEntityReader(TimelineReaderContext ctxt,
-      TimelineDataToRetrieve toRetrieve) {
-    super(ctxt, toRetrieve);
-  }
-
-  /**
-   * Uses the {@link FlowActivityTable}.
-   */
-  @Override
-  protected BaseTable<?> getTable() {
-    return FLOW_ACTIVITY_TABLE;
-  }
-
-  @Override
-  protected void validateParams() {
-    Preconditions.checkNotNull(getContext().getClusterId(),
-        "clusterId shouldn't be null");
-  }
-
-  @Override
-  protected void augmentParams(Configuration hbaseConf, Connection conn)
-      throws IOException {
-    createFiltersIfNull();
-  }
-
-  @Override
-  protected FilterList constructFilterListBasedOnFilters() throws IOException {
-    return null;
-  }
-
-  @Override
-  protected FilterList constructFilterListBasedOnFields() {
-    return null;
-  }
-
-  @Override
-  protected Result getResult(Configuration hbaseConf, Connection conn,
-      FilterList filterList) throws IOException {
-    throw new UnsupportedOperationException(
-        "we don't support a single entity query");
-  }
-
-  @Override
-  protected ResultScanner getResults(Configuration hbaseConf,
-      Connection conn, FilterList filterList) throws IOException {
-    Scan scan = new Scan();
-    String clusterId = getContext().getClusterId();
-    if (getFilters().getFromId() == null
-        && getFilters().getCreatedTimeBegin() == 0L
-        && getFilters().getCreatedTimeEnd() == Long.MAX_VALUE) {
-      // All records have to be chosen.
-      scan.setRowPrefixFilter(new FlowActivityRowKeyPrefix(clusterId)
-          .getRowKeyPrefix());
-    } else if (getFilters().getFromId() != null) {
-      FlowActivityRowKey key = null;
-      try {
-        key =
-            FlowActivityRowKey.parseRowKeyFromString(getFilters().getFromId());
-      } catch (IllegalArgumentException e) {
-        throw new BadRequestException("Invalid filter fromid is provided.");
-      }
-      if (!clusterId.equals(key.getClusterId())) {
-        throw new BadRequestException(
-            "fromid doesn't belong to clusterId=" + clusterId);
-      }
-      scan.setStartRow(key.getRowKey());
-      scan.setStopRow(
-          new FlowActivityRowKeyPrefix(clusterId,
-              (getFilters().getCreatedTimeBegin() <= 0 ? 0
-                  : (getFilters().getCreatedTimeBegin() - 1)))
-                      .getRowKeyPrefix());
-    } else {
-      scan.setStartRow(new FlowActivityRowKeyPrefix(clusterId, getFilters()
-          .getCreatedTimeEnd()).getRowKeyPrefix());
-      scan.setStopRow(new FlowActivityRowKeyPrefix(clusterId, (getFilters()
-          .getCreatedTimeBegin() <= 0 ? 0
-          : (getFilters().getCreatedTimeBegin() - 1))).getRowKeyPrefix());
-    }
-    // Use the page filter to limit the result to the page size. The scanner
-    // may still return more rows than the limit, so we must stop reading
-    // once we have enough as we iterate.
-    scan.setFilter(new PageFilter(getFilters().getLimit()));
-    return getTable().getResultScanner(hbaseConf, conn, scan);
-  }
-
-  @Override
-  protected TimelineEntity parseEntity(Result result) throws IOException {
-    FlowActivityRowKey rowKey = FlowActivityRowKey.parseRowKey(result.getRow());
-
-    Long time = rowKey.getDayTimestamp();
-    String user = rowKey.getUserId();
-    String flowName = rowKey.getFlowName();
-
-    FlowActivityEntity flowActivity = new FlowActivityEntity(
-        getContext().getClusterId(), time, user, flowName);
-    // set the id
-    flowActivity.setId(flowActivity.getId());
-    // get the list of run ids along with the version that are associated with
-    // this flow on this day
-    Map<Long, Object> runIdsMap =
-        FlowActivityColumnPrefix.RUN_ID.readResults(result, longKeyConverter);
-    for (Map.Entry<Long, Object> e : runIdsMap.entrySet()) {
-      Long runId = e.getKey();
-      String version = (String)e.getValue();
-      FlowRunEntity flowRun = new FlowRunEntity();
-      flowRun.setUser(user);
-      flowRun.setName(flowName);
-      flowRun.setRunId(runId);
-      flowRun.setVersion(version);
-      // set the id
-      flowRun.setId(flowRun.getId());
-      flowActivity.addFlowRun(flowRun);
-    }
-    flowActivity.getInfo().put(TimelineReaderUtils.FROMID_KEY,
-        rowKey.getRowKeyAsString());
-    return flowActivity;
-  }
-}
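
As the comment in getResults() notes, PageFilter is evaluated independently on each region server, so the scanner can hand back more rows than the requested limit and the caller must stop on its own. A small, self-contained illustration of that pattern against the plain HBase client API (the helper name is hypothetical):

    import java.io.IOException;
    import java.util.ArrayList;
    import java.util.List;
    import org.apache.hadoop.hbase.client.Result;
    import org.apache.hadoop.hbase.client.ResultScanner;
    import org.apache.hadoop.hbase.client.Scan;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.filter.PageFilter;

    // Collect at most 'limit' rows; PageFilter alone is not sufficient
    // because each region server evaluates it separately.
    static List<Result> scanWithLimit(Table table, Scan scan, long limit)
        throws IOException {
      scan.setFilter(new PageFilter(limit));
      List<Result> rows = new ArrayList<>();
      try (ResultScanner scanner = table.getScanner(scan)) {
        for (Result r : scanner) {
          rows.add(r);
          if (rows.size() >= limit) {
            break; // enforce the limit on the client side
          }
        }
      }
      return rows;
    }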

http://git-wip-us.apache.org/repos/asf/hadoop/blob/9af30d46/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/reader/FlowRunEntityReader.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/reader/FlowRunEntityReader.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/reader/FlowRunEntityReader.java
deleted file mode 100644
index af043b3..0000000
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/reader/FlowRunEntityReader.java
+++ /dev/null
@@ -1,294 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.yarn.server.timelineservice.storage.reader;
-
-import java.io.IOException;
-import java.util.EnumSet;
-
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hbase.client.Connection;
-import org.apache.hadoop.hbase.client.Get;
-import org.apache.hadoop.hbase.client.Result;
-import org.apache.hadoop.hbase.client.ResultScanner;
-import org.apache.hadoop.hbase.client.Scan;
-import org.apache.hadoop.hbase.filter.BinaryComparator;
-import org.apache.hadoop.hbase.filter.BinaryPrefixComparator;
-import org.apache.hadoop.hbase.filter.CompareFilter.CompareOp;
-import org.apache.hadoop.hbase.filter.FamilyFilter;
-import org.apache.hadoop.hbase.filter.FilterList;
-import org.apache.hadoop.hbase.filter.FilterList.Operator;
-import org.apache.hadoop.hbase.filter.PageFilter;
-import org.apache.hadoop.hbase.filter.QualifierFilter;
-import org.apache.hadoop.yarn.api.records.timelineservice.FlowRunEntity;
-import org.apache.hadoop.yarn.api.records.timelineservice.TimelineEntity;
-import org.apache.hadoop.yarn.server.timelineservice.reader.TimelineDataToRetrieve;
-import org.apache.hadoop.yarn.server.timelineservice.reader.TimelineEntityFilters;
-import org.apache.hadoop.yarn.server.timelineservice.reader.TimelineReaderContext;
-import org.apache.hadoop.yarn.server.timelineservice.reader.TimelineReaderUtils;
-import org.apache.hadoop.yarn.server.timelineservice.reader.filter.TimelineFilterList;
-import org.apache.hadoop.yarn.server.timelineservice.reader.filter.TimelineFilterUtils;
-import org.apache.hadoop.yarn.server.timelineservice.storage.TimelineReader.Field;
-import org.apache.hadoop.yarn.server.timelineservice.storage.common.BaseTable;
-import org.apache.hadoop.yarn.server.timelineservice.storage.common.HBaseTimelineStorageUtils;
-import org.apache.hadoop.yarn.server.timelineservice.storage.common.RowKeyPrefix;
-import org.apache.hadoop.yarn.server.timelineservice.storage.flow.FlowRunColumn;
-import org.apache.hadoop.yarn.server.timelineservice.storage.flow.FlowRunColumnFamily;
-import org.apache.hadoop.yarn.server.timelineservice.storage.flow.FlowRunColumnPrefix;
-import org.apache.hadoop.yarn.server.timelineservice.storage.flow.FlowRunRowKey;
-import org.apache.hadoop.yarn.server.timelineservice.storage.flow.FlowRunRowKeyPrefix;
-import org.apache.hadoop.yarn.server.timelineservice.storage.flow.FlowRunTable;
-import org.apache.hadoop.yarn.webapp.BadRequestException;
-
-import com.google.common.base.Preconditions;
-
-/**
- * Timeline entity reader for flow run entities that are stored in the flow run
- * table.
- */
-class FlowRunEntityReader extends TimelineEntityReader {
-  private static final FlowRunTable FLOW_RUN_TABLE = new FlowRunTable();
-
-  public FlowRunEntityReader(TimelineReaderContext ctxt,
-      TimelineEntityFilters entityFilters, TimelineDataToRetrieve toRetrieve) {
-    super(ctxt, entityFilters, toRetrieve);
-  }
-
-  public FlowRunEntityReader(TimelineReaderContext ctxt,
-      TimelineDataToRetrieve toRetrieve) {
-    super(ctxt, toRetrieve);
-  }
-
-  /**
-   * Uses the {@link FlowRunTable}.
-   */
-  @Override
-  protected BaseTable<?> getTable() {
-    return FLOW_RUN_TABLE;
-  }
-
-  @Override
-  protected void validateParams() {
-    Preconditions.checkNotNull(getContext(), "context shouldn't be null");
-    Preconditions.checkNotNull(getDataToRetrieve(),
-        "data to retrieve shouldn't be null");
-    Preconditions.checkNotNull(getContext().getClusterId(),
-        "clusterId shouldn't be null");
-    Preconditions.checkNotNull(getContext().getUserId(),
-        "userId shouldn't be null");
-    Preconditions.checkNotNull(getContext().getFlowName(),
-        "flowName shouldn't be null");
-    if (isSingleEntityRead()) {
-      Preconditions.checkNotNull(getContext().getFlowRunId(),
-          "flowRunId shouldn't be null");
-    }
-    EnumSet<Field> fieldsToRetrieve = getDataToRetrieve().getFieldsToRetrieve();
-    if (!isSingleEntityRead() && fieldsToRetrieve != null) {
-      for (Field field : fieldsToRetrieve) {
-        if (field != Field.ALL && field != Field.METRICS) {
-          throw new BadRequestException("Invalid field " + field
-              + " specified while querying flow runs.");
-        }
-      }
-    }
-  }
-
-  @Override
-  protected void augmentParams(Configuration hbaseConf, Connection conn) {
-    // Add metrics to fields to retrieve if metricsToRetrieve is specified.
-    getDataToRetrieve().addFieldsBasedOnConfsAndMetricsToRetrieve();
-    if (!isSingleEntityRead()) {
-      createFiltersIfNull();
-    }
-  }
-
-  protected FilterList constructFilterListBasedOnFilters() throws IOException {
-    FilterList listBasedOnFilters = new FilterList();
-    // Filter based on created time range.
-    Long createdTimeBegin = getFilters().getCreatedTimeBegin();
-    Long createdTimeEnd = getFilters().getCreatedTimeEnd();
-    if (createdTimeBegin != 0 || createdTimeEnd != Long.MAX_VALUE) {
-      listBasedOnFilters.addFilter(TimelineFilterUtils
-          .createSingleColValueFiltersByRange(FlowRunColumn.MIN_START_TIME,
-              createdTimeBegin, createdTimeEnd));
-    }
-    // Filter based on metric filters.
-    TimelineFilterList metricFilters = getFilters().getMetricFilters();
-    if (metricFilters != null && !metricFilters.getFilterList().isEmpty()) {
-      listBasedOnFilters.addFilter(TimelineFilterUtils.createHBaseFilterList(
-          FlowRunColumnPrefix.METRIC, metricFilters));
-    }
-    return listBasedOnFilters;
-  }
-
-  /**
-   * Add {@link QualifierFilter} filters to filter list for each column of flow
-   * run table.
-   *
-   * @return filter list to which qualifier filters have been added.
-   */
-  private FilterList updateFixedColumns() {
-    FilterList columnsList = new FilterList(Operator.MUST_PASS_ONE);
-    for (FlowRunColumn column : FlowRunColumn.values()) {
-      columnsList.addFilter(new QualifierFilter(CompareOp.EQUAL,
-          new BinaryComparator(column.getColumnQualifierBytes())));
-    }
-    return columnsList;
-  }
-
-  @Override
-  protected FilterList constructFilterListBasedOnFields() throws IOException {
-    FilterList list = new FilterList(Operator.MUST_PASS_ONE);
-    // By default fetch everything in INFO column family.
-    FamilyFilter infoColumnFamily =
-        new FamilyFilter(CompareOp.EQUAL, new BinaryComparator(
-            FlowRunColumnFamily.INFO.getBytes()));
-    TimelineDataToRetrieve dataToRetrieve = getDataToRetrieve();
-    // If multiple entities have to be retrieved, check if metrics have to be
-    // retrieved and if not, add a filter so that metrics can be excluded.
-    // Metrics are always returned if we are reading a single entity.
-    if (!isSingleEntityRead()
-        && !hasField(dataToRetrieve.getFieldsToRetrieve(), Field.METRICS)) {
-      FilterList infoColFamilyList = new FilterList(Operator.MUST_PASS_ONE);
-      infoColFamilyList.addFilter(infoColumnFamily);
-      infoColFamilyList.addFilter(new QualifierFilter(CompareOp.NOT_EQUAL,
-          new BinaryPrefixComparator(FlowRunColumnPrefix.METRIC
-              .getColumnPrefixBytes(""))));
-      list.addFilter(infoColFamilyList);
-    } else {
-      // Check if metricsToRetrieve are specified and if they are, create a
-      // filter list for info column family by adding flow run tables columns
-      // and a list for metrics to retrieve. Please note that fieldsToRetrieve
-      // will have METRICS added to it if metricsToRetrieve are specified
-      // (in augmentParams()).
-      TimelineFilterList metricsToRetrieve =
-          dataToRetrieve.getMetricsToRetrieve();
-      if (metricsToRetrieve != null
-          && !metricsToRetrieve.getFilterList().isEmpty()) {
-        FilterList infoColFamilyList = new FilterList();
-        infoColFamilyList.addFilter(infoColumnFamily);
-        FilterList columnsList = updateFixedColumns();
-        columnsList.addFilter(TimelineFilterUtils.createHBaseFilterList(
-            FlowRunColumnPrefix.METRIC, metricsToRetrieve));
-        infoColFamilyList.addFilter(columnsList);
-        list.addFilter(infoColFamilyList);
-      }
-    }
-    return list;
-  }
-
-  @Override
-  protected Result getResult(Configuration hbaseConf, Connection conn,
-      FilterList filterList) throws IOException {
-    TimelineReaderContext context = getContext();
-    FlowRunRowKey flowRunRowKey =
-        new FlowRunRowKey(context.getClusterId(), context.getUserId(),
-            context.getFlowName(), context.getFlowRunId());
-    byte[] rowKey = flowRunRowKey.getRowKey();
-    Get get = new Get(rowKey);
-    get.setMaxVersions(Integer.MAX_VALUE);
-    if (filterList != null && !filterList.getFilters().isEmpty()) {
-      get.setFilter(filterList);
-    }
-    return getTable().getResult(hbaseConf, conn, get);
-  }
-
-  @Override
-  protected ResultScanner getResults(Configuration hbaseConf, Connection conn,
-      FilterList filterList) throws IOException {
-    Scan scan = new Scan();
-    TimelineReaderContext context = getContext();
-    RowKeyPrefix<FlowRunRowKey> flowRunRowKeyPrefix = null;
-    if (getFilters().getFromId() == null) {
-      flowRunRowKeyPrefix = new FlowRunRowKeyPrefix(context.getClusterId(),
-          context.getUserId(), context.getFlowName());
-      scan.setRowPrefixFilter(flowRunRowKeyPrefix.getRowKeyPrefix());
-    } else {
-      FlowRunRowKey flowRunRowKey = null;
-      try {
-        flowRunRowKey =
-            FlowRunRowKey.parseRowKeyFromString(getFilters().getFromId());
-      } catch (IllegalArgumentException e) {
-        throw new BadRequestException("Invalid filter fromid is provided.");
-      }
-      if (!context.getClusterId().equals(flowRunRowKey.getClusterId())) {
-        throw new BadRequestException(
-            "fromid doesn't belong to clusterId=" + context.getClusterId());
-      }
-      // set start row
-      scan.setStartRow(flowRunRowKey.getRowKey());
-
-      // get the bytes for stop row
-      flowRunRowKeyPrefix = new FlowRunRowKeyPrefix(context.getClusterId(),
-          context.getUserId(), context.getFlowName());
-
-      // set stop row
-      scan.setStopRow(
-          HBaseTimelineStorageUtils.calculateTheClosestNextRowKeyForPrefix(
-              flowRunRowKeyPrefix.getRowKeyPrefix()));
-    }
-
-    FilterList newList = new FilterList();
-    newList.addFilter(new PageFilter(getFilters().getLimit()));
-    if (filterList != null && !filterList.getFilters().isEmpty()) {
-      newList.addFilter(filterList);
-    }
-    scan.setFilter(newList);
-    scan.setMaxVersions(Integer.MAX_VALUE);
-    return getTable().getResultScanner(hbaseConf, conn, scan);
-  }
-
-  @Override
-  protected TimelineEntity parseEntity(Result result) throws IOException {
-    FlowRunEntity flowRun = new FlowRunEntity();
-    FlowRunRowKey rowKey = FlowRunRowKey.parseRowKey(result.getRow());
-    flowRun.setRunId(rowKey.getFlowRunId());
-    flowRun.setUser(rowKey.getUserId());
-    flowRun.setName(rowKey.getFlowName());
-
-    // read the start time
-    Long startTime = (Long) FlowRunColumn.MIN_START_TIME.readResult(result);
-    if (startTime != null) {
-      flowRun.setStartTime(startTime.longValue());
-    }
-
-    // read the end time if available
-    Long endTime = (Long) FlowRunColumn.MAX_END_TIME.readResult(result);
-    if (endTime != null) {
-      flowRun.setMaxEndTime(endTime.longValue());
-    }
-
-    // read the flow version
-    String version = (String) FlowRunColumn.FLOW_VERSION.readResult(result);
-    if (version != null) {
-      flowRun.setVersion(version);
-    }
-
-    // read metrics if it's a single entity query or if METRICS are part of
-    // fieldsToRetrieve.
-    if (isSingleEntityRead()
-        || hasField(getDataToRetrieve().getFieldsToRetrieve(), Field.METRICS)) {
-      readMetrics(flowRun, result, FlowRunColumnPrefix.METRIC);
-    }
-
-    // set the id
-    flowRun.setId(flowRun.getId());
-    flowRun.getInfo().put(TimelineReaderUtils.FROMID_KEY,
-        rowKey.getRowKeyAsString());
-    return flowRun;
-  }
-}
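
The fromid handling above is cursor-style pagination: each returned entity carries its own row key as the FROMID info value, the client passes one back, and the next scan resumes from that row (inclusive), stopping at the end of the key range shared by all rows in scope. A rough sketch of the shape, with hypothetical parseRowKey and closestRowAfterPrefix helpers standing in for the FlowRunRowKey and FlowRunRowKeyPrefix classes used here:

    import org.apache.hadoop.hbase.client.Scan;

    // parseRowKey and closestRowAfterPrefix are hypothetical helpers.
    static Scan buildPageScan(String fromId, byte[] scopePrefix) {
      Scan scan = new Scan();
      if (fromId == null) {
        // First page: scan the whole scope.
        scan.setRowPrefixFilter(scopePrefix);
      } else {
        // Resume at the cursor, stop at the end of the scope.
        scan.setStartRow(parseRowKey(fromId));
        scan.setStopRow(closestRowAfterPrefix(scopePrefix));
      }
      return scan;
    }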

http://git-wip-us.apache.org/repos/asf/hadoop/blob/9af30d46/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/reader/GenericEntityReader.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/reader/GenericEntityReader.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/reader/GenericEntityReader.java
deleted file mode 100644
index 3a44445..0000000
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/reader/GenericEntityReader.java
+++ /dev/null
@@ -1,651 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.yarn.server.timelineservice.storage.reader;
-
-import java.io.IOException;
-import java.util.EnumSet;
-import java.util.Iterator;
-import java.util.Map;
-import java.util.Set;
-
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hbase.client.Connection;
-import org.apache.hadoop.hbase.client.Get;
-import org.apache.hadoop.hbase.client.Query;
-import org.apache.hadoop.hbase.client.Result;
-import org.apache.hadoop.hbase.client.ResultScanner;
-import org.apache.hadoop.hbase.client.Scan;
-import org.apache.hadoop.hbase.filter.BinaryComparator;
-import org.apache.hadoop.hbase.filter.CompareFilter.CompareOp;
-import org.apache.hadoop.hbase.filter.FamilyFilter;
-import org.apache.hadoop.hbase.filter.FilterList;
-import org.apache.hadoop.hbase.filter.FilterList.Operator;
-import org.apache.hadoop.hbase.filter.PageFilter;
-import org.apache.hadoop.hbase.filter.QualifierFilter;
-import org.apache.hadoop.yarn.api.records.timelineservice.TimelineEntity;
-import org.apache.hadoop.yarn.server.timelineservice.reader.TimelineDataToRetrieve;
-import org.apache.hadoop.yarn.server.timelineservice.reader.TimelineEntityFilters;
-import org.apache.hadoop.yarn.server.timelineservice.reader.TimelineReaderContext;
-import org.apache.hadoop.yarn.server.timelineservice.reader.TimelineReaderUtils;
-import org.apache.hadoop.yarn.server.timelineservice.reader.filter.TimelineFilterList;
-import org.apache.hadoop.yarn.server.timelineservice.reader.filter.TimelineFilterUtils;
-import org.apache.hadoop.yarn.server.timelineservice.storage.TimelineReader.Field;
-import org.apache.hadoop.yarn.server.timelineservice.storage.common.BaseTable;
-import org.apache.hadoop.yarn.server.timelineservice.storage.common.ColumnPrefix;
-import org.apache.hadoop.yarn.server.timelineservice.storage.common.HBaseTimelineStorageUtils;
-import org.apache.hadoop.yarn.server.timelineservice.storage.common.KeyConverter;
-import org.apache.hadoop.yarn.server.timelineservice.storage.common.RowKeyPrefix;
-import org.apache.hadoop.yarn.server.timelineservice.storage.common.StringKeyConverter;
-import org.apache.hadoop.yarn.server.timelineservice.storage.common.TimelineStorageUtils;
-import org.apache.hadoop.yarn.server.timelineservice.storage.entity.EntityColumn;
-import org.apache.hadoop.yarn.server.timelineservice.storage.entity.EntityColumnFamily;
-import org.apache.hadoop.yarn.server.timelineservice.storage.entity.EntityColumnPrefix;
-import org.apache.hadoop.yarn.server.timelineservice.storage.entity.EntityRowKey;
-import org.apache.hadoop.yarn.server.timelineservice.storage.entity.EntityRowKeyPrefix;
-import org.apache.hadoop.yarn.server.timelineservice.storage.entity.EntityTable;
-import org.apache.hadoop.yarn.webapp.BadRequestException;
-
-import com.google.common.base.Preconditions;
-
-/**
- * Timeline entity reader for generic entities that are stored in the entity
- * table.
- */
-class GenericEntityReader extends TimelineEntityReader {
-  private static final EntityTable ENTITY_TABLE = new EntityTable();
-
-  /**
-   * Used to convert strings key components to and from storage format.
-   */
-  private final KeyConverter<String> stringKeyConverter =
-      new StringKeyConverter();
-
-  public GenericEntityReader(TimelineReaderContext ctxt,
-      TimelineEntityFilters entityFilters, TimelineDataToRetrieve toRetrieve) {
-    super(ctxt, entityFilters, toRetrieve);
-  }
-
-  public GenericEntityReader(TimelineReaderContext ctxt,
-      TimelineDataToRetrieve toRetrieve) {
-    super(ctxt, toRetrieve);
-  }
-
-  /**
-   * Uses the {@link EntityTable}.
-   */
-  protected BaseTable<?> getTable() {
-    return ENTITY_TABLE;
-  }
-
-  @Override
-  protected FilterList constructFilterListBasedOnFilters() throws IOException {
-    // Filters here cannot be null for multiple entity reads as they are set in
-    // augmentParams if null.
-    FilterList listBasedOnFilters = new FilterList();
-    TimelineEntityFilters filters = getFilters();
-    // Create filter list based on created time range and add it to
-    // listBasedOnFilters.
-    long createdTimeBegin = filters.getCreatedTimeBegin();
-    long createdTimeEnd = filters.getCreatedTimeEnd();
-    if (createdTimeBegin != 0 || createdTimeEnd != Long.MAX_VALUE) {
-      listBasedOnFilters.addFilter(TimelineFilterUtils
-          .createSingleColValueFiltersByRange(EntityColumn.CREATED_TIME,
-              createdTimeBegin, createdTimeEnd));
-    }
-    // Create filter list based on metric filters and add it to
-    // listBasedOnFilters.
-    TimelineFilterList metricFilters = filters.getMetricFilters();
-    if (metricFilters != null && !metricFilters.getFilterList().isEmpty()) {
-      listBasedOnFilters.addFilter(TimelineFilterUtils.createHBaseFilterList(
-          EntityColumnPrefix.METRIC, metricFilters));
-    }
-    // Create filter list based on config filters and add it to
-    // listBasedOnFilters.
-    TimelineFilterList configFilters = filters.getConfigFilters();
-    if (configFilters != null && !configFilters.getFilterList().isEmpty()) {
-      listBasedOnFilters.addFilter(TimelineFilterUtils.createHBaseFilterList(
-          EntityColumnPrefix.CONFIG, configFilters));
-    }
-    // Create filter list based on info filters and add it to listBasedOnFilters
-    TimelineFilterList infoFilters = filters.getInfoFilters();
-    if (infoFilters != null && !infoFilters.getFilterList().isEmpty()) {
-      listBasedOnFilters.addFilter(TimelineFilterUtils.createHBaseFilterList(
-          EntityColumnPrefix.INFO, infoFilters));
-    }
-    return listBasedOnFilters;
-  }
-
-  /**
-   * Check if we need to fetch only some of the event columns.
-   *
-   * @return true if we need to fetch some of the columns, false otherwise.
-   */
-  protected boolean fetchPartialEventCols(TimelineFilterList eventFilters,
-      EnumSet<Field> fieldsToRetrieve) {
-    return (eventFilters != null && !eventFilters.getFilterList().isEmpty() &&
-        !hasField(fieldsToRetrieve, Field.EVENTS));
-  }
-
-  /**
-   * Check if we need to fetch only some of the relates_to columns.
-   *
-   * @return true if we need to fetch some of the columns, false otherwise.
-   */
-  protected boolean fetchPartialRelatesToCols(TimelineFilterList relatesTo,
-      EnumSet<Field> fieldsToRetrieve) {
-    return (relatesTo != null && !relatesTo.getFilterList().isEmpty() &&
-        !hasField(fieldsToRetrieve, Field.RELATES_TO));
-  }
-
-  /**
-   * Check if we need to fetch only some of the is_related_to columns.
-   *
-   * @return true if we need to fetch some of the columns, false otherwise.
-   */
-  private boolean fetchPartialIsRelatedToCols(TimelineFilterList isRelatedTo,
-      EnumSet<Field> fieldsToRetrieve) {
-    return (isRelatedTo != null && !isRelatedTo.getFilterList().isEmpty() &&
-        !hasField(fieldsToRetrieve, Field.IS_RELATED_TO));
-  }
-
-  /**
-   * Check if we need to fetch only some of the columns based on event filters,
-   * relatesTo and isRelatedTo from the info family.
-   *
-   * @return true, if we need to fetch only some of the columns, false if we
-   *         need to fetch all the columns under info column family.
-   */
-  protected boolean fetchPartialColsFromInfoFamily() {
-    EnumSet<Field> fieldsToRetrieve = getDataToRetrieve().getFieldsToRetrieve();
-    TimelineEntityFilters filters = getFilters();
-    return fetchPartialEventCols(filters.getEventFilters(), fieldsToRetrieve)
-        || fetchPartialRelatesToCols(filters.getRelatesTo(), fieldsToRetrieve)
-        || fetchPartialIsRelatedToCols(filters.getIsRelatedTo(),
-            fieldsToRetrieve);
-  }
-
-  /**
-   * Check if we need to create a filter list based on fields. A filter list
-   * is needed iff not all fields are to be retrieved, or specific configs or
-   * metrics are requested. It is also needed when relationship
-   * (relatesTo/isRelatedTo) or event filters are specified for the query.
-   *
-   * @return true if we need to create the filter list, false otherwise.
-   */
-  protected boolean needCreateFilterListBasedOnFields() {
-    TimelineDataToRetrieve dataToRetrieve = getDataToRetrieve();
-    // Check if all fields are to be retrieved or not. If all fields have to
-    // be retrieved, also check if we have some metrics or configs to
-    // retrieve specified for the query because then a filter list will have
-    // to be created.
-    boolean flag =
-        !dataToRetrieve.getFieldsToRetrieve().contains(Field.ALL)
-            || (dataToRetrieve.getConfsToRetrieve() != null && !dataToRetrieve
-                .getConfsToRetrieve().getFilterList().isEmpty())
-            || (dataToRetrieve.getMetricsToRetrieve() != null && !dataToRetrieve
-                .getMetricsToRetrieve().getFilterList().isEmpty());
-    // Filters need to be checked only if we are reading multiple entities. If
-    // condition above is false, we check if there are relationships(relatesTo/
-    // isRelatedTo) and event filters specified for the query.
-    if (!flag && !isSingleEntityRead()) {
-      TimelineEntityFilters filters = getFilters();
-      flag =
-          (filters.getEventFilters() != null && !filters.getEventFilters()
-              .getFilterList().isEmpty())
-              || (filters.getIsRelatedTo() != null && !filters.getIsRelatedTo()
-                  .getFilterList().isEmpty())
-              || (filters.getRelatesTo() != null && !filters.getRelatesTo()
-                  .getFilterList().isEmpty());
-    }
-    return flag;
-  }
-
-  /**
-   * Add {@link QualifierFilter} filters to filter list for each column of
-   * entity table.
-   *
-   * @param list filter list to which qualifier filters have to be added.
-   */
-  protected void updateFixedColumns(FilterList list) {
-    for (EntityColumn column : EntityColumn.values()) {
-      list.addFilter(new QualifierFilter(CompareOp.EQUAL, new BinaryComparator(
-          column.getColumnQualifierBytes())));
-    }
-  }
-
-  /**
-   * Creates a filter list which indicates that only some of the column
-   * qualifiers in the info column family will be returned in the result.
-   *
-   * @return filter list.
-   * @throws IOException if any problem occurs while creating filter list.
-   */
-  private FilterList createFilterListForColsOfInfoFamily() throws IOException {
-    FilterList infoFamilyColsFilter = new FilterList(Operator.MUST_PASS_ONE);
-    // Add filters for each column in entity table.
-    updateFixedColumns(infoFamilyColsFilter);
-    EnumSet<Field> fieldsToRetrieve = getDataToRetrieve().getFieldsToRetrieve();
-    // If INFO field has to be retrieved, add a filter for fetching columns
-    // with INFO column prefix.
-    if (hasField(fieldsToRetrieve, Field.INFO)) {
-      infoFamilyColsFilter
-          .addFilter(TimelineFilterUtils.createHBaseQualifierFilter(
-              CompareOp.EQUAL, EntityColumnPrefix.INFO));
-    }
-    TimelineFilterList relatesTo = getFilters().getRelatesTo();
-    if (hasField(fieldsToRetrieve, Field.RELATES_TO)) {
-      // If RELATES_TO field has to be retrieved, add a filter for fetching
-      // columns with RELATES_TO column prefix.
-      infoFamilyColsFilter.addFilter(TimelineFilterUtils
-          .createHBaseQualifierFilter(CompareOp.EQUAL,
-              EntityColumnPrefix.RELATES_TO));
-    } else if (relatesTo != null && !relatesTo.getFilterList().isEmpty()) {
-      // Even if fields to retrieve does not contain RELATES_TO, we still
-      // need to have a filter to fetch some of the column qualifiers if
-      // relatesTo filters are specified. relatesTo filters will then be
-      // matched after fetching rows from HBase.
-      Set<String> relatesToCols =
-          TimelineFilterUtils.fetchColumnsFromFilterList(relatesTo);
-      infoFamilyColsFilter.addFilter(createFiltersFromColumnQualifiers(
-          EntityColumnPrefix.RELATES_TO, relatesToCols));
-    }
-    TimelineFilterList isRelatedTo = getFilters().getIsRelatedTo();
-    if (hasField(fieldsToRetrieve, Field.IS_RELATED_TO)) {
-      // If IS_RELATED_TO field has to be retrieved, add a filter for fetching
-      // columns with IS_RELATED_TO column prefix.
-      infoFamilyColsFilter.addFilter(TimelineFilterUtils
-          .createHBaseQualifierFilter(CompareOp.EQUAL,
-              EntityColumnPrefix.IS_RELATED_TO));
-    } else if (isRelatedTo != null && !isRelatedTo.getFilterList().isEmpty()) {
-      // Even if fields to retrieve does not contain IS_RELATED_TO, we still
-      // need to have a filter to fetch some of the column qualifiers if
-      // isRelatedTo filters are specified. isRelatedTo filters will then be
-      // matched after fetching rows from HBase.
-      Set<String> isRelatedToCols =
-          TimelineFilterUtils.fetchColumnsFromFilterList(isRelatedTo);
-      infoFamilyColsFilter.addFilter(createFiltersFromColumnQualifiers(
-          EntityColumnPrefix.IS_RELATED_TO, isRelatedToCols));
-    }
-    TimelineFilterList eventFilters = getFilters().getEventFilters();
-    if (hasField(fieldsToRetrieve, Field.EVENTS)) {
-      // If EVENTS field has to be retrieved, add a filter for fetching columns
-      // with EVENT column prefix.
-      infoFamilyColsFilter
-          .addFilter(TimelineFilterUtils.createHBaseQualifierFilter(
-              CompareOp.EQUAL, EntityColumnPrefix.EVENT));
-    } else if (eventFilters != null &&
-        !eventFilters.getFilterList().isEmpty()) {
-      // Even if fields to retrieve does not contain EVENTS, we still need to
-      // have a filter to fetch some of the column qualifiers on the basis of
-      // event filters specified. Event filters will then be matched after
-      // fetching rows from HBase.
-      Set<String> eventCols =
-          TimelineFilterUtils.fetchColumnsFromFilterList(eventFilters);
-      infoFamilyColsFilter.addFilter(createFiltersFromColumnQualifiers(
-          EntityColumnPrefix.EVENT, eventCols));
-    }
-    return infoFamilyColsFilter;
-  }
-
-  /**
-   * Excludes, via filters, the column prefixes from the info column family
-   * that are not required (based on fields to retrieve). These filters are
-   * added to the filter list that contains a filter for the info column
-   * family.
-   *
-   * @param infoColFamilyList filter list for info column family.
-   */
-  private void excludeFieldsFromInfoColFamily(FilterList infoColFamilyList) {
-    EnumSet<Field> fieldsToRetrieve = getDataToRetrieve().getFieldsToRetrieve();
-    // Events not required.
-    if (!hasField(fieldsToRetrieve, Field.EVENTS)) {
-      infoColFamilyList.addFilter(TimelineFilterUtils
-          .createHBaseQualifierFilter(CompareOp.NOT_EQUAL,
-              EntityColumnPrefix.EVENT));
-    }
-    // info not required.
-    if (!hasField(fieldsToRetrieve, Field.INFO)) {
-      infoColFamilyList.addFilter(TimelineFilterUtils
-          .createHBaseQualifierFilter(CompareOp.NOT_EQUAL,
-              EntityColumnPrefix.INFO));
-    }
-    // is related to not required.
-    if (!hasField(fieldsToRetrieve, Field.IS_RELATED_TO)) {
-      infoColFamilyList.addFilter(TimelineFilterUtils
-          .createHBaseQualifierFilter(CompareOp.NOT_EQUAL,
-              EntityColumnPrefix.IS_RELATED_TO));
-    }
-    // relates to not required.
-    if (!hasField(fieldsToRetrieve, Field.RELATES_TO)) {
-      infoColFamilyList.addFilter(TimelineFilterUtils
-          .createHBaseQualifierFilter(CompareOp.NOT_EQUAL,
-              EntityColumnPrefix.RELATES_TO));
-    }
-  }
-
-  /**
-   * Updates filter list based on fields for confs and metrics to retrieve.
-   *
-   * @param listBasedOnFields filter list based on fields.
-   * @throws IOException if any problem occurs while updating filter list.
-   */
-  private void updateFilterForConfsAndMetricsToRetrieve(
-      FilterList listBasedOnFields) throws IOException {
-    TimelineDataToRetrieve dataToRetrieve = getDataToRetrieve();
-    // Please note that if confsToRetrieve is specified, we would have added
-    // CONFS to fields to retrieve in augmentParams() even if not specified.
-    if (dataToRetrieve.getFieldsToRetrieve().contains(Field.CONFIGS)) {
-      // Create a filter list for configs.
-      listBasedOnFields.addFilter(TimelineFilterUtils
-          .createFilterForConfsOrMetricsToRetrieve(
-              dataToRetrieve.getConfsToRetrieve(), EntityColumnFamily.CONFIGS,
-              EntityColumnPrefix.CONFIG));
-    }
-
-    // Please note that if metricsToRetrieve is specified, we would have added
-    // METRICS to fields to retrieve in augmentParams() even if not specified.
-    if (dataToRetrieve.getFieldsToRetrieve().contains(Field.METRICS)) {
-      // Create a filter list for metrics.
-      listBasedOnFields.addFilter(TimelineFilterUtils
-          .createFilterForConfsOrMetricsToRetrieve(
-              dataToRetrieve.getMetricsToRetrieve(),
-              EntityColumnFamily.METRICS, EntityColumnPrefix.METRIC));
-    }
-  }
-
-  @Override
-  protected FilterList constructFilterListBasedOnFields() throws IOException {
-    if (!needCreateFilterListBasedOnFields()) {
-      // Fetch all the columns. No need of a filter.
-      return null;
-    }
-    FilterList listBasedOnFields = new FilterList(Operator.MUST_PASS_ONE);
-    FilterList infoColFamilyList = new FilterList();
-    // By default fetch everything in INFO column family.
-    FamilyFilter infoColumnFamily =
-        new FamilyFilter(CompareOp.EQUAL, new BinaryComparator(
-            EntityColumnFamily.INFO.getBytes()));
-    infoColFamilyList.addFilter(infoColumnFamily);
-    if (!isSingleEntityRead() && fetchPartialColsFromInfoFamily()) {
-      // We can fetch only some of the columns from info family.
-      infoColFamilyList.addFilter(createFilterListForColsOfInfoFamily());
-    } else {
-      // Exclude column prefixes in info column family which are not required
-      // based on fields to retrieve.
-      excludeFieldsFromInfoColFamily(infoColFamilyList);
-    }
-    listBasedOnFields.addFilter(infoColFamilyList);
-    updateFilterForConfsAndMetricsToRetrieve(listBasedOnFields);
-    return listBasedOnFields;
-  }
-
-  @Override
-  protected void validateParams() {
-    Preconditions.checkNotNull(getContext(), "context shouldn't be null");
-    Preconditions.checkNotNull(getDataToRetrieve(),
-        "data to retrieve shouldn't be null");
-    Preconditions.checkNotNull(getContext().getClusterId(),
-        "clusterId shouldn't be null");
-    Preconditions.checkNotNull(getContext().getAppId(),
-        "appId shouldn't be null");
-    Preconditions.checkNotNull(getContext().getEntityType(),
-        "entityType shouldn't be null");
-    if (isSingleEntityRead()) {
-      Preconditions.checkNotNull(getContext().getEntityId(),
-          "entityId shouldn't be null");
-    }
-  }
-
-  @Override
-  protected void augmentParams(Configuration hbaseConf, Connection conn)
-      throws IOException {
-    defaultAugmentParams(hbaseConf, conn);
-    // Add configs/metrics to fields to retrieve if confsToRetrieve and/or
-    // metricsToRetrieve are specified.
-    getDataToRetrieve().addFieldsBasedOnConfsAndMetricsToRetrieve();
-    if (!isSingleEntityRead()) {
-      createFiltersIfNull();
-    }
-  }
-
-  @Override
-  protected Result getResult(Configuration hbaseConf, Connection conn,
-      FilterList filterList) throws IOException {
-    TimelineReaderContext context = getContext();
-    Result result = null;
-    if (context.getEntityIdPrefix() != null) {
-      byte[] rowKey = new EntityRowKey(context.getClusterId(),
-          context.getUserId(), context.getFlowName(), context.getFlowRunId(),
-          context.getAppId(), context.getEntityType(),
-          context.getEntityIdPrefix(), context.getEntityId()).getRowKey();
-      Get get = new Get(rowKey);
-      setMetricsTimeRange(get);
-      get.setMaxVersions(getDataToRetrieve().getMetricsLimit());
-      if (filterList != null && !filterList.getFilters().isEmpty()) {
-        get.setFilter(filterList);
-      }
-      result = getTable().getResult(hbaseConf, conn, get);
-
-    } else {
-      // Prepare for range scan
-      // create single SingleColumnValueFilter and add to existing filters.
-      FilterList filter = new FilterList(Operator.MUST_PASS_ALL);
-      if (filterList != null && !filterList.getFilters().isEmpty()) {
-        filter.addFilter(filterList);
-      }
-      FilterList newFilter = new FilterList();
-      newFilter.addFilter(TimelineFilterUtils.createHBaseSingleColValueFilter(
-          EntityColumn.ID, context.getEntityId(), CompareOp.EQUAL));
-      newFilter.addFilter(new PageFilter(1));
-      filter.addFilter(newFilter);
-
-      ResultScanner results = getResults(hbaseConf, conn, filter);
-      try {
-        Iterator<Result> iterator = results.iterator();
-        if (iterator.hasNext()) {
-          result = iterator.next();
-        }
-      } finally {
-        results.close();
-      }
-    }
-    return result;
-  }
-
-  private void setMetricsTimeRange(Query query) {
-    // Set time range for metric values.
-    HBaseTimelineStorageUtils.setMetricsTimeRange(
-        query, EntityColumnFamily.METRICS.getBytes(),
-        getDataToRetrieve().getMetricsTimeBegin(),
-        getDataToRetrieve().getMetricsTimeEnd());
-  }
-
-  @Override
-  protected ResultScanner getResults(Configuration hbaseConf, Connection conn,
-      FilterList filterList) throws IOException {
-    // Scan through part of the table to find the entities belonging to one
-    // app and one type.
-    Scan scan = new Scan();
-    TimelineReaderContext context = getContext();
-    RowKeyPrefix<EntityRowKey> entityRowKeyPrefix = null;
-    // Default mode: always scan from the beginning of the entity type.
-    if (getFilters() == null || getFilters().getFromId() == null) {
-      entityRowKeyPrefix = new EntityRowKeyPrefix(context.getClusterId(),
-          context.getUserId(), context.getFlowName(), context.getFlowRunId(),
-          context.getAppId(), context.getEntityType(), null, null);
-      scan.setRowPrefixFilter(entityRowKeyPrefix.getRowKeyPrefix());
-    } else { // Pagination mode: scan from the given entityIdPrefix!entityId.
-
-      EntityRowKey entityRowKey = null;
-      try {
-        entityRowKey =
-            EntityRowKey.parseRowKeyFromString(getFilters().getFromId());
-      } catch (IllegalArgumentException e) {
-        throw new BadRequestException("Invalid filter fromid is provided.");
-      }
-      if (!context.getClusterId().equals(entityRowKey.getClusterId())) {
-        throw new BadRequestException(
-            "fromid doesn't belong to clusterId=" + context.getClusterId());
-      }
-
-      // set start row
-      scan.setStartRow(entityRowKey.getRowKey());
-
-      // get the bytes for stop row
-      entityRowKeyPrefix = new EntityRowKeyPrefix(context.getClusterId(),
-          context.getUserId(), context.getFlowName(), context.getFlowRunId(),
-          context.getAppId(), context.getEntityType(), null, null);
-
-      // set stop row
-      scan.setStopRow(
-          HBaseTimelineStorageUtils.calculateTheClosestNextRowKeyForPrefix(
-              entityRowKeyPrefix.getRowKeyPrefix()));
-
-      // Set page filter to the limit. This filter has to be set only in
-      // pagination mode.
-      filterList.addFilter(new PageFilter(getFilters().getLimit()));
-    }
-    setMetricsTimeRange(scan);
-    scan.setMaxVersions(getDataToRetrieve().getMetricsLimit());
-    if (filterList != null && !filterList.getFilters().isEmpty()) {
-      scan.setFilter(filterList);
-    }
-    return getTable().getResultScanner(hbaseConf, conn, scan);
-  }
-
-  @Override
-  protected TimelineEntity parseEntity(Result result) throws IOException {
-    if (result == null || result.isEmpty()) {
-      return null;
-    }
-    TimelineEntity entity = new TimelineEntity();
-    EntityRowKey parseRowKey = EntityRowKey.parseRowKey(result.getRow());
-    entity.setType(parseRowKey.getEntityType());
-    entity.setId(parseRowKey.getEntityId());
-    entity.setIdPrefix(parseRowKey.getEntityIdPrefix().longValue());
-
-    TimelineEntityFilters filters = getFilters();
-    // fetch created time
-    Long createdTime = (Long) EntityColumn.CREATED_TIME.readResult(result);
-    entity.setCreatedTime(createdTime);
-
-    EnumSet<Field> fieldsToRetrieve = getDataToRetrieve().getFieldsToRetrieve();
-    // Fetch isRelatedTo entities and match the isRelatedTo filter. If the
-    // filter does not match, the entity is dropped. We have to match the
-    // filter locally because no HBase filters are set in the scan to filter
-    // out rows on the basis of isRelatedTo.
-    boolean checkIsRelatedTo =
-        !isSingleEntityRead() && filters.getIsRelatedTo() != null
-            && filters.getIsRelatedTo().getFilterList().size() > 0;
-    if (hasField(fieldsToRetrieve, Field.IS_RELATED_TO) || checkIsRelatedTo) {
-      readRelationship(entity, result, EntityColumnPrefix.IS_RELATED_TO, true);
-      if (checkIsRelatedTo
-          && !TimelineStorageUtils.matchIsRelatedTo(entity,
-              filters.getIsRelatedTo())) {
-        return null;
-      }
-      if (!hasField(fieldsToRetrieve, Field.IS_RELATED_TO)) {
-        entity.getIsRelatedToEntities().clear();
-      }
-    }
-
-    // Fetch relatesTo entities and match the relatesTo filter. If the filter
-    // does not match, the entity is dropped. We have to match the filter
-    // locally because no HBase filters are set in the scan to filter out
-    // rows on the basis of relatesTo.
-    boolean checkRelatesTo =
-        !isSingleEntityRead() && filters.getRelatesTo() != null
-            && filters.getRelatesTo().getFilterList().size() > 0;
-    if (hasField(fieldsToRetrieve, Field.RELATES_TO)
-        || checkRelatesTo) {
-      readRelationship(entity, result, EntityColumnPrefix.RELATES_TO, false);
-      if (checkRelatesTo
-          && !TimelineStorageUtils.matchRelatesTo(entity,
-              filters.getRelatesTo())) {
-        return null;
-      }
-      if (!hasField(fieldsToRetrieve, Field.RELATES_TO)) {
-        entity.getRelatesToEntities().clear();
-      }
-    }
-
-    // fetch info if fieldsToRetrieve contains INFO or ALL.
-    if (hasField(fieldsToRetrieve, Field.INFO)) {
-      readKeyValuePairs(entity, result, EntityColumnPrefix.INFO, false);
-    }
-
-    // fetch configs if fieldsToRetrieve contains CONFIGS or ALL.
-    if (hasField(fieldsToRetrieve, Field.CONFIGS)) {
-      readKeyValuePairs(entity, result, EntityColumnPrefix.CONFIG, true);
-    }
-
-    // Fetch events and match event filters if they exist. If the filters do
-    // not match, the entity is dropped. We have to match the filters locally
-    // because no HBase filters are set in the scan to filter out rows on
-    // the basis of events.
-    boolean checkEvents =
-        !isSingleEntityRead() && filters.getEventFilters() != null
-            && filters.getEventFilters().getFilterList().size() > 0;
-    if (hasField(fieldsToRetrieve, Field.EVENTS) || checkEvents) {
-      readEvents(entity, result, EntityColumnPrefix.EVENT);
-      if (checkEvents
-          && !TimelineStorageUtils.matchEventFilters(entity,
-              filters.getEventFilters())) {
-        return null;
-      }
-      if (!hasField(fieldsToRetrieve, Field.EVENTS)) {
-        entity.getEvents().clear();
-      }
-    }
-
-    // fetch metrics if fieldsToRetrieve contains METRICS or ALL.
-    if (hasField(fieldsToRetrieve, Field.METRICS)) {
-      readMetrics(entity, result, EntityColumnPrefix.METRIC);
-    }
-
-    entity.getInfo().put(TimelineReaderUtils.FROMID_KEY,
-        parseRowKey.getRowKeyAsString());
-    return entity;
-  }
-
-  /**
-   * Helper method for reading key-value pairs for either info or config.
-   *
-   * @param <T> Describes the type of column prefix.
-   * @param entity entity to fill.
-   * @param result result from HBase.
-   * @param prefix column prefix.
-   * @param isConfig if true, means we are reading configs, otherwise info.
-   * @throws IOException if any problem is encountered while reading result.
-   */
-  protected <T> void readKeyValuePairs(TimelineEntity entity, Result result,
-      ColumnPrefix<T> prefix, boolean isConfig) throws IOException {
-    // info and configuration are of type Map<String, Object or String>
-    Map<String, Object> columns =
-        prefix.readResults(result, stringKeyConverter);
-    if (isConfig) {
-      for (Map.Entry<String, Object> column : columns.entrySet()) {
-        entity.addConfig(column.getKey(), column.getValue().toString());
-      }
-    } else {
-      entity.addInfo(columns);
-    }
-  }
-}
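
constructFilterListBasedOnFields() above builds its scan predicate as a small boolean algebra over FilterList nodes: Operator.MUST_PASS_ONE behaves as OR, MUST_PASS_ALL (the default) as AND, with family, qualifier, and prefix comparators as the leaves. A condensed sketch of that shape, with the info family byte and the wanted qualifier prefixes passed in as assumptions rather than taken from the real schema:

    import org.apache.hadoop.hbase.filter.BinaryComparator;
    import org.apache.hadoop.hbase.filter.BinaryPrefixComparator;
    import org.apache.hadoop.hbase.filter.CompareFilter.CompareOp;
    import org.apache.hadoop.hbase.filter.FamilyFilter;
    import org.apache.hadoop.hbase.filter.FilterList;
    import org.apache.hadoop.hbase.filter.FilterList.Operator;
    import org.apache.hadoop.hbase.filter.QualifierFilter;

    // AND(family == infoFamily, OR(qualifier starts with a wanted prefix))
    static FilterList infoFamilyFilter(byte[] infoFamily,
        byte[]... wantedPrefixes) {
      FilterList and = new FilterList(Operator.MUST_PASS_ALL);
      and.addFilter(new FamilyFilter(CompareOp.EQUAL,
          new BinaryComparator(infoFamily)));
      FilterList or = new FilterList(Operator.MUST_PASS_ONE);
      for (byte[] prefix : wantedPrefixes) {
        or.addFilter(new QualifierFilter(CompareOp.EQUAL,
            new BinaryPrefixComparator(prefix)));
      }
      and.addFilter(or);
      return and;
    }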




[05/50] [abbrv] hadoop git commit: YARN-7919. Refactor timelineservice-hbase module into submodules. Contributed by Haibo Chen.

Posted by ha...@apache.org.
http://git-wip-us.apache.org/repos/asf/hadoop/blob/9af30d46/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/flow/FlowRunColumnPrefix.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/flow/FlowRunColumnPrefix.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/flow/FlowRunColumnPrefix.java
deleted file mode 100644
index f521cd7..0000000
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/flow/FlowRunColumnPrefix.java
+++ /dev/null
@@ -1,217 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.yarn.server.timelineservice.storage.flow;
-
-import java.io.IOException;
-import java.util.Map;
-import java.util.NavigableMap;
-
-import org.apache.hadoop.hbase.client.Result;
-import org.apache.hadoop.hbase.util.Bytes;
-import org.apache.hadoop.yarn.server.timelineservice.storage.common.ColumnFamily;
-import org.apache.hadoop.yarn.server.timelineservice.storage.common.ColumnHelper;
-import org.apache.hadoop.yarn.server.timelineservice.storage.common.ColumnPrefix;
-import org.apache.hadoop.yarn.server.timelineservice.storage.common.HBaseTimelineStorageUtils;
-import org.apache.hadoop.yarn.server.timelineservice.storage.common.KeyConverter;
-import org.apache.hadoop.yarn.server.timelineservice.storage.common.LongConverter;
-import org.apache.hadoop.yarn.server.timelineservice.storage.common.Separator;
-import org.apache.hadoop.yarn.server.timelineservice.storage.common.TypedBufferedMutator;
-import org.apache.hadoop.yarn.server.timelineservice.storage.common.ValueConverter;
-
-/**
- * Identifies partially qualified columns for the {@link FlowRunTable}.
- */
-public enum FlowRunColumnPrefix implements ColumnPrefix<FlowRunTable> {
-
-  /**
-   * To store flow run info values.
-   */
-  METRIC(FlowRunColumnFamily.INFO, "m", null, new LongConverter());
-
-  private final ColumnHelper<FlowRunTable> column;
-  private final ColumnFamily<FlowRunTable> columnFamily;
-
-  /**
-   * Can be null for those cases where the provided column qualifier is the
-   * entire column name.
-   */
-  private final String columnPrefix;
-  private final byte[] columnPrefixBytes;
-
-  private final AggregationOperation aggOp;
-
-  /**
-   * Private constructor, meant to be used by the enum definition.
-   *
-   * @param columnFamily that this column is stored in.
-   * @param columnPrefix for this column.
-   */
-  private FlowRunColumnPrefix(ColumnFamily<FlowRunTable> columnFamily,
-      String columnPrefix, AggregationOperation fra, ValueConverter converter) {
-    this(columnFamily, columnPrefix, fra, converter, false);
-  }
-
-  private FlowRunColumnPrefix(ColumnFamily<FlowRunTable> columnFamily,
-      String columnPrefix, AggregationOperation fra, ValueConverter converter,
-      boolean compoundColQual) {
-    column = new ColumnHelper<FlowRunTable>(columnFamily, converter, true);
-    this.columnFamily = columnFamily;
-    this.columnPrefix = columnPrefix;
-    if (columnPrefix == null) {
-      this.columnPrefixBytes = null;
-    } else {
-      // Future-proof by ensuring the right column prefix hygiene.
-      this.columnPrefixBytes =
-          Bytes.toBytes(Separator.SPACE.encode(columnPrefix));
-    }
-    this.aggOp = fra;
-  }
-
-  /**
-   * @return the column name value
-   */
-  public String getColumnPrefix() {
-    return columnPrefix;
-  }
-
-  public byte[] getColumnPrefixBytes() {
-    return columnPrefixBytes.clone();
-  }
-
-  @Override
-  public byte[] getColumnPrefixBytes(byte[] qualifierPrefix) {
-    return ColumnHelper.getColumnQualifier(this.columnPrefixBytes,
-        qualifierPrefix);
-  }
-
-  @Override
-  public byte[] getColumnPrefixBytes(String qualifierPrefix) {
-    return ColumnHelper.getColumnQualifier(this.columnPrefixBytes,
-        qualifierPrefix);
-  }
-
-  @Override
-  public byte[] getColumnFamilyBytes() {
-    return columnFamily.getBytes();
-  }
-
-  public AggregationOperation getAttribute() {
-    return aggOp;
-  }
-
-  /*
-   * (non-Javadoc)
-   *
-   * @see
-   * org.apache.hadoop.yarn.server.timelineservice.storage.common.ColumnPrefix
-   * #store(byte[],
-   * org.apache.hadoop.yarn.server.timelineservice.storage.common.
-   * TypedBufferedMutator, java.lang.String, java.lang.Long, java.lang.Object)
-   */
-  public void store(byte[] rowKey,
-      TypedBufferedMutator<FlowRunTable> tableMutator, String qualifier,
-      Long timestamp, Object inputValue, Attribute... attributes)
-      throws IOException {
-
-    // Null check
-    if (qualifier == null) {
-      throw new IOException("Cannot store column with null qualifier in "
-          + tableMutator.getName().getNameAsString());
-    }
-
-    byte[] columnQualifier = getColumnPrefixBytes(qualifier);
-    Attribute[] combinedAttributes =
-        HBaseTimelineStorageUtils.combineAttributes(attributes, this.aggOp);
-    column.store(rowKey, tableMutator, columnQualifier, timestamp, inputValue,
-        combinedAttributes);
-  }
-
-  /*
-   * (non-Javadoc)
-   *
-   * @see
-   * org.apache.hadoop.yarn.server.timelineservice.storage.common.ColumnPrefix
-   * #store(byte[],
-   * org.apache.hadoop.yarn.server.timelineservice.storage.common.
-   * TypedBufferedMutator, java.lang.String, java.lang.Long, java.lang.Object)
-   */
-  public void store(byte[] rowKey,
-      TypedBufferedMutator<FlowRunTable> tableMutator, byte[] qualifier,
-      Long timestamp, Object inputValue, Attribute... attributes)
-      throws IOException {
-
-    // Null check
-    if (qualifier == null) {
-      throw new IOException("Cannot store column with null qualifier in "
-          + tableMutator.getName().getNameAsString());
-    }
-
-    byte[] columnQualifier = getColumnPrefixBytes(qualifier);
-    Attribute[] combinedAttributes =
-        HBaseTimelineStorageUtils.combineAttributes(attributes, this.aggOp);
-    column.store(rowKey, tableMutator, columnQualifier, timestamp, inputValue,
-        combinedAttributes);
-  }
-
-  /*
-   * (non-Javadoc)
-   *
-   * @see
-   * org.apache.hadoop.yarn.server.timelineservice.storage.common.ColumnPrefix
-   * #readResult(org.apache.hadoop.hbase.client.Result, java.lang.String)
-   */
-  public Object readResult(Result result, String qualifier) throws IOException {
-    byte[] columnQualifier =
-        ColumnHelper.getColumnQualifier(this.columnPrefixBytes, qualifier);
-    return column.readResult(result, columnQualifier);
-  }
-
-  /*
-   * (non-Javadoc)
-   *
-   * @see
-   * org.apache.hadoop.yarn.server.timelineservice.storage.common.ColumnPrefix
-   * #readResults(org.apache.hadoop.hbase.client.Result,
-   * org.apache.hadoop.yarn.server.timelineservice.storage.common.KeyConverter)
-   */
-  public <K> Map<K, Object> readResults(Result result,
-      KeyConverter<K> keyConverter) throws IOException {
-    return column.readResults(result, columnPrefixBytes, keyConverter);
-  }
-
-  /*
-   * (non-Javadoc)
-   *
-   * @see
-   * org.apache.hadoop.yarn.server.timelineservice.storage.common.ColumnPrefix
-   * #readResultsWithTimestamps(org.apache.hadoop.hbase.client.Result,
-   * org.apache.hadoop.yarn.server.timelineservice.storage.common.KeyConverter)
-   */
-  public <K, V> NavigableMap<K, NavigableMap<Long, V>>
-      readResultsWithTimestamps(Result result, KeyConverter<K> keyConverter)
-      throws IOException {
-    return column.readResultsWithTimestamps(result, columnPrefixBytes,
-        keyConverter);
-  }
-
-  @Override
-  public ValueConverter getValueConverter() {
-    return column.getValueConverter();
-  }
-
-}

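FlowRunColumnPrefix prepends its prefix bytes (for example "m" for metrics) to every qualifier before a column is stored or read, which is how qualifiers like m!mapInputRecords in the flow run table example arise. A minimal sketch of that assembly, assuming a bare '!' separator byte and ignoring the escaping that the real Separator class performs:

    import java.nio.charset.StandardCharsets;

    public final class PrefixedQualifierSketch {
      // Simplification of Separator.QUALIFIERS; escaping is omitted here.
      private static final byte SEPARATOR = '!';

      static byte[] qualifier(byte[] prefixBytes, String name) {
        byte[] nameBytes = name.getBytes(StandardCharsets.UTF_8);
        byte[] out = new byte[prefixBytes.length + 1 + nameBytes.length];
        System.arraycopy(prefixBytes, 0, out, 0, prefixBytes.length);
        out[prefixBytes.length] = SEPARATOR;
        System.arraycopy(nameBytes, 0, out, prefixBytes.length + 1, nameBytes.length);
        return out;
      }

      public static void main(String[] args) {
        byte[] q = qualifier("m".getBytes(StandardCharsets.UTF_8), "mapInputRecords");
        System.out.println(new String(q, StandardCharsets.UTF_8)); // prints m!mapInputRecords
      }
    }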
http://git-wip-us.apache.org/repos/asf/hadoop/blob/9af30d46/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/flow/FlowRunCoprocessor.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/flow/FlowRunCoprocessor.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/flow/FlowRunCoprocessor.java
deleted file mode 100644
index 96a7cf3..0000000
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/flow/FlowRunCoprocessor.java
+++ /dev/null
@@ -1,277 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.yarn.server.timelineservice.storage.flow;
-
-import java.io.IOException;
-import java.util.ArrayList;
-import java.util.List;
-import java.util.Map;
-import java.util.NavigableMap;
-import java.util.TreeMap;
-
-import org.apache.hadoop.hbase.Cell;
-import org.apache.hadoop.hbase.CellUtil;
-import org.apache.hadoop.hbase.CoprocessorEnvironment;
-import org.apache.hadoop.hbase.HConstants;
-import org.apache.hadoop.hbase.KeyValue;
-import org.apache.hadoop.hbase.Tag;
-import org.apache.hadoop.hbase.client.Durability;
-import org.apache.hadoop.hbase.client.Get;
-import org.apache.hadoop.hbase.client.Put;
-import org.apache.hadoop.hbase.client.Scan;
-import org.apache.hadoop.hbase.coprocessor.BaseRegionObserver;
-import org.apache.hadoop.hbase.coprocessor.ObserverContext;
-import org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment;
-import org.apache.hadoop.hbase.regionserver.Region;
-import org.apache.hadoop.hbase.regionserver.InternalScanner;
-import org.apache.hadoop.hbase.regionserver.RegionScanner;
-import org.apache.hadoop.hbase.regionserver.ScanType;
-import org.apache.hadoop.hbase.regionserver.Store;
-import org.apache.hadoop.hbase.regionserver.StoreFile;
-import org.apache.hadoop.hbase.regionserver.compactions.CompactionRequest;
-import org.apache.hadoop.hbase.regionserver.wal.WALEdit;
-import org.apache.hadoop.hbase.util.Bytes;
-import org.apache.hadoop.yarn.server.timelineservice.storage.common.HBaseTimelineStorageUtils;
-import org.apache.hadoop.yarn.server.timelineservice.storage.common.TimestampGenerator;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-/**
- * Coprocessor for flow run table.
- */
-public class FlowRunCoprocessor extends BaseRegionObserver {
-
-  private static final Logger LOG =
-      LoggerFactory.getLogger(FlowRunCoprocessor.class);
-
-  private Region region;
-  /**
-   * Generates a timestamp that is unique per row within a region.
-   */
-  private final TimestampGenerator timestampGenerator =
-      new TimestampGenerator();
-
-  @Override
-  public void start(CoprocessorEnvironment e) throws IOException {
-    if (e instanceof RegionCoprocessorEnvironment) {
-      RegionCoprocessorEnvironment env = (RegionCoprocessorEnvironment) e;
-      this.region = env.getRegion();
-    }
-  }
-
-  /*
-   * (non-Javadoc)
-   *
-   * This method adds the tags onto the cells in the Put. It is presumed that
-   * all the cells in one Put have the same set of Tags. The existing cell
-   * timestamp is overwritten for non-metric cells and each such cell gets a new
-   * unique timestamp generated by {@link TimestampGenerator}
-   *
-   * @see
-   * org.apache.hadoop.hbase.coprocessor.BaseRegionObserver#prePut(org.apache
-   * .hadoop.hbase.coprocessor.ObserverContext,
-   * org.apache.hadoop.hbase.client.Put,
-   * org.apache.hadoop.hbase.regionserver.wal.WALEdit,
-   * org.apache.hadoop.hbase.client.Durability)
-   */
-  @Override
-  public void prePut(ObserverContext<RegionCoprocessorEnvironment> e, Put put,
-      WALEdit edit, Durability durability) throws IOException {
-    Map<String, byte[]> attributes = put.getAttributesMap();
-    // Assumption is that all the cells in a put are the same operation.
-    List<Tag> tags = new ArrayList<>();
-    if ((attributes != null) && (attributes.size() > 0)) {
-      for (Map.Entry<String, byte[]> attribute : attributes.entrySet()) {
-        Tag t = HBaseTimelineStorageUtils.getTagFromAttribute(attribute);
-        if (t != null) {
-          tags.add(t);
-        }
-      }
-      byte[] tagByteArray = Tag.fromList(tags);
-      NavigableMap<byte[], List<Cell>> newFamilyMap = new TreeMap<>(
-          Bytes.BYTES_COMPARATOR);
-      for (Map.Entry<byte[], List<Cell>> entry : put.getFamilyCellMap()
-          .entrySet()) {
-        List<Cell> newCells = new ArrayList<>(entry.getValue().size());
-        for (Cell cell : entry.getValue()) {
-          // for each cell in the put add the tags
-          // Assumption is that all the cells in
-          // one put are the same operation
-          // also, get a unique cell timestamp for non-metric cells
-          // this way we don't inadvertently overwrite cell versions
-          long cellTimestamp = getCellTimestamp(cell.getTimestamp(), tags);
-          newCells.add(CellUtil.createCell(CellUtil.cloneRow(cell),
-              CellUtil.cloneFamily(cell), CellUtil.cloneQualifier(cell),
-              cellTimestamp, KeyValue.Type.Put, CellUtil.cloneValue(cell),
-              tagByteArray));
-        }
-        newFamilyMap.put(entry.getKey(), newCells);
-      } // for each entry
-      // Update the family map for the Put
-      put.setFamilyCellMap(newFamilyMap);
-    }
-  }
-
-  /**
-   * Determines whether the current cell's timestamp is to be used or a new
-   * unique cell timestamp is to be used. This is done to avoid inadvertently
-   * overwriting cells when writes come in very fast. But for metric cells, the
-   * cell timestamp signifies the metric timestamp, so we don't want to
-   * overwrite it.
-   *
-   * @param timestamp timestamp of the incoming cell.
-   * @param tags tags set on the cell.
-   * @return cell timestamp
-   */
-  private long getCellTimestamp(long timestamp, List<Tag> tags) {
-    // if ts not set (hbase sets to HConstants.LATEST_TIMESTAMP by default)
-    // then use the generator
-    if (timestamp == HConstants.LATEST_TIMESTAMP) {
-      return timestampGenerator.getUniqueTimestamp();
-    } else {
-      return timestamp;
-    }
-  }
-
-  /*
-   * (non-Javadoc)
-   *
-   * Creates a {@link FlowScanner} Scan so that it can correctly process the
-   * contents of {@link FlowRunTable}.
-   *
-   * @see
-   * org.apache.hadoop.hbase.coprocessor.BaseRegionObserver#preGetOp(org.apache
-   * .hadoop.hbase.coprocessor.ObserverContext,
-   * org.apache.hadoop.hbase.client.Get, java.util.List)
-   */
-  @Override
-  public void preGetOp(ObserverContext<RegionCoprocessorEnvironment> e,
-      Get get, List<Cell> results) throws IOException {
-    Scan scan = new Scan(get);
-    scan.setMaxVersions();
-    RegionScanner scanner = null;
-    try {
-      scanner = new FlowScanner(e.getEnvironment(), scan,
-          region.getScanner(scan), FlowScannerOperation.READ);
-      scanner.next(results);
-      e.bypass();
-    } finally {
-      if (scanner != null) {
-        scanner.close();
-      }
-    }
-  }
-
-  /*
-   * (non-Javadoc)
-   *
-   * Ensures that max versions are set for the Scan so that metrics can be
-   * correctly aggregated and min/max can be correctly determined.
-   *
-   * @see
-   * org.apache.hadoop.hbase.coprocessor.BaseRegionObserver#preScannerOpen(org
-   * .apache.hadoop.hbase.coprocessor.ObserverContext,
-   * org.apache.hadoop.hbase.client.Scan,
-   * org.apache.hadoop.hbase.regionserver.RegionScanner)
-   */
-  @Override
-  public RegionScanner preScannerOpen(
-      ObserverContext<RegionCoprocessorEnvironment> e, Scan scan,
-      RegionScanner scanner) throws IOException {
-    // set max versions for scan to see all
-    // versions to aggregate for metrics
-    scan.setMaxVersions();
-    return scanner;
-  }
-
-  /*
-   * (non-Javadoc)
-   *
-   * Creates a {@link FlowScanner} Scan so that it can correctly process the
-   * contents of {@link FlowRunTable}.
-   *
-   * @see
-   * org.apache.hadoop.hbase.coprocessor.BaseRegionObserver#postScannerOpen(
-   * org.apache.hadoop.hbase.coprocessor.ObserverContext,
-   * org.apache.hadoop.hbase.client.Scan,
-   * org.apache.hadoop.hbase.regionserver.RegionScanner)
-   */
-  @Override
-  public RegionScanner postScannerOpen(
-      ObserverContext<RegionCoprocessorEnvironment> e, Scan scan,
-      RegionScanner scanner) throws IOException {
-    return new FlowScanner(e.getEnvironment(), scan,
-        scanner, FlowScannerOperation.READ);
-  }
-
-  @Override
-  public InternalScanner preFlush(
-      ObserverContext<RegionCoprocessorEnvironment> c, Store store,
-      InternalScanner scanner) throws IOException {
-    if (LOG.isDebugEnabled()) {
-      if (store != null) {
-        LOG.debug("preFlush store = " + store.getColumnFamilyName()
-            + " flushableSize=" + store.getFlushableSize()
-            + " flushedCellsCount=" + store.getFlushedCellsCount()
-            + " compactedCellsCount=" + store.getCompactedCellsCount()
-            + " majorCompactedCellsCount="
-            + store.getMajorCompactedCellsCount() + " memstoreFlushSize="
-            + store.getMemstoreFlushSize() + " memstoreSize="
-            + store.getMemStoreSize() + " size=" + store.getSize()
-            + " storeFilesCount=" + store.getStorefilesCount());
-      }
-    }
-    return new FlowScanner(c.getEnvironment(), scanner,
-        FlowScannerOperation.FLUSH);
-  }
-
-  @Override
-  public void postFlush(ObserverContext<RegionCoprocessorEnvironment> c,
-      Store store, StoreFile resultFile) {
-    if (LOG.isDebugEnabled()) {
-      if (store != null) {
-        LOG.debug("postFlush store = " + store.getColumnFamilyName()
-            + " flushableSize=" + store.getFlushableSize()
-            + " flushedCellsCount=" + store.getFlushedCellsCount()
-            + " compactedCellsCount=" + store.getCompactedCellsCount()
-            + " majorCompactedCellsCount="
-            + store.getMajorCompactedCellsCount() + " memstoreFlushSize="
-            + store.getMemstoreFlushSize() + " memstoreSize="
-            + store.getMemStoreSize() + " size=" + store.getSize()
-            + " storeFilesCount=" + store.getStorefilesCount());
-      }
-    }
-  }
-
-  @Override
-  public InternalScanner preCompact(
-      ObserverContext<RegionCoprocessorEnvironment> e, Store store,
-      InternalScanner scanner, ScanType scanType, CompactionRequest request)
-      throws IOException {
-
-    FlowScannerOperation requestOp = FlowScannerOperation.MINOR_COMPACTION;
-    if (request != null) {
-      requestOp = (request.isMajor() ? FlowScannerOperation.MAJOR_COMPACTION
-          : FlowScannerOperation.MINOR_COMPACTION);
-      LOG.info("Compactionrequest= " + request.toString() + " "
-          + requestOp.toString() + " RegionName=" + e.getEnvironment()
-              .getRegion().getRegionInfo().getRegionNameAsString());
-    }
-    return new FlowScanner(e.getEnvironment(), scanner, requestOp);
-  }
-}

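The prePut hook above exists so that rapid successive writes to the same non-metric cell do not silently overwrite each other: cells whose timestamp was left unset receive a generated, per-region-unique timestamp, while metric cells keep their own timestamps. A minimal sketch of that rule, with an AtomicLong standing in for TimestampGenerator:

    import java.util.concurrent.atomic.AtomicLong;

    public final class CellTimestampSketch {
      // HBase leaves unset cell timestamps at this sentinel
      // (HConstants.LATEST_TIMESTAMP).
      static final long LATEST_TIMESTAMP = Long.MAX_VALUE;

      // Simplified stand-in for TimestampGenerator: strictly increasing values.
      private final AtomicLong last = new AtomicLong(System.currentTimeMillis());

      long uniqueTimestamp() {
        return last.incrementAndGet();
      }

      // Mirrors getCellTimestamp: keep explicit timestamps, generate otherwise.
      long cellTimestamp(long incoming) {
        return incoming == LATEST_TIMESTAMP ? uniqueTimestamp() : incoming;
      }
    }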
http://git-wip-us.apache.org/repos/asf/hadoop/blob/9af30d46/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/flow/FlowRunRowKey.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/flow/FlowRunRowKey.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/flow/FlowRunRowKey.java
deleted file mode 100644
index 7ce91cf..0000000
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/flow/FlowRunRowKey.java
+++ /dev/null
@@ -1,233 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.yarn.server.timelineservice.storage.flow;
-
-import java.util.List;
-
-import org.apache.hadoop.hbase.util.Bytes;
-import org.apache.hadoop.yarn.server.timelineservice.reader.TimelineReaderUtils;
-import org.apache.hadoop.yarn.server.timelineservice.storage.common.KeyConverter;
-import org.apache.hadoop.yarn.server.timelineservice.storage.common.KeyConverterToString;
-import org.apache.hadoop.yarn.server.timelineservice.storage.common.LongConverter;
-import org.apache.hadoop.yarn.server.timelineservice.storage.common.Separator;
-
-/**
- * Represents a rowkey for the flow run table.
- */
-public class FlowRunRowKey {
-  private final String clusterId;
-  private final String userId;
-  private final String flowName;
-  private final Long flowRunId;
-  private final FlowRunRowKeyConverter flowRunRowKeyConverter =
-      new FlowRunRowKeyConverter();
-
-  public FlowRunRowKey(String clusterId, String userId, String flowName,
-      Long flowRunId) {
-    this.clusterId = clusterId;
-    this.userId = userId;
-    this.flowName = flowName;
-    this.flowRunId = flowRunId;
-  }
-
-  public String getClusterId() {
-    return clusterId;
-  }
-
-  public String getUserId() {
-    return userId;
-  }
-
-  public String getFlowName() {
-    return flowName;
-  }
-
-  public Long getFlowRunId() {
-    return flowRunId;
-  }
-
-  /**
-   * Constructs a row key for the flow run table as follows:
-   * {@code clusterId!userId!flowName!Inverted Flow Run Id}.
-   *
-   * @return byte array with the row key
-   */
-  public byte[] getRowKey() {
-    return flowRunRowKeyConverter.encode(this);
-  }
-
-
-  /**
-   * Given the raw row key as bytes, returns the row key as an object.
-   * @param rowKey Byte representation of row key.
-   * @return A <cite>FlowRunRowKey</cite> object.
-   */
-  public static FlowRunRowKey parseRowKey(byte[] rowKey) {
-    return new FlowRunRowKeyConverter().decode(rowKey);
-  }
-
-  /**
-   * Constructs a row key for the flow run table as follows:
-   * {@code clusterId!userId!flowName!Flow Run Id}.
-   * @return String representation of row key
-   */
-  public String getRowKeyAsString() {
-    return flowRunRowKeyConverter.encodeAsString(this);
-  }
-
-  /**
-   * Given the encoded row key as string, returns the row key as an object.
-   * @param encodedRowKey String representation of row key.
-   * @return A <cite>FlowRunRowKey</cite> object.
-   */
-  public static FlowRunRowKey parseRowKeyFromString(String encodedRowKey) {
-    return new FlowRunRowKeyConverter().decodeFromString(encodedRowKey);
-  }
-
-  /**
-   * Returns the flow key as a verbose String output.
-   * @return String
-   */
-  @Override
-  public String toString() {
-    StringBuilder flowKeyStr = new StringBuilder();
-    flowKeyStr.append("{clusterId=" + clusterId);
-    flowKeyStr.append(" userId=" + userId);
-    flowKeyStr.append(" flowName=" + flowName);
-    flowKeyStr.append(" flowRunId=");
-    flowKeyStr.append(flowRunId);
-    flowKeyStr.append("}");
-    return flowKeyStr.toString();
-  }
-
-  /**
-   * Encodes and decodes row key for flow run table.
-   * The row key is of the form: clusterId!userId!flowName!flowrunId.
-   * flowrunId is a long and the rest are strings.
-   */
-  final private static class FlowRunRowKeyConverter implements
-      KeyConverter<FlowRunRowKey>, KeyConverterToString<FlowRunRowKey> {
-
-    private FlowRunRowKeyConverter() {
-    }
-
-    /**
-     * The flow run row key is of the form clusterId!userId!flowName!flowrunId
-     * with each segment separated by !. The sizes below indicate sizes of each
-     * one of these segments in sequence. clusterId, userId and flowName are
-     * strings. flowrunId is a long hence 8 bytes in size. Strings are variable
-     * in size (i.e. end whenever separator is encountered). This is used while
-     * decoding and helps in determining where to split.
-     */
-    private static final int[] SEGMENT_SIZES = {Separator.VARIABLE_SIZE,
-        Separator.VARIABLE_SIZE, Separator.VARIABLE_SIZE, Bytes.SIZEOF_LONG };
-
-    /*
-     * (non-Javadoc)
-     *
-     * Encodes a FlowRunRowKey object into a byte array with each
-     * component/field in FlowRunRowKey separated by Separator#QUALIFIERS.
-     * This leads to a flow run row key of the form
-     * clusterId!userId!flowName!flowrunId. If flowRunId in the passed
-     * FlowRunRowKey object is null (and the fields preceding it, i.e.
-     * clusterId, userId and flowName, are not null), this returns a row key
-     * prefix of the form clusterId!userId!flowName!. flowRunId is inverted
-     * while encoding as it helps maintain a descending order for row keys in
-     * the flow run table.
-     *
-     * @see
-     * org.apache.hadoop.yarn.server.timelineservice.storage.common
-     * .KeyConverter#encode(java.lang.Object)
-     */
-    @Override
-    public byte[] encode(FlowRunRowKey rowKey) {
-      byte[] first =
-          Separator.QUALIFIERS.join(Separator.encode(rowKey.getClusterId(),
-              Separator.SPACE, Separator.TAB, Separator.QUALIFIERS), Separator
-              .encode(rowKey.getUserId(), Separator.SPACE, Separator.TAB,
-                  Separator.QUALIFIERS), Separator.encode(rowKey.getFlowName(),
-              Separator.SPACE, Separator.TAB, Separator.QUALIFIERS));
-      if (rowKey.getFlowRunId() == null) {
-        return Separator.QUALIFIERS.join(first, Separator.EMPTY_BYTES);
-      } else {
-        // Note that flowRunId is a long, so we can't encode all the
-        // components at the same time.
-        byte[] second =
-            Bytes.toBytes(LongConverter.invertLong(rowKey.getFlowRunId()));
-        return Separator.QUALIFIERS.join(first, second);
-      }
-    }
-
-    /*
-     * (non-Javadoc)
-     *
-     * Decodes a flow run row key of the form
-     * clusterId!userId!flowName!flowrunId represented in byte format and
-     * converts it into a FlowRunRowKey object. flowRunId is inverted while
-     * decoding as it was inverted while encoding.
-     *
-     * @see
-     * org.apache.hadoop.yarn.server.timelineservice.storage.common
-     * .KeyConverter#decode(byte[])
-     */
-    @Override
-    public FlowRunRowKey decode(byte[] rowKey) {
-      byte[][] rowKeyComponents =
-          Separator.QUALIFIERS.split(rowKey, SEGMENT_SIZES);
-      if (rowKeyComponents.length != 4) {
-        throw new IllegalArgumentException("the row key is not valid for "
-            + "a flow run");
-      }
-      String clusterId =
-          Separator.decode(Bytes.toString(rowKeyComponents[0]),
-              Separator.QUALIFIERS, Separator.TAB, Separator.SPACE);
-      String userId =
-          Separator.decode(Bytes.toString(rowKeyComponents[1]),
-              Separator.QUALIFIERS, Separator.TAB, Separator.SPACE);
-      String flowName =
-          Separator.decode(Bytes.toString(rowKeyComponents[2]),
-              Separator.QUALIFIERS, Separator.TAB, Separator.SPACE);
-      Long flowRunId =
-          LongConverter.invertLong(Bytes.toLong(rowKeyComponents[3]));
-      return new FlowRunRowKey(clusterId, userId, flowName, flowRunId);
-    }
-
-    @Override
-    public String encodeAsString(FlowRunRowKey key) {
-      if (key.clusterId == null || key.userId == null || key.flowName == null
-          || key.flowRunId == null) {
-        throw new IllegalArgumentException();
-      }
-      return TimelineReaderUtils.joinAndEscapeStrings(new String[] {
-          key.clusterId, key.userId, key.flowName, key.flowRunId.toString()});
-    }
-
-    @Override
-    public FlowRunRowKey decodeFromString(String encodedRowKey) {
-      List<String> split = TimelineReaderUtils.split(encodedRowKey);
-      if (split == null || split.size() != 4) {
-        throw new IllegalArgumentException(
-            "Invalid row key for flow run table.");
-      }
-      Long flowRunId = Long.valueOf(split.get(3));
-      return new FlowRunRowKey(split.get(0), split.get(1), split.get(2),
-          flowRunId);
-    }
-  }
-}

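FlowRunRowKey inverts flowRunId before writing it so that byte-wise ascending row keys scan the most recent run first. A minimal sketch of the inversion, assuming LongConverter.invertLong is the usual Long.MAX_VALUE subtraction:

    public final class InvertedRunIdSketch {
      // Assumed behavior of LongConverter.invertLong; applying it twice
      // restores the original value, which is why decode inverts again.
      static long invert(long v) {
        return Long.MAX_VALUE - v;
      }

      public static void main(String[] args) {
        long olderRun = 1392995080000L;
        long newerRun = 1392995081012L;
        // The newer run inverts to the smaller value, so it sorts first.
        System.out.println(invert(newerRun) < invert(olderRun)); // prints true
      }
    }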
http://git-wip-us.apache.org/repos/asf/hadoop/blob/9af30d46/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/flow/FlowRunRowKeyPrefix.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/flow/FlowRunRowKeyPrefix.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/flow/FlowRunRowKeyPrefix.java
deleted file mode 100644
index 23ebc66..0000000
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/flow/FlowRunRowKeyPrefix.java
+++ /dev/null
@@ -1,54 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.yarn.server.timelineservice.storage.flow;
-
-import org.apache.hadoop.yarn.server.timelineservice.storage.common.RowKeyPrefix;
-
-/**
- * Represents a partial rowkey (without the flowRunId) for the flow run table.
- */
-public class FlowRunRowKeyPrefix extends FlowRunRowKey implements
-    RowKeyPrefix<FlowRunRowKey> {
-
-  /**
-   * Constructs a row key prefix for the flow run table as follows:
-   * {@code clusterId!userId!flowName!}.
-   *
-   * @param clusterId identifying the cluster
-   * @param userId identifying the user
-   * @param flowName identifying the flow
-   */
-  public FlowRunRowKeyPrefix(String clusterId, String userId,
-      String flowName) {
-    super(clusterId, userId, flowName, null);
-  }
-
-  /*
-   * (non-Javadoc)
-   *
-   * @see
-   * org.apache.hadoop.yarn.server.timelineservice.storage.application.
-   * RowKeyPrefix#getRowKeyPrefix()
-   */
-  public byte[] getRowKeyPrefix() {
-    // We know we're a FlowRunRowKey with null flowRunId, so we can simply
-    // delegate
-    return super.getRowKey();
-  }
-
-}

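A hypothetical usage sketch, not part of the patch: a FlowRunRowKeyPrefix with the flowRunId omitted can seed an HBase prefix scan that selects every run of a single flow. The cluster, user, and flow names below are illustrative:

    import org.apache.hadoop.hbase.client.Scan;
    import org.apache.hadoop.yarn.server.timelineservice.storage.flow.FlowRunRowKeyPrefix;

    public final class FlowRunPrefixScanSketch {
      static Scan scanAllRunsOfFlow(String cluster, String user, String flow) {
        byte[] prefix = new FlowRunRowKeyPrefix(cluster, user, flow).getRowKeyPrefix();
        Scan scan = new Scan();
        scan.setRowPrefixFilter(prefix); // restrict the scan to this flow's rows
        return scan;
      }

      public static void main(String[] args) {
        Scan scan = scanAllRunsOfFlow("clusterA", "alice", "dailyETL");
        System.out.println(scan);
      }
    }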
http://git-wip-us.apache.org/repos/asf/hadoop/blob/9af30d46/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/flow/FlowRunTable.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/flow/FlowRunTable.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/flow/FlowRunTable.java
deleted file mode 100644
index a1d32ee..0000000
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/flow/FlowRunTable.java
+++ /dev/null
@@ -1,151 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.yarn.server.timelineservice.storage.flow;
-
-import java.io.IOException;
-
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hbase.HColumnDescriptor;
-import org.apache.hadoop.hbase.HTableDescriptor;
-import org.apache.hadoop.hbase.TableName;
-import org.apache.hadoop.hbase.client.Admin;
-import org.apache.hadoop.hbase.regionserver.BloomType;
-import org.apache.hadoop.yarn.conf.YarnConfiguration;
-import org.apache.hadoop.yarn.server.timelineservice.storage.common.BaseTable;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.hbase.Coprocessor;
-
-/**
- * The flow run table has a single column family, info, which stores
- * per-flow-run information aggregated across applications.
- *
- * Metrics are also stored in the info column family.
- *
- * Example flow run table record:
- *
- * <pre>
- * flow_run table
- * |-------------------------------------------|
- * |  Row key   | Column Family                |
- * |            | info                         |
- * |-------------------------------------------|
- * | clusterId! | flow_version:version7        |
- * | userName!  |                              |
- * | flowName!  | running_apps:1               |
- * | flowRunId  |                              |
- * |            | min_start_time:1392995080000 |
- * |            | #0:""                        |
- * |            |                              |
- * |            | min_start_time:1392995081012 |
- * |            | #0:appId2                    |
- * |            |                              |
- * |            | min_start_time:1392993083210 |
- * |            | #0:appId3                    |
- * |            |                              |
- * |            |                              |
- * |            | max_end_time:1392993084018   |
- * |            | #0:""                        |
- * |            |                              |
- * |            |                              |
- * |            | m!mapInputRecords:127        |
- * |            | #0:""                        |
- * |            |                              |
- * |            | m!mapInputRecords:31         |
- * |            | #2:appId2                    |
- * |            |                              |
- * |            | m!mapInputRecords:37         |
- * |            | #1:appId3                    |
- * |            |                              |
- * |            |                              |
- * |            | m!mapOutputRecords:181       |
- * |            | #0:""                        |
- * |            |                              |
- * |            | m!mapOutputRecords:37        |
- * |            | #1:appId3                    |
- * |            |                              |
- * |            |                              |
- * |-------------------------------------------|
- * </pre>
- */
-public class FlowRunTable extends BaseTable<FlowRunTable> {
-  /** flow run prefix. */
-  private static final String PREFIX =
-      YarnConfiguration.TIMELINE_SERVICE_PREFIX + ".flowrun";
-
-  /** config param name that specifies the flowrun table name. */
-  public static final String TABLE_NAME_CONF_NAME = PREFIX + ".table.name";
-
-  /** default value for flowrun table name. */
-  public static final String DEFAULT_TABLE_NAME = "timelineservice.flowrun";
-
-  private static final Logger LOG =
-      LoggerFactory.getLogger(FlowRunTable.class);
-
-  /** default max number of versions. */
-  public static final int DEFAULT_METRICS_MAX_VERSIONS = Integer.MAX_VALUE;
-
-  public FlowRunTable() {
-    super(TABLE_NAME_CONF_NAME, DEFAULT_TABLE_NAME);
-  }
-
-  /*
-   * (non-Javadoc)
-   *
-   * @see
-   * org.apache.hadoop.yarn.server.timelineservice.storage.BaseTable#createTable
-   * (org.apache.hadoop.hbase.client.Admin,
-   * org.apache.hadoop.conf.Configuration)
-   */
-  public void createTable(Admin admin, Configuration hbaseConf)
-      throws IOException {
-
-    TableName table = getTableName(hbaseConf);
-    if (admin.tableExists(table)) {
-      // do not disable / delete existing table
-      // similar to the approach taken by map-reduce jobs when
-      // output directory exists
-      throw new IOException("Table " + table.getNameAsString()
-          + " already exists.");
-    }
-
-    HTableDescriptor flowRunTableDescp = new HTableDescriptor(table);
-    HColumnDescriptor infoCF =
-        new HColumnDescriptor(FlowRunColumnFamily.INFO.getBytes());
-    infoCF.setBloomFilterType(BloomType.ROWCOL);
-    flowRunTableDescp.addFamily(infoCF);
-    infoCF.setMinVersions(1);
-    infoCF.setMaxVersions(DEFAULT_METRICS_MAX_VERSIONS);
-
-    // TODO: figure the split policy
-    String coprocessorJarPathStr = hbaseConf.get(
-        YarnConfiguration.FLOW_RUN_COPROCESSOR_JAR_HDFS_LOCATION,
-        YarnConfiguration.DEFAULT_HDFS_LOCATION_FLOW_RUN_COPROCESSOR_JAR);
-
-    Path coprocessorJarPath = new Path(coprocessorJarPathStr);
-    LOG.info("CoprocessorJarPath=" + coprocessorJarPath.toString());
-    flowRunTableDescp.addCoprocessor(
-        FlowRunCoprocessor.class.getCanonicalName(), coprocessorJarPath,
-        Coprocessor.PRIORITY_USER, null);
-    admin.createTable(flowRunTableDescp);
-    LOG.info("Status of table creation for " + table.getNameAsString() + "="
-        + admin.tableExists(table));
-  }
-}

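createTable above sets minVersions to 1 and maxVersions to Integer.MAX_VALUE because metric values are written as successive versions of the same column, so a whole time series lives under one qualifier. A minimal plain-Java model of that versioned layout:

    import java.util.Comparator;
    import java.util.NavigableMap;
    import java.util.TreeMap;

    public final class VersionedMetricSketch {
      // timestamp -> value, newest first, mirroring HBase cell versions.
      private final NavigableMap<Long, Long> series =
          new TreeMap<>(Comparator.reverseOrder());

      void put(long timestamp, long value) {
        series.put(timestamp, value); // every write keeps its own version
      }

      long latest() {
        return series.firstEntry().getValue(); // assumes at least one write
      }

      public static void main(String[] args) {
        VersionedMetricSketch m = new VersionedMetricSketch();
        m.put(1392995080000L, 100L);
        m.put(1392995081012L, 127L);
        System.out.println(m.latest()); // prints 127: newest version wins
      }
    }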
http://git-wip-us.apache.org/repos/asf/hadoop/blob/9af30d46/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/flow/FlowScanner.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/flow/FlowScanner.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/flow/FlowScanner.java
deleted file mode 100644
index dbd0484..0000000
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/flow/FlowScanner.java
+++ /dev/null
@@ -1,729 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.yarn.server.timelineservice.storage.flow;
-
-import java.io.Closeable;
-import java.io.IOException;
-import java.util.ArrayList;
-import java.util.HashSet;
-import java.util.List;
-import java.util.Set;
-import java.util.SortedSet;
-import java.util.TreeSet;
-
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hbase.Cell;
-import org.apache.hadoop.hbase.CellUtil;
-import org.apache.hadoop.hbase.HRegionInfo;
-import org.apache.hadoop.hbase.KeyValue;
-import org.apache.hadoop.hbase.Tag;
-import org.apache.hadoop.hbase.client.Scan;
-import org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment;
-import org.apache.hadoop.hbase.regionserver.InternalScanner;
-import org.apache.hadoop.hbase.regionserver.Region;
-import org.apache.hadoop.hbase.regionserver.RegionScanner;
-import org.apache.hadoop.hbase.regionserver.ScannerContext;
-import org.apache.hadoop.hbase.util.Bytes;
-import org.apache.hadoop.hbase.util.Bytes.ByteArrayComparator;
-import org.apache.hadoop.yarn.conf.YarnConfiguration;
-import org.apache.hadoop.yarn.server.timelineservice.storage.common.GenericConverter;
-import org.apache.hadoop.yarn.server.timelineservice.storage.common.HBaseTimelineStorageUtils;
-import org.apache.hadoop.yarn.server.timelineservice.storage.common.NumericValueConverter;
-import org.apache.hadoop.yarn.server.timelineservice.storage.common.Separator;
-import org.apache.hadoop.yarn.server.timelineservice.storage.common.TimestampGenerator;
-import org.apache.hadoop.yarn.server.timelineservice.storage.common.ValueConverter;
-
-import com.google.common.annotations.VisibleForTesting;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-/**
- * Invoked via the coprocessor when a Get or a Scan is issued for flow run
- * table. Looks through the list of cells per row, checks their tags and does
- * operation on those cells as per the cell tags. Transforms reads of the stored
- * metrics into calculated sums for each column Also, finds the min and max for
- * start and end times in a flow run.
- */
-class FlowScanner implements RegionScanner, Closeable {
-
-  private static final Logger LOG =
-      LoggerFactory.getLogger(FlowScanner.class);
-
-  /**
-   * A special application id used to represent the flow id. This is needed
-   * since TimestampGenerator parses the app id to generate a cell timestamp.
-   */
-  private static final String FLOW_APP_ID = "application_00000000000_0000";
-
-  private final Region region;
-  private final InternalScanner flowRunScanner;
-  private final int batchSize;
-  private final long appFinalValueRetentionThreshold;
-  private RegionScanner regionScanner;
-  private boolean hasMore;
-  private byte[] currentRow;
-  private List<Cell> availableCells = new ArrayList<>();
-  private int currentIndex;
-  private FlowScannerOperation action = FlowScannerOperation.READ;
-
-  FlowScanner(RegionCoprocessorEnvironment env, InternalScanner internalScanner,
-      FlowScannerOperation action) {
-    this(env, null, internalScanner, action);
-  }
-
-  FlowScanner(RegionCoprocessorEnvironment env, Scan incomingScan,
-      InternalScanner internalScanner, FlowScannerOperation action) {
-    this.batchSize = incomingScan == null ? -1 : incomingScan.getBatch();
-    // TODO initialize other scan attributes like Scan#maxResultSize
-    this.flowRunScanner = internalScanner;
-    if (internalScanner instanceof RegionScanner) {
-      this.regionScanner = (RegionScanner) internalScanner;
-    }
-    this.action = action;
-    if (env == null) {
-      this.appFinalValueRetentionThreshold =
-          YarnConfiguration.DEFAULT_APP_FINAL_VALUE_RETENTION_THRESHOLD;
-      this.region = null;
-    } else {
-      this.region = env.getRegion();
-      Configuration hbaseConf = env.getConfiguration();
-      this.appFinalValueRetentionThreshold = hbaseConf.getLong(
-          YarnConfiguration.APP_FINAL_VALUE_RETENTION_THRESHOLD,
-          YarnConfiguration.DEFAULT_APP_FINAL_VALUE_RETENTION_THRESHOLD);
-    }
-    if (LOG.isDebugEnabled()) {
-      LOG.debug(" batch size=" + batchSize);
-    }
-  }
-
-
-  /*
-   * (non-Javadoc)
-   *
-   * @see org.apache.hadoop.hbase.regionserver.RegionScanner#getRegionInfo()
-   */
-  @Override
-  public HRegionInfo getRegionInfo() {
-    return region.getRegionInfo();
-  }
-
-  @Override
-  public boolean nextRaw(List<Cell> cells) throws IOException {
-    return nextRaw(cells, ScannerContext.newBuilder().build());
-  }
-
-  @Override
-  public boolean nextRaw(List<Cell> cells, ScannerContext scannerContext)
-      throws IOException {
-    return nextInternal(cells, scannerContext);
-  }
-
-  @Override
-  public boolean next(List<Cell> cells) throws IOException {
-    return next(cells, ScannerContext.newBuilder().build());
-  }
-
-  @Override
-  public boolean next(List<Cell> cells, ScannerContext scannerContext)
-      throws IOException {
-    return nextInternal(cells, scannerContext);
-  }
-
-  /**
-   * Gets the value converter associated with a column or a column prefix. If
-   * nothing matches, the generic converter is returned.
-   * @param colQualifierBytes column qualifier to look up.
-   * @return value converter implementation.
-   */
-  private static ValueConverter getValueConverter(byte[] colQualifierBytes) {
-    // Iterate over all the column prefixes for flow run table and get the
-    // appropriate converter for the column qualifier passed if prefix matches.
-    for (FlowRunColumnPrefix colPrefix : FlowRunColumnPrefix.values()) {
-      byte[] colPrefixBytes = colPrefix.getColumnPrefixBytes("");
-      if (Bytes.compareTo(colPrefixBytes, 0, colPrefixBytes.length,
-          colQualifierBytes, 0, colPrefixBytes.length) == 0) {
-        return colPrefix.getValueConverter();
-      }
-    }
-    // Iterate over all the columns for flow run table and get the
-    // appropriate converter for the column qualifier passed if match occurs.
-    for (FlowRunColumn column : FlowRunColumn.values()) {
-      if (Bytes.compareTo(
-          column.getColumnQualifierBytes(), colQualifierBytes) == 0) {
-        return column.getValueConverter();
-      }
-    }
-    // Return generic converter if nothing matches.
-    return GenericConverter.getInstance();
-  }
-
-  /**
-   * This method loops through the cells in a given row of the
-   * {@link FlowRunTable}. It looks at the tags of each cell to figure out how
-   * to process the contents. It then calculates the sum or min or max for each
-   * column or returns the cell as is.
-   *
-   * @param cells output list the processed cells are added to.
-   * @param scannerContext context for the current scan.
-   * @return true if next row is available for the scanner, false otherwise
-   * @throws IOException if any problem occurs while scanning.
-   */
-  private boolean nextInternal(List<Cell> cells, ScannerContext scannerContext)
-      throws IOException {
-    Cell cell = null;
-    startNext();
-    // Loop through all the cells in this row.
-    // For min/max/metrics we do need to scan the entire set of cells to get
-    // the right one, but with flush/compaction the number of cells being
-    // scanned will go down. Cells are grouped per column qualifier and then
-    // sorted by cell timestamp (latest to oldest) per column qualifier, so
-    // all cells in one qualifier come one after the other before we see the
-    // next column qualifier.
-    ByteArrayComparator comp = new ByteArrayComparator();
-    byte[] previousColumnQualifier = Separator.EMPTY_BYTES;
-    AggregationOperation currentAggOp = null;
-    SortedSet<Cell> currentColumnCells = new TreeSet<>(KeyValue.COMPARATOR);
-    Set<String> alreadySeenAggDim = new HashSet<>();
-    int addedCnt = 0;
-    long currentTimestamp = System.currentTimeMillis();
-    ValueConverter converter = null;
-    int limit = batchSize;
-
-    while (limit <= 0 || addedCnt < limit) {
-      cell = peekAtNextCell(scannerContext);
-      if (cell == null) {
-        break;
-      }
-      byte[] currentColumnQualifier = CellUtil.cloneQualifier(cell);
-      if (previousColumnQualifier == null) {
-        // first time in loop
-        previousColumnQualifier = currentColumnQualifier;
-      }
-
-      converter = getValueConverter(currentColumnQualifier);
-      if (comp.compare(previousColumnQualifier, currentColumnQualifier) != 0) {
-        addedCnt += emitCells(cells, currentColumnCells, currentAggOp,
-            converter, currentTimestamp);
-        resetState(currentColumnCells, alreadySeenAggDim);
-        previousColumnQualifier = currentColumnQualifier;
-        currentAggOp = getCurrentAggOp(cell);
-        converter = getValueConverter(currentColumnQualifier);
-      }
-      collectCells(currentColumnCells, currentAggOp, cell, alreadySeenAggDim,
-          converter, scannerContext);
-      nextCell(scannerContext);
-    }
-    if ((!currentColumnCells.isEmpty()) && ((limit <= 0 || addedCnt < limit))) {
-      addedCnt += emitCells(cells, currentColumnCells, currentAggOp, converter,
-          currentTimestamp);
-      if (LOG.isDebugEnabled()) {
-        if (addedCnt > 0) {
-          LOG.debug("emitted cells. " + addedCnt + " for " + this.action
-              + " rowKey="
-              + FlowRunRowKey.parseRowKey(CellUtil.cloneRow(cells.get(0))));
-        } else {
-          LOG.debug("emitted no cells for " + this.action);
-        }
-      }
-    }
-    return hasMore();
-  }
-
-  private AggregationOperation getCurrentAggOp(Cell cell) {
-    List<Tag> tags = Tag.asList(cell.getTagsArray(), cell.getTagsOffset(),
-        cell.getTagsLength());
-    // We assume that all the operations for a particular column are the same
-    return HBaseTimelineStorageUtils.getAggregationOperationFromTagsList(tags);
-  }
-
-  /**
-   * Resets the parameters to an initialized state for the next loop iteration.
-   *
-   * @param currentColumnCells cells collected for the current column.
-   * @param alreadySeenAggDim aggregation dimensions seen so far.
-   */
-  private void resetState(SortedSet<Cell> currentColumnCells,
-      Set<String> alreadySeenAggDim) {
-    currentColumnCells.clear();
-    alreadySeenAggDim.clear();
-  }
-
-  private void collectCells(SortedSet<Cell> currentColumnCells,
-      AggregationOperation currentAggOp, Cell cell,
-      Set<String> alreadySeenAggDim, ValueConverter converter,
-      ScannerContext scannerContext) throws IOException {
-
-    if (currentAggOp == null) {
-      // not a min/max/metric cell, so just return it as is
-      currentColumnCells.add(cell);
-      return;
-    }
-
-    switch (currentAggOp) {
-    case GLOBAL_MIN:
-      if (currentColumnCells.size() == 0) {
-        currentColumnCells.add(cell);
-      } else {
-        Cell currentMinCell = currentColumnCells.first();
-        Cell newMinCell = compareCellValues(currentMinCell, cell, currentAggOp,
-            (NumericValueConverter) converter);
-        if (!currentMinCell.equals(newMinCell)) {
-          currentColumnCells.remove(currentMinCell);
-          currentColumnCells.add(newMinCell);
-        }
-      }
-      break;
-    case GLOBAL_MAX:
-      if (currentColumnCells.size() == 0) {
-        currentColumnCells.add(cell);
-      } else {
-        Cell currentMaxCell = currentColumnCells.first();
-        Cell newMaxCell = compareCellValues(currentMaxCell, cell, currentAggOp,
-            (NumericValueConverter) converter);
-        if (!currentMaxCell.equals(newMaxCell)) {
-          currentColumnCells.remove(currentMaxCell);
-          currentColumnCells.add(newMaxCell);
-        }
-      }
-      break;
-    case SUM:
-    case SUM_FINAL:
-      if (LOG.isTraceEnabled()) {
-        LOG.trace("In collect cells "
-            + " FlowSannerOperation="
-            + this.action
-            + " currentAggOp="
-            + currentAggOp
-            + " cell qualifier="
-            + Bytes.toString(CellUtil.cloneQualifier(cell))
-            + " cell value= "
-            + converter.decodeValue(CellUtil.cloneValue(cell))
-            + " timestamp=" + cell.getTimestamp());
-      }
-
-      // only if this app has not been seen yet, add to current column cells
-      List<Tag> tags = Tag.asList(cell.getTagsArray(), cell.getTagsOffset(),
-          cell.getTagsLength());
-      String aggDim = HBaseTimelineStorageUtils
-          .getAggregationCompactionDimension(tags);
-      if (!alreadySeenAggDim.contains(aggDim)) {
-        // if this agg dimension has already been seen,
-        // since they show up in sorted order
-        // we drop the rest which are older
-        // in other words, this cell is older than previously seen cells
-        // for that agg dim
-        // but when this agg dim is not seen,
-        // consider this cell in our working set
-        currentColumnCells.add(cell);
-        alreadySeenAggDim.add(aggDim);
-      }
-      break;
-    default:
-      break;
-    } // end of switch case
-  }
-
-  /*
-   * Processes the cells in input param currentColumnCells and populates
-   * List<Cell> cells as the output based on the input AggregationOperation
-   * parameter.
-   */
-  private int emitCells(List<Cell> cells, SortedSet<Cell> currentColumnCells,
-      AggregationOperation currentAggOp, ValueConverter converter,
-      long currentTimestamp) throws IOException {
-    if ((currentColumnCells == null) || (currentColumnCells.size() == 0)) {
-      return 0;
-    }
-    if (currentAggOp == null) {
-      cells.addAll(currentColumnCells);
-      return currentColumnCells.size();
-    }
-    if (LOG.isTraceEnabled()) {
-      LOG.trace("In emitCells " + this.action + " currentColumnCells size= "
-          + currentColumnCells.size() + " currentAggOp" + currentAggOp);
-    }
-
-    switch (currentAggOp) {
-    case GLOBAL_MIN:
-    case GLOBAL_MAX:
-      cells.addAll(currentColumnCells);
-      return currentColumnCells.size();
-    case SUM:
-    case SUM_FINAL:
-      switch (action) {
-      case FLUSH:
-      case MINOR_COMPACTION:
-        cells.addAll(currentColumnCells);
-        return currentColumnCells.size();
-      case READ:
-        Cell sumCell = processSummation(currentColumnCells,
-            (NumericValueConverter) converter);
-        cells.add(sumCell);
-        return 1;
-      case MAJOR_COMPACTION:
-        List<Cell> finalCells = processSummationMajorCompaction(
-            currentColumnCells, (NumericValueConverter) converter,
-            currentTimestamp);
-        cells.addAll(finalCells);
-        return finalCells.size();
-      default:
-        cells.addAll(currentColumnCells);
-        return currentColumnCells.size();
-      }
-    default:
-      cells.addAll(currentColumnCells);
-      return currentColumnCells.size();
-    }
-  }
-
-  /*
-   * Returns a cell whose value is the sum of all cell values in the input set.
-   * The new cell created has the timestamp of the most recent metric cell. The
-   * sum of a metric for a flow run is the summation at the point of the last
-   * metric update in that flow till that time.
-   */
-  private Cell processSummation(SortedSet<Cell> currentColumnCells,
-      NumericValueConverter converter) throws IOException {
-    Number sum = 0;
-    Number currentValue = 0;
-    long ts = 0L;
-    long mostCurrentTimestamp = 0L;
-    Cell mostRecentCell = null;
-    for (Cell cell : currentColumnCells) {
-      currentValue = (Number) converter.decodeValue(CellUtil.cloneValue(cell));
-      ts = cell.getTimestamp();
-      if (mostCurrentTimestamp < ts) {
-        mostCurrentTimestamp = ts;
-        mostRecentCell = cell;
-      }
-      sum = converter.add(sum, currentValue);
-    }
-    byte[] sumBytes = converter.encodeValue(sum);
-    Cell sumCell =
-        HBaseTimelineStorageUtils.createNewCell(mostRecentCell, sumBytes);
-    return sumCell;
-  }
-
-
-  /**
-   * Returns a list of cells that contains:
-   *
-   * A) the latest cells for applications that haven't finished yet, and
-   * B) a summation for the flow, based on applications that have completed
-   * and are older than a certain time.
-   *
-   * The new cell created has the timestamp of the most recent metric cell. The
-   * sum of a metric for a flow run is the summation at the point of the last
-   * metric update in that flow till that time.
-   */
-  @VisibleForTesting
-  List<Cell> processSummationMajorCompaction(
-      SortedSet<Cell> currentColumnCells, NumericValueConverter converter,
-      long currentTimestamp)
-      throws IOException {
-    Number sum = 0;
-    Number currentValue = 0;
-    long ts = 0L;
-    boolean summationDone = false;
-    List<Cell> finalCells = new ArrayList<Cell>();
-    if (currentColumnCells == null) {
-      return finalCells;
-    }
-
-    if (LOG.isDebugEnabled()) {
-      LOG.debug("In processSummationMajorCompaction,"
-          + " will drop cells older than " + currentTimestamp
-          + " CurrentColumnCells size=" + currentColumnCells.size());
-    }
-
-    for (Cell cell : currentColumnCells) {
-      AggregationOperation cellAggOp = getCurrentAggOp(cell);
-      // if this is the existing flow sum cell
-      List<Tag> tags = Tag.asList(cell.getTagsArray(), cell.getTagsOffset(),
-          cell.getTagsLength());
-      String appId = HBaseTimelineStorageUtils
-          .getAggregationCompactionDimension(tags);
-      if (FLOW_APP_ID.equals(appId)) {
-        sum = converter.add(sum, currentValue);
-        summationDone = true;
-        if (LOG.isTraceEnabled()) {
-          LOG.trace("reading flow app id sum=" + sum);
-        }
-      } else {
-        currentValue = (Number) converter.decodeValue(CellUtil
-            .cloneValue(cell));
-        // read the timestamp truncated by the generator
-        ts =  TimestampGenerator.getTruncatedTimestamp(cell.getTimestamp());
-        if ((cellAggOp == AggregationOperation.SUM_FINAL)
-            && ((ts + this.appFinalValueRetentionThreshold)
-                < currentTimestamp)) {
-          sum = converter.add(sum, currentValue);
-          summationDone = true;
-          if (LOG.isTraceEnabled()) {
-            LOG.trace("MAJOR COMPACTION loop sum= " + sum
-                + " discarding now: " + " qualifier="
-                + Bytes.toString(CellUtil.cloneQualifier(cell)) + " value="
-                + converter.decodeValue(CellUtil.cloneValue(cell))
-                + " timestamp=" + cell.getTimestamp() + " " + this.action);
-          }
-        } else {
-          // not a final value but it's the latest cell for this app
-          // so include this cell in the list of cells to write back
-          finalCells.add(cell);
-        }
-      }
-    }
-    if (summationDone) {
-      Cell anyCell = currentColumnCells.first();
-      List<Tag> tags = new ArrayList<Tag>();
-      Tag t = new Tag(AggregationOperation.SUM_FINAL.getTagType(),
-          Bytes.toBytes(FLOW_APP_ID));
-      tags.add(t);
-      t = new Tag(AggregationCompactionDimension.APPLICATION_ID.getTagType(),
-          Bytes.toBytes(FLOW_APP_ID));
-      tags.add(t);
-      byte[] tagByteArray = Tag.fromList(tags);
-      Cell sumCell = HBaseTimelineStorageUtils.createNewCell(
-          CellUtil.cloneRow(anyCell),
-          CellUtil.cloneFamily(anyCell),
-          CellUtil.cloneQualifier(anyCell),
-          TimestampGenerator.getSupplementedTimestamp(
-              System.currentTimeMillis(), FLOW_APP_ID),
-              converter.encodeValue(sum), tagByteArray);
-      finalCells.add(sumCell);
-      if (LOG.isTraceEnabled()) {
-        LOG.trace("MAJOR COMPACTION final sum= " + sum + " for "
-            + Bytes.toString(CellUtil.cloneQualifier(sumCell))
-            + " " + this.action);
-      }
-      LOG.info("After major compaction for qualifier="
-          + Bytes.toString(CellUtil.cloneQualifier(sumCell))
-          + " with currentColumnCells.size="
-          + currentColumnCells.size()
-          + " returning finalCells.size=" + finalCells.size()
-          + " with sum=" + sum.longValue()
-          + " with cell timestamp " + sumCell.getTimestamp());
-    } else {
-      String qualifier = "";
-      LOG.info("After major compaction for qualifier=" + qualifier
-          + " with currentColumnCells.size="
-          + currentColumnCells.size()
-          + " returning finalCells.size=" + finalCells.size()
-          + " with zero sum="
-          + sum.longValue());
-    }
-    return finalCells;
-  }
-
-  /**
-   * Determines which cell is to be returned based on the values in each cell
-   * and the comparison operation MIN or MAX.
-   *
-   * @param previouslyChosenCell the cell chosen so far, possibly null.
-   * @param currentCell the cell being compared against it.
-   * @param currentAggOp the aggregation operation, GLOBAL_MIN or GLOBAL_MAX.
-   * @param converter converter used to decode the numeric cell values.
-   * @return the cell which is the min (or max) cell.
-   * @throws IOException if a cell value cannot be decoded.
-   */
-  private Cell compareCellValues(Cell previouslyChosenCell, Cell currentCell,
-      AggregationOperation currentAggOp, NumericValueConverter converter)
-      throws IOException {
-    if (previouslyChosenCell == null) {
-      return currentCell;
-    }
-    try {
-      Number previouslyChosenCellValue = (Number)converter.decodeValue(
-          CellUtil.cloneValue(previouslyChosenCell));
-      Number currentCellValue = (Number) converter.decodeValue(CellUtil
-          .cloneValue(currentCell));
-      switch (currentAggOp) {
-      case GLOBAL_MIN:
-        if (converter.compare(
-            currentCellValue, previouslyChosenCellValue) < 0) {
-          // new value is minimum, hence return this cell
-          return currentCell;
-        } else {
-          // previously chosen value is minimum, hence return previous min cell
-          return previouslyChosenCell;
-        }
-      case GLOBAL_MAX:
-        if (converter.compare(
-            currentCellValue, previouslyChosenCellValue) > 0) {
-          // new value is max, hence return this cell
-          return currentCell;
-        } else {
-          // previously chosen value is max, hence return previous max cell
-          return previouslyChosenCell;
-        }
-      default:
-        return currentCell;
-      }
-    } catch (IllegalArgumentException iae) {
-      LOG.error("caught iae during conversion to long ", iae);
-      return currentCell;
-    }
-  }
-
-  @Override
-  public void close() throws IOException {
-    if (flowRunScanner != null) {
-      flowRunScanner.close();
-    } else {
-      LOG.warn("scanner close called but scanner is null");
-    }
-  }
-
-  /**
-   * Called to signal the start of the next() call by the scanner.
-   */
-  public void startNext() {
-    currentRow = null;
-  }
-
-  /**
-   * Returns whether or not the underlying scanner has more rows.
-   */
-  public boolean hasMore() {
-    return currentIndex < availableCells.size() || hasMore;
-  }
-
-  /**
-   * Returns the next available cell for the current row and advances the
-   * pointer to the next cell. This method can be called multiple times in a row
-   * to advance through all the available cells.
-   *
-   * @param scannerContext
-   *          context information for the batch of cells under consideration
-   * @return the next available cell or null if no more cells are available for
-   *         the current row
-   * @throws IOException if any problem is encountered while grabbing the
-   *     next cell.
-   */
-  public Cell nextCell(ScannerContext scannerContext) throws IOException {
-    Cell cell = peekAtNextCell(scannerContext);
-    if (cell != null) {
-      currentIndex++;
-    }
-    return cell;
-  }
-
-  /**
-   * Returns the next available cell for the current row, without advancing the
-   * pointer. Calling this method multiple times in a row will continue to
-   * return the same cell.
-   *
-   * @param scannerContext
-   *          context information for the batch of cells under consideration
-   * @return the next available cell or null if no more cells are available for
-   *         the current row
-   * @throws IOException if any problem is encountered while grabbing the next
-   *     cell.
-   */
-  public Cell peekAtNextCell(ScannerContext scannerContext) throws IOException {
-    if (currentIndex >= availableCells.size()) {
-      // done with current batch
-      availableCells.clear();
-      currentIndex = 0;
-      hasMore = flowRunScanner.next(availableCells, scannerContext);
-    }
-    Cell cell = null;
-    if (currentIndex < availableCells.size()) {
-      cell = availableCells.get(currentIndex);
-      if (currentRow == null) {
-        currentRow = CellUtil.cloneRow(cell);
-      } else if (!CellUtil.matchingRow(cell, currentRow)) {
-        // moved on to the next row
-        // don't use the current cell
-        // also signal no more cells for this row
-        return null;
-      }
-    }
-    return cell;
-  }
-
-  /*
-   * (non-Javadoc)
-   *
-   * @see org.apache.hadoop.hbase.regionserver.RegionScanner#getMaxResultSize()
-   */
-  @Override
-  public long getMaxResultSize() {
-    if (regionScanner == null) {
-      throw new IllegalStateException(
-          "RegionScanner.isFilterDone() called when the flow "
-              + "scanner's scanner is not a RegionScanner");
-    }
-    return regionScanner.getMaxResultSize();
-  }
-
-  /*
-   * (non-Javadoc)
-   *
-   * @see org.apache.hadoop.hbase.regionserver.RegionScanner#getMvccReadPoint()
-   */
-  @Override
-  public long getMvccReadPoint() {
-    if (regionScanner == null) {
-      throw new IllegalStateException(
-          "RegionScanner.isFilterDone() called when the flow "
-              + "scanner's internal scanner is not a RegionScanner");
-    }
-    return regionScanner.getMvccReadPoint();
-  }
-
-  /*
-   * (non-Javadoc)
-   *
-   * @see org.apache.hadoop.hbase.regionserver.RegionScanner#isFilterDone()
-   */
-  @Override
-  public boolean isFilterDone() throws IOException {
-    if (regionScanner == null) {
-      throw new IllegalStateException(
-          "RegionScanner.isFilterDone() called when the flow "
-              + "scanner's internal scanner is not a RegionScanner");
-    }
-    return regionScanner.isFilterDone();
-
-  }
-
-  /*
-   * (non-Javadoc)
-   *
-   * @see org.apache.hadoop.hbase.regionserver.RegionScanner#reseek(byte[])
-   */
-  @Override
-  public boolean reseek(byte[] bytes) throws IOException {
-    if (regionScanner == null) {
-      throw new IllegalStateException(
-          "RegionScanner.reseek() called when the flow "
-              + "scanner's internal scanner is not a RegionScanner");
-    }
-    return regionScanner.reseek(bytes);
-  }
-
-  @Override
-  public int getBatch() {
-    return batchSize;
-  }
-}
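
The read-time summation in processSummation() above reduces to a small,
self-contained pattern: add up the decoded cell values and stamp the result
with the most recent timestamp seen. A minimal sketch in plain Java, using a
simplified stand-in cell type rather than the HBase Cell API:

    import java.util.Arrays;
    import java.util.List;

    public final class SummationSketch {
      // Simplified stand-in for an HBase cell: one numeric value plus its
      // write timestamp.
      static final class SimpleCell {
        final long value;
        final long timestamp;
        SimpleCell(long value, long timestamp) {
          this.value = value;
          this.timestamp = timestamp;
        }
      }

      // Mirrors processSummation(): sum every value, and carry the timestamp
      // of the most recently written cell so the sum reads "as of" the last
      // metric update.
      static SimpleCell sum(List<SimpleCell> cells) {
        long total = 0L;
        long mostRecent = 0L;
        for (SimpleCell c : cells) {
          total += c.value;
          if (c.timestamp > mostRecent) {
            mostRecent = c.timestamp;
          }
        }
        return new SimpleCell(total, mostRecent);
      }

      public static void main(String[] args) {
        SimpleCell result = sum(Arrays.asList(new SimpleCell(10, 100L),
            new SimpleCell(5, 300L), new SimpleCell(7, 200L)));
        System.out.println(result.value + " @ " + result.timestamp); // 22 @ 300
      }
    }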

http://git-wip-us.apache.org/repos/asf/hadoop/blob/9af30d46/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/flow/FlowScannerOperation.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/flow/FlowScannerOperation.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/flow/FlowScannerOperation.java
deleted file mode 100644
index 73c666f..0000000
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/flow/FlowScannerOperation.java
+++ /dev/null
@@ -1,46 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.yarn.server.timelineservice.storage.flow;
-
-
-/**
- * Identifies the scanner operation on the {@link FlowRunTable}.
- */
-public enum FlowScannerOperation {
-
-  /**
-   * If the scanner is opened for reading
-   * during preGet or preScan.
-   */
-  READ,
-
-  /**
-   * If the scanner is opened during preFlush.
-   */
-  FLUSH,
-
-  /**
-   * If the scanner is opened during minor Compaction.
-   */
-  MINOR_COMPACTION,
-
-  /**
-   * If the scanner is opened during major Compaction.
-   */
-  MAJOR_COMPACTION
-}
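
FlowScanner above branches its aggregation behavior on this enum: READ
collapses SUM cells into a single summed cell, MAJOR_COMPACTION additionally
folds SUM_FINAL cells older than the retention threshold, and
FLUSH/MINOR_COMPACTION pass cells through unchanged. A rough sketch of that
dispatch, detached from the coprocessor machinery (the strings are
illustrative descriptions, not real return values):

    enum Op { READ, FLUSH, MINOR_COMPACTION, MAJOR_COMPACTION }

    final class DispatchSketch {
      // Summarizes the emitCells() dispatch in FlowScanner, per operation.
      static String behaviorFor(Op op) {
        switch (op) {
        case READ:
          return "collapse SUM cells into one summed cell";
        case MAJOR_COMPACTION:
          return "also fold SUM_FINAL cells past the retention threshold";
        case FLUSH:
        case MINOR_COMPACTION:
        default:
          return "pass cells through unchanged";
        }
      }

      public static void main(String[] args) {
        for (Op op : Op.values()) {
          System.out.println(op + ": " + behaviorFor(op));
        }
      }
    }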

http://git-wip-us.apache.org/repos/asf/hadoop/blob/9af30d46/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/flow/package-info.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/flow/package-info.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/flow/package-info.java
deleted file mode 100644
index 04963f3..0000000
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/flow/package-info.java
+++ /dev/null
@@ -1,29 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-/**
- * Package org.apache.hadoop.yarn.server.timelineservice.storage.flow
- * contains classes related to the implementation of flow-related tables,
- * viz. the flow run table and the flow activity table.
- */
-@InterfaceAudience.Private
-@InterfaceStability.Unstable
-package org.apache.hadoop.yarn.server.timelineservice.storage.flow;
-
-import org.apache.hadoop.classification.InterfaceAudience;
-import org.apache.hadoop.classification.InterfaceStability;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/9af30d46/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/package-info.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/package-info.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/package-info.java
deleted file mode 100644
index e78db2a..0000000
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/package-info.java
+++ /dev/null
@@ -1,28 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-/**
- * Package org.apache.hadoop.yarn.server.timelineservice.storage contains
- * classes which define and implement reading and writing to backend storage.
- */
-@InterfaceAudience.Private
-@InterfaceStability.Unstable
-package org.apache.hadoop.yarn.server.timelineservice.storage;
-
-import org.apache.hadoop.classification.InterfaceAudience;
-import org.apache.hadoop.classification.InterfaceStability;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/9af30d46/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/reader/AbstractTimelineStorageReader.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/reader/AbstractTimelineStorageReader.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/reader/AbstractTimelineStorageReader.java
deleted file mode 100644
index 5bacf66..0000000
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/reader/AbstractTimelineStorageReader.java
+++ /dev/null
@@ -1,158 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.yarn.server.timelineservice.storage.reader;
-
-import java.io.IOException;
-
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hbase.client.Connection;
-import org.apache.hadoop.hbase.client.Get;
-import org.apache.hadoop.hbase.client.Result;
-import org.apache.hadoop.yarn.server.timelineservice.reader.TimelineReaderContext;
-import org.apache.hadoop.yarn.server.timelineservice.storage.apptoflow.AppToFlowColumnPrefix;
-import org.apache.hadoop.yarn.server.timelineservice.storage.apptoflow.AppToFlowRowKey;
-import org.apache.hadoop.yarn.server.timelineservice.storage.apptoflow.AppToFlowTable;
-import org.apache.hadoop.yarn.webapp.NotFoundException;
-
-/**
- * The base class for reading timeline data from the HBase storage. This class
- * provides basic support to validate and augment reader context.
- */
-public abstract class AbstractTimelineStorageReader {
-
-  private final TimelineReaderContext context;
-  /**
-   * Used to look up the flow context.
-   */
-  private final AppToFlowTable appToFlowTable = new AppToFlowTable();
-
-  public AbstractTimelineStorageReader(TimelineReaderContext ctxt) {
-    context = ctxt;
-  }
-
-  protected TimelineReaderContext getContext() {
-    return context;
-  }
-
-  /**
-   * Looks up flow context from AppToFlow table.
-   *
-   * @param appToFlowRowKey to identify Cluster and App Ids.
-   * @param clusterId the cluster id.
-   * @param hbaseConf HBase configuration.
-   * @param conn HBase Connection.
-   * @return flow context information.
-   * @throws IOException if any problem occurs while fetching flow information.
-   */
-  protected FlowContext lookupFlowContext(AppToFlowRowKey appToFlowRowKey,
-      String clusterId, Configuration hbaseConf, Connection conn)
-      throws IOException {
-    byte[] rowKey = appToFlowRowKey.getRowKey();
-    Get get = new Get(rowKey);
-    Result result = appToFlowTable.getResult(hbaseConf, conn, get);
-    if (result != null && !result.isEmpty()) {
-      Object flowName =
-          AppToFlowColumnPrefix.FLOW_NAME.readResult(result, clusterId);
-      Object flowRunId =
-          AppToFlowColumnPrefix.FLOW_RUN_ID.readResult(result, clusterId);
-      Object userId =
-          AppToFlowColumnPrefix.USER_ID.readResult(result, clusterId);
-      if (flowName == null || userId == null || flowRunId == null) {
-        throw new NotFoundException(
-            "Unable to find the context flow name, and flow run id, "
-            + "and user id for clusterId=" + clusterId
-            + ", appId=" + appToFlowRowKey.getAppId());
-      }
-      return new FlowContext((String)userId, (String)flowName,
-          ((Number)flowRunId).longValue());
-    } else {
-      throw new NotFoundException(
-          "Unable to find the context flow name, and flow run id, "
-          + "and user id for clusterId=" + clusterId
-          + ", appId=" + appToFlowRowKey.getAppId());
-    }
-  }
-
-  /**
-    * Sets certain parameters to defaults if the values are not provided.
-    *
-    * @param hbaseConf HBase Configuration.
-    * @param conn HBase Connection.
-    * @throws IOException if any exception is encountered while setting params.
-    */
-  protected void augmentParams(Configuration hbaseConf, Connection conn)
-      throws IOException {
-    defaultAugmentParams(hbaseConf, conn);
-  }
-
-  /**
-   * Default behavior for all timeline readers to augment parameters.
-   *
-   * @param hbaseConf HBase Configuration.
-   * @param conn HBase Connection.
-   * @throws IOException if any exception is encountered while setting params.
-   */
-  protected final void defaultAugmentParams(Configuration hbaseConf,
-      Connection conn) throws IOException {
-    // In reality, either all three are null or none of them is.
-    if (context.getFlowName() == null || context.getFlowRunId() == null
-        || context.getUserId() == null) {
-      // Get flow context information from AppToFlow table.
-      AppToFlowRowKey appToFlowRowKey =
-          new AppToFlowRowKey(context.getAppId());
-      FlowContext flowContext =
-          lookupFlowContext(appToFlowRowKey, context.getClusterId(), hbaseConf,
-          conn);
-      context.setFlowName(flowContext.flowName);
-      context.setFlowRunId(flowContext.flowRunId);
-      context.setUserId(flowContext.userId);
-    }
-  }
-
-  /**
-   * Validates the required parameters to read the entities.
-   */
-  protected abstract void validateParams();
-
-  /**
-   * Encapsulates flow context information.
-   */
-  protected static class FlowContext {
-    private final String userId;
-    private final String flowName;
-    private final Long flowRunId;
-
-    public FlowContext(String user, String flowName, Long flowRunId) {
-      this.userId = user;
-      this.flowName = flowName;
-      this.flowRunId = flowRunId;
-    }
-
-    protected String getUserId() {
-      return userId;
-    }
-
-    protected String getFlowName() {
-      return flowName;
-    }
-
-    protected Long getFlowRunId() {
-      return flowRunId;
-    }
-  }
-}
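
The defaultAugmentParams() flow above is a lookup-then-fill pattern: when the
reader context is missing the flow name, run id and user, resolve them from
the app-to-flow mapping before reading. A minimal sketch with an in-memory
map standing in for the AppToFlow table (class and field names here are
illustrative, not the real API):

    import java.util.HashMap;
    import java.util.Map;

    final class AugmentSketch {
      static final class Context {
        String appId;
        String userId;
        String flowName;
        Long flowRunId;
      }

      // Stand-in for the AppToFlow table: appId -> {user, flowName, flowRunId}.
      static final Map<String, String[]> APP_TO_FLOW = new HashMap<>();

      static void augment(Context ctx) {
        // Either all three flow fields are set or none of them is.
        if (ctx.flowName == null || ctx.flowRunId == null
            || ctx.userId == null) {
          String[] flow = APP_TO_FLOW.get(ctx.appId);
          if (flow == null) {
            throw new IllegalStateException("no flow context for " + ctx.appId);
          }
          ctx.userId = flow[0];
          ctx.flowName = flow[1];
          ctx.flowRunId = Long.valueOf(flow[2]);
        }
      }

      public static void main(String[] args) {
        APP_TO_FLOW.put("app_1", new String[] {"alice", "daily-etl", "42"});
        Context ctx = new Context();
        ctx.appId = "app_1";
        augment(ctx);
        System.out.println(ctx.userId + "/" + ctx.flowName + "/" + ctx.flowRunId);
      }
    }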


---------------------------------------------------------------------
To unsubscribe, e-mail: common-commits-unsubscribe@hadoop.apache.org
For additional commands, e-mail: common-commits-help@hadoop.apache.org


[03/50] [abbrv] hadoop git commit: YARN-7919. Refactor timelineservice-hbase module into submodules. Contributed by Haibo Chen.

Posted by ha...@apache.org.
http://git-wip-us.apache.org/repos/asf/hadoop/blob/9af30d46/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/reader/SubApplicationEntityReader.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/reader/SubApplicationEntityReader.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/reader/SubApplicationEntityReader.java
deleted file mode 100644
index e780dcc..0000000
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/reader/SubApplicationEntityReader.java
+++ /dev/null
@@ -1,488 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.yarn.server.timelineservice.storage.reader;
-
-import java.io.IOException;
-import java.util.EnumSet;
-import java.util.Set;
-
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hbase.client.Connection;
-import org.apache.hadoop.hbase.client.Query;
-import org.apache.hadoop.hbase.client.Result;
-import org.apache.hadoop.hbase.client.ResultScanner;
-import org.apache.hadoop.hbase.client.Scan;
-import org.apache.hadoop.hbase.filter.BinaryComparator;
-import org.apache.hadoop.hbase.filter.CompareFilter.CompareOp;
-import org.apache.hadoop.hbase.filter.FamilyFilter;
-import org.apache.hadoop.hbase.filter.FilterList;
-import org.apache.hadoop.hbase.filter.FilterList.Operator;
-import org.apache.hadoop.hbase.filter.PageFilter;
-import org.apache.hadoop.hbase.filter.QualifierFilter;
-import org.apache.hadoop.yarn.api.records.timelineservice.TimelineEntity;
-import org.apache.hadoop.yarn.server.timelineservice.reader.TimelineDataToRetrieve;
-import org.apache.hadoop.yarn.server.timelineservice.reader.TimelineEntityFilters;
-import org.apache.hadoop.yarn.server.timelineservice.reader.TimelineReaderContext;
-import org.apache.hadoop.yarn.server.timelineservice.reader.TimelineReaderUtils;
-import org.apache.hadoop.yarn.server.timelineservice.reader.filter.TimelineFilterList;
-import org.apache.hadoop.yarn.server.timelineservice.reader.filter.TimelineFilterUtils;
-import org.apache.hadoop.yarn.server.timelineservice.storage.TimelineReader.Field;
-import org.apache.hadoop.yarn.server.timelineservice.storage.common.BaseTable;
-import org.apache.hadoop.yarn.server.timelineservice.storage.common.HBaseTimelineStorageUtils;
-import org.apache.hadoop.yarn.server.timelineservice.storage.common.RowKeyPrefix;
-import org.apache.hadoop.yarn.server.timelineservice.storage.common.TimelineStorageUtils;
-import org.apache.hadoop.yarn.server.timelineservice.storage.subapplication.SubApplicationColumn;
-import org.apache.hadoop.yarn.server.timelineservice.storage.subapplication.SubApplicationColumnFamily;
-import org.apache.hadoop.yarn.server.timelineservice.storage.subapplication.SubApplicationColumnPrefix;
-import org.apache.hadoop.yarn.server.timelineservice.storage.subapplication.SubApplicationRowKey;
-import org.apache.hadoop.yarn.server.timelineservice.storage.subapplication.SubApplicationRowKeyPrefix;
-import org.apache.hadoop.yarn.server.timelineservice.storage.subapplication.SubApplicationTable;
-import org.apache.hadoop.yarn.webapp.BadRequestException;
-
-import com.google.common.base.Preconditions;
-
-class SubApplicationEntityReader extends GenericEntityReader {
-  private static final SubApplicationTable SUB_APPLICATION_TABLE =
-      new SubApplicationTable();
-
-  SubApplicationEntityReader(TimelineReaderContext ctxt,
-      TimelineEntityFilters entityFilters, TimelineDataToRetrieve toRetrieve) {
-    super(ctxt, entityFilters, toRetrieve);
-  }
-
-  SubApplicationEntityReader(TimelineReaderContext ctxt,
-      TimelineDataToRetrieve toRetrieve) {
-    super(ctxt, toRetrieve);
-  }
-
-  /**
-   * Uses the {@link SubApplicationTable}.
-   */
-  protected BaseTable<?> getTable() {
-    return SUB_APPLICATION_TABLE;
-  }
-
-  @Override
-  protected FilterList constructFilterListBasedOnFilters() throws IOException {
-    // Filters here cannot be null for multiple entity reads as they are set in
-    // augmentParams if null.
-    FilterList listBasedOnFilters = new FilterList();
-    TimelineEntityFilters filters = getFilters();
-    // Create filter list based on created time range and add it to
-    // listBasedOnFilters.
-    long createdTimeBegin = filters.getCreatedTimeBegin();
-    long createdTimeEnd = filters.getCreatedTimeEnd();
-    if (createdTimeBegin != 0 || createdTimeEnd != Long.MAX_VALUE) {
-      listBasedOnFilters.addFilter(TimelineFilterUtils
-          .createSingleColValueFiltersByRange(SubApplicationColumn.CREATED_TIME,
-              createdTimeBegin, createdTimeEnd));
-    }
-    // Create filter list based on metric filters and add it to
-    // listBasedOnFilters.
-    TimelineFilterList metricFilters = filters.getMetricFilters();
-    if (metricFilters != null && !metricFilters.getFilterList().isEmpty()) {
-      listBasedOnFilters.addFilter(TimelineFilterUtils.createHBaseFilterList(
-          SubApplicationColumnPrefix.METRIC, metricFilters));
-    }
-    // Create filter list based on config filters and add it to
-    // listBasedOnFilters.
-    TimelineFilterList configFilters = filters.getConfigFilters();
-    if (configFilters != null && !configFilters.getFilterList().isEmpty()) {
-      listBasedOnFilters.addFilter(TimelineFilterUtils.createHBaseFilterList(
-          SubApplicationColumnPrefix.CONFIG, configFilters));
-    }
-    // Create filter list based on info filters and add it to listBasedOnFilters
-    TimelineFilterList infoFilters = filters.getInfoFilters();
-    if (infoFilters != null && !infoFilters.getFilterList().isEmpty()) {
-      listBasedOnFilters.addFilter(TimelineFilterUtils
-          .createHBaseFilterList(SubApplicationColumnPrefix.INFO, infoFilters));
-    }
-    return listBasedOnFilters;
-  }
-
-  /**
-   * Add {@link QualifierFilter} filters to filter list for each column of
-   * the sub application table.
-   *
-   * @param list filter list to which qualifier filters have to be added.
-   */
-  protected void updateFixedColumns(FilterList list) {
-    for (SubApplicationColumn column : SubApplicationColumn.values()) {
-      list.addFilter(new QualifierFilter(CompareOp.EQUAL,
-          new BinaryComparator(column.getColumnQualifierBytes())));
-    }
-  }
-
-  /**
-   * Creates a filter list which indicates that only some of the column
-   * qualifiers in the info column family will be returned in result.
-   *
-   * @return filter list.
-   * @throws IOException if any problem occurs while creating filter list.
-   */
-  private FilterList createFilterListForColsOfInfoFamily() throws IOException {
-    FilterList infoFamilyColsFilter = new FilterList(Operator.MUST_PASS_ONE);
-    // Add filters for each column in the sub application table.
-    updateFixedColumns(infoFamilyColsFilter);
-    EnumSet<Field> fieldsToRetrieve = getDataToRetrieve().getFieldsToRetrieve();
-    // If INFO field has to be retrieved, add a filter for fetching columns
-    // with INFO column prefix.
-    if (hasField(fieldsToRetrieve, Field.INFO)) {
-      infoFamilyColsFilter.addFilter(
-          TimelineFilterUtils.createHBaseQualifierFilter(CompareOp.EQUAL,
-              SubApplicationColumnPrefix.INFO));
-    }
-    TimelineFilterList relatesTo = getFilters().getRelatesTo();
-    if (hasField(fieldsToRetrieve, Field.RELATES_TO)) {
-      // If RELATES_TO field has to be retrieved, add a filter for fetching
-      // columns with RELATES_TO column prefix.
-      infoFamilyColsFilter.addFilter(
-          TimelineFilterUtils.createHBaseQualifierFilter(CompareOp.EQUAL,
-              SubApplicationColumnPrefix.RELATES_TO));
-    } else if (relatesTo != null && !relatesTo.getFilterList().isEmpty()) {
-      // Even if fields to retrieve do not contain RELATES_TO, we still
-      // need to have a filter to fetch some of the column qualifiers if
-      // relatesTo filters are specified. relatesTo filters will then be
-      // matched after fetching rows from HBase.
-      Set<String> relatesToCols =
-          TimelineFilterUtils.fetchColumnsFromFilterList(relatesTo);
-      infoFamilyColsFilter.addFilter(createFiltersFromColumnQualifiers(
-          SubApplicationColumnPrefix.RELATES_TO, relatesToCols));
-    }
-    TimelineFilterList isRelatedTo = getFilters().getIsRelatedTo();
-    if (hasField(fieldsToRetrieve, Field.IS_RELATED_TO)) {
-      // If IS_RELATED_TO field has to be retrieved, add a filter for fetching
-      // columns with IS_RELATED_TO column prefix.
-      infoFamilyColsFilter.addFilter(
-          TimelineFilterUtils.createHBaseQualifierFilter(CompareOp.EQUAL,
-              SubApplicationColumnPrefix.IS_RELATED_TO));
-    } else if (isRelatedTo != null && !isRelatedTo.getFilterList().isEmpty()) {
-      // Even if fields to retrieve do not contain IS_RELATED_TO, we still
-      // need to have a filter to fetch some of the column qualifiers if
-      // isRelatedTo filters are specified. isRelatedTo filters will then be
-      // matched after fetching rows from HBase.
-      Set<String> isRelatedToCols =
-          TimelineFilterUtils.fetchColumnsFromFilterList(isRelatedTo);
-      infoFamilyColsFilter.addFilter(createFiltersFromColumnQualifiers(
-          SubApplicationColumnPrefix.IS_RELATED_TO, isRelatedToCols));
-    }
-    TimelineFilterList eventFilters = getFilters().getEventFilters();
-    if (hasField(fieldsToRetrieve, Field.EVENTS)) {
-      // If EVENTS field has to be retrieved, add a filter for fetching columns
-      // with EVENT column prefix.
-      infoFamilyColsFilter.addFilter(
-          TimelineFilterUtils.createHBaseQualifierFilter(CompareOp.EQUAL,
-              SubApplicationColumnPrefix.EVENT));
-    } else if (eventFilters != null
-        && !eventFilters.getFilterList().isEmpty()) {
-      // Even if fields to retrieve do not contain EVENTS, we still need to
-      // have a filter to fetch some of the column qualifiers on the basis of
-      // event filters specified. Event filters will then be matched after
-      // fetching rows from HBase.
-      Set<String> eventCols =
-          TimelineFilterUtils.fetchColumnsFromFilterList(eventFilters);
-      infoFamilyColsFilter.addFilter(createFiltersFromColumnQualifiers(
-          SubApplicationColumnPrefix.EVENT, eventCols));
-    }
-    return infoFamilyColsFilter;
-  }
-
-  /**
-   * Exclude column prefixes via filters which are not required (based on
-   * fields to retrieve) from the info column family. These filters are added
-   * to a filter list which contains a filter for getting the info column
-   * family.
-   *
-   * @param infoColFamilyList filter list for info column family.
-   */
-  private void excludeFieldsFromInfoColFamily(FilterList infoColFamilyList) {
-    EnumSet<Field> fieldsToRetrieve = getDataToRetrieve().getFieldsToRetrieve();
-    // Events not required.
-    if (!hasField(fieldsToRetrieve, Field.EVENTS)) {
-      infoColFamilyList.addFilter(
-          TimelineFilterUtils.createHBaseQualifierFilter(CompareOp.NOT_EQUAL,
-              SubApplicationColumnPrefix.EVENT));
-    }
-    // info not required.
-    if (!hasField(fieldsToRetrieve, Field.INFO)) {
-      infoColFamilyList.addFilter(
-          TimelineFilterUtils.createHBaseQualifierFilter(CompareOp.NOT_EQUAL,
-              SubApplicationColumnPrefix.INFO));
-    }
-    // is related to not required.
-    if (!hasField(fieldsToRetrieve, Field.IS_RELATED_TO)) {
-      infoColFamilyList.addFilter(
-          TimelineFilterUtils.createHBaseQualifierFilter(CompareOp.NOT_EQUAL,
-              SubApplicationColumnPrefix.IS_RELATED_TO));
-    }
-    // relates to not required.
-    if (!hasField(fieldsToRetrieve, Field.RELATES_TO)) {
-      infoColFamilyList.addFilter(
-          TimelineFilterUtils.createHBaseQualifierFilter(CompareOp.NOT_EQUAL,
-              SubApplicationColumnPrefix.RELATES_TO));
-    }
-  }
-
-  /**
-   * Updates filter list based on fields for confs and metrics to retrieve.
-   *
-   * @param listBasedOnFields filter list based on fields.
-   * @throws IOException if any problem occurs while updating filter list.
-   */
-  private void updateFilterForConfsAndMetricsToRetrieve(
-      FilterList listBasedOnFields) throws IOException {
-    TimelineDataToRetrieve dataToRetrieve = getDataToRetrieve();
-    // Please note that if confsToRetrieve is specified, we would have added
-    // CONFS to fields to retrieve in augmentParams() even if not specified.
-    if (dataToRetrieve.getFieldsToRetrieve().contains(Field.CONFIGS)) {
-      // Create a filter list for configs.
-      listBasedOnFields.addFilter(
-          TimelineFilterUtils.createFilterForConfsOrMetricsToRetrieve(
-              dataToRetrieve.getConfsToRetrieve(),
-              SubApplicationColumnFamily.CONFIGS,
-              SubApplicationColumnPrefix.CONFIG));
-    }
-
-    // Please note that if metricsToRetrieve is specified, we would have added
-    // METRICS to fields to retrieve in augmentParams() even if not specified.
-    if (dataToRetrieve.getFieldsToRetrieve().contains(Field.METRICS)) {
-      // Create a filter list for metrics.
-      listBasedOnFields.addFilter(
-          TimelineFilterUtils.createFilterForConfsOrMetricsToRetrieve(
-              dataToRetrieve.getMetricsToRetrieve(),
-              SubApplicationColumnFamily.METRICS,
-              SubApplicationColumnPrefix.METRIC));
-    }
-  }
-
-  @Override
-  protected FilterList constructFilterListBasedOnFields() throws IOException {
-    if (!needCreateFilterListBasedOnFields()) {
-      // Fetch all the columns. No need of a filter.
-      return null;
-    }
-    FilterList listBasedOnFields = new FilterList(Operator.MUST_PASS_ONE);
-    FilterList infoColFamilyList = new FilterList();
-    // By default fetch everything in INFO column family.
-    FamilyFilter infoColumnFamily = new FamilyFilter(CompareOp.EQUAL,
-        new BinaryComparator(SubApplicationColumnFamily.INFO.getBytes()));
-    infoColFamilyList.addFilter(infoColumnFamily);
-    if (fetchPartialColsFromInfoFamily()) {
-      // We can fetch only some of the columns from info family.
-      infoColFamilyList.addFilter(createFilterListForColsOfInfoFamily());
-    } else {
-      // Exclude column prefixes in info column family which are not required
-      // based on fields to retrieve.
-      excludeFieldsFromInfoColFamily(infoColFamilyList);
-    }
-    listBasedOnFields.addFilter(infoColFamilyList);
-    updateFilterForConfsAndMetricsToRetrieve(listBasedOnFields);
-    return listBasedOnFields;
-  }
-
-  @Override
-  protected void validateParams() {
-    Preconditions.checkNotNull(getContext(), "context shouldn't be null");
-    Preconditions.checkNotNull(getDataToRetrieve(),
-        "data to retrieve shouldn't be null");
-    Preconditions.checkNotNull(getContext().getClusterId(),
-        "clusterId shouldn't be null");
-    Preconditions.checkNotNull(getContext().getDoAsUser(),
-        "DoAsUser shouldn't be null");
-    Preconditions.checkNotNull(getContext().getEntityType(),
-        "entityType shouldn't be null");
-  }
-
-  @Override
-  protected void augmentParams(Configuration hbaseConf, Connection conn)
-      throws IOException {
-    getDataToRetrieve().addFieldsBasedOnConfsAndMetricsToRetrieve();
-    createFiltersIfNull();
-  }
-
-  private void setMetricsTimeRange(Query query) {
-    // Set time range for metric values.
-    HBaseTimelineStorageUtils.setMetricsTimeRange(query,
-        SubApplicationColumnFamily.METRICS.getBytes(),
-        getDataToRetrieve().getMetricsTimeBegin(),
-        getDataToRetrieve().getMetricsTimeEnd());
-  }
-
-  @Override
-  protected ResultScanner getResults(Configuration hbaseConf, Connection conn,
-      FilterList filterList) throws IOException {
-
-    // Scan through part of the table to find the entities belonging to one
-    // app and one type.
-    Scan scan = new Scan();
-    TimelineReaderContext context = getContext();
-    if (context.getDoAsUser() == null) {
-      throw new BadRequestException("Invalid user!");
-    }
-
-    RowKeyPrefix<SubApplicationRowKey> subApplicationRowKeyPrefix = null;
-    // default mode, will always scan from the beginning of the entity type.
-    if (getFilters() == null || getFilters().getFromId() == null) {
-      subApplicationRowKeyPrefix = new SubApplicationRowKeyPrefix(
-          context.getDoAsUser(), context.getClusterId(),
-          context.getEntityType(), null, null, null);
-      scan.setRowPrefixFilter(subApplicationRowKeyPrefix.getRowKeyPrefix());
-    } else { // pagination mode, will scan from given entityIdPrefix!entityId
-
-      SubApplicationRowKey entityRowKey = null;
-      try {
-        entityRowKey = SubApplicationRowKey
-            .parseRowKeyFromString(getFilters().getFromId());
-      } catch (IllegalArgumentException e) {
-        throw new BadRequestException("Invalid filter fromid is provided.");
-      }
-      if (!context.getClusterId().equals(entityRowKey.getClusterId())) {
-        throw new BadRequestException(
-            "fromid doesn't belong to clusterId=" + context.getClusterId());
-      }
-
-      // set start row
-      scan.setStartRow(entityRowKey.getRowKey());
-
-      // get the bytes for stop row
-      subApplicationRowKeyPrefix = new SubApplicationRowKeyPrefix(
-          context.getDoAsUser(), context.getClusterId(),
-          context.getEntityType(), null, null, null);
-
-      // set stop row
-      scan.setStopRow(
-          HBaseTimelineStorageUtils.calculateTheClosestNextRowKeyForPrefix(
-              subApplicationRowKeyPrefix.getRowKeyPrefix()));
-
-      // set page filter to limit. This filter has to be set only in
-      // pagination mode.
-      filterList.addFilter(new PageFilter(getFilters().getLimit()));
-    }
-    setMetricsTimeRange(scan);
-    scan.setMaxVersions(getDataToRetrieve().getMetricsLimit());
-    if (filterList != null && !filterList.getFilters().isEmpty()) {
-      scan.setFilter(filterList);
-    }
-    return getTable().getResultScanner(hbaseConf, conn, scan);
-  }
-
-  @Override
-  protected Result getResult(Configuration hbaseConf, Connection conn,
-      FilterList filterList) throws IOException {
-    throw new UnsupportedOperationException(
-        "we don't support a single entity query");
-  }
-
-  @Override
-  protected TimelineEntity parseEntity(Result result) throws IOException {
-    if (result == null || result.isEmpty()) {
-      return null;
-    }
-    TimelineEntity entity = new TimelineEntity();
-    SubApplicationRowKey parseRowKey =
-        SubApplicationRowKey.parseRowKey(result.getRow());
-    entity.setType(parseRowKey.getEntityType());
-    entity.setId(parseRowKey.getEntityId());
-    entity.setIdPrefix(parseRowKey.getEntityIdPrefix().longValue());
-
-    TimelineEntityFilters filters = getFilters();
-    // fetch created time
-    Long createdTime =
-        (Long) SubApplicationColumn.CREATED_TIME.readResult(result);
-    entity.setCreatedTime(createdTime);
-
-    EnumSet<Field> fieldsToRetrieve = getDataToRetrieve().getFieldsToRetrieve();
-    // fetch is related to entities and match isRelatedTo filter. If isRelatedTo
-    // filters do not match, entity would be dropped. We have to match filters
-    // locally as relevant HBase filters to filter out rows on the basis of
-    // isRelatedTo are not set in HBase scan.
-    boolean checkIsRelatedTo =
-        filters.getIsRelatedTo() != null
-            && filters.getIsRelatedTo().getFilterList().size() > 0;
-    if (hasField(fieldsToRetrieve, Field.IS_RELATED_TO) || checkIsRelatedTo) {
-      readRelationship(entity, result, SubApplicationColumnPrefix.IS_RELATED_TO,
-          true);
-      if (checkIsRelatedTo && !TimelineStorageUtils.matchIsRelatedTo(entity,
-          filters.getIsRelatedTo())) {
-        return null;
-      }
-      if (!hasField(fieldsToRetrieve, Field.IS_RELATED_TO)) {
-        entity.getIsRelatedToEntities().clear();
-      }
-    }
-
-    // fetch relates to entities and match relatesTo filter. If relatesTo
-    // filters do not match, entity would be dropped. We have to match filters
-    // locally as relevant HBase filters to filter out rows on the basis of
-    // relatesTo are not set in HBase scan.
-    boolean checkRelatesTo =
-        !isSingleEntityRead() && filters.getRelatesTo() != null
-            && filters.getRelatesTo().getFilterList().size() > 0;
-    if (hasField(fieldsToRetrieve, Field.RELATES_TO) || checkRelatesTo) {
-      readRelationship(entity, result, SubApplicationColumnPrefix.RELATES_TO,
-          false);
-      if (checkRelatesTo && !TimelineStorageUtils.matchRelatesTo(entity,
-          filters.getRelatesTo())) {
-        return null;
-      }
-      if (!hasField(fieldsToRetrieve, Field.RELATES_TO)) {
-        entity.getRelatesToEntities().clear();
-      }
-    }
-
-    // fetch info if fieldsToRetrieve contains INFO or ALL.
-    if (hasField(fieldsToRetrieve, Field.INFO)) {
-      readKeyValuePairs(entity, result, SubApplicationColumnPrefix.INFO, false);
-    }
-
-    // fetch configs if fieldsToRetrieve contains CONFIGS or ALL.
-    if (hasField(fieldsToRetrieve, Field.CONFIGS)) {
-      readKeyValuePairs(entity, result, SubApplicationColumnPrefix.CONFIG,
-          true);
-    }
-
-    // fetch events and match event filters if they exist. If event filters do
-    // not match, entity would be dropped. We have to match filters locally
-    // as relevant HBase filters to filter out rows on the basis of events
-    // are not set in HBase scan.
-    boolean checkEvents =
-        !isSingleEntityRead() && filters.getEventFilters() != null
-            && filters.getEventFilters().getFilterList().size() > 0;
-    if (hasField(fieldsToRetrieve, Field.EVENTS) || checkEvents) {
-      readEvents(entity, result, SubApplicationColumnPrefix.EVENT);
-      if (checkEvents && !TimelineStorageUtils.matchEventFilters(entity,
-          filters.getEventFilters())) {
-        return null;
-      }
-      if (!hasField(fieldsToRetrieve, Field.EVENTS)) {
-        entity.getEvents().clear();
-      }
-    }
-
-    // fetch metrics if fieldsToRetrieve contains METRICS or ALL.
-    if (hasField(fieldsToRetrieve, Field.METRICS)) {
-      readMetrics(entity, result, SubApplicationColumnPrefix.METRIC);
-    }
-
-    entity.getInfo().put(TimelineReaderUtils.FROMID_KEY,
-        parseRowKey.getRowKeyAsString());
-    return entity;
-  }
-
-}
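
The pagination path in getResults() above bounds the scan with a stop row
derived from the row-key prefix: the closest key strictly greater than
anything sharing that prefix. One plausible way to compute such a bound (the
real HBaseTimelineStorageUtils helper may differ in details) is to increment
the last byte that is not 0xff and truncate there:

    import java.util.Arrays;

    final class StopRowSketch {
      // Returns the smallest row key that sorts after every key with the
      // given prefix; an empty array means "scan to the end of the table".
      static byte[] closestNextRowKeyForPrefix(byte[] prefix) {
        byte[] next = Arrays.copyOf(prefix, prefix.length);
        for (int i = next.length - 1; i >= 0; i--) {
          if (next[i] != (byte) 0xff) {
            next[i]++;
            return Arrays.copyOf(next, i + 1);
          }
        }
        return new byte[0]; // prefix was all 0xff bytes
      }

      public static void main(String[] args) {
        byte[] stop = closestNextRowKeyForPrefix(
            new byte[] {'u', 's', 'e', 'r', '!'});
        // last byte bumped: '!' (0x21) becomes '"' (0x22)
        System.out.println(Arrays.toString(stop));
      }
    }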

http://git-wip-us.apache.org/repos/asf/hadoop/blob/9af30d46/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/reader/TimelineEntityReader.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/reader/TimelineEntityReader.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/reader/TimelineEntityReader.java
deleted file mode 100644
index 07e8423..0000000
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/reader/TimelineEntityReader.java
+++ /dev/null
@@ -1,459 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.yarn.server.timelineservice.storage.reader;
-
-import java.io.IOException;
-import java.util.EnumSet;
-import java.util.HashMap;
-import java.util.HashSet;
-import java.util.LinkedHashSet;
-import java.util.Map;
-import java.util.NavigableMap;
-import java.util.Set;
-
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hbase.client.Connection;
-import org.apache.hadoop.hbase.client.Result;
-import org.apache.hadoop.hbase.client.ResultScanner;
-import org.apache.hadoop.hbase.filter.BinaryPrefixComparator;
-import org.apache.hadoop.hbase.filter.CompareFilter.CompareOp;
-import org.apache.hadoop.hbase.filter.FilterList;
-import org.apache.hadoop.hbase.filter.FilterList.Operator;
-import org.apache.hadoop.hbase.filter.QualifierFilter;
-import org.apache.hadoop.yarn.api.records.timelineservice.TimelineEntity;
-import org.apache.hadoop.yarn.api.records.timelineservice.TimelineEvent;
-import org.apache.hadoop.yarn.api.records.timelineservice.TimelineMetric;
-import org.apache.hadoop.yarn.server.timelineservice.reader.TimelineDataToRetrieve;
-import org.apache.hadoop.yarn.server.timelineservice.reader.TimelineEntityFilters;
-import org.apache.hadoop.yarn.server.timelineservice.reader.TimelineReaderContext;
-import org.apache.hadoop.yarn.server.timelineservice.storage.TimelineReader.Field;
-import org.apache.hadoop.yarn.server.timelineservice.storage.application.ApplicationColumnPrefix;
-import org.apache.hadoop.yarn.server.timelineservice.storage.common.BaseTable;
-import org.apache.hadoop.yarn.server.timelineservice.storage.common.ColumnPrefix;
-import org.apache.hadoop.yarn.server.timelineservice.storage.common.EventColumnName;
-import org.apache.hadoop.yarn.server.timelineservice.storage.common.EventColumnNameConverter;
-import org.apache.hadoop.yarn.server.timelineservice.storage.common.KeyConverter;
-import org.apache.hadoop.yarn.server.timelineservice.storage.common.Separator;
-import org.apache.hadoop.yarn.server.timelineservice.storage.common.StringKeyConverter;
-import org.apache.hadoop.yarn.server.timelineservice.storage.entity.EntityColumnPrefix;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-/**
- * The base class for reading and deserializing timeline entities from the
- * HBase storage. Different types can be defined for different types of the
- * entities that are being requested.
- */
-public abstract class TimelineEntityReader extends
-    AbstractTimelineStorageReader {
-  private static final Logger LOG =
-      LoggerFactory.getLogger(TimelineEntityReader.class);
-
-  private final boolean singleEntityRead;
-  private TimelineDataToRetrieve dataToRetrieve;
-  // used only for multiple entity read mode
-  private TimelineEntityFilters filters;
-
-  /**
-   * Main table the entity reader uses.
-   */
-  private BaseTable<?> table;
-
-  /**
-   * Used to convert strings key components to and from storage format.
-   */
-  private final KeyConverter<String> stringKeyConverter =
-      new StringKeyConverter();
-
-  /**
-   * Instantiates a reader for multiple-entity reads.
-   *
-   * @param ctxt Reader context which defines the scope in which the query
-   *     has to be made.
-   * @param entityFilters Filters which limit the entities returned.
-   * @param toRetrieve Data to retrieve for each entity.
-   */
-  protected TimelineEntityReader(TimelineReaderContext ctxt,
-      TimelineEntityFilters entityFilters, TimelineDataToRetrieve toRetrieve) {
-    super(ctxt);
-    this.singleEntityRead = false;
-    this.dataToRetrieve = toRetrieve;
-    this.filters = entityFilters;
-
-    this.setTable(getTable());
-  }
-
-  /**
-   * Instantiates a reader for single-entity reads.
-   *
-   * @param ctxt Reader context which defines the scope in which the query
-   *     has to be made.
-   * @param toRetrieve Data to retrieve for each entity.
-   */
-  protected TimelineEntityReader(TimelineReaderContext ctxt,
-      TimelineDataToRetrieve toRetrieve) {
-    super(ctxt);
-    this.singleEntityRead = true;
-    this.dataToRetrieve = toRetrieve;
-
-    this.setTable(getTable());
-  }
-
-  /**
-   * Creates a {@link FilterList} based on fields, confs and metrics to
-   * retrieve. This filter list will be set in Scan/Get objects to trim down
-   * results fetched from HBase back-end storage. This is called only for
-   * multiple entity reads.
-   *
-   * @return a {@link FilterList} object.
-   * @throws IOException if any problem occurs while creating filter list.
-   */
-  protected abstract FilterList constructFilterListBasedOnFields()
-      throws IOException;
-
-  /**
-   * Creates a {@link FilterList} based on info, config and metric filters. This
-   * filter list will be set in HBase Get to trim down results fetched from
-   * HBase back-end storage.
-   *
-   * @return a {@link FilterList} object.
-   * @throws IOException if any problem occurs while creating filter list.
-   */
-  protected abstract FilterList constructFilterListBasedOnFilters()
-      throws IOException;
-
-  /**
-   * Combines filter lists created based on fields and based on filters.
-   *
-   * @return a {@link FilterList} object if it can be constructed. Returns null,
-   * if filter list cannot be created either on the basis of filters or on the
-   * basis of fields.
-   * @throws IOException if any problem occurs while creating filter list.
-   */
-  private FilterList createFilterList() throws IOException {
-    FilterList listBasedOnFilters = constructFilterListBasedOnFilters();
-    boolean hasListBasedOnFilters = listBasedOnFilters != null &&
-        !listBasedOnFilters.getFilters().isEmpty();
-    FilterList listBasedOnFields = constructFilterListBasedOnFields();
-    boolean hasListBasedOnFields = listBasedOnFields != null &&
-        !listBasedOnFields.getFilters().isEmpty();
-    // If filter lists based on both filters and fields can be created,
-    // combine them in a new filter list and return it.
-    // If either one of them has been created, return that filter list.
-    // Return null, if none of the filter lists can be created. This indicates
-    // that no filter list needs to be added to HBase Scan as filters are not
-    // specified for the query or only the default view of entity needs to be
-    // returned.
-    if (hasListBasedOnFilters && hasListBasedOnFields) {
-      FilterList list = new FilterList();
-      list.addFilter(listBasedOnFilters);
-      list.addFilter(listBasedOnFields);
-      return list;
-    } else if (hasListBasedOnFilters) {
-      return listBasedOnFilters;
-    } else if (hasListBasedOnFields) {
-      return listBasedOnFields;
-    }
-    return null;
-  }
-
-  protected TimelineDataToRetrieve getDataToRetrieve() {
-    return dataToRetrieve;
-  }
-
-  protected TimelineEntityFilters getFilters() {
-    return filters;
-  }
-
-  /**
-   * Create a {@link TimelineEntityFilters} object with default values for
-   * filters.
-   */
-  protected void createFiltersIfNull() {
-    if (filters == null) {
-      filters = new TimelineEntityFilters.Builder().build();
-    }
-  }
-
-  /**
-   * Reads and deserializes a single timeline entity from the HBase storage.
-   *
-   * @param hbaseConf HBase Configuration.
-   * @param conn HBase Connection.
-   * @return A <cite>TimelineEntity</cite> object.
-   * @throws IOException if there is any exception encountered while reading
-   *     entity.
-   */
-  public TimelineEntity readEntity(Configuration hbaseConf, Connection conn)
-      throws IOException {
-    validateParams();
-    augmentParams(hbaseConf, conn);
-
-    FilterList filterList = constructFilterListBasedOnFields();
-    if (LOG.isDebugEnabled() && filterList != null) {
-      LOG.debug("FilterList created for get is - " + filterList);
-    }
-    Result result = getResult(hbaseConf, conn, filterList);
-    if (result == null || result.isEmpty()) {
-      // Could not find a matching row.
-      LOG.info("Cannot find matching entity of type " +
-          getContext().getEntityType());
-      return null;
-    }
-    return parseEntity(result);
-  }
-
-  /**
-   * Reads and deserializes a set of timeline entities from the HBase storage.
-   * It goes through the results available and returns at most the number of
-   * entries specified by the limit, in the entities' natural sort order.
-   *
-   * @param hbaseConf HBase Configuration.
-   * @param conn HBase Connection.
-   * @return a set of <cite>TimelineEntity</cite> objects.
-   * @throws IOException if any exception is encountered while reading entities.
-   */
-  public Set<TimelineEntity> readEntities(Configuration hbaseConf,
-      Connection conn) throws IOException {
-    validateParams();
-    augmentParams(hbaseConf, conn);
-
-    Set<TimelineEntity> entities = new LinkedHashSet<>();
-    FilterList filterList = createFilterList();
-    if (LOG.isDebugEnabled() && filterList != null) {
-      LOG.debug("FilterList created for scan is - " + filterList);
-    }
-    ResultScanner results = getResults(hbaseConf, conn, filterList);
-    try {
-      for (Result result : results) {
-        TimelineEntity entity = parseEntity(result);
-        if (entity == null) {
-          continue;
-        }
-        entities.add(entity);
-        if (entities.size() == filters.getLimit()) {
-          break;
-        }
-      }
-      return entities;
-    } finally {
-      results.close();
-    }
-  }
-
-  /**
-   * Returns the main table to be used by the entity reader.
-   *
-   * @return A reference to the table.
-   */
-  protected BaseTable<?> getTable() {
-    return table;
-  }
-
-  /**
-   * Fetches a {@link Result} instance for a single-entity read.
-   *
-   * @param hbaseConf HBase Configuration.
-   * @param conn HBase Connection.
-   * @param filterList filter list which will be applied to HBase Get.
-   * @return the {@link Result} instance or null if no such record is found.
-   * @throws IOException if any exception is encountered while getting result.
-   */
-  protected abstract Result getResult(Configuration hbaseConf, Connection conn,
-      FilterList filterList) throws IOException;
-
-  /**
-   * Fetches a {@link ResultScanner} for a multi-entity read.
-   *
-   * @param hbaseConf HBase Configuration.
-   * @param conn HBase Connection.
-   * @param filterList filter list which will be applied to HBase Scan.
-   * @return the {@link ResultScanner} instance.
-   * @throws IOException if any exception is encountered while getting results.
-   */
-  protected abstract ResultScanner getResults(Configuration hbaseConf,
-      Connection conn, FilterList filterList) throws IOException;
-
-  /**
-   * Parses the result retrieved from HBase backend and convert it into a
-   * {@link TimelineEntity} object.
-   *
-   * @param result Single row result of a Get/Scan.
-   * @return the <cite>TimelineEntity</cite> instance or null if the entity is
-   *     filtered.
-   * @throws IOException if any exception is encountered while parsing entity.
-   */
-  protected abstract TimelineEntity parseEntity(Result result)
-      throws IOException;
-
-  /**
-   * Helper method for reading and deserializing {@link TimelineMetric} objects
-   * using the specified column prefix. The timeline metrics then are added to
-   * the given timeline entity.
-   *
-   * @param entity {@link TimelineEntity} object.
-   * @param result {@link Result} object retrieved from backend.
-   * @param columnPrefix Metric column prefix
-   * @throws IOException if any exception is encountered while reading metrics.
-   */
-  protected void readMetrics(TimelineEntity entity, Result result,
-      ColumnPrefix<?> columnPrefix) throws IOException {
-    NavigableMap<String, NavigableMap<Long, Number>> metricsResult =
-        columnPrefix.readResultsWithTimestamps(
-            result, stringKeyConverter);
-    for (Map.Entry<String, NavigableMap<Long, Number>> metricResult:
-        metricsResult.entrySet()) {
-      TimelineMetric metric = new TimelineMetric();
-      metric.setId(metricResult.getKey());
-      // Simply assume that if the value set contains more than one element,
-      // the metric is a TIME_SERIES metric; otherwise it is a SINGLE_VALUE
-      // metric.
-      TimelineMetric.Type metricType = metricResult.getValue().size() > 1 ?
-          TimelineMetric.Type.TIME_SERIES : TimelineMetric.Type.SINGLE_VALUE;
-      metric.setType(metricType);
-      metric.addValues(metricResult.getValue());
-      entity.addMetric(metric);
-    }
-  }
-
-  /**
-   * Checks whether the reader has been created to fetch single entity or
-   * multiple entities.
-   *
-   * @return true, if query is for single entity, false otherwise.
-   */
-  public boolean isSingleEntityRead() {
-    return singleEntityRead;
-  }
-
-  protected void setTable(BaseTable<?> baseTable) {
-    this.table = baseTable;
-  }
-
-  /**
-   * Check if we have a certain field amongst the fields to retrieve. This
-   * method also checks against {@link Field#ALL}, since that implies every
-   * field, including the required one, is to be retrieved.
-   *
-   * @param fieldsToRetrieve fields to be retrieved.
-   * @param requiredField field to be checked for in fieldsToRetrieve.
-   * @return true if it has the required field, false otherwise.
-   */
-  protected boolean hasField(EnumSet<Field> fieldsToRetrieve,
-      Field requiredField) {
-    return fieldsToRetrieve.contains(Field.ALL) ||
-        fieldsToRetrieve.contains(requiredField);
-  }
-
-  /**
-   * Create a filter list of qualifier filters based on passed set of columns.
-   *
-   * @param <T> Describes the type of column prefix.
-   * @param colPrefix Column Prefix.
-   * @param columns set of column qualifiers.
-   * @return filter list.
-   */
-  protected <T> FilterList createFiltersFromColumnQualifiers(
-      ColumnPrefix<T> colPrefix, Set<String> columns) {
-    FilterList list = new FilterList(Operator.MUST_PASS_ONE);
-    for (String column : columns) {
-      // For columns which have compound column qualifiers (e.g. events), we need
-      // to include the required separator.
-      byte[] compoundColQual = createColQualifierPrefix(colPrefix, column);
-      list.addFilter(new QualifierFilter(CompareOp.EQUAL,
-          new BinaryPrefixComparator(colPrefix
-              .getColumnPrefixBytes(compoundColQual))));
-    }
-    return list;
-  }
-
-  protected <T> byte[] createColQualifierPrefix(ColumnPrefix<T> colPrefix,
-      String column) {
-    if (colPrefix == ApplicationColumnPrefix.EVENT
-        || colPrefix == EntityColumnPrefix.EVENT) {
-      return new EventColumnName(column, null, null).getColumnQualifier();
-    } else {
-      return stringKeyConverter.encode(column);
-    }
-  }
-
-  /**
-   * Helper method for reading relationship.
-   *
-   * @param <T> Describes the type of column prefix.
-   * @param entity entity to fill.
-   * @param result result from HBase.
-   * @param prefix column prefix.
-   * @param isRelatedTo if true, means relationship is to be added to
-   *          isRelatedTo, otherwise its added to relatesTo.
-   * @throws IOException if any problem is encountered while reading result.
-   */
-  protected <T> void readRelationship(TimelineEntity entity, Result result,
-      ColumnPrefix<T> prefix, boolean isRelatedTo) throws IOException {
-    // isRelatedTo and relatesTo are of type Map<String, Set<String>>
-    Map<String, Object> columns =
-        prefix.readResults(result, stringKeyConverter);
-    for (Map.Entry<String, Object> column : columns.entrySet()) {
-      for (String id : Separator.VALUES.splitEncoded(column.getValue()
-          .toString())) {
-        if (isRelatedTo) {
-          entity.addIsRelatedToEntity(column.getKey(), id);
-        } else {
-          entity.addRelatesToEntity(column.getKey(), id);
-        }
-      }
-    }
-  }
-
-  /**
-   * Read events from the entity table or the application table. The column name
-   * is of the form "eventId=timestamp=infoKey" where "infoKey" may be omitted
-   * if there is no info associated with the event.
-   *
-   * @param <T> Describes the type of column prefix.
-   * @param entity entity to fill.
-   * @param result HBase Result.
-   * @param prefix column prefix.
-   * @throws IOException if any problem is encountered while reading result.
-   */
-  protected static <T> void readEvents(TimelineEntity entity, Result result,
-      ColumnPrefix<T> prefix) throws IOException {
-    Map<String, TimelineEvent> eventsMap = new HashMap<>();
-    Map<EventColumnName, Object> eventsResult =
-        prefix.readResults(result, new EventColumnNameConverter());
-    for (Map.Entry<EventColumnName, Object>
-             eventResult : eventsResult.entrySet()) {
-      EventColumnName eventColumnName = eventResult.getKey();
-      String key = eventColumnName.getId() +
-          Long.toString(eventColumnName.getTimestamp());
-      // Retrieve previously seen event to add to it
-      TimelineEvent event = eventsMap.get(key);
-      if (event == null) {
-        // First time we're seeing this event, add it to the eventsMap
-        event = new TimelineEvent();
-        event.setId(eventColumnName.getId());
-        event.setTimestamp(eventColumnName.getTimestamp());
-        eventsMap.put(key, event);
-      }
-      if (eventColumnName.getInfoKey() != null) {
-        event.addInfo(eventColumnName.getInfoKey(), eventResult.getValue());
-      }
-    }
-    Set<TimelineEvent> eventsSet = new HashSet<>(eventsMap.values());
-    entity.addEvents(eventsSet);
-  }
-}

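As a standalone illustration of the combination rule in createFilterList() above, here is a minimal sketch; the class and method names are invented for the sketch, and the bare FilterList constructor defaults to MUST_PASS_ALL (logical AND):

import org.apache.hadoop.hbase.filter.FilterList;

public final class FilterCombinationSketch {

  // Mirrors createFilterList(): AND both lists when both are non-empty, use
  // whichever one exists otherwise, and return null when neither does (null
  // means no filter gets attached to the Scan/Get at all).
  static FilterList combine(FilterList listBasedOnFilters,
      FilterList listBasedOnFields) {
    boolean hasFilters = listBasedOnFilters != null
        && !listBasedOnFilters.getFilters().isEmpty();
    boolean hasFields = listBasedOnFields != null
        && !listBasedOnFields.getFilters().isEmpty();
    if (hasFilters && hasFields) {
      FilterList list = new FilterList();
      list.addFilter(listBasedOnFilters);
      list.addFilter(listBasedOnFields);
      return list;
    }
    return hasFilters ? listBasedOnFilters
        : (hasFields ? listBasedOnFields : null);
  }
}
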
http://git-wip-us.apache.org/repos/asf/hadoop/blob/9af30d46/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/reader/TimelineEntityReaderFactory.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/reader/TimelineEntityReaderFactory.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/reader/TimelineEntityReaderFactory.java
deleted file mode 100644
index fa16077..0000000
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/reader/TimelineEntityReaderFactory.java
+++ /dev/null
@@ -1,105 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.yarn.server.timelineservice.storage.reader;
-
-import org.apache.hadoop.yarn.api.records.timelineservice.TimelineEntityType;
-import org.apache.hadoop.yarn.server.timelineservice.reader.TimelineDataToRetrieve;
-import org.apache.hadoop.yarn.server.timelineservice.reader.TimelineEntityFilters;
-import org.apache.hadoop.yarn.server.timelineservice.reader.TimelineReaderContext;
-
-/**
- * Factory methods for instantiating a timeline entity reader.
- */
-public final class TimelineEntityReaderFactory {
-  private TimelineEntityReaderFactory() {
-  }
-
-  /**
-   * Creates a timeline entity reader instance for reading a single entity with
-   * the specified input.
-   *
-   * @param context Reader context which defines the scope in which query has to
-   *     be made.
-   * @param dataToRetrieve Data to retrieve for each entity.
-   * @return An implementation of <cite>TimelineEntityReader</cite> object
-   *     depending on entity type.
-   */
-  public static TimelineEntityReader createSingleEntityReader(
-      TimelineReaderContext context, TimelineDataToRetrieve dataToRetrieve) {
-    // currently the types that are handled separately from the generic entity
-    // table are application, flow run, and flow activity entities
-    if (TimelineEntityType.YARN_APPLICATION.matches(context.getEntityType())) {
-      return new ApplicationEntityReader(context, dataToRetrieve);
-    } else if (TimelineEntityType.
-        YARN_FLOW_RUN.matches(context.getEntityType())) {
-      return new FlowRunEntityReader(context, dataToRetrieve);
-    } else if (TimelineEntityType.
-        YARN_FLOW_ACTIVITY.matches(context.getEntityType())) {
-      return new FlowActivityEntityReader(context, dataToRetrieve);
-    } else {
-      // assume we're dealing with a generic entity read
-      return new GenericEntityReader(context, dataToRetrieve);
-    }
-  }
-
-  /**
-   * Creates a timeline entity reader instance for reading set of entities with
-   * the specified input and predicates.
-   *
-   * @param context Reader context which defines the scope in which query has to
-   *     be made.
-   * @param filters Filters which limit the entities returned.
-   * @param dataToRetrieve Data to retrieve for each entity.
-   * @return An implementation of <cite>TimelineEntityReader</cite> object
-   *     depending on entity type.
-   */
-  public static TimelineEntityReader createMultipleEntitiesReader(
-      TimelineReaderContext context, TimelineEntityFilters filters,
-      TimelineDataToRetrieve dataToRetrieve) {
-    // currently the types that are handled separately from the generic entity
-    // table are application, flow run, and flow activity entities
-    if (TimelineEntityType.YARN_APPLICATION.matches(context.getEntityType())) {
-      return new ApplicationEntityReader(context, filters, dataToRetrieve);
-    } else if (TimelineEntityType.
-        YARN_FLOW_ACTIVITY.matches(context.getEntityType())) {
-      return new FlowActivityEntityReader(context, filters, dataToRetrieve);
-    } else if (TimelineEntityType.
-        YARN_FLOW_RUN.matches(context.getEntityType())) {
-      return new FlowRunEntityReader(context, filters, dataToRetrieve);
-    } else {
-      if (context.getDoAsUser() != null) {
-        return new SubApplicationEntityReader(context, filters, dataToRetrieve);
-      }
-      // assume we're dealing with a generic entity read
-      return new GenericEntityReader(context, filters, dataToRetrieve);
-    }
-  }
-
-  /**
-   * Creates a timeline entity type reader that will read all available entity
-   * types within the specified context.
-   *
-   * @param context Reader context which defines the scope in which query has to
-   *                be made. Limited to application level only.
-   * @return an <cite>EntityTypeReader</cite> object
-   */
-  public static EntityTypeReader createEntityTypeReader(
-      TimelineReaderContext context) {
-    return new EntityTypeReader(context);
-  }
-}

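For readers following the dispatch logic, a minimal sketch of the multi-entity rule above; the string labels stand in for the concrete reader classes and are illustrative only, not part of the patch:

public final class ReaderDispatchSketch {

  // Mirrors createMultipleEntitiesReader(): the three special entity types
  // get dedicated readers, a doAs user routes generic reads to the sub
  // application table, and everything else is a generic entity read.
  static String readerFor(String entityType, String doAsUser) {
    if ("YARN_APPLICATION".equals(entityType)) {
      return "ApplicationEntityReader";
    } else if ("YARN_FLOW_ACTIVITY".equals(entityType)) {
      return "FlowActivityEntityReader";
    } else if ("YARN_FLOW_RUN".equals(entityType)) {
      return "FlowRunEntityReader";
    } else if (doAsUser != null) {
      return "SubApplicationEntityReader";
    }
    return "GenericEntityReader";
  }
}
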
http://git-wip-us.apache.org/repos/asf/hadoop/blob/9af30d46/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/reader/package-info.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/reader/package-info.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/reader/package-info.java
deleted file mode 100644
index 9814d6d..0000000
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/reader/package-info.java
+++ /dev/null
@@ -1,28 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-/**
- * Package org.apache.hadoop.yarn.server.timelineservice.storage.reader
- * contains classes used to read entities from backend based on query type.
- */
-@InterfaceAudience.Private
-@InterfaceStability.Unstable
-package org.apache.hadoop.yarn.server.timelineservice.storage.reader;
-
-import org.apache.hadoop.classification.InterfaceAudience;
-import org.apache.hadoop.classification.InterfaceStability;
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hadoop/blob/9af30d46/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/subapplication/SubApplicationColumn.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/subapplication/SubApplicationColumn.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/subapplication/SubApplicationColumn.java
deleted file mode 100644
index 46b0cc9..0000000
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/subapplication/SubApplicationColumn.java
+++ /dev/null
@@ -1,108 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.yarn.server.timelineservice.storage.subapplication;
-
-import java.io.IOException;
-
-import org.apache.hadoop.hbase.client.Result;
-import org.apache.hadoop.hbase.util.Bytes;
-import org.apache.hadoop.yarn.server.timelineservice.storage.common.Column;
-import org.apache.hadoop.yarn.server.timelineservice.storage.common.ColumnFamily;
-import org.apache.hadoop.yarn.server.timelineservice.storage.common.ColumnHelper;
-import org.apache.hadoop.yarn.server.timelineservice.storage.common.GenericConverter;
-import org.apache.hadoop.yarn.server.timelineservice.storage.common.LongConverter;
-import org.apache.hadoop.yarn.server.timelineservice.storage.common.Separator;
-import org.apache.hadoop.yarn.server.timelineservice.storage.common.TypedBufferedMutator;
-import org.apache.hadoop.yarn.server.timelineservice.storage.common.ValueConverter;
-import org.apache.hadoop.yarn.server.timelineservice.storage.flow.Attribute;
-
-/**
- * Identifies fully qualified columns for the {@link SubApplicationTable}.
- */
-public enum SubApplicationColumn implements Column<SubApplicationTable> {
-
-  /**
-   * Identifier for the sub application.
-   */
-  ID(SubApplicationColumnFamily.INFO, "id"),
-
-  /**
-   * The type of sub application.
-   */
-  TYPE(SubApplicationColumnFamily.INFO, "type"),
-
-  /**
-   * When the sub application was created.
-   */
-  CREATED_TIME(SubApplicationColumnFamily.INFO, "created_time",
-      new LongConverter()),
-
-  /**
-   * The version of the flow that this sub application belongs to.
-   */
-  FLOW_VERSION(SubApplicationColumnFamily.INFO, "flow_version");
-
-  private final ColumnHelper<SubApplicationTable> column;
-  private final ColumnFamily<SubApplicationTable> columnFamily;
-  private final String columnQualifier;
-  private final byte[] columnQualifierBytes;
-
-  SubApplicationColumn(ColumnFamily<SubApplicationTable> columnFamily,
-      String columnQualifier) {
-    this(columnFamily, columnQualifier, GenericConverter.getInstance());
-  }
-
-  SubApplicationColumn(ColumnFamily<SubApplicationTable> columnFamily,
-      String columnQualifier, ValueConverter converter) {
-    this.columnFamily = columnFamily;
-    this.columnQualifier = columnQualifier;
-    // Future-proof by ensuring the right column prefix hygiene.
-    this.columnQualifierBytes =
-        Bytes.toBytes(Separator.SPACE.encode(columnQualifier));
-    this.column = new ColumnHelper<SubApplicationTable>(columnFamily,
-        converter);
-  }
-
-
-  public void store(byte[] rowKey,
-      TypedBufferedMutator<SubApplicationTable> tableMutator, Long timestamp,
-      Object inputValue, Attribute... attributes) throws IOException {
-    column.store(rowKey, tableMutator, columnQualifierBytes, timestamp,
-        inputValue, attributes);
-  }
-
-  public Object readResult(Result result) throws IOException {
-    return column.readResult(result, columnQualifierBytes);
-  }
-
-  @Override
-  public byte[] getColumnQualifierBytes() {
-    return columnQualifierBytes.clone();
-  }
-
-  @Override
-  public byte[] getColumnFamilyBytes() {
-    return columnFamily.getBytes();
-  }
-
-  @Override
-  public ValueConverter getValueConverter() {
-    return column.getValueConverter();
-  }
-
-}

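A hypothetical read path using the enum above; the Result is assumed to come from a Get against the sub application table, and the wrapper class and method names are made up:

import java.io.IOException;
import org.apache.hadoop.hbase.client.Result;

public final class SubApplicationColumnReadSketch {

  // CREATED_TIME is declared with a LongConverter above, so its decoded
  // value is a Long; ID and FLOW_VERSION decode via the generic converter.
  static void dump(Result result) throws IOException {
    Object id = SubApplicationColumn.ID.readResult(result);
    Object createdTime = SubApplicationColumn.CREATED_TIME.readResult(result);
    System.out.println("id=" + id + ", created=" + createdTime);
  }
}
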
http://git-wip-us.apache.org/repos/asf/hadoop/blob/9af30d46/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/subapplication/SubApplicationColumnFamily.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/subapplication/SubApplicationColumnFamily.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/subapplication/SubApplicationColumnFamily.java
deleted file mode 100644
index 1d7f8fd..0000000
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/subapplication/SubApplicationColumnFamily.java
+++ /dev/null
@@ -1,68 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.yarn.server.timelineservice.storage.subapplication;
-
-import org.apache.hadoop.hbase.util.Bytes;
-import org.apache.hadoop.yarn.server.timelineservice.storage.common.ColumnFamily;
-import org.apache.hadoop.yarn.server.timelineservice.storage.common.Separator;
-
-/**
- * Represents the sub application table column families.
- */
-public enum SubApplicationColumnFamily
-    implements ColumnFamily<SubApplicationTable> {
-
-  /**
-   * Info column family houses known columns, specifically ones included in
-   * columnfamily filters.
-   */
-  INFO("i"),
-
-  /**
-   * Configurations are in a separate column family for two reasons:
-   * a) the size of the config values can be very large, and
-   * b) we expect config values to be accessed separately from the
-   * metrics and info columns.
-   */
-  CONFIGS("c"),
-
-  /**
-   * Metrics have a separate column family, because they have a separate TTL.
-   */
-  METRICS("m");
-
-  /**
-   * Byte representation of this column family.
-   */
-  private final byte[] bytes;
-
-  /**
-   * @param value
-   *          create a column family with this name. Must be lower case and
-   *          without spaces.
-   */
-  SubApplicationColumnFamily(String value) {
-    // column families should be lower case and not contain any spaces.
-    this.bytes = Bytes.toBytes(Separator.SPACE.encode(value));
-  }
-
-  public byte[] getBytes() {
-    return Bytes.copy(bytes);
-  }
-
-}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/9af30d46/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/subapplication/SubApplicationColumnPrefix.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/subapplication/SubApplicationColumnPrefix.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/subapplication/SubApplicationColumnPrefix.java
deleted file mode 100644
index 06ecced..0000000
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/subapplication/SubApplicationColumnPrefix.java
+++ /dev/null
@@ -1,250 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.yarn.server.timelineservice.storage.subapplication;
-
-import java.io.IOException;
-import java.util.Map;
-import java.util.NavigableMap;
-
-import org.apache.hadoop.hbase.client.Result;
-import org.apache.hadoop.hbase.util.Bytes;
-import org.apache.hadoop.yarn.server.timelineservice.storage.common.ColumnFamily;
-import org.apache.hadoop.yarn.server.timelineservice.storage.common.ColumnHelper;
-import org.apache.hadoop.yarn.server.timelineservice.storage.common.ColumnPrefix;
-import org.apache.hadoop.yarn.server.timelineservice.storage.common.GenericConverter;
-import org.apache.hadoop.yarn.server.timelineservice.storage.common.KeyConverter;
-import org.apache.hadoop.yarn.server.timelineservice.storage.common.LongConverter;
-import org.apache.hadoop.yarn.server.timelineservice.storage.common.Separator;
-import org.apache.hadoop.yarn.server.timelineservice.storage.common.TypedBufferedMutator;
-import org.apache.hadoop.yarn.server.timelineservice.storage.common.ValueConverter;
-import org.apache.hadoop.yarn.server.timelineservice.storage.flow.Attribute;
-
-/**
- * Identifies partially qualified columns for the sub app table.
- */
-public enum SubApplicationColumnPrefix
-    implements ColumnPrefix<SubApplicationTable> {
-
-  /**
-   * To store TimelineEntity getIsRelatedToEntities values.
-   */
-  IS_RELATED_TO(SubApplicationColumnFamily.INFO, "s"),
-
-  /**
-   * To store TimelineEntity getRelatesToEntities values.
-   */
-  RELATES_TO(SubApplicationColumnFamily.INFO, "r"),
-
-  /**
-   * To store TimelineEntity info values.
-   */
-  INFO(SubApplicationColumnFamily.INFO, "i"),
-
-  /**
-   * Lifecycle events for an entity.
-   */
-  EVENT(SubApplicationColumnFamily.INFO, "e", true),
-
-  /**
-   * Config column stores configuration with config key as the column name.
-   */
-  CONFIG(SubApplicationColumnFamily.CONFIGS, null),
-
-  /**
-   * Metrics are stored with the metric name as the column name.
-   */
-  METRIC(SubApplicationColumnFamily.METRICS, null, new LongConverter());
-
-  private final ColumnHelper<SubApplicationTable> column;
-  private final ColumnFamily<SubApplicationTable> columnFamily;
-
-  /**
-   * Can be null for those cases where the provided column qualifier is the
-   * entire column name.
-   */
-  private final String columnPrefix;
-  private final byte[] columnPrefixBytes;
-
-  /**
-   * Private constructor, meant to be used by the enum definition.
-   *
-   * @param columnFamily that this column is stored in.
-   * @param columnPrefix for this column.
-   */
-  SubApplicationColumnPrefix(ColumnFamily<SubApplicationTable> columnFamily,
-      String columnPrefix) {
-    this(columnFamily, columnPrefix, false, GenericConverter.getInstance());
-  }
-
-  SubApplicationColumnPrefix(ColumnFamily<SubApplicationTable> columnFamily,
-      String columnPrefix, boolean compoundColQual) {
-    this(columnFamily, columnPrefix, compoundColQual,
-        GenericConverter.getInstance());
-  }
-
-  SubApplicationColumnPrefix(ColumnFamily<SubApplicationTable> columnFamily,
-      String columnPrefix, ValueConverter converter) {
-    this(columnFamily, columnPrefix, false, converter);
-  }
-
-  /**
-   * Private constructor, meant to be used by the enum definition.
-   *
-   * @param columnFamily that this column is stored in.
-   * @param columnPrefix for this column.
-   * @param converter used to encode/decode values to be stored in HBase for
-   * this column prefix.
-   */
-  SubApplicationColumnPrefix(ColumnFamily<SubApplicationTable> columnFamily,
-      String columnPrefix, boolean compoundColQual, ValueConverter converter) {
-    column = new ColumnHelper<SubApplicationTable>(columnFamily, converter);
-    this.columnFamily = columnFamily;
-    this.columnPrefix = columnPrefix;
-    if (columnPrefix == null) {
-      this.columnPrefixBytes = null;
-    } else {
-      // Future-proof by ensuring the right column prefix hygiene.
-      this.columnPrefixBytes =
-          Bytes.toBytes(Separator.SPACE.encode(columnPrefix));
-    }
-  }
-
-  /**
-   * @return the column prefix.
-   */
-  public String getColumnPrefix() {
-    return columnPrefix;
-  }
-
-  @Override
-  public byte[] getColumnPrefixBytes(byte[] qualifierPrefix) {
-    return ColumnHelper.getColumnQualifier(
-        this.columnPrefixBytes, qualifierPrefix);
-  }
-
-  @Override
-  public byte[] getColumnPrefixBytes(String qualifierPrefix) {
-    return ColumnHelper.getColumnQualifier(
-        this.columnPrefixBytes, qualifierPrefix);
-  }
-
-  @Override
-  public byte[] getColumnFamilyBytes() {
-    return columnFamily.getBytes();
-  }
-
-  @Override
-  public ValueConverter getValueConverter() {
-    return column.getValueConverter();
-  }
-
-  /*
-   * (non-Javadoc)
-   *
-   * @see
-   * org.apache.hadoop.yarn.server.timelineservice.storage.common.ColumnPrefix
-   * #store(byte[],
-   * org.apache.hadoop.yarn.server.timelineservice.storage.common.
-   * TypedBufferedMutator, java.lang.String, java.lang.Long, java.lang.Object,
-   * org.apache.hadoop.yarn.server.timelineservice.storage.flow.Attribute[])
-   */
-  public void store(byte[] rowKey,
-      TypedBufferedMutator<SubApplicationTable> tableMutator, String qualifier,
-      Long timestamp, Object inputValue, Attribute... attributes)
-      throws IOException {
-
-    // Null check
-    if (qualifier == null) {
-      throw new IOException("Cannot store column with null qualifier in "
-          + tableMutator.getName().getNameAsString());
-    }
-
-    byte[] columnQualifier = getColumnPrefixBytes(qualifier);
-
-    column.store(rowKey, tableMutator, columnQualifier, timestamp, inputValue,
-        attributes);
-  }
-
-  /*
-   * (non-Javadoc)
-   *
-   * @see
-   * org.apache.hadoop.yarn.server.timelineservice.storage.common.ColumnPrefix
-   * #store(byte[],
-   * org.apache.hadoop.yarn.server.timelineservice.storage.common.
-   * TypedBufferedMutator, java.lang.String, java.lang.Long, java.lang.Object)
-   */
-  public void store(byte[] rowKey,
-      TypedBufferedMutator<SubApplicationTable> tableMutator, byte[] qualifier,
-      Long timestamp, Object inputValue, Attribute... attributes)
-      throws IOException {
-
-    // Null check
-    if (qualifier == null) {
-      throw new IOException("Cannot store column with null qualifier in "
-          + tableMutator.getName().getNameAsString());
-    }
-
-    byte[] columnQualifier = getColumnPrefixBytes(qualifier);
-
-    column.store(rowKey, tableMutator, columnQualifier, timestamp, inputValue,
-        attributes);
-  }
-
-  /*
-   * (non-Javadoc)
-   *
-   * @see
-   * org.apache.hadoop.yarn.server.timelineservice.storage.common.ColumnPrefix
-   * #readResult(org.apache.hadoop.hbase.client.Result, java.lang.String)
-   */
-  public Object readResult(Result result, String qualifier) throws IOException {
-    byte[] columnQualifier =
-        ColumnHelper.getColumnQualifier(this.columnPrefixBytes, qualifier);
-    return column.readResult(result, columnQualifier);
-  }
-
-  /*
-   * (non-Javadoc)
-   *
-   * @see
-   * org.apache.hadoop.yarn.server.timelineservice.storage.common.ColumnPrefix
-   * #readResults(org.apache.hadoop.hbase.client.Result,
-   * org.apache.hadoop.yarn.server.timelineservice.storage.common.KeyConverter)
-   */
-  public <K> Map<K, Object> readResults(Result result,
-      KeyConverter<K> keyConverter) throws IOException {
-    return column.readResults(result, columnPrefixBytes, keyConverter);
-  }
-
-  /*
-   * (non-Javadoc)
-   *
-   * @see
-   * org.apache.hadoop.yarn.server.timelineservice.storage.common.ColumnPrefix
-   * #readResultsWithTimestamps(org.apache.hadoop.hbase.client.Result,
-   * org.apache.hadoop.yarn.server.timelineservice.storage.common.KeyConverter)
-   */
-  public <K, V> NavigableMap<K, NavigableMap<Long, V>>
-      readResultsWithTimestamps(Result result, KeyConverter<K> keyConverter)
-      throws IOException {
-    return column.readResultsWithTimestamps(result, columnPrefixBytes,
-        keyConverter);
-  }
-
-}

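A small sketch of how a fully qualified column name is assembled from the prefix enum above; the qualifier string and wrapper class are made up:

public final class QualifierSketch {

  // The stored column name is the prefix bytes ("i" for INFO) joined with
  // the encoded qualifier; CONFIG and METRIC have a null prefix, so there
  // the qualifier is the entire column name.
  static byte[][] infoColumn(String qualifier) {
    byte[] family = SubApplicationColumnPrefix.INFO.getColumnFamilyBytes();
    byte[] column =
        SubApplicationColumnPrefix.INFO.getColumnPrefixBytes(qualifier);
    return new byte[][] {family, column};
  }
}
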
http://git-wip-us.apache.org/repos/asf/hadoop/blob/9af30d46/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/subapplication/SubApplicationRowKey.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/subapplication/SubApplicationRowKey.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/subapplication/SubApplicationRowKey.java
deleted file mode 100644
index fb1f774..0000000
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/subapplication/SubApplicationRowKey.java
+++ /dev/null
@@ -1,290 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.yarn.server.timelineservice.storage.subapplication;
-
-import java.util.List;
-
-import org.apache.hadoop.hbase.util.Bytes;
-import org.apache.hadoop.yarn.server.timelineservice.reader.TimelineReaderUtils;
-import org.apache.hadoop.yarn.server.timelineservice.storage.common.KeyConverter;
-import org.apache.hadoop.yarn.server.timelineservice.storage.common.KeyConverterToString;
-import org.apache.hadoop.yarn.server.timelineservice.storage.common.Separator;
-
-/**
- * Represents a rowkey for the sub app table.
- */
-public class SubApplicationRowKey {
-  private final String subAppUserId;
-  private final String clusterId;
-  private final String entityType;
-  private final Long entityIdPrefix;
-  private final String entityId;
-  private final String userId;
-  private final SubApplicationRowKeyConverter subAppRowKeyConverter =
-      new SubApplicationRowKeyConverter();
-
-  public SubApplicationRowKey(String subAppUserId, String clusterId,
-      String entityType, Long entityIdPrefix, String entityId, String userId) {
-    this.subAppUserId = subAppUserId;
-    this.clusterId = clusterId;
-    this.entityType = entityType;
-    this.entityIdPrefix = entityIdPrefix;
-    this.entityId = entityId;
-    this.userId = userId;
-  }
-
-  public String getClusterId() {
-    return clusterId;
-  }
-
-  public String getSubAppUserId() {
-    return subAppUserId;
-  }
-
-  public String getEntityType() {
-    return entityType;
-  }
-
-  public String getEntityId() {
-    return entityId;
-  }
-
-  public Long getEntityIdPrefix() {
-    return entityIdPrefix;
-  }
-
-  public String getUserId() {
-    return userId;
-  }
-
-  /**
-   * Constructs a row key for the sub app table as follows:
-   * {@code subAppUserId!clusterId!entityType!entityIdPrefix!entityId!userId}.
-   * Typically used while querying a specific sub app.
-   *
-   * subAppUserId is usually the doAsUser.
-   * userId is the yarn user that the AM runs as.
-   *
-   * @return byte array with the row key.
-   */
-  public byte[] getRowKey() {
-    return subAppRowKeyConverter.encode(this);
-  }
-
-  /**
-   * Given the raw row key as bytes, returns the row key as an object.
-   *
-   * @param rowKey byte representation of row key.
-   * @return A <cite>SubApplicationRowKey</cite> object.
-   */
-  public static SubApplicationRowKey parseRowKey(byte[] rowKey) {
-    return new SubApplicationRowKeyConverter().decode(rowKey);
-  }
-
-  /**
-   * Constructs a row key for the sub app table as follows:
-   * <p>
-   * {@code subAppUserId!clusterId!entityType!entityIdPrefix!entityId!userId}.
-   *
-   * subAppUserId is usually the doAsUser.
-   * userId is the yarn user that the AM runs as.
-   *
-   * </p>
-   *
-   * @return String representation of row key.
-   */
-  public String getRowKeyAsString() {
-    return subAppRowKeyConverter.encodeAsString(this);
-  }
-
-  /**
-   * Given the encoded row key as string, returns the row key as an object.
-   *
-   * @param encodedRowKey String representation of row key.
-   * @return A <cite>SubApplicationRowKey</cite> object.
-   */
-  public static SubApplicationRowKey parseRowKeyFromString(
-      String encodedRowKey) {
-    return new SubApplicationRowKeyConverter().decodeFromString(encodedRowKey);
-  }
-
-  /**
-   * Encodes and decodes row key for sub app table.
-   * The row key is of the form:
-   * subAppUserId!clusterId!entityType!entityIdPrefix!entityId!userId
-   *
-   * subAppUserId is usually the doAsUser.
-   * userId is the yarn user that the AM runs as.
-   *
-   */
-  private static final class SubApplicationRowKeyConverter
-      implements KeyConverter<SubApplicationRowKey>,
-      KeyConverterToString<SubApplicationRowKey> {
-
-    private SubApplicationRowKeyConverter() {
-    }
-
-    /**
-     * sub app row key is of the form
-     * subAppUserId!clusterId!entityType!entityIdPrefix!entityId!userId
-     * with each segment separated by !.
-     *
-     * subAppUserId is usually the doAsUser.
-     * userId is the yarn user that the AM runs as.
-     *
-     * The sizes below indicate sizes of each one of these
-     * segments in sequence. clusterId, subAppUserId, entityType,
-     * entityId and userId are strings.
-     * The entity prefix is a long, hence 8 bytes in size. Strings are
-     * variable in size (i.e. end whenever separator is encountered).
-     * This is used while decoding and helps in determining where to split.
-     */
-    private static final int[] SEGMENT_SIZES = {Separator.VARIABLE_SIZE,
-        Separator.VARIABLE_SIZE, Separator.VARIABLE_SIZE, Bytes.SIZEOF_LONG,
-        Separator.VARIABLE_SIZE, Separator.VARIABLE_SIZE};
-
-    /*
-     * (non-Javadoc)
-     *
-     * Encodes SubApplicationRowKey object into a byte array with each
-     * component/field in SubApplicationRowKey separated by
-     * Separator#QUALIFIERS.
-     * This leads to a sub app table row key of the form
-     * subAppUserId!clusterId!entityType!entityIdPrefix!entityId!userId
-     *
-     * subAppUserId is usually the doAsUser.
-     * userId is the yarn user that the AM runs as.
-     *
-     * If entityType in passed SubApplicationRowKey object is null (and the
-     * fields preceding it are not null i.e. clusterId, subAppUserId), this
-     * returns a row key prefix of the form subAppUserId!clusterId!
-     * If entityIdPrefix in SubApplicationRowKey is null (other components
-     * are not null), this returns a row key prefix of the form
-     * subAppUserId!clusterId!entityType! and if entityId is null, one of
-     * the form subAppUserId!clusterId!entityType!entityIdPrefix!
-     *
-     * @see org.apache.hadoop.yarn.server.timelineservice.storage.common
-     * .KeyConverter#encode(java.lang.Object)
-     */
-    @Override
-    public byte[] encode(SubApplicationRowKey rowKey) {
-      byte[] subAppUser = Separator.encode(rowKey.getSubAppUserId(),
-          Separator.SPACE, Separator.TAB, Separator.QUALIFIERS);
-      byte[] cluster = Separator.encode(rowKey.getClusterId(), Separator.SPACE,
-          Separator.TAB, Separator.QUALIFIERS);
-      byte[] first = Separator.QUALIFIERS.join(subAppUser, cluster);
-      if (rowKey.getEntityType() == null) {
-        return first;
-      }
-      byte[] entityType = Separator.encode(rowKey.getEntityType(),
-          Separator.SPACE, Separator.TAB, Separator.QUALIFIERS);
-
-      if (rowKey.getEntityIdPrefix() == null) {
-        return Separator.QUALIFIERS.join(first, entityType,
-            Separator.EMPTY_BYTES);
-      }
-
-      byte[] entityIdPrefix = Bytes.toBytes(rowKey.getEntityIdPrefix());
-
-      if (rowKey.getEntityId() == null) {
-        return Separator.QUALIFIERS.join(first, entityType, entityIdPrefix,
-            Separator.EMPTY_BYTES);
-      }
-
-      byte[] entityId = Separator.encode(rowKey.getEntityId(), Separator.SPACE,
-          Separator.TAB, Separator.QUALIFIERS);
-
-      byte[] userId = Separator.encode(rowKey.getUserId(),
-          Separator.SPACE, Separator.TAB, Separator.QUALIFIERS);
-
-      byte[] second = Separator.QUALIFIERS.join(entityType, entityIdPrefix,
-          entityId, userId);
-
-      return Separator.QUALIFIERS.join(first, second);
-    }
-
-    /*
-     * (non-Javadoc)
-     *
-     * Decodes a sub application row key of the form
-     * subAppUserId!clusterId!entityType!entityIdPrefix!entityId!userId
-     *
-     * subAppUserId is usually the doAsUser.
-     * userId is the yarn user that the AM runs as.
-     *
-     * represented in byte format
-     * and converts it into a SubApplicationRowKey object.
-     *
-     * @see org.apache.hadoop.yarn.server.timelineservice.storage.common
-     * .KeyConverter#decode(byte[])
-     */
-    @Override
-    public SubApplicationRowKey decode(byte[] rowKey) {
-      byte[][] rowKeyComponents =
-          Separator.QUALIFIERS.split(rowKey, SEGMENT_SIZES);
-      if (rowKeyComponents.length != 6) {
-        throw new IllegalArgumentException(
-            "the row key is not valid for a sub app");
-      }
-      String subAppUserId =
-          Separator.decode(Bytes.toString(rowKeyComponents[0]),
-              Separator.QUALIFIERS, Separator.TAB, Separator.SPACE);
-      String clusterId = Separator.decode(Bytes.toString(rowKeyComponents[1]),
-          Separator.QUALIFIERS, Separator.TAB, Separator.SPACE);
-      String entityType = Separator.decode(Bytes.toString(rowKeyComponents[2]),
-          Separator.QUALIFIERS, Separator.TAB, Separator.SPACE);
-
-      Long entityPrefixId = Bytes.toLong(rowKeyComponents[3]);
-
-      String entityId = Separator.decode(Bytes.toString(rowKeyComponents[4]),
-          Separator.QUALIFIERS, Separator.TAB, Separator.SPACE);
-      String userId =
-          Separator.decode(Bytes.toString(rowKeyComponents[5]),
-              Separator.QUALIFIERS, Separator.TAB, Separator.SPACE);
-
-      return new SubApplicationRowKey(subAppUserId, clusterId, entityType,
-          entityPrefixId, entityId, userId);
-    }
-
-    @Override
-    public String encodeAsString(SubApplicationRowKey key) {
-      if (key.subAppUserId == null || key.clusterId == null
-          || key.entityType == null || key.entityIdPrefix == null
-          || key.entityId == null || key.userId == null) {
-        throw new IllegalArgumentException();
-      }
-      return TimelineReaderUtils.joinAndEscapeStrings(
-          new String[] {key.subAppUserId, key.clusterId, key.entityType,
-              key.entityIdPrefix.toString(), key.entityId, key.userId});
-    }
-
-    @Override
-    public SubApplicationRowKey decodeFromString(String encodedRowKey) {
-      List<String> split = TimelineReaderUtils.split(encodedRowKey);
-      if (split == null || split.size() != 6) {
-        throw new IllegalArgumentException(
-            "Invalid row key for sub app table.");
-      }
-      Long entityIdPrefix = Long.valueOf(split.get(3));
-      return new SubApplicationRowKey(split.get(0), split.get(1),
-          split.get(2), entityIdPrefix, split.get(4), split.get(5));
-    }
-  }
-}

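An illustrative round trip through the converter above; every identifier value below is made up:

public final class RowKeyRoundTripSketch {

  public static void main(String[] args) {
    SubApplicationRowKey key = new SubApplicationRowKey(
        "doAsUser1", "cluster1", "MAPREDUCE_TASK", 1001L, "task_001",
        "amUser1");

    // byte form, as written to HBase, and back
    byte[] encoded = key.getRowKey();
    SubApplicationRowKey decoded = SubApplicationRowKey.parseRowKey(encoded);

    // escaped string form, as used by the REST layer, and back
    String asString = key.getRowKeyAsString();
    SubApplicationRowKey fromString =
        SubApplicationRowKey.parseRowKeyFromString(asString);

    // both round trips recover the same components (run with -ea)
    assert decoded.getEntityId().equals(fromString.getEntityId());
  }
}
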
http://git-wip-us.apache.org/repos/asf/hadoop/blob/9af30d46/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/subapplication/SubApplicationRowKeyPrefix.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/subapplication/SubApplicationRowKeyPrefix.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/subapplication/SubApplicationRowKeyPrefix.java
deleted file mode 100644
index 0c04959..0000000
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/subapplication/SubApplicationRowKeyPrefix.java
+++ /dev/null
@@ -1,69 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.yarn.server.timelineservice.storage.subapplication;
-
-import org.apache.hadoop.yarn.server.timelineservice.storage.common.RowKeyPrefix;
-
-/**
- * Represents a partial row key for the sub application table: one without
- * the entityId, or without both the entityType and the entityId.
- */
-public class SubApplicationRowKeyPrefix extends SubApplicationRowKey
-    implements RowKeyPrefix<SubApplicationRowKey> {
-
-  /**
-   * Creates a prefix which generates row key prefixes for the sub
-   * application table of the form
-   * {@code subAppUserId!clusterId!entityType!entityIdPrefix!entityId!userId},
-   * truncated at the first null component.
-   *
-   * @param subAppUserId
-   *          identifying the subApp User
-   * @param clusterId
-   *          identifying the cluster
-   * @param entityType
-   *          which entity type
-   * @param entityIdPrefix
-   *          for entityId
-   * @param entityId
-   *          for an entity
-   * @param userId
-   *          for the user who runs the AM
-   *
-   * subAppUserId is usually the doAsUser.
-   * userId is the yarn user that the AM runs as.
-   *
-   */
-  public SubApplicationRowKeyPrefix(String subAppUserId, String clusterId,
-      String entityType, Long entityIdPrefix, String entityId,
-      String userId) {
-    super(subAppUserId, clusterId, entityType, entityIdPrefix, entityId,
-        userId);
-  }
-
-  /*
-   * (non-Javadoc)
-   *
-   * @see org.apache.hadoop.yarn.server.timelineservice.storage.subapplication.
-   * RowKeyPrefix#getRowKeyPrefix()
-   */
-  public byte[] getRowKeyPrefix() {
-    return super.getRowKey();
-  }
-
-}


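A hedged usage sketch for the class above: leaving the trailing components null produces a shorter prefix, e.g. all entities of one type for a given (doAs user, cluster) pair. The Scan usage shows the common prefix-scan pattern and is not part of this patch; all values are made up:

import org.apache.hadoop.hbase.client.Scan;

public final class PrefixScanSketch {

  static Scan scanOneType() {
    SubApplicationRowKeyPrefix prefix = new SubApplicationRowKeyPrefix(
        "doAsUser1", "cluster1", "MAPREDUCE_TASK", null, null, null);
    // encodes subAppUserId!clusterId!entityType! per the encode() logic above
    return new Scan().setRowPrefixFilter(prefix.getRowKeyPrefix());
  }
}
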


[35/50] [abbrv] hadoop git commit: HDFS-13175. Add more information for checking argument in DiskBalancerVolume. Contributed by Lei (Eddy) Xu.

Posted by ha...@apache.org.
HDFS-13175. Add more information for checking argument in DiskBalancerVolume.
Contributed by Lei (Eddy) Xu.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/121e1e12
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/121e1e12
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/121e1e12

Branch: refs/heads/HDFS-12996
Commit: 121e1e1280c7b019f6d2cc3ba9eae1ead0dd8408
Parents: b0d3c87
Author: Anu Engineer <ae...@apache.org>
Authored: Tue Feb 20 19:16:30 2018 -0800
Committer: Anu Engineer <ae...@apache.org>
Committed: Tue Feb 20 19:16:30 2018 -0800

----------------------------------------------------------------------
 .../server/diskbalancer/command/PlanCommand.java    | 16 ++++++++--------
 .../connectors/DBNameNodeConnector.java             |  2 --
 .../diskbalancer/datamodel/DiskBalancerVolume.java  |  4 +++-
 3 files changed, 11 insertions(+), 11 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/121e1e12/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/command/PlanCommand.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/command/PlanCommand.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/command/PlanCommand.java
index 6e45b96..b765885 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/command/PlanCommand.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/command/PlanCommand.java
@@ -124,6 +124,14 @@ public class PlanCommand extends Command {
       throw new IllegalArgumentException("Unable to find the specified node. " +
           cmd.getOptionValue(DiskBalancerCLI.PLAN));
     }
+
+    try (FSDataOutputStream beforeStream = create(String.format(
+        DiskBalancerCLI.BEFORE_TEMPLATE,
+        cmd.getOptionValue(DiskBalancerCLI.PLAN)))) {
+      beforeStream.write(getCluster().toJson()
+          .getBytes(StandardCharsets.UTF_8));
+    }
+
     this.thresholdPercentage = getThresholdPercentage(cmd);
 
     LOG.debug("threshold Percentage is {}", this.thresholdPercentage);
@@ -138,14 +146,6 @@ public class PlanCommand extends Command {
       plan = plans.get(0);
     }
 
-
-    try (FSDataOutputStream beforeStream = create(String.format(
-        DiskBalancerCLI.BEFORE_TEMPLATE,
-        cmd.getOptionValue(DiskBalancerCLI.PLAN)))) {
-      beforeStream.write(getCluster().toJson()
-          .getBytes(StandardCharsets.UTF_8));
-    }
-
     try {
       if (plan != null && plan.getVolumeSetPlans().size() > 0) {
         outputLine = String.format("Writing plan to:");

http://git-wip-us.apache.org/repos/asf/hadoop/blob/121e1e12/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/connectors/DBNameNodeConnector.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/connectors/DBNameNodeConnector.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/connectors/DBNameNodeConnector.java
index b044baf..2d8ba8a 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/connectors/DBNameNodeConnector.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/connectors/DBNameNodeConnector.java
@@ -144,8 +144,6 @@ class DBNameNodeConnector implements ClusterConnector {
       // Does it make sense ? Balancer does do that. Right now
       // we only deal with volumes and not blockPools
 
-      volume.setUsed(report.getDfsUsed());
-
       volume.setUuid(storage.getStorageID());
 
       // we will skip this volume for disk balancer if

http://git-wip-us.apache.org/repos/asf/hadoop/blob/121e1e12/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/datamodel/DiskBalancerVolume.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/datamodel/DiskBalancerVolume.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/datamodel/DiskBalancerVolume.java
index 47a925c..a9fd7f0 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/datamodel/DiskBalancerVolume.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/datamodel/DiskBalancerVolume.java
@@ -269,7 +269,9 @@ public class DiskBalancerVolume {
    * @param dfsUsedSpace - dfsUsedSpace for this volume.
    */
   public void setUsed(long dfsUsedSpace) {
-    Preconditions.checkArgument(dfsUsedSpace < this.getCapacity());
+    Preconditions.checkArgument(dfsUsedSpace < this.getCapacity(),
+        "DiskBalancerVolume.setUsed: dfsUsedSpace(%s) < capacity(%s)",
+        dfsUsedSpace, getCapacity());
     this.used = dfsUsedSpace;
   }
 

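The hunk above swaps a bare checkArgument for Guava's templated overload so a failure reports both operands. A hedged, standalone illustration of that idiom (the values are made up):

    import com.google.common.base.Preconditions;

    public class CheckArgumentSketch {
      public static void main(String[] args) {
        long used = 200L;
        long capacity = 100L;
        // The %s placeholders are substituted only on failure, so the success
        // path pays no string-formatting cost; the thrown
        // IllegalArgumentException carries both values.
        Preconditions.checkArgument(used < capacity,
            "dfsUsedSpace(%s) < capacity(%s) violated", used, capacity);
      }
    }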



[13/50] [abbrv] hadoop git commit: YARN-7919. Refactor timelineservice-hbase module into submodules. Contributed by Haibo Chen.

Posted by ha...@apache.org.
http://git-wip-us.apache.org/repos/asf/hadoop/blob/9af30d46/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-common/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/common/LongConverter.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-common/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/common/LongConverter.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-common/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/common/LongConverter.java
new file mode 100644
index 0000000..0857980
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-common/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/common/LongConverter.java
@@ -0,0 +1,94 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.server.timelineservice.storage.common;
+
+import java.io.IOException;
+import java.io.Serializable;
+
+import org.apache.hadoop.hbase.util.Bytes;
+
+/**
+ * Encodes a value by interpreting it as a Long and converting it to bytes,
+ * and decodes a set of bytes as a Long.
+ */
+public final class LongConverter implements NumericValueConverter,
+    Serializable {
+
+  /**
+   * Added because we implement Comparator<Number>.
+   */
+  private static final long serialVersionUID = 1L;
+
+  public LongConverter() {
+  }
+
+  @Override
+  public byte[] encodeValue(Object value) throws IOException {
+    if (!HBaseTimelineSchemaUtils.isIntegralValue(value)) {
+      throw new IOException("Expected integral value");
+    }
+    return Bytes.toBytes(((Number)value).longValue());
+  }
+
+  @Override
+  public Object decodeValue(byte[] bytes) throws IOException {
+    if (bytes == null) {
+      return null;
+    }
+    return Bytes.toLong(bytes);
+  }
+
+  /**
+   * Compares two numbers as longs. If either number is null, it will be taken
+   * as 0.
+   *
+   * @param num1 the first {@code Long} to compare.
+   * @param num2 the second {@code Long} to compare.
+   * @return -1 if num1 is less than num2, 0 if num1 is equal to num2 and 1 if
+   * num1 is greater than num2.
+   */
+  @Override
+  public int compare(Number num1, Number num2) {
+    return Long.compare((num1 == null) ? 0L : num1.longValue(),
+        (num2 == null) ? 0L : num2.longValue());
+  }
+
+  @Override
+  public Number add(Number num1, Number num2, Number...numbers) {
+    long sum = ((num1 == null) ? 0L : num1.longValue()) +
+        ((num2 == null) ? 0L : num2.longValue());
+    for (Number num : numbers) {
+      sum = sum + ((num == null) ? 0L : num.longValue());
+    }
+    return sum;
+  }
+
+  /**
+   * Converts a timestamp into its inverse timestamp to be used in (row) keys
+   * where we want to have the most recent timestamp at the top of the table
+   * (scans start at the most recent timestamp first).
+   *
+   * @param key value to be inverted so that the latest version will be first in
+   *          a scan.
+   * @return inverted long
+   */
+  public static long invertLong(long key) {
+    return Long.MAX_VALUE - key;
+  }
+}

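Assuming the LongConverter above (and HBase's Bytes utility) is on the classpath, a quick sketch of the encode/decode round trip and of why invertLong puts the newest timestamps first:

    import java.io.IOException;

    public class LongConverterSketch {
      public static void main(String[] args) throws IOException {
        LongConverter converter = new LongConverter();
        byte[] encoded = converter.encodeValue(42L);           // 8 big-endian bytes
        long decoded = (Long) converter.decodeValue(encoded);  // back to 42
        // Inverted values sort in the opposite order of the originals, so a
        // lexicographic HBase scan sees the most recent timestamp first.
        long newer = LongConverter.invertLong(2000L);
        long older = LongConverter.invertLong(1000L);
        System.out.println(decoded + " " + (newer < older));   // 42 true
      }
    }
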
http://git-wip-us.apache.org/repos/asf/hadoop/blob/9af30d46/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-common/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/common/LongKeyConverter.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-common/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/common/LongKeyConverter.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-common/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/common/LongKeyConverter.java
new file mode 100644
index 0000000..4a724d6
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-common/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/common/LongKeyConverter.java
@@ -0,0 +1,68 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.server.timelineservice.storage.common;
+
+import java.io.IOException;
+/**
+ * Encodes and decodes column names / row keys which are long.
+ */
+public final class LongKeyConverter implements KeyConverter<Long> {
+
+  /**
+   * To delegate the actual work to.
+   */
+  private final LongConverter longConverter = new LongConverter();
+
+  public LongKeyConverter() {
+  }
+
+  /*
+   * (non-Javadoc)
+   *
+   * @see
+   * org.apache.hadoop.yarn.server.timelineservice.storage.common.KeyConverter
+   * #encode(java.lang.Object)
+   */
+  @Override
+  public byte[] encode(Long key) {
+    try {
+      // IOException will not be thrown here as we are explicitly passing
+      // Long.
+      return longConverter.encodeValue(key);
+    } catch (IOException e) {
+      return null;
+    }
+  }
+
+  /*
+   * (non-Javadoc)
+   *
+   * @see
+   * org.apache.hadoop.yarn.server.timelineservice.storage.common.KeyConverter
+   * #decode(byte[])
+   */
+  @Override
+  public Long decode(byte[] bytes) {
+    try {
+      return (Long) longConverter.decodeValue(bytes);
+    } catch (IOException e) {
+      return null;
+    }
+  }
+}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/9af30d46/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-common/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/common/NumericValueConverter.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-common/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/common/NumericValueConverter.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-common/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/common/NumericValueConverter.java
new file mode 100644
index 0000000..8fb6536
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-common/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/common/NumericValueConverter.java
@@ -0,0 +1,39 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.server.timelineservice.storage.common;
+
+import java.util.Comparator;
+
+/**
+ * Extends ValueConverter interface for numeric converters to support numerical
+ * operations such as comparison, addition, etc.
+ */
+public interface NumericValueConverter extends ValueConverter,
+    Comparator<Number> {
+  /**
+   * Adds two or more numbers. If any of the numbers is null, it is taken as
+   * 0.
+   *
+   * @param num1 the first number to add.
+   * @param num2 the second number to add.
+   * @param numbers Rest of the numbers to be added.
+   * @return result after adding up the numbers.
+   */
+  Number add(Number num1, Number num2, Number...numbers);
+}

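A brief sketch of the null-as-zero contract above, using the LongConverter from earlier in this patch as the NumericValueConverter implementation:

    public class NullAsZeroSketch {
      public static void main(String[] args) {
        NumericValueConverter c = new LongConverter();
        Number sum = c.add(5L, null, 7L, null); // nulls count as 0 -> 12
        int cmp = c.compare(null, 3L);          // null treated as 0 -> negative
        System.out.println(sum + " " + cmp);
      }
    }
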
http://git-wip-us.apache.org/repos/asf/hadoop/blob/9af30d46/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-common/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/common/Range.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-common/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/common/Range.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-common/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/common/Range.java
new file mode 100644
index 0000000..8a2e01a
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-common/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/common/Range.java
@@ -0,0 +1,62 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.yarn.server.timelineservice.storage.common;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
+
+/**
+ * Encapsulates a range with start and end indices.
+ */
+@InterfaceAudience.Private
+@InterfaceStability.Unstable
+public class Range {
+  private final int startIdx;
+  private final int endIdx;
+
+  /**
+   * Defines a range from start index (inclusive) to end index (exclusive).
+   *
+   * @param start
+   *          Starting index position
+   * @param end
+   *          Ending index position (exclusive)
+   */
+  public Range(int start, int end) {
+    if (start < 0 || end < start) {
+      throw new IllegalArgumentException(
+          "Invalid range, required that: 0 <= start <= end; start=" + start
+              + ", end=" + end);
+    }
+
+    this.startIdx = start;
+    this.endIdx = end;
+  }
+
+  public int start() {
+    return startIdx;
+  }
+
+  public int end() {
+    return endIdx;
+  }
+
+  public int length() {
+    return endIdx - startIdx;
+  }
+}
\ No newline at end of file

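A small illustration of the half-open [start, end) contract enforced above:

    public class RangeSketch {
      public static void main(String[] args) {
        Range r = new Range(3, 7);
        // half-open: start() == 3, end() == 7, length() == 4
        System.out.println(r.start() + " " + r.end() + " " + r.length());
        // new Range(5, 2) would throw IllegalArgumentException
      }
    }
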
http://git-wip-us.apache.org/repos/asf/hadoop/blob/9af30d46/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-common/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/common/RowKeyPrefix.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-common/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/common/RowKeyPrefix.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-common/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/common/RowKeyPrefix.java
new file mode 100644
index 0000000..6159dc7
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-common/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/common/RowKeyPrefix.java
@@ -0,0 +1,42 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.yarn.server.timelineservice.storage.common;
+
+/**
+ * In queries where a single result is needed, an exact rowkey can be used
+ * through the corresponding rowkey#getRowKey() method. For queries that need to
+ * scan over a range of rowkeys, a partial rowkey (the initial part) is
+ * used. Classes implementing RowKeyPrefix indicate that they are the initial
+ * part of rowkeys, with constructors that take fewer arguments in order to
+ * form a partial rowkey, a prefix.
+ *
+ * @param <R> indicating the type of rowkey that a particular implementation is
+ *          a prefix for.
+ */
+public interface RowKeyPrefix<R> {
+
+  /**
+   * Create a row key prefix, meaning a partial rowkey that can be used in range
+   * scans. Which fields are included in the prefix, and therefore what the
+   * output looks like, depends on the constructor of the specific instance
+   * that was used.
+   * @return a prefix of the following form {@code first!second!...!last!}
+   */
+  byte[] getRowKeyPrefix();
+
+}
\ No newline at end of file

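Since RowKeyPrefix has a single abstract method, a lambda can stand in for a concrete prefix class. A hypothetical sketch of how such a prefix typically feeds an HBase range scan (assumes an HBase client version that provides Scan#setRowPrefixFilter; the key layout shown is illustrative only):

    import org.apache.hadoop.hbase.client.Scan;
    import org.apache.hadoop.hbase.util.Bytes;

    public class PrefixScanSketch {
      public static void main(String[] args) {
        // Illustrative prefix: all rows for one flow, regardless of run id.
        RowKeyPrefix<Void> flowPrefix = () -> Bytes.toBytes("cluster!user!flow!");
        Scan scan = new Scan();
        scan.setRowPrefixFilter(flowPrefix.getRowKeyPrefix());
        // scan now covers exactly the rows whose key starts with the prefix
      }
    }
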
http://git-wip-us.apache.org/repos/asf/hadoop/blob/9af30d46/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-common/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/common/Separator.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-common/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/common/Separator.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-common/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/common/Separator.java
new file mode 100644
index 0000000..5090b4d
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-common/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/common/Separator.java
@@ -0,0 +1,575 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with this
+ * work for additional information regarding copyright ownership. The ASF
+ * licenses this file to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations under
+ * the License.
+ */
+package org.apache.hadoop.yarn.server.timelineservice.storage.common;
+
+import java.util.ArrayList;
+import java.util.Collection;
+import java.util.Iterator;
+import java.util.List;
+import java.util.regex.Matcher;
+import java.util.regex.Pattern;
+
+import org.apache.hadoop.hbase.util.Bytes;
+
+/**
+ * Used to separate row qualifiers, column qualifiers and compound fields.
+ */
+public enum Separator {
+
+  /**
+   * separator in key or column qualifier fields.
+   */
+  QUALIFIERS("!", "%0$"),
+
+  /**
+   * separator in values, and/or compound key/column qualifier fields.
+   */
+  VALUES("=", "%1$"),
+
+  /**
+   * separator in values, often used to avoid having these in qualifiers and
+   * names. Note that if we use HTML form encoding through URLEncoder, we end up
+   * getting a + for a space, which may already occur in strings, so we don't
+   * want that.
+   */
+  SPACE(" ", "%2$"),
+
+  /**
+   * separator in values, often used to avoid having these in qualifiers and
+   * names.
+   */
+  TAB("\t", "%3$");
+
+  // a reserved character that starts each of the encoded values and is encoded
+  // first in order to escape naturally occurring instances of encoded values
+  // although these could be expressed as enum instances, we define them as
+  // private constants to hide them from callers
+  private static final String PERCENT = "%";
+  private static final String PERCENT_ENCODED = "%9$";
+
+  private static final Pattern PERCENT_PATTERN =
+      Pattern.compile(PERCENT, Pattern.LITERAL);
+  private static final String PERCENT_REPLACEMENT =
+      Matcher.quoteReplacement(PERCENT);
+
+  private static final Pattern PERCENT_ENCODED_PATTERN =
+      Pattern.compile(PERCENT_ENCODED, Pattern.LITERAL);
+  private static final String PERCENT_ENCODED_REPLACEMENT =
+      Matcher.quoteReplacement(PERCENT_ENCODED);
+
+  /**
+   * The string value of this separator.
+   */
+  private final String value;
+
+  /**
+   * The byte representation of value.
+   */
+  private final byte[] bytes;
+
+  // pre-compiled patterns and quoted replacements for optimization
+  private final Pattern valuePattern;
+  private final String valueReplacement;
+
+  private final Pattern encodedValuePattern;
+  private final String encodedValueReplacement;
+
+  /**
+   * Indicator for variable size of an individual segment in a split. The
+   * segment ends wherever separator is encountered.
+   * Typically used for strings.
+   * Also used to indicate that there is no fixed number of splits which need to
+   * be returned. If split limit is specified as this, all possible splits are
+   * returned.
+   */
+  public static final int VARIABLE_SIZE = 0;
+
+
+  /** empty string. */
+  public static final String EMPTY_STRING = "";
+
+  /** empty bytes. */
+  public static final byte[] EMPTY_BYTES = new byte[0];
+
+  /**
+   * @param value of the separator to use. Cannot be null or empty string.
+   * @param encodedValue choose something that isn't likely to occur in the data
+   *          itself. Cannot be null or empty string.
+   */
+  private Separator(String value, String encodedValue) {
+    this.value = value;
+
+    // validation
+    if (value == null || value.length() == 0 || encodedValue == null
+        || encodedValue.length() == 0) {
+      throw new IllegalArgumentException(
+          "Cannot create separator from null or empty string.");
+    }
+
+    this.bytes = Bytes.toBytes(value);
+    this.valuePattern = Pattern.compile(value, Pattern.LITERAL);
+    this.valueReplacement = Matcher.quoteReplacement(value);
+
+    this.encodedValuePattern = Pattern.compile(encodedValue, Pattern.LITERAL);
+    this.encodedValueReplacement = Matcher.quoteReplacement(encodedValue);
+  }
+
+  /**
+   * @return the original value of the separator
+   */
+  public String getValue() {
+    return value;
+  }
+
+  /**
+   * Used to make token safe to be used with this separator without collisions.
+   * It <em>must</em> be paired with {@link #decode(String)} for it to be
+   * decoded correctly.
+   * <p>
+   * If you need to encode a given string for multiple separators,
+   * {@link #encode(String, Separator...)} should be used over successive
+   * invocations of this method. It will result in a more compact version of the
+   * encoded value.
+   *
+   * @param token Token to be encoded.
+   * @return the token with any occurrences of this separator URLEncoded.
+   */
+  public String encode(String token) {
+    if (token == null || token.length() == 0) {
+      // Nothing to replace
+      return token;
+    }
+    // first encode the percent to escape naturally occurring encoded values
+    String escaped = encodePercent(token);
+    return encodeSingle(escaped, this);
+  }
+
+  private static String replace(String token, Pattern pattern,
+      String replacement) {
+    return pattern.matcher(token).replaceAll(replacement);
+  }
+
+  private static String encodeSingle(String token, Separator separator) {
+    return replace(token, separator.valuePattern,
+        separator.encodedValueReplacement);
+  }
+
+  private static String encodePercent(String token) {
+    return replace(token, PERCENT_PATTERN, PERCENT_ENCODED_REPLACEMENT);
+  }
+
+  /**
+   * Decode the token encoded using {@link #encode(String)}. It <em>must</em> be
+   * used for the result encoded with {@link #encode(String)} to be able to
+   * recover the original.
+   *
+   * @param token Token to be decoded.
+   * @return the token with any occurrences of the encoded separator replaced by
+   *         the separator itself.
+   */
+  public String decode(String token) {
+    if (token == null || token.length() == 0) {
+      // Nothing to replace
+      return token;
+    }
+    String escaped = decodeSingle(token, this);
+    // decode percent to de-escape
+    return decodePercent(escaped);
+  }
+
+  private static String decodeSingle(String token, Separator separator) {
+    return replace(token, separator.encodedValuePattern,
+        separator.valueReplacement);
+  }
+
+  private static String decodePercent(String token) {
+    return replace(token, PERCENT_ENCODED_PATTERN, PERCENT_REPLACEMENT);
+  }
+
+  /**
+   * Encode the given separators in the token with their encoding equivalents.
+   * It <em>must</em> be paired with {@link #decode(byte[], Separator...)} or
+   * {@link #decode(String, Separator...)} with the same separators for it to be
+   * decoded correctly.
+   * <p>
+   * If you need to encode a given string for multiple separators, this form of
+   * encoding should be used over successive invocations of
+   * {@link #encode(String)}. It will result in a more compact version of the
+   * encoded value.
+   *
+   * @param token containing possible separators that need to be encoded.
+   * @param separators to be encoded in the token with their URLEncoding
+   *          equivalent.
+   * @return non-null byte representation of the token with occurrences of the
+   *         separators encoded.
+   */
+  public static byte[] encode(String token, Separator... separators) {
+    if (token == null || token.length() == 0) {
+      return EMPTY_BYTES;
+    }
+    String result = token;
+    // first encode the percent to escape naturally occurring encoded values
+    result = encodePercent(token);
+    for (Separator separator : separators) {
+      if (separator != null) {
+        result = encodeSingle(result, separator);
+      }
+    }
+    return Bytes.toBytes(result);
+  }
+
+  /**
+   * Decode the given separators in the token with their decoding equivalents.
+   * It <em>must</em> be used for the result encoded with
+   * {@link #encode(String, Separator...)} with the same separators to be able
+   * to recover the original.
+   *
+   * @param token containing possible separators that need to be encoded.
+   * @param separators to be encoded in the token with their URLEncoding
+   *          equivalent.
+   * @return String representation of the token with occurrences of the URL
+   *         encoded separators decoded.
+   */
+  public static String decode(byte[] token, Separator... separators) {
+    if (token == null) {
+      return null;
+    }
+    return decode(Bytes.toString(token), separators);
+  }
+
+  /**
+   * Decode the given separators in the token with their decoding equivalents.
+   * It <em>must</em> be used for the result encoded with
+   * {@link #encode(String, Separator...)} with the same separators to be able
+   * to recover the original.
+   *
+   * @param token containing possible separators that need to be encoded.
+   * @param separators to be encoded in the token with their URLEncoding
+   *          equivalent.
+   * @return String representation of the token with occurrences of the URL
+   *         encoded separators decoded.
+   */
+  public static String decode(String token, Separator... separators) {
+    if (token == null) {
+      return null;
+    }
+    String result = token;
+    for (Separator separator : separators) {
+      if (separator != null) {
+        result = decodeSingle(result, separator);
+      }
+    }
+    // decode percent to de-escape
+    return decodePercent(result);
+  }
+
+  /**
+   * Returns a single byte array containing all of the individual array
+   * components separated by this separator.
+   *
+   * @param components Byte array components to be joined together.
+   * @return byte array after joining the components
+   */
+  public byte[] join(byte[]... components) {
+    if (components == null || components.length == 0) {
+      return EMPTY_BYTES;
+    }
+
+    int finalSize = 0;
+    finalSize = this.value.length() * (components.length - 1);
+    for (byte[] comp : components) {
+      if (comp != null) {
+        finalSize += comp.length;
+      }
+    }
+
+    byte[] buf = new byte[finalSize];
+    int offset = 0;
+    for (int i = 0; i < components.length; i++) {
+      if (components[i] != null) {
+        System.arraycopy(components[i], 0, buf, offset, components[i].length);
+        offset += components[i].length;
+      }
+      if (i < (components.length - 1)) {
+        System.arraycopy(this.bytes, 0, buf, offset, this.value.length());
+        offset += this.value.length();
+      }
+    }
+    return buf;
+  }
+
+  /**
+   * Concatenates items (as String), using this separator.
+   *
+   * @param items Items to join; {@code toString()} will be called on each item.
+   *          Any occurrence of the separator in the individual strings will be
+   *          first encoded. Cannot be null.
+   * @return non-null joined result. Note that when separator is {@literal null}
+   *         the result is simply all items concatenated and the process is not
+   *         reversible through {@link #splitEncoded(String)}
+   */
+  public String joinEncoded(String... items) {
+    if (items == null || items.length == 0) {
+      return "";
+    }
+
+    StringBuilder sb = new StringBuilder(encode(items[0].toString()));
+    // Start at 1, we've already grabbed the first value at index 0
+    for (int i = 1; i < items.length; i++) {
+      sb.append(this.value);
+      sb.append(encode(items[i].toString()));
+    }
+
+    return sb.toString();
+  }
+
+  /**
+   * Concatenates items (as String), using this separator.
+   *
+   * @param items Items to join; {@code toString()} will be called on each item.
+   *          Any occurrence of the separator in the individual strings will be
+   *          first encoded. Cannot be null.
+   * @return non-null joined result. Note that when separator is {@literal null}
+   *         the result is simply all items concatenated and the process is not
+   *         reversible through {@link #splitEncoded(String)}
+   */
+  public String joinEncoded(Iterable<?> items) {
+    if (items == null) {
+      return "";
+    }
+    Iterator<?> i = items.iterator();
+    if (!i.hasNext()) {
+      return "";
+    }
+
+    StringBuilder sb = new StringBuilder(encode(i.next().toString()));
+    while (i.hasNext()) {
+      sb.append(this.value);
+      sb.append(encode(i.next().toString()));
+    }
+
+    return sb.toString();
+  }
+
+  /**
+   * @param compoundValue containing individual values separated by this
+   *          separator, which have that separator encoded.
+   * @return non-null set of values from the compoundValue with the separator
+   *         decoded.
+   */
+  public Collection<String> splitEncoded(String compoundValue) {
+    List<String> result = new ArrayList<String>();
+    if (compoundValue != null) {
+      for (String val : valuePattern.split(compoundValue)) {
+        result.add(decode(val));
+      }
+    }
+    return result;
+  }
+
+  /**
+   * Splits the source array into multiple array segments using this separator,
+   * up to a maximum of count items. This will naturally produce copied byte
+   * arrays for each of the split segments.
+   *
+   * @param source to be split
+   * @param limit on how many segments are supposed to be returned. A
+   *          non-positive value indicates no limit on number of segments.
+   * @return source split by this separator.
+   */
+  public byte[][] split(byte[] source, int limit) {
+    return split(source, this.bytes, limit);
+  }
+
+  /**
+   * Splits the source array into multiple array segments using this separator.
+   * The sizes indicate the sizes of the relative components/segments.
+   * In case one of the segments contains this separator before the specified
+   * size is reached, the separator will be considered part of that segment and
+   * we will continue till size is reached.
+   * Variable length strings cannot contain this separator and are indicated
+   * with a size of {@value #VARIABLE_SIZE}. Such strings are encoded for this
+   * separator and decoded after the result of the split is returned.
+   *
+   * @param source byte array to be split.
+   * @param sizes sizes of relative components/segments.
+   * @return source split by this separator as per the sizes specified.
+   */
+  public byte[][] split(byte[] source, int[] sizes) {
+    return split(source, this.bytes, sizes);
+  }
+
+  /**
+   * Splits the source array into multiple array segments using this separator,
+   * as many times as splits are found. This will naturally produce copied byte
+   * arrays for each of the split segments.
+   *
+   * @param source byte array to be split
+   * @return source split by this separator.
+   */
+  public byte[][] split(byte[] source) {
+    return split(source, this.bytes);
+  }
+
+  /**
+   * Returns a list of ranges identifying [start, end) -- closed, open --
+   * positions within the source byte array that would be split using the
+   * separator byte array.
+   * The sizes indicate the sizes of the relative components/segments.
+   * In case one of the segments contains this separator before the specified
+   * size is reached, the separator will be considered part of that segment and
+   * we will continue till size is reached.
+   * Variable length strings cannot contain this separator and are indicated
+   * with a size of {@value #VARIABLE_SIZE}. Such strings are encoded for this
+   * separator and decoded after the result of the split is returned.
+   *
+   * @param source the source data
+   * @param separator the separator pattern to look for
+   * @param sizes indicate the sizes of the relative components/segments.
+   * @return a list of ranges.
+   */
+  private static List<Range> splitRanges(byte[] source, byte[] separator,
+      int[] sizes) {
+    List<Range> segments = new ArrayList<Range>();
+    if (source == null || separator == null) {
+      return segments;
+    }
+    // VARIABLE_SIZE here indicates that there is no limit to number of segments
+    // to return.
+    int limit = VARIABLE_SIZE;
+    if (sizes != null && sizes.length > 0) {
+      limit = sizes.length;
+    }
+    int start = 0;
+    int currentSegment = 0;
+    itersource: for (int i = 0; i < source.length; i++) {
+      for (int j = 0; j < separator.length; j++) {
+        if (source[i + j] != separator[j]) {
+          continue itersource;
+        }
+      }
+      // all separator elements matched
+      if (limit > VARIABLE_SIZE) {
+        if (segments.size() >= (limit - 1)) {
+          // everything else goes in one final segment
+          break;
+        }
+        if (sizes != null) {
+          int currentSegExpectedSize = sizes[currentSegment];
+          if (currentSegExpectedSize > VARIABLE_SIZE) {
+            int currentSegSize = i - start;
+            if (currentSegSize < currentSegExpectedSize) {
+              // Segment not yet complete. More bytes to parse.
+              continue itersource;
+            } else if (currentSegSize > currentSegExpectedSize) {
+              // Segment is not as per size.
+              throw new IllegalArgumentException(
+                  "Segments not separated as per expected sizes");
+            }
+          }
+        }
+      }
+      segments.add(new Range(start, i));
+      start = i + separator.length;
+      // i will be incremented again in outer for loop
+      i += separator.length - 1;
+      currentSegment++;
+    }
+    // add in remaining to a final range
+    if (start <= source.length) {
+      if (sizes != null) {
+        // Check if final segment is as per size specified.
+        if (sizes[currentSegment] > VARIABLE_SIZE &&
+            source.length - start > sizes[currentSegment]) {
+          // Segment is not as per size.
+          throw new IllegalArgumentException(
+              "Segments not separated as per expected sizes");
+        }
+      }
+      segments.add(new Range(start, source.length));
+    }
+    return segments;
+  }
+
+  /**
+   * Splits the source into segments calculated from the limit/sizes specified
+   * for the separator.
+   *
+   * @param source byte array to be split.
+   * @param segments specifies the range for each segment.
+   * @return a byte[][] split as per the segment ranges.
+   */
+  private static byte[][] split(byte[] source, List<Range> segments) {
+    byte[][] splits = new byte[segments.size()][];
+    for (int i = 0; i < segments.size(); i++) {
+      Range r = segments.get(i);
+      byte[] tmp = new byte[r.length()];
+      if (tmp.length > 0) {
+        System.arraycopy(source, r.start(), tmp, 0, r.length());
+      }
+      splits[i] = tmp;
+    }
+    return splits;
+  }
+
+  /**
+   * Splits the source array into multiple array segments using the given
+   * separator based on the sizes. This will naturally produce copied byte
+   * arrays for each of the split segments.
+   *
+   * @param source source array.
+   * @param separator separator represented as a byte array.
+   * @param sizes sizes of relative components/segments.
+   * @return byte[][] after splitting the source.
+   */
+  private static byte[][] split(byte[] source, byte[] separator, int[] sizes) {
+    List<Range> segments = splitRanges(source, separator, sizes);
+    return split(source, segments);
+  }
+
+  /**
+   * Splits the source array into multiple array segments using the given
+   * separator. This will naturally produce copied byte arrays for each of the
+   * split segments.
+   *
+   * @param source Source array.
+   * @param separator Separator represented as a byte array.
+   * @return byte[][] after splitting the source.
+   */
+  private static byte[][] split(byte[] source, byte[] separator) {
+    return split(source, separator, (int[]) null);
+  }
+
+  /**
+   * Splits the source array into multiple array segments using the given
+   * separator, up to a maximum of count items. This will naturally produce
+   * copied byte arrays for each of the split segments.
+   *
+   * @param source Source array.
+   * @param separator Separator represented as a byte array.
+   * @param limit a non-positive value indicates no limit on number of segments.
+   * @return byte[][] after splitting the input source.
+   */
+  private static byte[][] split(byte[] source, byte[] separator, int limit) {
+    int[] sizes = null;
+    if (limit > VARIABLE_SIZE) {
+      sizes = new int[limit];
+    }
+    List<Range> segments = splitRanges(source, separator, sizes);
+    return split(source, segments);
+  }
+}

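A short round trip through the Separator above, showing why values must be encoded before joining: an embedded separator character would otherwise corrupt the split.

    import java.util.Collection;

    public class SeparatorSketch {
      public static void main(String[] args) {
        // "app!1" contains the QUALIFIERS separator, so it is escaped to
        // "app%0$1" before the join; splitEncoded undoes both steps.
        String joined = Separator.QUALIFIERS.joinEncoded("app!1", "attempt");
        System.out.println(joined);              // app%0$1!attempt
        Collection<String> parts = Separator.QUALIFIERS.splitEncoded(joined);
        System.out.println(parts);               // [app!1, attempt]
      }
    }
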
http://git-wip-us.apache.org/repos/asf/hadoop/blob/9af30d46/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-common/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/common/StringKeyConverter.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-common/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/common/StringKeyConverter.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-common/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/common/StringKeyConverter.java
new file mode 100644
index 0000000..282848e
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-common/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/common/StringKeyConverter.java
@@ -0,0 +1,54 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.server.timelineservice.storage.common;
+
+/**
+ * Encodes and decodes column names / row keys which are merely strings.
+ * Column prefixes are not part of the column name passed for encoding. It is
+ * added later, if required in the associated ColumnPrefix implementations.
+ */
+public final class StringKeyConverter implements KeyConverter<String> {
+
+  public StringKeyConverter() {
+  }
+
+  /*
+   * (non-Javadoc)
+   *
+   * @see
+   * org.apache.hadoop.yarn.server.timelineservice.storage.common.KeyConverter
+   * #encode(java.lang.Object)
+   */
+  @Override
+  public byte[] encode(String key) {
+    return Separator.encode(key, Separator.SPACE, Separator.TAB);
+  }
+
+  /*
+   * (non-Javadoc)
+   *
+   * @see
+   * org.apache.hadoop.yarn.server.timelineservice.storage.common.KeyConverter
+   * #decode(byte[])
+   */
+  @Override
+  public String decode(byte[] bytes) {
+    return Separator.decode(bytes, Separator.TAB, Separator.SPACE);
+  }
+}

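A small sketch of the converter above; note that encode and decode list SPACE and TAB in different orders, which is safe here because each separator's escape sequence is distinct:

    public class StringKeyConverterSketch {
      public static void main(String[] args) {
        StringKeyConverter converter = new StringKeyConverter();
        byte[] encoded = converter.encode("a b\tc"); // space and tab escaped
        String decoded = converter.decode(encoded);  // "a b\tc" again
        System.out.println(decoded.equals("a b\tc"));// true
      }
    }
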
http://git-wip-us.apache.org/repos/asf/hadoop/blob/9af30d46/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-common/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/common/TimestampGenerator.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-common/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/common/TimestampGenerator.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-common/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/common/TimestampGenerator.java
new file mode 100644
index 0000000..d03b37d
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-common/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/common/TimestampGenerator.java
@@ -0,0 +1,116 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.server.timelineservice.storage.common;
+
+import java.util.concurrent.atomic.AtomicLong;
+
+import org.apache.hadoop.yarn.api.records.ApplicationId;
+
+/**
+ * Utility class that allows HBase coprocessors to interact with unique
+ * timestamps.
+ */
+public class TimestampGenerator {
+
+  /*
+   * if this is changed, then reading cell timestamps written with older
+   * multiplier value will not work
+   */
+  public static final long TS_MULTIPLIER = 1000000L;
+
+  private final AtomicLong lastTimestamp = new AtomicLong();
+
+  /**
+   * Returns the current wall clock time in milliseconds, multiplied by the
+   * required precision.
+   *
+   * @return current timestamp.
+   */
+  public long currentTime() {
+    // We want to align cell timestamps with current time.
+    // Cell timestamps must not be less than
+    // System.currentTimeMillis() * TS_MULTIPLIER.
+    return System.currentTimeMillis() * TS_MULTIPLIER;
+  }
+
+  /**
+   * Returns a timestamp value unique within the scope of this
+   * {@code TimestampGenerator} instance. For usage by HBase
+   * {@code RegionObserver} coprocessors, this normally means unique within a
+   * given region.
+   *
+   * Unlikely scenario of generating a non-unique timestamp: if there is a
+   * sustained rate of more than 1M HBase writes per second AND the region
+   * fails over within the time range of timestamps being generated, then
+   * there may be collisions writing to a cell version of the same column.
+   *
+   * @return unique timestamp.
+   */
+  public long getUniqueTimestamp() {
+    long lastTs;
+    long nextTs;
+    do {
+      lastTs = lastTimestamp.get();
+      nextTs = Math.max(lastTs + 1, currentTime());
+    } while (!lastTimestamp.compareAndSet(lastTs, nextTs));
+    return nextTs;
+  }
+
+  /**
+   * Returns a timestamp multiplied by TS_MULTIPLIER and supplemented with the
+   * last few digits of the application id.
+   *
+   * Unlikely scenario of generating a timestamp that is a duplicate: If more
+   * than 1M concurrent apps are running in one flow run AND they write to the
+   * same column at the same time, then, say, an appId of 1,000,001 will
+   * overlap with an appId of 1 and there may be collisions for that flow
+   * run's specific column.
+   *
+   * @param incomingTS Timestamp to be converted.
+   * @param appId Application Id.
+   * @return a timestamp multiplied by TS_MULTIPLIER and supplemented with the
+   *         last few digits of the application id
+   */
+  public static long getSupplementedTimestamp(long incomingTS, String appId) {
+    long suffix = getAppIdSuffix(appId);
+    long outgoingTS = incomingTS * TS_MULTIPLIER + suffix;
+    return outgoingTS;
+
+  }
+
+  private static long getAppIdSuffix(String appIdStr) {
+    if (appIdStr == null) {
+      return 0L;
+    }
+    ApplicationId appId = ApplicationId.fromString(appIdStr);
+    long id = appId.getId() % TS_MULTIPLIER;
+    return id;
+  }
+
+  /**
+   * Truncates the last few digits of the timestamp which were supplemented by
+   * the TimestampGenerator#getSupplementedTimestamp function.
+   *
+   * @param incomingTS Timestamp to be truncated.
+   * @return a truncated timestamp value
+   */
+  public static long getTruncatedTimestamp(long incomingTS) {
+    return incomingTS / TS_MULTIPLIER;
+  }
+}
\ No newline at end of file

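A sketch of the supplement/truncate round trip above (the application id follows YARN's application_&lt;clusterTimestamp&gt;_&lt;sequence&gt; string format; the values are made up):

    public class SupplementedTimestampSketch {
      public static void main(String[] args) {
        long wallClockMs = 1518900000000L;
        // Folds the app's sequence number (42) into the six low digits:
        // cell == wallClockMs * 1_000_000 + 42
        long cell = TimestampGenerator.getSupplementedTimestamp(
            wallClockMs, "application_1518900000000_0042");
        // Integer division drops the supplemented digits again.
        long back = TimestampGenerator.getTruncatedTimestamp(cell);
        System.out.println(back == wallClockMs); // true
      }
    }
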
http://git-wip-us.apache.org/repos/asf/hadoop/blob/9af30d46/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-common/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/common/ValueConverter.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-common/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/common/ValueConverter.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-common/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/common/ValueConverter.java
new file mode 100644
index 0000000..757a6d3
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-common/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/common/ValueConverter.java
@@ -0,0 +1,47 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.server.timelineservice.storage.common;
+
+import java.io.IOException;
+
+/**
+ * Converter used to encode/decode a value associated with a column prefix or a
+ * column.
+ */
+public interface ValueConverter {
+
+  /**
+   * Encode an object as a byte array depending on the converter implementation.
+   *
+   * @param value Value to be encoded.
+   * @return a byte array
+   * @throws IOException if any problem is encountered while encoding.
+   */
+  byte[] encodeValue(Object value) throws IOException;
+
+  /**
+   * Decode a byte array and convert it into an object depending on the
+   * converter implementation.
+   *
+   * @param bytes Byte array to be decoded.
+   * @return an object
+   * @throws IOException if any problem is encountered while decoding.
+   */
+  Object decodeValue(byte[] bytes) throws IOException;
+}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/9af30d46/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-common/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/common/package-info.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-common/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/common/package-info.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-common/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/common/package-info.java
new file mode 100644
index 0000000..0df5b8a
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-common/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/common/package-info.java
@@ -0,0 +1,28 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/**
+ * Package org.apache.hadoop.yarn.server.timelineservice.storage.common contains
+ * a set of utility classes used across backend storage reader and writer.
+ */
+@InterfaceAudience.Private
+@InterfaceStability.Unstable
+package org.apache.hadoop.yarn.server.timelineservice.storage.common;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/9af30d46/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-common/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/entity/EntityColumn.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-common/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/entity/EntityColumn.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-common/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/entity/EntityColumn.java
new file mode 100644
index 0000000..81961d3
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-common/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/entity/EntityColumn.java
@@ -0,0 +1,105 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.yarn.server.timelineservice.storage.entity;
+
+import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.yarn.server.timelineservice.storage.common.Column;
+import org.apache.hadoop.yarn.server.timelineservice.storage.common.ColumnFamily;
+import org.apache.hadoop.yarn.server.timelineservice.storage.common.GenericConverter;
+import org.apache.hadoop.yarn.server.timelineservice.storage.common.LongConverter;
+import org.apache.hadoop.yarn.server.timelineservice.storage.common.Separator;
+import org.apache.hadoop.yarn.server.timelineservice.storage.common.ValueConverter;
+import org.apache.hadoop.yarn.server.timelineservice.storage.flow.Attribute;
+
+/**
+ * Identifies fully qualified columns for the {@link EntityTable}.
+ */
+public enum EntityColumn implements Column<EntityTable> {
+
+  /**
+   * Identifier for the entity.
+   */
+  ID(EntityColumnFamily.INFO, "id"),
+
+  /**
+   * The type of entity.
+   */
+  TYPE(EntityColumnFamily.INFO, "type"),
+
+  /**
+   * When the entity was created.
+   */
+  CREATED_TIME(EntityColumnFamily.INFO, "created_time", new LongConverter()),
+
+  /**
+   * The version of the flow that this entity belongs to.
+   */
+  FLOW_VERSION(EntityColumnFamily.INFO, "flow_version");
+
+  private final ColumnFamily<EntityTable> columnFamily;
+  private final String columnQualifier;
+  private final byte[] columnQualifierBytes;
+  private final ValueConverter valueConverter;
+
+  EntityColumn(ColumnFamily<EntityTable> columnFamily,
+      String columnQualifier) {
+    this(columnFamily, columnQualifier, GenericConverter.getInstance());
+  }
+
+  EntityColumn(ColumnFamily<EntityTable> columnFamily,
+      String columnQualifier, ValueConverter converter) {
+    this.columnFamily = columnFamily;
+    this.columnQualifier = columnQualifier;
+    // Future-proof by ensuring the right column prefix hygiene.
+    this.columnQualifierBytes =
+        Bytes.toBytes(Separator.SPACE.encode(columnQualifier));
+    this.valueConverter = converter;
+  }
+
+  /**
+   * @return the column name value
+   */
+  private String getColumnQualifier() {
+    return columnQualifier;
+  }
+
+  @Override
+  public byte[] getColumnQualifierBytes() {
+    return columnQualifierBytes.clone();
+  }
+
+  @Override
+  public byte[] getColumnFamilyBytes() {
+    return columnFamily.getBytes();
+  }
+
+  @Override
+  public ValueConverter getValueConverter() {
+    return valueConverter;
+  }
+
+  @Override
+  public Attribute[] getCombinedAttrsWithAggr(Attribute... attributes) {
+    return attributes;
+  }
+
+  @Override
+  public boolean supplementCellTimestamp() {
+    return false;
+  }
+}
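
For orientation, a minimal sketch (not part of the patch) of how a caller can
address one of these columns through the accessors defined above; the sample
timestamp value is invented for illustration:

import java.io.IOException;
import org.apache.hadoop.yarn.server.timelineservice.storage.entity.EntityColumn;

public class EntityColumnSketch {
  public static void main(String[] args) throws IOException {
    // HBase coordinates of the created_time column: info family, fixed name.
    byte[] family = EntityColumn.CREATED_TIME.getColumnFamilyBytes();
    byte[] qualifier = EntityColumn.CREATED_TIME.getColumnQualifierBytes();
    // CREATED_TIME is declared with a LongConverter, so the cell value is an
    // 8-byte encoded long.
    byte[] value = EntityColumn.CREATED_TIME.getValueConverter()
        .encodeValue(1392993084018L);
    System.out.println(family.length + "/" + qualifier.length + "/"
        + value.length + " bytes");
  }
}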

http://git-wip-us.apache.org/repos/asf/hadoop/blob/9af30d46/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-common/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/entity/EntityColumnFamily.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-common/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/entity/EntityColumnFamily.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-common/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/entity/EntityColumnFamily.java
new file mode 100644
index 0000000..7c63727
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-common/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/entity/EntityColumnFamily.java
@@ -0,0 +1,65 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.yarn.server.timelineservice.storage.entity;
+
+import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.yarn.server.timelineservice.storage.common.ColumnFamily;
+import org.apache.hadoop.yarn.server.timelineservice.storage.common.Separator;
+
+/**
+ * Represents the entity table column families.
+ */
+public enum EntityColumnFamily implements ColumnFamily<EntityTable> {
+
+  /**
+   * Info column family houses known columns, specifically ones included in
+   * column family filters.
+   */
+  INFO("i"),
+
+  /**
+   * Configurations are in a separate column family for two reasons: a) the size
+   * of the config values can be very large and b) we expect that config values
+   * are often accessed separately from the metrics and info columns.
+   */
+  CONFIGS("c"),
+
+  /**
+   * Metrics have a separate column family, because they have a separate TTL.
+   */
+  METRICS("m");
+
+  /**
+   * Byte representation of this column family.
+   */
+  private final byte[] bytes;
+
+  /**
+   * @param value create a column family with this name. Must be lower case and
+   *          without spaces.
+   */
+  EntityColumnFamily(String value) {
+    // column families should be lower case and not contain any spaces.
+    this.bytes = Bytes.toBytes(Separator.SPACE.encode(value));
+  }
+
+  public byte[] getBytes() {
+    return Bytes.copy(bytes);
+  }
+
+}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/9af30d46/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-common/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/entity/EntityColumnPrefix.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-common/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/entity/EntityColumnPrefix.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-common/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/entity/EntityColumnPrefix.java
new file mode 100644
index 0000000..08234d3
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-common/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/entity/EntityColumnPrefix.java
@@ -0,0 +1,162 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.yarn.server.timelineservice.storage.entity;
+
+import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.yarn.server.timelineservice.storage.common.ColumnFamily;
+import org.apache.hadoop.yarn.server.timelineservice.storage.common.ColumnHelper;
+import org.apache.hadoop.yarn.server.timelineservice.storage.common.ColumnPrefix;
+import org.apache.hadoop.yarn.server.timelineservice.storage.common.GenericConverter;
+import org.apache.hadoop.yarn.server.timelineservice.storage.common.LongConverter;
+import org.apache.hadoop.yarn.server.timelineservice.storage.common.Separator;
+import org.apache.hadoop.yarn.server.timelineservice.storage.common.ValueConverter;
+import org.apache.hadoop.yarn.server.timelineservice.storage.flow.Attribute;
+
+/**
+ * Identifies partially qualified columns for the entity table.
+ */
+public enum EntityColumnPrefix implements ColumnPrefix<EntityTable> {
+
+  /**
+   * To store TimelineEntity getIsRelatedToEntities values.
+   */
+  IS_RELATED_TO(EntityColumnFamily.INFO, "s"),
+
+  /**
+   * To store TimelineEntity getRelatesToEntities values.
+   */
+  RELATES_TO(EntityColumnFamily.INFO, "r"),
+
+  /**
+   * To store TimelineEntity info values.
+   */
+  INFO(EntityColumnFamily.INFO, "i"),
+
+  /**
+   * Lifecycle events for an entity.
+   */
+  EVENT(EntityColumnFamily.INFO, "e", true),
+
+  /**
+   * Config column stores configuration with config key as the column name.
+   */
+  CONFIG(EntityColumnFamily.CONFIGS, null),
+
+  /**
+   * Metrics are stored with the metric name as the column name.
+   */
+  METRIC(EntityColumnFamily.METRICS, null, new LongConverter());
+
+  private final ColumnFamily<EntityTable> columnFamily;
+
+  /**
+   * Can be null for those cases where the provided column qualifier is the
+   * entire column name.
+   */
+  private final String columnPrefix;
+  private final byte[] columnPrefixBytes;
+  private final ValueConverter valueConverter;
+
+  /**
+   * Private constructor, meant to be used by the enum definition.
+   *
+   * @param columnFamily that this column is stored in.
+   * @param columnPrefix for this column.
+   */
+  EntityColumnPrefix(ColumnFamily<EntityTable> columnFamily,
+      String columnPrefix) {
+    this(columnFamily, columnPrefix, false, GenericConverter.getInstance());
+  }
+
+  EntityColumnPrefix(ColumnFamily<EntityTable> columnFamily,
+      String columnPrefix, boolean compoundColQual) {
+    this(columnFamily, columnPrefix, compoundColQual,
+        GenericConverter.getInstance());
+  }
+
+  EntityColumnPrefix(ColumnFamily<EntityTable> columnFamily,
+      String columnPrefix, ValueConverter converter) {
+    this(columnFamily, columnPrefix, false, converter);
+  }
+
+  /**
+   * Private constructor, meant to be used by the enum definition.
+   *
+   * @param columnFamily that this column is stored in.
+   * @param columnPrefix for this column.
+   * @param compoundColQual whether this column prefix stores compound column
+   * qualifiers.
+   * @param converter used to encode/decode values to be stored in HBase for
+   * this column prefix.
+   */
+  EntityColumnPrefix(ColumnFamily<EntityTable> columnFamily,
+      String columnPrefix, boolean compoundColQual, ValueConverter converter) {
+    this.valueConverter = converter;
+    this.columnFamily = columnFamily;
+    this.columnPrefix = columnPrefix;
+    if (columnPrefix == null) {
+      this.columnPrefixBytes = null;
+    } else {
+      // Future-proof by ensuring the right column prefix hygiene.
+      this.columnPrefixBytes =
+          Bytes.toBytes(Separator.SPACE.encode(columnPrefix));
+    }
+  }
+
+  /**
+   * @return the column name value
+   */
+  public String getColumnPrefix() {
+    return columnPrefix;
+  }
+
+  @Override
+  public byte[] getColumnPrefixBytes(byte[] qualifierPrefix) {
+    return ColumnHelper.getColumnQualifier(
+        this.columnPrefixBytes, qualifierPrefix);
+  }
+
+  @Override
+  public byte[] getColumnPrefixBytes(String qualifierPrefix) {
+    return ColumnHelper.getColumnQualifier(
+        this.columnPrefixBytes, qualifierPrefix);
+  }
+
+  @Override
+  public byte[] getColumnPrefixInBytes() {
+    return columnPrefixBytes != null ? columnPrefixBytes.clone() : null;
+  }
+
+  @Override
+  public byte[] getColumnFamilyBytes() {
+    return columnFamily.getBytes();
+  }
+
+  @Override
+  public ValueConverter getValueConverter() {
+    return valueConverter;
+  }
+
+  @Override
+  public Attribute[] getCombinedAttrsWithAggr(Attribute... attributes) {
+    return attributes;
+  }
+
+  @Override
+  public boolean supplementCellTimeStamp() {
+    return false;
+  }
+}
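
A similar sketch for the prefixed case: every event lands in its own column,
whose qualifier is the "e" prefix joined with the remainder of the qualifier.
The event name below is a made-up example, not one emitted by YARN:

import org.apache.hadoop.yarn.server.timelineservice.storage.entity.EntityColumnPrefix;

public class EntityColumnPrefixSketch {
  public static void main(String[] args) {
    // Full qualifier bytes for an event column, conceptually "e!<remainder>".
    byte[] qualifier =
        EntityColumnPrefix.EVENT.getColumnPrefixBytes("SAMPLE_EVENT");
    // Events live in the info column family.
    byte[] family = EntityColumnPrefix.EVENT.getColumnFamilyBytes();
    System.out.println(qualifier.length + " qualifier bytes in a "
        + family.length + "-byte family");
  }
}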

http://git-wip-us.apache.org/repos/asf/hadoop/blob/9af30d46/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-common/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/entity/EntityRowKey.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-common/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/entity/EntityRowKey.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-common/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/entity/EntityRowKey.java
new file mode 100644
index 0000000..b85a9b0
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-common/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/entity/EntityRowKey.java
@@ -0,0 +1,299 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.yarn.server.timelineservice.storage.entity;
+
+import java.util.List;
+
+import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.yarn.server.timelineservice.reader.TimelineReaderUtils;
+import org.apache.hadoop.yarn.server.timelineservice.storage.common.AppIdKeyConverter;
+import org.apache.hadoop.yarn.server.timelineservice.storage.common.KeyConverter;
+import org.apache.hadoop.yarn.server.timelineservice.storage.common.KeyConverterToString;
+import org.apache.hadoop.yarn.server.timelineservice.storage.common.LongConverter;
+import org.apache.hadoop.yarn.server.timelineservice.storage.common.Separator;
+
+/**
+ * Represents a rowkey for the entity table.
+ */
+public class EntityRowKey {
+  private final String clusterId;
+  private final String userId;
+  private final String flowName;
+  private final Long flowRunId;
+  private final String appId;
+  private final String entityType;
+  private final Long entityIdPrefix;
+  private final String entityId;
+  private final EntityRowKeyConverter entityRowKeyConverter =
+      new EntityRowKeyConverter();
+
+  public EntityRowKey(String clusterId, String userId, String flowName,
+      Long flowRunId, String appId, String entityType, Long entityIdPrefix,
+      String entityId) {
+    this.clusterId = clusterId;
+    this.userId = userId;
+    this.flowName = flowName;
+    this.flowRunId = flowRunId;
+    this.appId = appId;
+    this.entityType = entityType;
+    this.entityIdPrefix = entityIdPrefix;
+    this.entityId = entityId;
+  }
+
+  public String getClusterId() {
+    return clusterId;
+  }
+
+  public String getUserId() {
+    return userId;
+  }
+
+  public String getFlowName() {
+    return flowName;
+  }
+
+  public Long getFlowRunId() {
+    return flowRunId;
+  }
+
+  public String getAppId() {
+    return appId;
+  }
+
+  public String getEntityType() {
+    return entityType;
+  }
+
+  public String getEntityId() {
+    return entityId;
+  }
+
+  public Long getEntityIdPrefix() {
+    return entityIdPrefix;
+  }
+
+  /**
+   * Constructs a row key for the entity table as follows:
+   * {@code userName!clusterId!flowName!flowRunId!AppId!entityType!
+   * entityIdPrefix!entityId}.
+   * Typically used while querying a specific entity.
+   *
+   * @return byte array with the row key.
+   */
+  public byte[] getRowKey() {
+    return entityRowKeyConverter.encode(this);
+  }
+
+  /**
+   * Given the raw row key as bytes, returns the row key as an object.
+   * @param rowKey byte representation of row key.
+   * @return An <cite>EntityRowKey</cite> object.
+   */
+  public static EntityRowKey parseRowKey(byte[] rowKey) {
+    return new EntityRowKeyConverter().decode(rowKey);
+  }
+
+  /**
+   * Constructs a row key for the entity table as follows:
+   * <p>
+   * {@code userName!clusterId!flowName!flowRunId!AppId!
+   * entityType!entityIdPrefix!entityId}.
+   * </p>
+   * @return String representation of row key.
+   */
+  public String getRowKeyAsString() {
+    return entityRowKeyConverter.encodeAsString(this);
+  }
+
+  /**
+   * Given the encoded row key as string, returns the row key as an object.
+   * @param encodedRowKey String representation of row key.
+   * @return An <cite>EntityRowKey</cite> object.
+   */
+  public static EntityRowKey parseRowKeyFromString(String encodedRowKey) {
+    return new EntityRowKeyConverter().decodeFromString(encodedRowKey);
+  }
+
+  /**
+   * Encodes and decodes the row key for the entity table. The row key is of
+   * the form:
+   * userName!clusterId!flowName!flowRunId!appId!entityType!entityIdPrefix!
+   * entityId. flowRunId and entityIdPrefix are longs, appId is
+   * encoded/decoded using {@link AppIdKeyConverter} and the rest are strings.
+   */
+  private static final class EntityRowKeyConverter implements
+      KeyConverter<EntityRowKey>, KeyConverterToString<EntityRowKey> {
+
+    private final AppIdKeyConverter appIDKeyConverter = new AppIdKeyConverter();
+
+    private EntityRowKeyConverter() {
+    }
+
+    /**
+     * Entity row key is of the form
+     * userName!clusterId!flowName!flowRunId!appId!entityType!entityIdPrefix!
+     * entityId, with each segment separated by !. The sizes below indicate the
+     * size of each of these segments in sequence. clusterId, userName,
+     * flowName, entityType and entityId are strings. flowRunId and
+     * entityIdPrefix are longs, hence 8 bytes each. appId is represented as 12
+     * bytes, with the cluster timestamp part of the appId being 8 bytes (long)
+     * and the sequence id being 4 bytes (int). Strings are variable in size
+     * (i.e. they end whenever a separator is encountered). This is used while
+     * decoding and helps in determining where to split.
+    private static final int[] SEGMENT_SIZES = {Separator.VARIABLE_SIZE,
+        Separator.VARIABLE_SIZE, Separator.VARIABLE_SIZE, Bytes.SIZEOF_LONG,
+        AppIdKeyConverter.getKeySize(), Separator.VARIABLE_SIZE,
+        Bytes.SIZEOF_LONG, Separator.VARIABLE_SIZE };
+
+    /*
+     * (non-Javadoc)
+     *
+     * Encodes an EntityRowKey object into a byte array with each
+     * component/field in EntityRowKey separated by Separator#QUALIFIERS. This
+     * leads to an entity table row key of the form
+     * userName!clusterId!flowName!flowRunId!appId!entityType!entityIdPrefix!
+     * entityId. If entityType in the passed EntityRowKey object is null (and
+     * the fields preceding it, i.e. clusterId, userId, flowName, flowRunId and
+     * appId, are not null), this returns a row key prefix of the form
+     * userName!clusterId!flowName!flowRunId!appId!; if entityIdPrefix is null,
+     * the prefix ends at entityType!; and if entityId is null, the prefix ends
+     * at entityIdPrefix!. flowRunId is inverted while encoding as it helps
+     * maintain a descending order for row keys in the entity table.
+     *
+     * @see org.apache.hadoop.yarn.server.timelineservice.storage.common
+     * .KeyConverter#encode(java.lang.Object)
+     */
+    @Override
+    public byte[] encode(EntityRowKey rowKey) {
+      byte[] user =
+          Separator.encode(rowKey.getUserId(), Separator.SPACE, Separator.TAB,
+              Separator.QUALIFIERS);
+      byte[] cluster =
+          Separator.encode(rowKey.getClusterId(), Separator.SPACE,
+              Separator.TAB, Separator.QUALIFIERS);
+      byte[] flow =
+          Separator.encode(rowKey.getFlowName(), Separator.SPACE,
+              Separator.TAB, Separator.QUALIFIERS);
+      byte[] first = Separator.QUALIFIERS.join(user, cluster, flow);
+      // Note that flowRunId is a long, so it cannot be string-encoded and
+      // joined together with the components above in one step.
+      byte[] second =
+          Bytes.toBytes(LongConverter.invertLong(rowKey.getFlowRunId()));
+      byte[] third = appIDKeyConverter.encode(rowKey.getAppId());
+      if (rowKey.getEntityType() == null) {
+        return Separator.QUALIFIERS.join(first, second, third,
+            Separator.EMPTY_BYTES);
+      }
+      byte[] entityType =
+          Separator.encode(rowKey.getEntityType(), Separator.SPACE,
+              Separator.TAB, Separator.QUALIFIERS);
+
+      if (rowKey.getEntityIdPrefix() == null) {
+        return Separator.QUALIFIERS.join(first, second, third, entityType,
+            Separator.EMPTY_BYTES);
+      }
+
+      byte[] entityIdPrefix = Bytes.toBytes(rowKey.getEntityIdPrefix());
+
+      if (rowKey.getEntityId() == null) {
+        return Separator.QUALIFIERS.join(first, second, third, entityType,
+            entityIdPrefix, Separator.EMPTY_BYTES);
+      }
+
+      byte[] entityId = Separator.encode(rowKey.getEntityId(), Separator.SPACE,
+          Separator.TAB, Separator.QUALIFIERS);
+
+      byte[] fourth =
+          Separator.QUALIFIERS.join(entityType, entityIdPrefix, entityId);
+
+      return Separator.QUALIFIERS.join(first, second, third, fourth);
+    }
+
+    /*
+     * (non-Javadoc)
+     *
+     * Decodes an entity row key of the form
+     * userName!clusterId!flowName!flowRunId!appId!entityType!entityIdPrefix!
+     * entityId represented in byte format and converts it into an
+     * EntityRowKey object. flowRunId is inverted while decoding as it was
+     * inverted while encoding.
+     *
+     * @see
+     * org.apache.hadoop.yarn.server.timelineservice.storage.common
+     * .KeyConverter#decode(byte[])
+     */
+    @Override
+    public EntityRowKey decode(byte[] rowKey) {
+      byte[][] rowKeyComponents =
+          Separator.QUALIFIERS.split(rowKey, SEGMENT_SIZES);
+      if (rowKeyComponents.length != 8) {
+        throw new IllegalArgumentException("the row key is not valid for "
+            + "an entity");
+      }
+      String userId =
+          Separator.decode(Bytes.toString(rowKeyComponents[0]),
+              Separator.QUALIFIERS, Separator.TAB, Separator.SPACE);
+      String clusterId =
+          Separator.decode(Bytes.toString(rowKeyComponents[1]),
+              Separator.QUALIFIERS, Separator.TAB, Separator.SPACE);
+      String flowName =
+          Separator.decode(Bytes.toString(rowKeyComponents[2]),
+              Separator.QUALIFIERS, Separator.TAB, Separator.SPACE);
+      Long flowRunId =
+          LongConverter.invertLong(Bytes.toLong(rowKeyComponents[3]));
+      String appId = appIDKeyConverter.decode(rowKeyComponents[4]);
+      String entityType =
+          Separator.decode(Bytes.toString(rowKeyComponents[5]),
+              Separator.QUALIFIERS, Separator.TAB, Separator.SPACE);
+
+      Long entityPrefixId = Bytes.toLong(rowKeyComponents[6]);
+
+      String entityId =
+          Separator.decode(Bytes.toString(rowKeyComponents[7]),
+              Separator.QUALIFIERS, Separator.TAB, Separator.SPACE);
+      return new EntityRowKey(clusterId, userId, flowName, flowRunId, appId,
+          entityType, entityPrefixId, entityId);
+    }
+
+    @Override
+    public String encodeAsString(EntityRowKey key) {
+      if (key.clusterId == null || key.userId == null || key.flowName == null
+          || key.flowRunId == null || key.appId == null
+          || key.entityType == null || key.entityIdPrefix == null
+          || key.entityId == null) {
+        throw new IllegalArgumentException();
+      }
+      return TimelineReaderUtils
+          .joinAndEscapeStrings(new String[] {key.clusterId, key.userId,
+              key.flowName, key.flowRunId.toString(), key.appId, key.entityType,
+              key.entityIdPrefix.toString(), key.entityId});
+    }
+
+    @Override
+    public EntityRowKey decodeFromString(String encodedRowKey) {
+      List<String> split = TimelineReaderUtils.split(encodedRowKey);
+      if (split == null || split.size() != 8) {
+        throw new IllegalArgumentException("Invalid row key for entity table.");
+      }
+      Long flowRunId = Long.valueOf(split.get(3));
+      Long entityIdPrefix = Long.valueOf(split.get(6));
+      return new EntityRowKey(split.get(0), split.get(1), split.get(2),
+          flowRunId, split.get(4), split.get(5), entityIdPrefix, split.get(7));
+    }
+  }
+}
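
A round-trip sketch of the two encodings above (the binary form used as the
HBase row key and the escaped string form surfaced as the REST "fromid"); all
identifier values here are invented for illustration:

import org.apache.hadoop.yarn.server.timelineservice.storage.entity.EntityRowKey;

public class EntityRowKeySketch {
  public static void main(String[] args) {
    EntityRowKey key = new EntityRowKey("yarn-cluster", "alice", "wordcount",
        1002345678919L, "application_1111111111_2222", "YARN_CONTAINER", 0L,
        "container_1111111111_2222_01_000001");

    // Binary form: flowRunId is stored inverted for descending row order.
    byte[] bytes = key.getRowKey();
    EntityRowKey decoded = EntityRowKey.parseRowKey(bytes);

    // String form: escaped components joined by the reader utilities.
    String fromId = key.getRowKeyAsString();
    EntityRowKey reparsed = EntityRowKey.parseRowKeyFromString(fromId);

    System.out.println(decoded.getFlowRunId() + " " + reparsed.getEntityId());
  }
}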

http://git-wip-us.apache.org/repos/asf/hadoop/blob/9af30d46/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-common/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/entity/EntityRowKeyPrefix.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-common/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/entity/EntityRowKeyPrefix.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-common/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/entity/EntityRowKeyPrefix.java
new file mode 100644
index 0000000..47a1789
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-common/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/entity/EntityRowKeyPrefix.java
@@ -0,0 +1,77 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.yarn.server.timelineservice.storage.entity;
+
+import org.apache.hadoop.yarn.server.timelineservice.storage.common.RowKeyPrefix;
+
+/**
+ * Represents a partial row key for the entity table, either without the
+ * entityId or without both the entityType and entityId.
+ */
+public class EntityRowKeyPrefix extends EntityRowKey implements
+    RowKeyPrefix<EntityRowKey> {
+
+  /**
+   * Creates a prefix which generates row key prefixes for the entity table
+   * of the form
+   * {@code userName!clusterId!flowName!flowRunId!AppId!entityType!
+   * entityIdPrefix!entityId}, truncated at the first null component.
+   * @param clusterId identifying the cluster
+   * @param userId identifying the user
+   * @param flowName identifying the flow
+   * @param flowRunId identifying the individual run of this flow
+   * @param appId identifying the application
+   * @param entityType which entity type
+   * @param entityIdPrefix for entityId
+   * @param entityId for an entity
+   */
+  public EntityRowKeyPrefix(String clusterId, String userId, String flowName,
+      Long flowRunId, String appId, String entityType, Long entityIdPrefix,
+      String entityId) {
+    super(clusterId, userId, flowName, flowRunId, appId, entityType,
+        entityIdPrefix, entityId);
+  }
+
+  /**
+   * Creates a prefix which generates the following rowKeyPrefixes for the
+   * entity table:
+   * {@code userName!clusterId!flowName!flowRunId!AppId!}.
+   *
+   * @param clusterId identifying the cluster
+   * @param userId identifying the user
+   * @param flowName identifying the flow
+   * @param flowRunId identifying the individual run of this flow
+   * @param appId identifying the application
+   */
+  public EntityRowKeyPrefix(String clusterId, String userId, String flowName,
+      Long flowRunId, String appId) {
+    this(clusterId, userId, flowName, flowRunId, appId, null, null, null);
+  }
+
+  /*
+   * (non-Javadoc)
+   *
+   * @see
+   * org.apache.hadoop.yarn.server.timelineservice.storage.common.
+   * RowKeyPrefix#getRowKeyPrefix()
+   */
+  public byte[] getRowKeyPrefix() {
+    return super.getRowKey();
+  }
+
+}
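
One way such a prefix is typically consumed, sketched against the HBase client
API (identifier values invented for illustration):

import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.yarn.server.timelineservice.storage.entity.EntityRowKeyPrefix;

public class EntityScanSketch {
  public static void main(String[] args) {
    // Prefix covering every entity of one application:
    // userName!clusterId!flowName!flowRunId!AppId!
    EntityRowKeyPrefix prefix = new EntityRowKeyPrefix("yarn-cluster", "alice",
        "wordcount", 1002345678919L, "application_1111111111_2222");
    Scan scan = new Scan();
    scan.setRowPrefixFilter(prefix.getRowKeyPrefix());
    System.out.println(scan);
  }
}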

http://git-wip-us.apache.org/repos/asf/hadoop/blob/9af30d46/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-common/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/entity/EntityTable.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-common/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/entity/EntityTable.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-common/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/entity/EntityTable.java
new file mode 100644
index 0000000..dceeb99
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-common/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/entity/EntityTable.java
@@ -0,0 +1,61 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.server.timelineservice.storage.entity;
+
+import org.apache.hadoop.yarn.server.timelineservice.storage.common.BaseTable;
+
+/**
+ * The entity table has column families info, config and metrics. Info stores
+ * information about a timeline entity object, config stores configuration
+ * data of a timeline entity object, and metrics stores the metrics of a
+ * timeline entity object.
+ *
+ * Example entity table record:
+ *
+ * <pre>
+ * |-------------------------------------------------------------------------|
+ * |  Row       | Column Family                | Column Family| Column Family|
+ * |  key       | info                         | metrics      | config       |
+ * |-------------------------------------------------------------------------|
+ * | userName!  | id:entityId                  | metricId1:   | configKey1:  |
+ * | clusterId! |                              | metricValue1 | configValue1 |
+ * | flowName!  | type:entityType              | @timestamp1  |              |
+ * | flowRunId! |                              |              | configKey2:  |
+ * | AppId!     | created_time:                | metricId1:   | configValue2 |
+ * | entityType!| 1392993084018                | metricValue2 |              |
+ * | idPrefix!  |                              | @timestamp2  |              |
+ * | entityId   | i!infoKey:                   |              |              |
+ * |            | infoValue                    | metricId1:   |              |
+ * |            |                              | metricValue1 |              |
+ * |            | r!relatesToKey:              | @timestamp2  |              |
+ * |            | id3=id4=id5                  |              |              |
+ * |            |                              |              |              |
+ * |            | s!isRelatedToKey             |              |              |
+ * |            | id7=id9=id6                  |              |              |
+ * |            |                              |              |              |
+ * |            | e!eventId=timestamp=infoKey: |              |              |
+ * |            | eventInfoValue               |              |              |
+ * |            |                              |              |              |
+ * |            | flowVersion:                 |              |              |
+ * |            | versionValue                 |              |              |
+ * |-------------------------------------------------------------------------|
+ * </pre>
+ */
+public final class EntityTable extends BaseTable<EntityTable> {
+}


---------------------------------------------------------------------
To unsubscribe, e-mail: common-commits-unsubscribe@hadoop.apache.org
For additional commands, e-mail: common-commits-help@hadoop.apache.org


[16/50] [abbrv] hadoop git commit: YARN-7919. Refactor timelineservice-hbase module into submodules. Contributed by Haibo Chen.

Posted by ha...@apache.org.
http://git-wip-us.apache.org/repos/asf/hadoop/blob/9af30d46/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-client/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/reader/ApplicationEntityReader.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-client/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/reader/ApplicationEntityReader.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-client/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/reader/ApplicationEntityReader.java
new file mode 100644
index 0000000..7440316
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-client/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/reader/ApplicationEntityReader.java
@@ -0,0 +1,523 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.yarn.server.timelineservice.storage.reader;
+
+import java.io.IOException;
+import java.util.EnumSet;
+import java.util.Set;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hbase.client.Connection;
+import org.apache.hadoop.hbase.client.Get;
+import org.apache.hadoop.hbase.client.Query;
+import org.apache.hadoop.hbase.client.Result;
+import org.apache.hadoop.hbase.client.ResultScanner;
+import org.apache.hadoop.hbase.client.Scan;
+import org.apache.hadoop.hbase.filter.BinaryComparator;
+import org.apache.hadoop.hbase.filter.CompareFilter.CompareOp;
+import org.apache.hadoop.hbase.filter.FamilyFilter;
+import org.apache.hadoop.hbase.filter.FilterList;
+import org.apache.hadoop.hbase.filter.FilterList.Operator;
+import org.apache.hadoop.hbase.filter.PageFilter;
+import org.apache.hadoop.hbase.filter.QualifierFilter;
+import org.apache.hadoop.yarn.api.records.timelineservice.TimelineEntity;
+import org.apache.hadoop.yarn.api.records.timelineservice.TimelineEntityType;
+import org.apache.hadoop.yarn.server.timelineservice.reader.TimelineDataToRetrieve;
+import org.apache.hadoop.yarn.server.timelineservice.reader.TimelineEntityFilters;
+import org.apache.hadoop.yarn.server.timelineservice.reader.TimelineReaderContext;
+import org.apache.hadoop.yarn.server.timelineservice.reader.TimelineReaderUtils;
+import org.apache.hadoop.yarn.server.timelineservice.reader.filter.TimelineFilterList;
+import org.apache.hadoop.yarn.server.timelineservice.reader.filter.TimelineFilterUtils;
+import org.apache.hadoop.yarn.server.timelineservice.storage.TimelineReader.Field;
+import org.apache.hadoop.yarn.server.timelineservice.storage.application.ApplicationColumn;
+import org.apache.hadoop.yarn.server.timelineservice.storage.application.ApplicationColumnFamily;
+import org.apache.hadoop.yarn.server.timelineservice.storage.application.ApplicationColumnPrefix;
+import org.apache.hadoop.yarn.server.timelineservice.storage.application.ApplicationRowKey;
+import org.apache.hadoop.yarn.server.timelineservice.storage.application.ApplicationRowKeyPrefix;
+import org.apache.hadoop.yarn.server.timelineservice.storage.application.ApplicationTableRW;
+import org.apache.hadoop.yarn.server.timelineservice.storage.common.BaseTableRW;
+import org.apache.hadoop.yarn.server.timelineservice.storage.common.ColumnRWHelper;
+import org.apache.hadoop.yarn.server.timelineservice.storage.common.HBaseTimelineStorageUtils;
+import org.apache.hadoop.yarn.server.timelineservice.storage.common.RowKeyPrefix;
+import org.apache.hadoop.yarn.server.timelineservice.storage.common.TimelineStorageUtils;
+import org.apache.hadoop.yarn.webapp.BadRequestException;
+
+import com.google.common.base.Preconditions;
+
+/**
+ * Timeline entity reader for application entities that are stored in the
+ * application table.
+ */
+class ApplicationEntityReader extends GenericEntityReader {
+  private static final ApplicationTableRW APPLICATION_TABLE =
+      new ApplicationTableRW();
+
+  ApplicationEntityReader(TimelineReaderContext ctxt,
+      TimelineEntityFilters entityFilters, TimelineDataToRetrieve toRetrieve) {
+    super(ctxt, entityFilters, toRetrieve);
+  }
+
+  ApplicationEntityReader(TimelineReaderContext ctxt,
+      TimelineDataToRetrieve toRetrieve) {
+    super(ctxt, toRetrieve);
+  }
+
+  /**
+   * Uses the {@link ApplicationTableRW}.
+   */
+  protected BaseTableRW<?> getTable() {
+    return APPLICATION_TABLE;
+  }
+
+  /**
+   * This method is called only for multiple entity reads.
+   */
+  @Override
+  protected FilterList constructFilterListBasedOnFilters() throws IOException {
+    // Filters here cannot be null for multiple entity reads as they are set in
+    // augmentParams if null.
+    TimelineEntityFilters filters = getFilters();
+    FilterList listBasedOnFilters = new FilterList();
+    // Create filter list based on created time range and add it to
+    // listBasedOnFilters.
+    long createdTimeBegin = filters.getCreatedTimeBegin();
+    long createdTimeEnd = filters.getCreatedTimeEnd();
+    if (createdTimeBegin != 0 || createdTimeEnd != Long.MAX_VALUE) {
+      listBasedOnFilters.addFilter(
+          TimelineFilterUtils.createSingleColValueFiltersByRange(
+          ApplicationColumn.CREATED_TIME, createdTimeBegin, createdTimeEnd));
+    }
+    // Create filter list based on metric filters and add it to
+    // listBasedOnFilters.
+    TimelineFilterList metricFilters = filters.getMetricFilters();
+    if (metricFilters != null && !metricFilters.getFilterList().isEmpty()) {
+      listBasedOnFilters.addFilter(
+          TimelineFilterUtils.createHBaseFilterList(
+              ApplicationColumnPrefix.METRIC, metricFilters));
+    }
+    // Create filter list based on config filters and add it to
+    // listBasedOnFilters.
+    TimelineFilterList configFilters = filters.getConfigFilters();
+    if (configFilters != null && !configFilters.getFilterList().isEmpty()) {
+      listBasedOnFilters.addFilter(
+          TimelineFilterUtils.createHBaseFilterList(
+              ApplicationColumnPrefix.CONFIG, configFilters));
+    }
+    // Create filter list based on info filters and add it to listBasedOnFilters
+    TimelineFilterList infoFilters = filters.getInfoFilters();
+    if (infoFilters != null && !infoFilters.getFilterList().isEmpty()) {
+      listBasedOnFilters.addFilter(
+          TimelineFilterUtils.createHBaseFilterList(
+              ApplicationColumnPrefix.INFO, infoFilters));
+    }
+    return listBasedOnFilters;
+  }
+
+  /**
+   * Add {@link QualifierFilter} filters to filter list for each column of
+   * application table.
+   *
+   * @param list filter list to which qualifier filters have to be added.
+   */
+  @Override
+  protected void updateFixedColumns(FilterList list) {
+    for (ApplicationColumn column : ApplicationColumn.values()) {
+      list.addFilter(new QualifierFilter(CompareOp.EQUAL,
+          new BinaryComparator(column.getColumnQualifierBytes())));
+    }
+  }
+
+  /**
+   * Creates a filter list which indicates that only some of the column
+   * qualifiers in the info column family will be returned in result.
+   *
+   * @return filter list.
+   * @throws IOException if any problem occurs while creating filter list.
+   */
+  private FilterList createFilterListForColsOfInfoFamily()
+      throws IOException {
+    FilterList infoFamilyColsFilter = new FilterList(Operator.MUST_PASS_ONE);
+    // Add filters for each column in entity table.
+    updateFixedColumns(infoFamilyColsFilter);
+    EnumSet<Field> fieldsToRetrieve = getDataToRetrieve().getFieldsToRetrieve();
+    // If INFO field has to be retrieved, add a filter for fetching columns
+    // with INFO column prefix.
+    if (hasField(fieldsToRetrieve, Field.INFO)) {
+      infoFamilyColsFilter.addFilter(
+          TimelineFilterUtils.createHBaseQualifierFilter(
+              CompareOp.EQUAL, ApplicationColumnPrefix.INFO));
+    }
+    TimelineFilterList relatesTo = getFilters().getRelatesTo();
+    if (hasField(fieldsToRetrieve, Field.RELATES_TO)) {
+      // If RELATES_TO field has to be retrieved, add a filter for fetching
+      // columns with RELATES_TO column prefix.
+      infoFamilyColsFilter.addFilter(
+          TimelineFilterUtils.createHBaseQualifierFilter(
+              CompareOp.EQUAL, ApplicationColumnPrefix.RELATES_TO));
+    } else if (relatesTo != null && !relatesTo.getFilterList().isEmpty()) {
+      // Even if fields to retrieve does not contain RELATES_TO, we still
+      // need to have a filter to fetch some of the column qualifiers if
+      // relatesTo filters are specified. relatesTo filters will then be
+      // matched after fetching rows from HBase.
+      Set<String> relatesToCols =
+          TimelineFilterUtils.fetchColumnsFromFilterList(relatesTo);
+      infoFamilyColsFilter.addFilter(createFiltersFromColumnQualifiers(
+          ApplicationColumnPrefix.RELATES_TO, relatesToCols));
+    }
+    TimelineFilterList isRelatedTo = getFilters().getIsRelatedTo();
+    if (hasField(fieldsToRetrieve, Field.IS_RELATED_TO)) {
+      // If IS_RELATED_TO field has to be retrieved, add a filter for fetching
+      // columns with IS_RELATED_TO column prefix.
+      infoFamilyColsFilter.addFilter(
+          TimelineFilterUtils.createHBaseQualifierFilter(
+              CompareOp.EQUAL, ApplicationColumnPrefix.IS_RELATED_TO));
+    } else if (isRelatedTo != null && !isRelatedTo.getFilterList().isEmpty()) {
+      // Even if fields to retrieve does not contain IS_RELATED_TO, we still
+      // need to have a filter to fetch some of the column qualifiers if
+      // isRelatedTo filters are specified. isRelatedTo filters will then be
+      // matched after fetching rows from HBase.
+      Set<String> isRelatedToCols =
+          TimelineFilterUtils.fetchColumnsFromFilterList(isRelatedTo);
+      infoFamilyColsFilter.addFilter(createFiltersFromColumnQualifiers(
+          ApplicationColumnPrefix.IS_RELATED_TO, isRelatedToCols));
+    }
+    TimelineFilterList eventFilters = getFilters().getEventFilters();
+    if (hasField(fieldsToRetrieve, Field.EVENTS)) {
+      // If EVENTS field has to be retrieved, add a filter for fetching columns
+      // with EVENT column prefix.
+      infoFamilyColsFilter.addFilter(
+          TimelineFilterUtils.createHBaseQualifierFilter(
+              CompareOp.EQUAL, ApplicationColumnPrefix.EVENT));
+    } else if (eventFilters != null
+        && !eventFilters.getFilterList().isEmpty()) {
+      // Even if fields to retrieve does not contain EVENTS, we still need to
+      // have a filter to fetch some of the column qualifiers on the basis of
+      // event filters specified. Event filters will then be matched after
+      // fetching rows from HBase.
+      Set<String> eventCols =
+          TimelineFilterUtils.fetchColumnsFromFilterList(eventFilters);
+      infoFamilyColsFilter.addFilter(createFiltersFromColumnQualifiers(
+          ApplicationColumnPrefix.EVENT, eventCols));
+    }
+    return infoFamilyColsFilter;
+  }
+
+  /**
+   * Exclude column prefixes via filters which are not required (based on
+   * fields to retrieve) from the info column family. These filters are added
+   * to a filter list which contains a filter for getting the info column
+   * family.
+   *
+   * @param infoColFamilyList filter list for info column family.
+   */
+  private void excludeFieldsFromInfoColFamily(FilterList infoColFamilyList) {
+    EnumSet<Field> fieldsToRetrieve = getDataToRetrieve().getFieldsToRetrieve();
+    // Events not required.
+    if (!hasField(fieldsToRetrieve, Field.EVENTS)) {
+      infoColFamilyList.addFilter(
+          TimelineFilterUtils.createHBaseQualifierFilter(
+              CompareOp.NOT_EQUAL, ApplicationColumnPrefix.EVENT));
+    }
+    // info not required.
+    if (!hasField(fieldsToRetrieve, Field.INFO)) {
+      infoColFamilyList.addFilter(
+          TimelineFilterUtils.createHBaseQualifierFilter(
+              CompareOp.NOT_EQUAL, ApplicationColumnPrefix.INFO));
+    }
+    // is related to not required.
+    if (!hasField(fieldsToRetrieve, Field.IS_RELATED_TO)) {
+      infoColFamilyList.addFilter(
+          TimelineFilterUtils.createHBaseQualifierFilter(
+              CompareOp.NOT_EQUAL, ApplicationColumnPrefix.IS_RELATED_TO));
+    }
+    // relates to not required.
+    if (!hasField(fieldsToRetrieve, Field.RELATES_TO)) {
+      infoColFamilyList.addFilter(
+          TimelineFilterUtils.createHBaseQualifierFilter(
+              CompareOp.NOT_EQUAL, ApplicationColumnPrefix.RELATES_TO));
+    }
+  }
+
+  /**
+   * Updates filter list based on fields for confs and metrics to retrieve.
+   *
+   * @param listBasedOnFields filter list based on fields.
+   * @throws IOException if any problem occurs while updating filter list.
+   */
+  private void updateFilterForConfsAndMetricsToRetrieve(
+      FilterList listBasedOnFields) throws IOException {
+    TimelineDataToRetrieve dataToRetrieve = getDataToRetrieve();
+    // Please note that if confsToRetrieve is specified, we would have added
+    // CONFS to fields to retrieve in augmentParams() even if not specified.
+    if (dataToRetrieve.getFieldsToRetrieve().contains(Field.CONFIGS)) {
+      // Create a filter list for configs.
+      listBasedOnFields.addFilter(TimelineFilterUtils.
+          createFilterForConfsOrMetricsToRetrieve(
+              dataToRetrieve.getConfsToRetrieve(),
+              ApplicationColumnFamily.CONFIGS, ApplicationColumnPrefix.CONFIG));
+    }
+
+    // Please note that if metricsToRetrieve is specified, we would have added
+    // METRICS to fields to retrieve in augmentParams() even if not specified.
+    if (dataToRetrieve.getFieldsToRetrieve().contains(Field.METRICS)) {
+      // Create a filter list for metrics.
+      listBasedOnFields.addFilter(TimelineFilterUtils.
+          createFilterForConfsOrMetricsToRetrieve(
+              dataToRetrieve.getMetricsToRetrieve(),
+              ApplicationColumnFamily.METRICS, ApplicationColumnPrefix.METRIC));
+    }
+  }
+
+  @Override
+  protected FilterList constructFilterListBasedOnFields() throws IOException {
+    if (!needCreateFilterListBasedOnFields()) {
+      // Fetch all the columns. No need of a filter.
+      return null;
+    }
+    FilterList listBasedOnFields = new FilterList(Operator.MUST_PASS_ONE);
+    FilterList infoColFamilyList = new FilterList();
+    // By default fetch everything in INFO column family.
+    FamilyFilter infoColumnFamily =
+        new FamilyFilter(CompareOp.EQUAL,
+            new BinaryComparator(ApplicationColumnFamily.INFO.getBytes()));
+    infoColFamilyList.addFilter(infoColumnFamily);
+    if (!isSingleEntityRead() && fetchPartialColsFromInfoFamily()) {
+      // We can fetch only some of the columns from info family.
+      infoColFamilyList.addFilter(createFilterListForColsOfInfoFamily());
+    } else {
+      // Exclude column prefixes in info column family which are not required
+      // based on fields to retrieve.
+      excludeFieldsFromInfoColFamily(infoColFamilyList);
+    }
+    listBasedOnFields.addFilter(infoColFamilyList);
+
+    updateFilterForConfsAndMetricsToRetrieve(listBasedOnFields);
+    return listBasedOnFields;
+  }
+
+  @Override
+  protected Result getResult(Configuration hbaseConf, Connection conn,
+      FilterList filterList) throws IOException {
+    TimelineReaderContext context = getContext();
+    ApplicationRowKey applicationRowKey =
+        new ApplicationRowKey(context.getClusterId(), context.getUserId(),
+            context.getFlowName(), context.getFlowRunId(), context.getAppId());
+    byte[] rowKey = applicationRowKey.getRowKey();
+    Get get = new Get(rowKey);
+    // Set time range for metric values.
+    setMetricsTimeRange(get);
+    get.setMaxVersions(getDataToRetrieve().getMetricsLimit());
+    if (filterList != null && !filterList.getFilters().isEmpty()) {
+      get.setFilter(filterList);
+    }
+    return getTable().getResult(hbaseConf, conn, get);
+  }
+
+  @Override
+  protected void validateParams() {
+    Preconditions.checkNotNull(getContext(), "context shouldn't be null");
+    Preconditions.checkNotNull(
+        getDataToRetrieve(), "data to retrieve shouldn't be null");
+    Preconditions.checkNotNull(getContext().getClusterId(),
+        "clusterId shouldn't be null");
+    Preconditions.checkNotNull(getContext().getEntityType(),
+        "entityType shouldn't be null");
+    if (isSingleEntityRead()) {
+      Preconditions.checkNotNull(getContext().getAppId(),
+          "appId shouldn't be null");
+    } else {
+      Preconditions.checkNotNull(getContext().getUserId(),
+          "userId shouldn't be null");
+      Preconditions.checkNotNull(getContext().getFlowName(),
+          "flowName shouldn't be null");
+    }
+  }
+
+  @Override
+  protected void augmentParams(Configuration hbaseConf, Connection conn)
+      throws IOException {
+    if (isSingleEntityRead()) {
+      // Get flow context information from AppToFlow table.
+      defaultAugmentParams(hbaseConf, conn);
+    }
+    // Add configs/metrics to fields to retrieve if confsToRetrieve and/or
+    // metricsToRetrieve are specified.
+    getDataToRetrieve().addFieldsBasedOnConfsAndMetricsToRetrieve();
+    if (!isSingleEntityRead()) {
+      createFiltersIfNull();
+    }
+  }
+
+  private void setMetricsTimeRange(Query query) {
+    // Set time range for metric values.
+    HBaseTimelineStorageUtils.setMetricsTimeRange(
+        query, ApplicationColumnFamily.METRICS.getBytes(),
+        getDataToRetrieve().getMetricsTimeBegin(),
+        getDataToRetrieve().getMetricsTimeEnd());
+  }
+
+  @Override
+  protected ResultScanner getResults(Configuration hbaseConf,
+      Connection conn, FilterList filterList) throws IOException {
+    Scan scan = new Scan();
+    TimelineReaderContext context = getContext();
+    RowKeyPrefix<ApplicationRowKey> applicationRowKeyPrefix = null;
+
+    // Whether or not flowRunId is null doesn't matter; the
+    // ApplicationRowKeyPrefix will do the right thing.
+    // In the default mode (no fromid), the scan always starts from the
+    // beginning of the entity type.
+    if (getFilters().getFromId() == null) {
+      applicationRowKeyPrefix = new ApplicationRowKeyPrefix(
+          context.getClusterId(), context.getUserId(), context.getFlowName(),
+          context.getFlowRunId());
+      scan.setRowPrefixFilter(applicationRowKeyPrefix.getRowKeyPrefix());
+    } else {
+      ApplicationRowKey applicationRowKey = null;
+      try {
+        applicationRowKey =
+            ApplicationRowKey.parseRowKeyFromString(getFilters().getFromId());
+      } catch (IllegalArgumentException e) {
+        throw new BadRequestException("Invalid fromid filter is provided.");
+      }
+      if (!context.getClusterId().equals(applicationRowKey.getClusterId())) {
+        throw new BadRequestException(
+            "fromid doesn't belong to clusterId=" + context.getClusterId());
+      }
+
+      // set start row
+      scan.setStartRow(applicationRowKey.getRowKey());
+
+      // get the bytes for stop row
+      applicationRowKeyPrefix = new ApplicationRowKeyPrefix(
+          context.getClusterId(), context.getUserId(), context.getFlowName(),
+          context.getFlowRunId());
+
+      // set stop row
+      scan.setStopRow(
+          HBaseTimelineStorageUtils.calculateTheClosestNextRowKeyForPrefix(
+              applicationRowKeyPrefix.getRowKeyPrefix()));
+    }
+
+    FilterList newList = new FilterList();
+    newList.addFilter(new PageFilter(getFilters().getLimit()));
+    if (filterList != null && !filterList.getFilters().isEmpty()) {
+      newList.addFilter(filterList);
+    }
+    scan.setFilter(newList);
+
+    // Set time range for metric values.
+    setMetricsTimeRange(scan);
+    scan.setMaxVersions(getDataToRetrieve().getMetricsLimit());
+    return getTable().getResultScanner(hbaseConf, conn, scan);
+  }
+
+  @Override
+  protected TimelineEntity parseEntity(Result result) throws IOException {
+    if (result == null || result.isEmpty()) {
+      return null;
+    }
+    TimelineEntity entity = new TimelineEntity();
+    entity.setType(TimelineEntityType.YARN_APPLICATION.toString());
+    String entityId =
+        ColumnRWHelper.readResult(result, ApplicationColumn.ID).toString();
+    entity.setId(entityId);
+
+    TimelineEntityFilters filters = getFilters();
+    // fetch created time
+    Long createdTime = (Long) ColumnRWHelper.readResult(result,
+        ApplicationColumn.CREATED_TIME);
+    entity.setCreatedTime(createdTime);
+
+    EnumSet<Field> fieldsToRetrieve = getDataToRetrieve().getFieldsToRetrieve();
+    // fetch is related to entities and match isRelatedTo filter. If isRelatedTo
+    // filters do not match, entity would be dropped. We have to match filters
+    // locally as relevant HBase filters to filter out rows on the basis of
+    // isRelatedTo are not set in HBase scan.
+    boolean checkIsRelatedTo =
+        !isSingleEntityRead() && filters.getIsRelatedTo() != null &&
+        filters.getIsRelatedTo().getFilterList().size() > 0;
+    if (hasField(fieldsToRetrieve, Field.IS_RELATED_TO) || checkIsRelatedTo) {
+      readRelationship(entity, result, ApplicationColumnPrefix.IS_RELATED_TO,
+          true);
+      if (checkIsRelatedTo && !TimelineStorageUtils.matchIsRelatedTo(entity,
+          filters.getIsRelatedTo())) {
+        return null;
+      }
+      if (!hasField(fieldsToRetrieve,
+          Field.IS_RELATED_TO)) {
+        entity.getIsRelatedToEntities().clear();
+      }
+    }
+
+    // fetch relates to entities and match relatesTo filter. If relatesTo
+    // filters do not match, entity would be dropped. We have to match filters
+    // locally as relevant HBase filters to filter out rows on the basis of
+    // relatesTo are not set in HBase scan.
+    boolean checkRelatesTo =
+        !isSingleEntityRead() && filters.getRelatesTo() != null &&
+        filters.getRelatesTo().getFilterList().size() > 0;
+    if (hasField(fieldsToRetrieve, Field.RELATES_TO) ||
+        checkRelatesTo) {
+      readRelationship(entity, result, ApplicationColumnPrefix.RELATES_TO,
+          false);
+      if (checkRelatesTo && !TimelineStorageUtils.matchRelatesTo(entity,
+          filters.getRelatesTo())) {
+        return null;
+      }
+      if (!hasField(fieldsToRetrieve, Field.RELATES_TO)) {
+        entity.getRelatesToEntities().clear();
+      }
+    }
+
+    // fetch info if fieldsToRetrieve contains INFO or ALL.
+    if (hasField(fieldsToRetrieve, Field.INFO)) {
+      readKeyValuePairs(entity, result, ApplicationColumnPrefix.INFO, false);
+    }
+
+    // fetch configs if fieldsToRetrieve contains CONFIGS or ALL.
+    if (hasField(fieldsToRetrieve, Field.CONFIGS)) {
+      readKeyValuePairs(entity, result, ApplicationColumnPrefix.CONFIG, true);
+    }
+
+    // fetch events and match event filters if they exist. If event filters do
+    // not match, entity would be dropped. We have to match filters locally
+    // as relevant HBase filters to filter out rows on the basis of events
+    // are not set in HBase scan.
+    boolean checkEvents =
+        !isSingleEntityRead() && filters.getEventFilters() != null &&
+        filters.getEventFilters().getFilterList().size() > 0;
+    if (hasField(fieldsToRetrieve, Field.EVENTS) || checkEvents) {
+      readEvents(entity, result, ApplicationColumnPrefix.EVENT);
+      if (checkEvents && !TimelineStorageUtils.matchEventFilters(entity,
+          filters.getEventFilters())) {
+        return null;
+      }
+      if (!hasField(fieldsToRetrieve, Field.EVENTS)) {
+        entity.getEvents().clear();
+      }
+    }
+
+    // fetch metrics if fieldsToRetrieve contains METRICS or ALL.
+    if (hasField(fieldsToRetrieve, Field.METRICS)) {
+      readMetrics(entity, result, ApplicationColumnPrefix.METRIC);
+    }
+
+    ApplicationRowKey rowKey = ApplicationRowKey.parseRowKey(result.getRow());
+    entity.getInfo().put(TimelineReaderUtils.FROMID_KEY,
+        rowKey.getRowKeyAsString());
+    return entity;
+  }
+}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/9af30d46/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-client/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/reader/EntityTypeReader.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-client/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/reader/EntityTypeReader.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-client/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/reader/EntityTypeReader.java
new file mode 100644
index 0000000..ebe21a4
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-client/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/reader/EntityTypeReader.java
@@ -0,0 +1,175 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.yarn.server.timelineservice.storage.reader;
+
+import com.google.common.base.Preconditions;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hbase.client.Connection;
+import org.apache.hadoop.hbase.client.Result;
+import org.apache.hadoop.hbase.client.ResultScanner;
+import org.apache.hadoop.hbase.client.Scan;
+import org.apache.hadoop.hbase.filter.FilterList;
+import org.apache.hadoop.hbase.filter.FirstKeyOnlyFilter;
+import org.apache.hadoop.hbase.filter.KeyOnlyFilter;
+import org.apache.hadoop.hbase.filter.PageFilter;
+import org.apache.hadoop.yarn.api.records.timelineservice.TimelineEntity;
+import org.apache.hadoop.yarn.server.timelineservice.reader.TimelineReaderContext;
+import org.apache.hadoop.yarn.server.timelineservice.storage.common.HBaseTimelineStorageUtils;
+import org.apache.hadoop.yarn.server.timelineservice.storage.common.Separator;
+import org.apache.hadoop.yarn.server.timelineservice.storage.entity.EntityRowKey;
+import org.apache.hadoop.yarn.server.timelineservice.storage.entity.EntityRowKeyPrefix;
+import org.apache.hadoop.yarn.server.timelineservice.storage.entity.EntityTableRW;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.io.IOException;
+import java.util.Arrays;
+import java.util.Set;
+import java.util.TreeSet;
+
+/**
+ * Timeline entity reader for listing all available entity types given one
+ * reader context. Right now only supports listing all entity types within one
+ * YARN application.
+ */
+public final class EntityTypeReader extends AbstractTimelineStorageReader {
+
+  private static final Logger LOG =
+      LoggerFactory.getLogger(EntityTypeReader.class);
+  private static final EntityTableRW ENTITY_TABLE = new EntityTableRW();
+
+  public EntityTypeReader(TimelineReaderContext context) {
+    super(context);
+  }
+
+  /**
+   * Reads a set of timeline entity types from the HBase storage for the given
+   * context.
+   *
+   * @param hbaseConf HBase Configuration.
+   * @param conn HBase Connection.
+   * @return a set of entity type strings found for the given context.
+   * @throws IOException if any exception is encountered while reading entities.
+   */
+  public Set<String> readEntityTypes(Configuration hbaseConf,
+      Connection conn) throws IOException {
+
+    validateParams();
+    augmentParams(hbaseConf, conn);
+
+    Set<String> types = new TreeSet<>();
+    TimelineReaderContext context = getContext();
+    EntityRowKeyPrefix prefix = new EntityRowKeyPrefix(context.getClusterId(),
+        context.getUserId(), context.getFlowName(), context.getFlowRunId(),
+        context.getAppId());
+    byte[] currRowKey = prefix.getRowKeyPrefix();
+    byte[] nextRowKey = prefix.getRowKeyPrefix();
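+    // bump the last byte so the initial stop row is the first key past
+    // every row sharing this prefix (an exclusive upper bound)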
+    nextRowKey[nextRowKey.length - 1]++;
+
+    FilterList typeFilterList = new FilterList();
+    typeFilterList.addFilter(new FirstKeyOnlyFilter());
+    typeFilterList.addFilter(new KeyOnlyFilter());
+    typeFilterList.addFilter(new PageFilter(1));
+    LOG.debug("FilterList created for scan is - {}", typeFilterList);
+
+    int counter = 0;
+    while (true) {
+      try (ResultScanner results =
+          getResult(hbaseConf, conn, typeFilterList, currRowKey, nextRowKey)) {
+        TimelineEntity entity = parseEntityForType(results.next());
+        if (entity == null) {
+          break;
+        }
+        ++counter;
+        if (!types.add(entity.getType())) {
+          LOG.warn("Failed to add type " + entity.getType()
+              + " to the result set because there is a duplicated copy. ");
+        }
+        String currType = entity.getType();
+        if (LOG.isDebugEnabled()) {
+          LOG.debug("Current row key: " + Arrays.toString(currRowKey));
+          LOG.debug("New entity type discovered: " + currType);
+        }
+        currRowKey = getNextRowKey(prefix.getRowKeyPrefix(), currType);
+      }
+    }
+    LOG.debug("Scanned {} records for {} types", counter, types.size());
+    return types;
+  }
+
+  @Override
+  protected void validateParams() {
+    Preconditions.checkNotNull(getContext(), "context shouldn't be null");
+    Preconditions.checkNotNull(getContext().getClusterId(),
+        "clusterId shouldn't be null");
+    Preconditions.checkNotNull(getContext().getAppId(),
+        "appId shouldn't be null");
+  }
+
+  /**
+   * Computes the row key that immediately follows every row sharing the
+   * current prefix and entity type.
+   *
+   * @param currRowKeyPrefix The current prefix that contains user, cluster,
+   *                         flow, run, and application id.
+   * @param entityType Current entity type.
+   * @return a row key from which the scan for the next entity type can start.
+   */
+  private static byte[] getNextRowKey(byte[] currRowKeyPrefix,
+      String entityType) {
+    if (currRowKeyPrefix == null || entityType == null) {
+      return null;
+    }
+
+    byte[] entityTypeEncoded = Separator.QUALIFIERS.join(
+        Separator.encode(entityType, Separator.SPACE, Separator.TAB,
+            Separator.QUALIFIERS),
+        Separator.EMPTY_BYTES);
+
+    byte[] currRowKey
+        = new byte[currRowKeyPrefix.length + entityTypeEncoded.length];
+    System.arraycopy(currRowKeyPrefix, 0, currRowKey, 0,
+        currRowKeyPrefix.length);
+    System.arraycopy(entityTypeEncoded, 0, currRowKey, currRowKeyPrefix.length,
+        entityTypeEncoded.length);
+
+    return HBaseTimelineStorageUtils.calculateTheClosestNextRowKeyForPrefix(
+        currRowKey);
+  }
+
+  private ResultScanner getResult(Configuration hbaseConf, Connection conn,
+      FilterList filterList, byte[] startPrefix, byte[] endPrefix)
+      throws IOException {
+    Scan scan = new Scan(startPrefix, endPrefix);
+    scan.setFilter(filterList);
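+    // each probe matches at most one row, so a small scan (served in a
+    // single RPC) avoids extra scanner round trips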
+    scan.setSmall(true);
+    return ENTITY_TABLE.getResultScanner(hbaseConf, conn, scan);
+  }
+
+  private TimelineEntity parseEntityForType(Result result)
+      throws IOException {
+    if (result == null || result.isEmpty()) {
+      return null;
+    }
+    TimelineEntity entity = new TimelineEntity();
+    EntityRowKey newRowKey = EntityRowKey.parseRowKey(result.getRow());
+    entity.setType(newRowKey.getEntityType());
+    return entity;
+  }
+
+}
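
The type enumeration above advances the scan by computing the closest row
key past the current prefix-plus-type. A minimal standalone sketch of that
successor computation (the class and method names here are illustrative,
not the actual HBaseTimelineStorageUtils API; HBase orders row keys as
unsigned bytes):

    import java.util.Arrays;

    final class RowKeySuccessor {
      /** Smallest row key strictly greater than every key with the prefix. */
      static byte[] closestNextRowKeyForPrefix(byte[] prefix) {
        byte[] next = Arrays.copyOf(prefix, prefix.length);
        for (int i = next.length - 1; i >= 0; i--) {
          if (next[i] != (byte) 0xFF) {
            next[i]++;                        // increment, drop the 0xFF tail
            return Arrays.copyOf(next, i + 1);
          }
          // 0xFF cannot be incremented; carry into the previous byte
        }
        return new byte[0];                   // all 0xFF: scan to end of table
      }
    }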

http://git-wip-us.apache.org/repos/asf/hadoop/blob/9af30d46/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-client/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/reader/FlowActivityEntityReader.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-client/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/reader/FlowActivityEntityReader.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-client/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/reader/FlowActivityEntityReader.java
new file mode 100644
index 0000000..d0a0f3b
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-client/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/reader/FlowActivityEntityReader.java
@@ -0,0 +1,186 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.server.timelineservice.storage.reader;
+
+import java.io.IOException;
+import java.util.Map;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hbase.client.Connection;
+import org.apache.hadoop.hbase.client.Result;
+import org.apache.hadoop.hbase.client.ResultScanner;
+import org.apache.hadoop.hbase.client.Scan;
+import org.apache.hadoop.hbase.filter.FilterList;
+import org.apache.hadoop.hbase.filter.PageFilter;
+import org.apache.hadoop.yarn.api.records.timelineservice.FlowActivityEntity;
+import org.apache.hadoop.yarn.api.records.timelineservice.FlowRunEntity;
+import org.apache.hadoop.yarn.api.records.timelineservice.TimelineEntity;
+import org.apache.hadoop.yarn.server.timelineservice.reader.TimelineDataToRetrieve;
+import org.apache.hadoop.yarn.server.timelineservice.reader.TimelineEntityFilters;
+import org.apache.hadoop.yarn.server.timelineservice.reader.TimelineReaderContext;
+import org.apache.hadoop.yarn.server.timelineservice.reader.TimelineReaderUtils;
+import org.apache.hadoop.yarn.server.timelineservice.storage.common.BaseTableRW;
+import org.apache.hadoop.yarn.server.timelineservice.storage.common.ColumnRWHelper;
+import org.apache.hadoop.yarn.server.timelineservice.storage.common.KeyConverter;
+import org.apache.hadoop.yarn.server.timelineservice.storage.common.LongKeyConverter;
+import org.apache.hadoop.yarn.server.timelineservice.storage.flow.FlowActivityColumnPrefix;
+import org.apache.hadoop.yarn.server.timelineservice.storage.flow.FlowActivityRowKey;
+import org.apache.hadoop.yarn.server.timelineservice.storage.flow.FlowActivityRowKeyPrefix;
+import org.apache.hadoop.yarn.server.timelineservice.storage.flow.FlowActivityTableRW;
+import org.apache.hadoop.yarn.webapp.BadRequestException;
+
+import com.google.common.base.Preconditions;
+
+/**
+ * Timeline entity reader for flow activity entities that are stored in the
+ * flow activity table.
+ */
+class FlowActivityEntityReader extends TimelineEntityReader {
+  private static final FlowActivityTableRW FLOW_ACTIVITY_TABLE =
+      new FlowActivityTableRW();
+
+  /**
+   * Used to convert Long key components to and from storage format.
+   */
+  private final KeyConverter<Long> longKeyConverter = new LongKeyConverter();
+
+  FlowActivityEntityReader(TimelineReaderContext ctxt,
+      TimelineEntityFilters entityFilters, TimelineDataToRetrieve toRetrieve) {
+    super(ctxt, entityFilters, toRetrieve);
+  }
+
+  FlowActivityEntityReader(TimelineReaderContext ctxt,
+      TimelineDataToRetrieve toRetrieve) {
+    super(ctxt, toRetrieve);
+  }
+
+  /**
+   * Uses the {@link FlowActivityTableRW}.
+   */
+  @Override
+  protected BaseTableRW<?> getTable() {
+    return FLOW_ACTIVITY_TABLE;
+  }
+
+  @Override
+  protected void validateParams() {
+    Preconditions.checkNotNull(getContext().getClusterId(),
+        "clusterId shouldn't be null");
+  }
+
+  @Override
+  protected void augmentParams(Configuration hbaseConf, Connection conn)
+      throws IOException {
+    createFiltersIfNull();
+  }
+
+  @Override
+  protected FilterList constructFilterListBasedOnFilters() throws IOException {
+    return null;
+  }
+
+  @Override
+  protected FilterList constructFilterListBasedOnFields() {
+    return null;
+  }
+
+  @Override
+  protected Result getResult(Configuration hbaseConf, Connection conn,
+      FilterList filterList) throws IOException {
+    throw new UnsupportedOperationException(
+        "we don't support a single entity query");
+  }
+
+  @Override
+  protected ResultScanner getResults(Configuration hbaseConf,
+      Connection conn, FilterList filterList) throws IOException {
+    Scan scan = new Scan();
+    String clusterId = getContext().getClusterId();
+    if (getFilters().getFromId() == null
+        && getFilters().getCreatedTimeBegin() == 0L
+        && getFilters().getCreatedTimeEnd() == Long.MAX_VALUE) {
+      // All records have to be chosen.
+      scan.setRowPrefixFilter(new FlowActivityRowKeyPrefix(clusterId)
+          .getRowKeyPrefix());
+    } else if (getFilters().getFromId() != null) {
+      FlowActivityRowKey key = null;
+      try {
+        key =
+            FlowActivityRowKey.parseRowKeyFromString(getFilters().getFromId());
+      } catch (IllegalArgumentException e) {
+        throw new BadRequestException("Invalid filter fromid is provided.");
+      }
+      if (!clusterId.equals(key.getClusterId())) {
+        throw new BadRequestException(
+            "fromid doesn't belong to clusterId=" + clusterId);
+      }
+      scan.setStartRow(key.getRowKey());
+      scan.setStopRow(
+          new FlowActivityRowKeyPrefix(clusterId,
+              (getFilters().getCreatedTimeBegin() <= 0 ? 0
+                  : (getFilters().getCreatedTimeBegin() - 1)))
+                      .getRowKeyPrefix());
+    } else {
+      scan.setStartRow(new FlowActivityRowKeyPrefix(clusterId, getFilters()
+          .getCreatedTimeEnd()).getRowKeyPrefix());
+      scan.setStopRow(new FlowActivityRowKeyPrefix(clusterId, (getFilters()
+          .getCreatedTimeBegin() <= 0 ? 0
+          : (getFilters().getCreatedTimeBegin() - 1))).getRowKeyPrefix());
+    }
+    // use the page filter to limit the result to the page size
+    // the scanner may still return more than the limit; therefore we need to
+    // read the right number as we iterate
+    scan.setFilter(new PageFilter(getFilters().getLimit()));
+    return getTable().getResultScanner(hbaseConf, conn, scan);
+  }
+
+  @Override
+  protected TimelineEntity parseEntity(Result result) throws IOException {
+    FlowActivityRowKey rowKey = FlowActivityRowKey.parseRowKey(result.getRow());
+
+    Long time = rowKey.getDayTimestamp();
+    String user = rowKey.getUserId();
+    String flowName = rowKey.getFlowName();
+
+    FlowActivityEntity flowActivity = new FlowActivityEntity(
+        getContext().getClusterId(), time, user, flowName);
+    // set the id; getId() derives it from the flow activity's fields
+    flowActivity.setId(flowActivity.getId());
+    // get the list of run ids along with the version that are associated with
+    // this flow on this day
+    Map<Long, Object> runIdsMap = ColumnRWHelper.readResults(result,
+        FlowActivityColumnPrefix.RUN_ID, longKeyConverter);
+    for (Map.Entry<Long, Object> e : runIdsMap.entrySet()) {
+      Long runId = e.getKey();
+      String version = (String)e.getValue();
+      FlowRunEntity flowRun = new FlowRunEntity();
+      flowRun.setUser(user);
+      flowRun.setName(flowName);
+      flowRun.setRunId(runId);
+      flowRun.setVersion(version);
+      // set the id
+      flowRun.setId(flowRun.getId());
+      flowActivity.addFlowRun(flowRun);
+    }
+    flowActivity.getInfo().put(TimelineReaderUtils.FROMID_KEY,
+        rowKey.getRowKeyAsString());
+    return flowActivity;
+  }
+}
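
As the comment in getResults() above notes, PageFilter only bounds the rows
returned per region server, so the merged scanner can still yield more than
the limit. A hedged sketch of how a caller of this reader might enforce the
limit while iterating (the surrounding readEntities-style loop is assumed,
not shown in this patch):

    Set<TimelineEntity> entities = new LinkedHashSet<>();
    try (ResultScanner scanner = getResults(hbaseConf, conn, filterList)) {
      for (Result result : scanner) {
        TimelineEntity entity = parseEntity(result);
        if (entity == null) {
          continue;                 // row did not survive local filtering
        }
        entities.add(entity);
        if (entities.size() >= getFilters().getLimit()) {
          break;                    // requested page size reached
        }
      }
    }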

http://git-wip-us.apache.org/repos/asf/hadoop/blob/9af30d46/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-client/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/reader/FlowRunEntityReader.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-client/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/reader/FlowRunEntityReader.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-client/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/reader/FlowRunEntityReader.java
new file mode 100644
index 0000000..33a2cf6
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-client/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/reader/FlowRunEntityReader.java
@@ -0,0 +1,298 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.yarn.server.timelineservice.storage.reader;
+
+import java.io.IOException;
+import java.util.EnumSet;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hbase.client.Connection;
+import org.apache.hadoop.hbase.client.Get;
+import org.apache.hadoop.hbase.client.Result;
+import org.apache.hadoop.hbase.client.ResultScanner;
+import org.apache.hadoop.hbase.client.Scan;
+import org.apache.hadoop.hbase.filter.BinaryComparator;
+import org.apache.hadoop.hbase.filter.BinaryPrefixComparator;
+import org.apache.hadoop.hbase.filter.CompareFilter.CompareOp;
+import org.apache.hadoop.hbase.filter.FamilyFilter;
+import org.apache.hadoop.hbase.filter.FilterList;
+import org.apache.hadoop.hbase.filter.FilterList.Operator;
+import org.apache.hadoop.hbase.filter.PageFilter;
+import org.apache.hadoop.hbase.filter.QualifierFilter;
+import org.apache.hadoop.yarn.api.records.timelineservice.FlowRunEntity;
+import org.apache.hadoop.yarn.api.records.timelineservice.TimelineEntity;
+import org.apache.hadoop.yarn.server.timelineservice.reader.TimelineDataToRetrieve;
+import org.apache.hadoop.yarn.server.timelineservice.reader.TimelineEntityFilters;
+import org.apache.hadoop.yarn.server.timelineservice.reader.TimelineReaderContext;
+import org.apache.hadoop.yarn.server.timelineservice.reader.TimelineReaderUtils;
+import org.apache.hadoop.yarn.server.timelineservice.reader.filter.TimelineFilterList;
+import org.apache.hadoop.yarn.server.timelineservice.reader.filter.TimelineFilterUtils;
+import org.apache.hadoop.yarn.server.timelineservice.storage.TimelineReader.Field;
+import org.apache.hadoop.yarn.server.timelineservice.storage.common.BaseTableRW;
+import org.apache.hadoop.yarn.server.timelineservice.storage.common.ColumnRWHelper;
+import org.apache.hadoop.yarn.server.timelineservice.storage.common.HBaseTimelineStorageUtils;
+import org.apache.hadoop.yarn.server.timelineservice.storage.common.RowKeyPrefix;
+import org.apache.hadoop.yarn.server.timelineservice.storage.flow.FlowRunColumn;
+import org.apache.hadoop.yarn.server.timelineservice.storage.flow.FlowRunColumnFamily;
+import org.apache.hadoop.yarn.server.timelineservice.storage.flow.FlowRunColumnPrefix;
+import org.apache.hadoop.yarn.server.timelineservice.storage.flow.FlowRunRowKey;
+import org.apache.hadoop.yarn.server.timelineservice.storage.flow.FlowRunRowKeyPrefix;
+import org.apache.hadoop.yarn.server.timelineservice.storage.flow.FlowRunTableRW;
+import org.apache.hadoop.yarn.webapp.BadRequestException;
+
+import com.google.common.base.Preconditions;
+
+/**
+ * Timeline entity reader for flow run entities that are stored in the flow run
+ * table.
+ */
+class FlowRunEntityReader extends TimelineEntityReader {
+  private static final FlowRunTableRW FLOW_RUN_TABLE = new FlowRunTableRW();
+
+  FlowRunEntityReader(TimelineReaderContext ctxt,
+      TimelineEntityFilters entityFilters, TimelineDataToRetrieve toRetrieve) {
+    super(ctxt, entityFilters, toRetrieve);
+  }
+
+  FlowRunEntityReader(TimelineReaderContext ctxt,
+      TimelineDataToRetrieve toRetrieve) {
+    super(ctxt, toRetrieve);
+  }
+
+  /**
+   * Uses the {@link FlowRunTableRW}.
+   */
+  @Override
+  protected BaseTableRW<?> getTable() {
+    return FLOW_RUN_TABLE;
+  }
+
+  @Override
+  protected void validateParams() {
+    Preconditions.checkNotNull(getContext(), "context shouldn't be null");
+    Preconditions.checkNotNull(getDataToRetrieve(),
+        "data to retrieve shouldn't be null");
+    Preconditions.checkNotNull(getContext().getClusterId(),
+        "clusterId shouldn't be null");
+    Preconditions.checkNotNull(getContext().getUserId(),
+        "userId shouldn't be null");
+    Preconditions.checkNotNull(getContext().getFlowName(),
+        "flowName shouldn't be null");
+    if (isSingleEntityRead()) {
+      Preconditions.checkNotNull(getContext().getFlowRunId(),
+          "flowRunId shouldn't be null");
+    }
+    EnumSet<Field> fieldsToRetrieve = getDataToRetrieve().getFieldsToRetrieve();
+    if (!isSingleEntityRead() && fieldsToRetrieve != null) {
+      for (Field field : fieldsToRetrieve) {
+        if (field != Field.ALL && field != Field.METRICS) {
+          throw new BadRequestException("Invalid field " + field
+              + " specified while querying flow runs.");
+        }
+      }
+    }
+  }
+
+  @Override
+  protected void augmentParams(Configuration hbaseConf, Connection conn) {
+    // Add metrics to fields to retrieve if metricsToRetrieve is specified.
+    getDataToRetrieve().addFieldsBasedOnConfsAndMetricsToRetrieve();
+    if (!isSingleEntityRead()) {
+      createFiltersIfNull();
+    }
+  }
+
+  @Override
+  protected FilterList constructFilterListBasedOnFilters() throws IOException {
+    FilterList listBasedOnFilters = new FilterList();
+    // Filter based on created time range.
+    Long createdTimeBegin = getFilters().getCreatedTimeBegin();
+    Long createdTimeEnd = getFilters().getCreatedTimeEnd();
+    if (createdTimeBegin != 0 || createdTimeEnd != Long.MAX_VALUE) {
+      listBasedOnFilters.addFilter(TimelineFilterUtils
+          .createSingleColValueFiltersByRange(FlowRunColumn.MIN_START_TIME,
+              createdTimeBegin, createdTimeEnd));
+    }
+    // Filter based on metric filters.
+    TimelineFilterList metricFilters = getFilters().getMetricFilters();
+    if (metricFilters != null && !metricFilters.getFilterList().isEmpty()) {
+      listBasedOnFilters.addFilter(TimelineFilterUtils.createHBaseFilterList(
+          FlowRunColumnPrefix.METRIC, metricFilters));
+    }
+    return listBasedOnFilters;
+  }
+
+  /**
+   * Add {@link QualifierFilter} filters to filter list for each column of flow
+   * run table.
+   *
+   * @return filter list to which qualifier filters have been added.
+   */
+  private FilterList updateFixedColumns() {
+    FilterList columnsList = new FilterList(Operator.MUST_PASS_ONE);
+    for (FlowRunColumn column : FlowRunColumn.values()) {
+      columnsList.addFilter(new QualifierFilter(CompareOp.EQUAL,
+          new BinaryComparator(column.getColumnQualifierBytes())));
+    }
+    return columnsList;
+  }
+
+  @Override
+  protected FilterList constructFilterListBasedOnFields() throws IOException {
+    FilterList list = new FilterList(Operator.MUST_PASS_ONE);
+    // By default fetch everything in INFO column family.
+    FamilyFilter infoColumnFamily =
+        new FamilyFilter(CompareOp.EQUAL, new BinaryComparator(
+            FlowRunColumnFamily.INFO.getBytes()));
+    TimelineDataToRetrieve dataToRetrieve = getDataToRetrieve();
+    // If multiple entities have to be retrieved, check if metrics have to be
+    // retrieved and if not, add a filter so that metrics can be excluded.
+    // Metrics are always returned if we are reading a single entity.
+    if (!isSingleEntityRead()
+        && !hasField(dataToRetrieve.getFieldsToRetrieve(), Field.METRICS)) {
+      FilterList infoColFamilyList = new FilterList(Operator.MUST_PASS_ONE);
+      infoColFamilyList.addFilter(infoColumnFamily);
+      infoColFamilyList.addFilter(new QualifierFilter(CompareOp.NOT_EQUAL,
+          new BinaryPrefixComparator(FlowRunColumnPrefix.METRIC
+              .getColumnPrefixBytes(""))));
+      list.addFilter(infoColFamilyList);
+    } else {
+      // Check if metricsToRetrieve are specified and if they are, create a
+      // filter list for info column family by adding flow run tables columns
+      // and a list for metrics to retrieve. Please note that fieldsToRetrieve
+      // will have METRICS added to it if metricsToRetrieve are specified
+      // (in augmentParams()).
+      TimelineFilterList metricsToRetrieve =
+          dataToRetrieve.getMetricsToRetrieve();
+      if (metricsToRetrieve != null
+          && !metricsToRetrieve.getFilterList().isEmpty()) {
+        FilterList infoColFamilyList = new FilterList();
+        infoColFamilyList.addFilter(infoColumnFamily);
+        FilterList columnsList = updateFixedColumns();
+        columnsList.addFilter(TimelineFilterUtils.createHBaseFilterList(
+            FlowRunColumnPrefix.METRIC, metricsToRetrieve));
+        infoColFamilyList.addFilter(columnsList);
+        list.addFilter(infoColFamilyList);
+      }
+    }
+    return list;
+  }
+
+  @Override
+  protected Result getResult(Configuration hbaseConf, Connection conn,
+      FilterList filterList) throws IOException {
+    TimelineReaderContext context = getContext();
+    FlowRunRowKey flowRunRowKey =
+        new FlowRunRowKey(context.getClusterId(), context.getUserId(),
+            context.getFlowName(), context.getFlowRunId());
+    byte[] rowKey = flowRunRowKey.getRowKey();
+    Get get = new Get(rowKey);
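+    // flow run metrics are stored as cell versions; fetch all versions so
+    // the full metric time series is available to the reader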
+    get.setMaxVersions(Integer.MAX_VALUE);
+    if (filterList != null && !filterList.getFilters().isEmpty()) {
+      get.setFilter(filterList);
+    }
+    return getTable().getResult(hbaseConf, conn, get);
+  }
+
+  @Override
+  protected ResultScanner getResults(Configuration hbaseConf, Connection conn,
+      FilterList filterList) throws IOException {
+    Scan scan = new Scan();
+    TimelineReaderContext context = getContext();
+    RowKeyPrefix<FlowRunRowKey> flowRunRowKeyPrefix = null;
+    if (getFilters().getFromId() == null) {
+      flowRunRowKeyPrefix = new FlowRunRowKeyPrefix(context.getClusterId(),
+          context.getUserId(), context.getFlowName());
+      scan.setRowPrefixFilter(flowRunRowKeyPrefix.getRowKeyPrefix());
+    } else {
+      FlowRunRowKey flowRunRowKey = null;
+      try {
+        flowRunRowKey =
+            FlowRunRowKey.parseRowKeyFromString(getFilters().getFromId());
+      } catch (IllegalArgumentException e) {
+        throw new BadRequestException("Invalid filter fromid is provided.");
+      }
+      if (!context.getClusterId().equals(flowRunRowKey.getClusterId())) {
+        throw new BadRequestException(
+            "fromid doesn't belong to clusterId=" + context.getClusterId());
+      }
+      // set start row
+      scan.setStartRow(flowRunRowKey.getRowKey());
+
+      // get the bytes for stop row
+      flowRunRowKeyPrefix = new FlowRunRowKeyPrefix(context.getClusterId(),
+          context.getUserId(), context.getFlowName());
+
+      // set stop row
+      scan.setStopRow(
+          HBaseTimelineStorageUtils.calculateTheClosestNextRowKeyForPrefix(
+              flowRunRowKeyPrefix.getRowKeyPrefix()));
+    }
+
+    FilterList newList = new FilterList();
+    newList.addFilter(new PageFilter(getFilters().getLimit()));
+    if (filterList != null && !filterList.getFilters().isEmpty()) {
+      newList.addFilter(filterList);
+    }
+    scan.setFilter(newList);
+    scan.setMaxVersions(Integer.MAX_VALUE);
+    return getTable().getResultScanner(hbaseConf, conn, scan);
+  }
+
+  @Override
+  protected TimelineEntity parseEntity(Result result) throws IOException {
+    FlowRunEntity flowRun = new FlowRunEntity();
+    FlowRunRowKey rowKey = FlowRunRowKey.parseRowKey(result.getRow());
+    flowRun.setRunId(rowKey.getFlowRunId());
+    flowRun.setUser(rowKey.getUserId());
+    flowRun.setName(rowKey.getFlowName());
+
+    // read the start time
+    Long startTime = (Long) ColumnRWHelper.readResult(result,
+        FlowRunColumn.MIN_START_TIME);
+    if (startTime != null) {
+      flowRun.setStartTime(startTime.longValue());
+    }
+
+    // read the end time if available
+    Long endTime = (Long) ColumnRWHelper.readResult(result,
+        FlowRunColumn.MAX_END_TIME);
+    if (endTime != null) {
+      flowRun.setMaxEndTime(endTime.longValue());
+    }
+
+    // read the flow version
+    String version = (String) ColumnRWHelper.readResult(result,
+        FlowRunColumn.FLOW_VERSION);
+    if (version != null) {
+      flowRun.setVersion(version);
+    }
+
+    // read metrics if its a single entity query or if METRICS are part of
+    // fieldsToRetrieve.
+    if (isSingleEntityRead()
+        || hasField(getDataToRetrieve().getFieldsToRetrieve(), Field.METRICS)) {
+      readMetrics(flowRun, result, FlowRunColumnPrefix.METRIC);
+    }
+
+    // set the id; FlowRunEntity.getId() derives it from the run's fields
+    flowRun.setId(flowRun.getId());
+    flowRun.getInfo().put(TimelineReaderUtils.FROMID_KEY,
+        rowKey.getRowKeyAsString());
+    return flowRun;
+  }
+}
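
constructFilterListBasedOnFields() above composes filters with FilterList
operators. A short sketch of the composition pattern under assumed column
family names ("i" and "m" are illustrative, not the actual family bytes):

    import org.apache.hadoop.hbase.client.Scan;
    import org.apache.hadoop.hbase.filter.BinaryComparator;
    import org.apache.hadoop.hbase.filter.CompareFilter.CompareOp;
    import org.apache.hadoop.hbase.filter.FamilyFilter;
    import org.apache.hadoop.hbase.filter.FilterList;
    import org.apache.hadoop.hbase.filter.FilterList.Operator;
    import org.apache.hadoop.hbase.util.Bytes;

    // MUST_PASS_ONE behaves like OR; the default MUST_PASS_ALL like AND.
    FilterList eitherFamily = new FilterList(Operator.MUST_PASS_ONE);
    eitherFamily.addFilter(new FamilyFilter(CompareOp.EQUAL,
        new BinaryComparator(Bytes.toBytes("i"))));
    eitherFamily.addFilter(new FamilyFilter(CompareOp.EQUAL,
        new BinaryComparator(Bytes.toBytes("m"))));

    FilterList composed = new FilterList();        // AND by default
    composed.addFilter(eitherFamily);
    Scan scan = new Scan();
    scan.setFilter(composed);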

http://git-wip-us.apache.org/repos/asf/hadoop/blob/9af30d46/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-client/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/reader/GenericEntityReader.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-client/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/reader/GenericEntityReader.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-client/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/reader/GenericEntityReader.java
new file mode 100644
index 0000000..02eca84
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-client/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/reader/GenericEntityReader.java
@@ -0,0 +1,655 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.yarn.server.timelineservice.storage.reader;
+
+import java.io.IOException;
+import java.util.EnumSet;
+import java.util.Iterator;
+import java.util.Map;
+import java.util.Set;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hbase.client.Connection;
+import org.apache.hadoop.hbase.client.Get;
+import org.apache.hadoop.hbase.client.Query;
+import org.apache.hadoop.hbase.client.Result;
+import org.apache.hadoop.hbase.client.ResultScanner;
+import org.apache.hadoop.hbase.client.Scan;
+import org.apache.hadoop.hbase.filter.BinaryComparator;
+import org.apache.hadoop.hbase.filter.CompareFilter.CompareOp;
+import org.apache.hadoop.hbase.filter.FamilyFilter;
+import org.apache.hadoop.hbase.filter.FilterList;
+import org.apache.hadoop.hbase.filter.FilterList.Operator;
+import org.apache.hadoop.hbase.filter.PageFilter;
+import org.apache.hadoop.hbase.filter.QualifierFilter;
+import org.apache.hadoop.yarn.api.records.timelineservice.TimelineEntity;
+import org.apache.hadoop.yarn.server.timelineservice.reader.TimelineDataToRetrieve;
+import org.apache.hadoop.yarn.server.timelineservice.reader.TimelineEntityFilters;
+import org.apache.hadoop.yarn.server.timelineservice.reader.TimelineReaderContext;
+import org.apache.hadoop.yarn.server.timelineservice.reader.TimelineReaderUtils;
+import org.apache.hadoop.yarn.server.timelineservice.reader.filter.TimelineFilterList;
+import org.apache.hadoop.yarn.server.timelineservice.reader.filter.TimelineFilterUtils;
+import org.apache.hadoop.yarn.server.timelineservice.storage.TimelineReader.Field;
+import org.apache.hadoop.yarn.server.timelineservice.storage.common.BaseTable;
+import org.apache.hadoop.yarn.server.timelineservice.storage.common.BaseTableRW;
+import org.apache.hadoop.yarn.server.timelineservice.storage.common.ColumnPrefix;
+import org.apache.hadoop.yarn.server.timelineservice.storage.common.ColumnRWHelper;
+import org.apache.hadoop.yarn.server.timelineservice.storage.common.HBaseTimelineStorageUtils;
+import org.apache.hadoop.yarn.server.timelineservice.storage.common.KeyConverter;
+import org.apache.hadoop.yarn.server.timelineservice.storage.common.RowKeyPrefix;
+import org.apache.hadoop.yarn.server.timelineservice.storage.common.StringKeyConverter;
+import org.apache.hadoop.yarn.server.timelineservice.storage.common.TimelineStorageUtils;
+import org.apache.hadoop.yarn.server.timelineservice.storage.entity.EntityColumn;
+import org.apache.hadoop.yarn.server.timelineservice.storage.entity.EntityColumnFamily;
+import org.apache.hadoop.yarn.server.timelineservice.storage.entity.EntityColumnPrefix;
+import org.apache.hadoop.yarn.server.timelineservice.storage.entity.EntityRowKey;
+import org.apache.hadoop.yarn.server.timelineservice.storage.entity.EntityRowKeyPrefix;
+import org.apache.hadoop.yarn.server.timelineservice.storage.entity.EntityTableRW;
+import org.apache.hadoop.yarn.webapp.BadRequestException;
+
+import com.google.common.base.Preconditions;
+
+/**
+ * Timeline entity reader for generic entities that are stored in the entity
+ * table.
+ */
+class GenericEntityReader extends TimelineEntityReader {
+  private static final EntityTableRW ENTITY_TABLE = new EntityTableRW();
+
+  /**
+   * Used to convert strings key components to and from storage format.
+   */
+  private final KeyConverter<String> stringKeyConverter =
+      new StringKeyConverter();
+
+  GenericEntityReader(TimelineReaderContext ctxt,
+      TimelineEntityFilters entityFilters, TimelineDataToRetrieve toRetrieve) {
+    super(ctxt, entityFilters, toRetrieve);
+  }
+
+  GenericEntityReader(TimelineReaderContext ctxt,
+      TimelineDataToRetrieve toRetrieve) {
+    super(ctxt, toRetrieve);
+  }
+
+  /**
+   * Uses the {@link EntityTableRW}.
+   */
+  @Override
+  protected BaseTableRW<?> getTable() {
+    return ENTITY_TABLE;
+  }
+
+  @Override
+  protected FilterList constructFilterListBasedOnFilters() throws IOException {
+    // Filters here cannot be null for multiple entity reads as they are set in
+    // augmentParams if null.
+    FilterList listBasedOnFilters = new FilterList();
+    TimelineEntityFilters filters = getFilters();
+    // Create filter list based on created time range and add it to
+    // listBasedOnFilters.
+    long createdTimeBegin = filters.getCreatedTimeBegin();
+    long createdTimeEnd = filters.getCreatedTimeEnd();
+    if (createdTimeBegin != 0 || createdTimeEnd != Long.MAX_VALUE) {
+      listBasedOnFilters.addFilter(TimelineFilterUtils
+          .createSingleColValueFiltersByRange(EntityColumn.CREATED_TIME,
+              createdTimeBegin, createdTimeEnd));
+    }
+    // Create filter list based on metric filters and add it to
+    // listBasedOnFilters.
+    TimelineFilterList metricFilters = filters.getMetricFilters();
+    if (metricFilters != null && !metricFilters.getFilterList().isEmpty()) {
+      listBasedOnFilters.addFilter(TimelineFilterUtils.createHBaseFilterList(
+          EntityColumnPrefix.METRIC, metricFilters));
+    }
+    // Create filter list based on config filters and add it to
+    // listBasedOnFilters.
+    TimelineFilterList configFilters = filters.getConfigFilters();
+    if (configFilters != null && !configFilters.getFilterList().isEmpty()) {
+      listBasedOnFilters.addFilter(TimelineFilterUtils.createHBaseFilterList(
+          EntityColumnPrefix.CONFIG, configFilters));
+    }
+    // Create filter list based on info filters and add it to listBasedOnFilters
+    TimelineFilterList infoFilters = filters.getInfoFilters();
+    if (infoFilters != null && !infoFilters.getFilterList().isEmpty()) {
+      listBasedOnFilters.addFilter(TimelineFilterUtils.createHBaseFilterList(
+          EntityColumnPrefix.INFO, infoFilters));
+    }
+    return listBasedOnFilters;
+  }
+
+  /**
+   * Check if we need to fetch only some of the event columns.
+   *
+   * @return true if we need to fetch some of the columns, false otherwise.
+   */
+  protected boolean fetchPartialEventCols(TimelineFilterList eventFilters,
+      EnumSet<Field> fieldsToRetrieve) {
+    return (eventFilters != null && !eventFilters.getFilterList().isEmpty() &&
+        !hasField(fieldsToRetrieve, Field.EVENTS));
+  }
+
+  /**
+   * Check if we need to fetch only some of the relates_to columns.
+   *
+   * @return true if we need to fetch some of the columns, false otherwise.
+   */
+  protected boolean fetchPartialRelatesToCols(TimelineFilterList relatesTo,
+      EnumSet<Field> fieldsToRetrieve) {
+    return (relatesTo != null && !relatesTo.getFilterList().isEmpty() &&
+        !hasField(fieldsToRetrieve, Field.RELATES_TO));
+  }
+
+  /**
+   * Check if we need to fetch only some of the is_related_to columns.
+   *
+   * @return true if we need to fetch some of the columns, false otherwise.
+   */
+  private boolean fetchPartialIsRelatedToCols(TimelineFilterList isRelatedTo,
+      EnumSet<Field> fieldsToRetrieve) {
+    return (isRelatedTo != null && !isRelatedTo.getFilterList().isEmpty() &&
+        !hasField(fieldsToRetrieve, Field.IS_RELATED_TO));
+  }
+
+  /**
+   * Check if we need to fetch only some of the columns based on event filters,
+   * relatesto and isrelatedto from info family.
+   *
+   * @return true, if we need to fetch only some of the columns, false if we
+   *         need to fetch all the columns under info column family.
+   */
+  protected boolean fetchPartialColsFromInfoFamily() {
+    EnumSet<Field> fieldsToRetrieve = getDataToRetrieve().getFieldsToRetrieve();
+    TimelineEntityFilters filters = getFilters();
+    return fetchPartialEventCols(filters.getEventFilters(), fieldsToRetrieve)
+        || fetchPartialRelatesToCols(filters.getRelatesTo(), fieldsToRetrieve)
+        || fetchPartialIsRelatedToCols(filters.getIsRelatedTo(),
+            fieldsToRetrieve);
+  }
+
+  /**
+   * Check if we need to create filter list based on fields. We need to create a
+   * filter list iff all fields need not be retrieved or we have some specific
+   * fields or metrics to retrieve. We also need to create a filter list if we
+   * have relationships(relatesTo/isRelatedTo) and event filters specified for
+   * the query.
+   *
+   * @return true if we need to create the filter list, false otherwise.
+   */
+  protected boolean needCreateFilterListBasedOnFields() {
+    TimelineDataToRetrieve dataToRetrieve = getDataToRetrieve();
+    // Check if all fields are to be retrieved or not. If all fields have to
+    // be retrieved, also check if we have some metrics or configs to
+    // retrieve specified for the query because then a filter list will have
+    // to be created.
+    boolean flag =
+        !dataToRetrieve.getFieldsToRetrieve().contains(Field.ALL)
+            || (dataToRetrieve.getConfsToRetrieve() != null && !dataToRetrieve
+                .getConfsToRetrieve().getFilterList().isEmpty())
+            || (dataToRetrieve.getMetricsToRetrieve() != null && !dataToRetrieve
+                .getMetricsToRetrieve().getFilterList().isEmpty());
+    // Filters need to be checked only if we are reading multiple entities. If
+    // condition above is false, we check if there are relationships(relatesTo/
+    // isRelatedTo) and event filters specified for the query.
+    if (!flag && !isSingleEntityRead()) {
+      TimelineEntityFilters filters = getFilters();
+      flag =
+          (filters.getEventFilters() != null && !filters.getEventFilters()
+              .getFilterList().isEmpty())
+              || (filters.getIsRelatedTo() != null && !filters.getIsRelatedTo()
+                  .getFilterList().isEmpty())
+              || (filters.getRelatesTo() != null && !filters.getRelatesTo()
+                  .getFilterList().isEmpty());
+    }
+    return flag;
+  }
+
+  /**
+   * Add {@link QualifierFilter} filters to filter list for each column of
+   * entity table.
+   *
+   * @param list filter list to which qualifier filters have to be added.
+   */
+  protected void updateFixedColumns(FilterList list) {
+    for (EntityColumn column : EntityColumn.values()) {
+      list.addFilter(new QualifierFilter(CompareOp.EQUAL, new BinaryComparator(
+          column.getColumnQualifierBytes())));
+    }
+  }
+
+  /**
+   * Creates a filter list which indicates that only some of the column
+   * qualifiers in the info column family will be returned in the result.
+   *
+   * @return filter list.
+   * @throws IOException if any problem occurs while creating filter list.
+   */
+  private FilterList createFilterListForColsOfInfoFamily() throws IOException {
+    FilterList infoFamilyColsFilter = new FilterList(Operator.MUST_PASS_ONE);
+    // Add filters for each column in entity table.
+    updateFixedColumns(infoFamilyColsFilter);
+    EnumSet<Field> fieldsToRetrieve = getDataToRetrieve().getFieldsToRetrieve();
+    // If INFO field has to be retrieved, add a filter for fetching columns
+    // with INFO column prefix.
+    if (hasField(fieldsToRetrieve, Field.INFO)) {
+      infoFamilyColsFilter
+          .addFilter(TimelineFilterUtils.createHBaseQualifierFilter(
+              CompareOp.EQUAL, EntityColumnPrefix.INFO));
+    }
+    TimelineFilterList relatesTo = getFilters().getRelatesTo();
+    if (hasField(fieldsToRetrieve, Field.RELATES_TO)) {
+      // If RELATES_TO field has to be retrieved, add a filter for fetching
+      // columns with RELATES_TO column prefix.
+      infoFamilyColsFilter.addFilter(TimelineFilterUtils
+          .createHBaseQualifierFilter(CompareOp.EQUAL,
+              EntityColumnPrefix.RELATES_TO));
+    } else if (relatesTo != null && !relatesTo.getFilterList().isEmpty()) {
+      // Even if fields to retrieve does not contain RELATES_TO, we still
+      // need to have a filter to fetch some of the column qualifiers if
+      // relatesTo filters are specified. relatesTo filters will then be
+      // matched after fetching rows from HBase.
+      Set<String> relatesToCols =
+          TimelineFilterUtils.fetchColumnsFromFilterList(relatesTo);
+      infoFamilyColsFilter.addFilter(createFiltersFromColumnQualifiers(
+          EntityColumnPrefix.RELATES_TO, relatesToCols));
+    }
+    TimelineFilterList isRelatedTo = getFilters().getIsRelatedTo();
+    if (hasField(fieldsToRetrieve, Field.IS_RELATED_TO)) {
+      // If IS_RELATED_TO field has to be retrieved, add a filter for fetching
+      // columns with IS_RELATED_TO column prefix.
+      infoFamilyColsFilter.addFilter(TimelineFilterUtils
+          .createHBaseQualifierFilter(CompareOp.EQUAL,
+              EntityColumnPrefix.IS_RELATED_TO));
+    } else if (isRelatedTo != null && !isRelatedTo.getFilterList().isEmpty()) {
+      // Even if fields to retrieve does not contain IS_RELATED_TO, we still
+      // need to have a filter to fetch some of the column qualifiers if
+      // isRelatedTo filters are specified. isRelatedTo filters will then be
+      // matched after fetching rows from HBase.
+      Set<String> isRelatedToCols =
+          TimelineFilterUtils.fetchColumnsFromFilterList(isRelatedTo);
+      infoFamilyColsFilter.addFilter(createFiltersFromColumnQualifiers(
+          EntityColumnPrefix.IS_RELATED_TO, isRelatedToCols));
+    }
+    TimelineFilterList eventFilters = getFilters().getEventFilters();
+    if (hasField(fieldsToRetrieve, Field.EVENTS)) {
+      // If EVENTS field has to be retrieved, add a filter for fetching columns
+      // with EVENT column prefix.
+      infoFamilyColsFilter
+          .addFilter(TimelineFilterUtils.createHBaseQualifierFilter(
+              CompareOp.EQUAL, EntityColumnPrefix.EVENT));
+    } else if (eventFilters != null &&
+        !eventFilters.getFilterList().isEmpty()) {
+      // Even if fields to retrieve does not contain EVENTS, we still need to
+      // have a filter to fetch some of the column qualifiers on the basis of
+      // event filters specified. Event filters will then be matched after
+      // fetching rows from HBase.
+      Set<String> eventCols =
+          TimelineFilterUtils.fetchColumnsFromFilterList(eventFilters);
+      infoFamilyColsFilter.addFilter(createFiltersFromColumnQualifiers(
+          EntityColumnPrefix.EVENT, eventCols));
+    }
+    return infoFamilyColsFilter;
+  }
+
+  /**
+   * Exclude column prefixes via filters which are not required(based on fields
+   * to retrieve) from info column family. These filters are added to filter
+   * list which contains a filter for getting info column family.
+   *
+   * @param infoColFamilyList filter list for info column family.
+   */
+  private void excludeFieldsFromInfoColFamily(FilterList infoColFamilyList) {
+    EnumSet<Field> fieldsToRetrieve = getDataToRetrieve().getFieldsToRetrieve();
+    // Events not required.
+    if (!hasField(fieldsToRetrieve, Field.EVENTS)) {
+      infoColFamilyList.addFilter(TimelineFilterUtils
+          .createHBaseQualifierFilter(CompareOp.NOT_EQUAL,
+              EntityColumnPrefix.EVENT));
+    }
+    // info not required.
+    if (!hasField(fieldsToRetrieve, Field.INFO)) {
+      infoColFamilyList.addFilter(TimelineFilterUtils
+          .createHBaseQualifierFilter(CompareOp.NOT_EQUAL,
+              EntityColumnPrefix.INFO));
+    }
+    // is related to not required.
+    if (!hasField(fieldsToRetrieve, Field.IS_RELATED_TO)) {
+      infoColFamilyList.addFilter(TimelineFilterUtils
+          .createHBaseQualifierFilter(CompareOp.NOT_EQUAL,
+              EntityColumnPrefix.IS_RELATED_TO));
+    }
+    // relates to not required.
+    if (!hasField(fieldsToRetrieve, Field.RELATES_TO)) {
+      infoColFamilyList.addFilter(TimelineFilterUtils
+          .createHBaseQualifierFilter(CompareOp.NOT_EQUAL,
+              EntityColumnPrefix.RELATES_TO));
+    }
+  }
+
+  /**
+   * Updates filter list based on fields for confs and metrics to retrieve.
+   *
+   * @param listBasedOnFields filter list based on fields.
+   * @throws IOException if any problem occurs while updating filter list.
+   */
+  private void updateFilterForConfsAndMetricsToRetrieve(
+      FilterList listBasedOnFields) throws IOException {
+    TimelineDataToRetrieve dataToRetrieve = getDataToRetrieve();
+    // Please note that if confsToRetrieve is specified, we would have added
+    // CONFS to fields to retrieve in augmentParams() even if not specified.
+    if (dataToRetrieve.getFieldsToRetrieve().contains(Field.CONFIGS)) {
+      // Create a filter list for configs.
+      listBasedOnFields.addFilter(TimelineFilterUtils
+          .createFilterForConfsOrMetricsToRetrieve(
+              dataToRetrieve.getConfsToRetrieve(), EntityColumnFamily.CONFIGS,
+              EntityColumnPrefix.CONFIG));
+    }
+
+    // Please note that if metricsToRetrieve is specified, we would have added
+    // METRICS to fields to retrieve in augmentParams() even if not specified.
+    if (dataToRetrieve.getFieldsToRetrieve().contains(Field.METRICS)) {
+      // Create a filter list for metrics.
+      listBasedOnFields.addFilter(TimelineFilterUtils
+          .createFilterForConfsOrMetricsToRetrieve(
+              dataToRetrieve.getMetricsToRetrieve(),
+              EntityColumnFamily.METRICS, EntityColumnPrefix.METRIC));
+    }
+  }
+
+  @Override
+  protected FilterList constructFilterListBasedOnFields() throws IOException {
+    if (!needCreateFilterListBasedOnFields()) {
+      // Fetch all the columns. No need of a filter.
+      return null;
+    }
+    FilterList listBasedOnFields = new FilterList(Operator.MUST_PASS_ONE);
+    FilterList infoColFamilyList = new FilterList();
+    // By default fetch everything in INFO column family.
+    FamilyFilter infoColumnFamily =
+        new FamilyFilter(CompareOp.EQUAL, new BinaryComparator(
+            EntityColumnFamily.INFO.getBytes()));
+    infoColFamilyList.addFilter(infoColumnFamily);
+    if (!isSingleEntityRead() && fetchPartialColsFromInfoFamily()) {
+      // We can fetch only some of the columns from info family.
+      infoColFamilyList.addFilter(createFilterListForColsOfInfoFamily());
+    } else {
+      // Exclude column prefixes in info column family which are not required
+      // based on fields to retrieve.
+      excludeFieldsFromInfoColFamily(infoColFamilyList);
+    }
+    listBasedOnFields.addFilter(infoColFamilyList);
+    updateFilterForConfsAndMetricsToRetrieve(listBasedOnFields);
+    return listBasedOnFields;
+  }
+
+  @Override
+  protected void validateParams() {
+    Preconditions.checkNotNull(getContext(), "context shouldn't be null");
+    Preconditions.checkNotNull(getDataToRetrieve(),
+        "data to retrieve shouldn't be null");
+    Preconditions.checkNotNull(getContext().getClusterId(),
+        "clusterId shouldn't be null");
+    Preconditions.checkNotNull(getContext().getAppId(),
+        "appId shouldn't be null");
+    Preconditions.checkNotNull(getContext().getEntityType(),
+        "entityType shouldn't be null");
+    if (isSingleEntityRead()) {
+      Preconditions.checkNotNull(getContext().getEntityId(),
+          "entityId shouldn't be null");
+    }
+  }
+
+  @Override
+  protected void augmentParams(Configuration hbaseConf, Connection conn)
+      throws IOException {
+    defaultAugmentParams(hbaseConf, conn);
+    // Add configs/metrics to fields to retrieve if confsToRetrieve and/or
+    // metricsToRetrieve are specified.
+    getDataToRetrieve().addFieldsBasedOnConfsAndMetricsToRetrieve();
+    if (!isSingleEntityRead()) {
+      createFiltersIfNull();
+    }
+  }
+
+  @Override
+  protected Result getResult(Configuration hbaseConf, Connection conn,
+      FilterList filterList) throws IOException {
+    TimelineReaderContext context = getContext();
+    Result result = null;
+    if (context.getEntityIdPrefix() != null) {
+      byte[] rowKey = new EntityRowKey(context.getClusterId(),
+          context.getUserId(), context.getFlowName(), context.getFlowRunId(),
+          context.getAppId(), context.getEntityType(),
+          context.getEntityIdPrefix(), context.getEntityId()).getRowKey();
+      Get get = new Get(rowKey);
+      setMetricsTimeRange(get);
+      get.setMaxVersions(getDataToRetrieve().getMetricsLimit());
+      if (filterList != null && !filterList.getFilters().isEmpty()) {
+        get.setFilter(filterList);
+      }
+      result = getTable().getResult(hbaseConf, conn, get);
+
+    } else {
+      // Prepare for range scan
+      // create single SingleColumnValueFilter and add to existing filters.
+      FilterList filter = new FilterList(Operator.MUST_PASS_ALL);
+      if (filterList != null && !filterList.getFilters().isEmpty()) {
+        filter.addFilter(filterList);
+      }
+      FilterList newFilter = new FilterList();
+      newFilter.addFilter(TimelineFilterUtils.createHBaseSingleColValueFilter(
+          EntityColumn.ID, context.getEntityId(), CompareOp.EQUAL));
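+      // at most one row can match the entity id, so stop scanning after it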
+      newFilter.addFilter(new PageFilter(1));
+      filter.addFilter(newFilter);
+
+      ResultScanner results = getResults(hbaseConf, conn, filter);
+      try {
+        Iterator<Result> iterator = results.iterator();
+        if (iterator.hasNext()) {
+          result = iterator.next();
+        }
+      } finally {
+        results.close();
+      }
+    }
+    return result;
+  }
+
+  private void setMetricsTimeRange(Query query) {
+    // Set time range for metric values.
+    HBaseTimelineStorageUtils.setMetricsTimeRange(
+        query, EntityColumnFamily.METRICS.getBytes(),
+        getDataToRetrieve().getMetricsTimeBegin(),
+        getDataToRetrieve().getMetricsTimeEnd());
+  }
+
+  @Override
+  protected ResultScanner getResults(Configuration hbaseConf, Connection conn,
+      FilterList filterList) throws IOException {
+    // Scan through part of the table to find the entities belonging to one
+    // app and one type.
+    Scan scan = new Scan();
+    TimelineReaderContext context = getContext();
+    RowKeyPrefix<EntityRowKey> entityRowKeyPrefix = null;
+    // default mode: always scan from the beginning of the entity type.
+    if (getFilters() == null || getFilters().getFromId() == null) {
+      entityRowKeyPrefix = new EntityRowKeyPrefix(context.getClusterId(),
+          context.getUserId(), context.getFlowName(), context.getFlowRunId(),
+          context.getAppId(), context.getEntityType(), null, null);
+      scan.setRowPrefixFilter(entityRowKeyPrefix.getRowKeyPrefix());
+    } else { // pagination mode, will scan from given entityIdPrefix!entityId
+
+      EntityRowKey entityRowKey = null;
+      try {
+        entityRowKey =
+            EntityRowKey.parseRowKeyFromString(getFilters().getFromId());
+      } catch (IllegalArgumentException e) {
+        throw new BadRequestException("Invalid filter fromid is provided.");
+      }
+      if (!context.getClusterId().equals(entityRowKey.getClusterId())) {
+        throw new BadRequestException(
+            "fromid doesn't belong to clusterId=" + context.getClusterId());
+      }
+
+      // set start row
+      scan.setStartRow(entityRowKey.getRowKey());
+
+      // get the bytes for stop row
+      entityRowKeyPrefix = new EntityRowKeyPrefix(context.getClusterId(),
+          context.getUserId(), context.getFlowName(), context.getFlowRunId(),
+          context.getAppId(), context.getEntityType(), null, null);
+
+      // set stop row
+      scan.setStopRow(
+          HBaseTimelineStorageUtils.calculateTheClosestNextRowKeyForPrefix(
+              entityRowKeyPrefix.getRowKeyPrefix()));
+
+      // Set the page filter to the limit. This filter has to be set only in
+      // pagination mode.
+      filterList.addFilter(new PageFilter(getFilters().getLimit()));
+    }
+    setMetricsTimeRange(scan);
+    scan.setMaxVersions(getDataToRetrieve().getMetricsLimit());
+    if (filterList != null && !filterList.getFilters().isEmpty()) {
+      scan.setFilter(filterList);
+    }
+    return getTable().getResultScanner(hbaseConf, conn, scan);
+  }
+
+  @Override
+  protected TimelineEntity parseEntity(Result result) throws IOException {
+    if (result == null || result.isEmpty()) {
+      return null;
+    }
+    TimelineEntity entity = new TimelineEntity();
+    EntityRowKey parseRowKey = EntityRowKey.parseRowKey(result.getRow());
+    entity.setType(parseRowKey.getEntityType());
+    entity.setId(parseRowKey.getEntityId());
+    entity.setIdPrefix(parseRowKey.getEntityIdPrefix().longValue());
+
+    TimelineEntityFilters filters = getFilters();
+    // fetch created time
+    Long createdTime = (Long) ColumnRWHelper.readResult(result,
+        EntityColumn.CREATED_TIME);
+    entity.setCreatedTime(createdTime);
+
+    EnumSet<Field> fieldsToRetrieve = getDataToRetrieve().getFieldsToRetrieve();
+    // Fetch isRelatedTo entities and match the isRelatedTo filter. If the
+    // filter does not match, the entity is dropped. We have to match the
+    // filter locally because no HBase filter is set in the scan to filter
+    // out rows on the basis of isRelatedTo.
+    boolean checkIsRelatedTo =
+        !isSingleEntityRead() && filters.getIsRelatedTo() != null
+            && filters.getIsRelatedTo().getFilterList().size() > 0;
+    if (hasField(fieldsToRetrieve, Field.IS_RELATED_TO) || checkIsRelatedTo) {
+      readRelationship(entity, result, EntityColumnPrefix.IS_RELATED_TO, true);
+      if (checkIsRelatedTo
+          && !TimelineStorageUtils.matchIsRelatedTo(entity,
+              filters.getIsRelatedTo())) {
+        return null;
+      }
+      if (!hasField(fieldsToRetrieve, Field.IS_RELATED_TO)) {
+        entity.getIsRelatedToEntities().clear();
+      }
+    }
+
+    // Fetch relatesTo entities and match the relatesTo filter. If the
+    // filter does not match, the entity is dropped. We have to match the
+    // filter locally because no HBase filter is set in the scan to filter
+    // out rows on the basis of relatesTo.
+    boolean checkRelatesTo =
+        !isSingleEntityRead() && filters.getRelatesTo() != null
+            && filters.getRelatesTo().getFilterList().size() > 0;
+    if (hasField(fieldsToRetrieve, Field.RELATES_TO)
+        || checkRelatesTo) {
+      readRelationship(entity, result, EntityColumnPrefix.RELATES_TO, false);
+      if (checkRelatesTo
+          && !TimelineStorageUtils.matchRelatesTo(entity,
+              filters.getRelatesTo())) {
+        return null;
+      }
+      if (!hasField(fieldsToRetrieve, Field.RELATES_TO)) {
+        entity.getRelatesToEntities().clear();
+      }
+    }
+
+    // fetch info if fieldsToRetrieve contains INFO or ALL.
+    if (hasField(fieldsToRetrieve, Field.INFO)) {
+      readKeyValuePairs(entity, result, EntityColumnPrefix.INFO, false);
+    }
+
+    // fetch configs if fieldsToRetrieve contains CONFIGS or ALL.
+    if (hasField(fieldsToRetrieve, Field.CONFIGS)) {
+      readKeyValuePairs(entity, result, EntityColumnPrefix.CONFIG, true);
+    }
+
+    // Fetch events and match event filters if they exist. If the filters do
+    // not match, the entity is dropped. We have to match the filters locally
+    // because no HBase filter is set in the scan to filter out rows on the
+    // basis of events.
+    boolean checkEvents =
+        !isSingleEntityRead() && filters.getEventFilters() != null
+            && filters.getEventFilters().getFilterList().size() > 0;
+    if (hasField(fieldsToRetrieve, Field.EVENTS) || checkEvents) {
+      readEvents(entity, result, EntityColumnPrefix.EVENT);
+      if (checkEvents
+          && !TimelineStorageUtils.matchEventFilters(entity,
+              filters.getEventFilters())) {
+        return null;
+      }
+      if (!hasField(fieldsToRetrieve, Field.EVENTS)) {
+        entity.getEvents().clear();
+      }
+    }
+
+    // fetch metrics if fieldsToRetrieve contains METRICS or ALL.
+    if (hasField(fieldsToRetrieve, Field.METRICS)) {
+      readMetrics(entity, result, EntityColumnPrefix.METRIC);
+    }
+
+    entity.getInfo().put(TimelineReaderUtils.FROMID_KEY,
+        parseRowKey.getRowKeyAsString());
+    return entity;
+  }
+
+  /**
+   * Helper method for reading key-value pairs for either info or config.
+   *
+   * @param <T> Describes the type of column prefix.
+   * @param entity entity to fill.
+   * @param result result from HBase.
+   * @param prefix column prefix.
+   * @param isConfig if true, means we are reading configs, otherwise info.
+   * @throws IOException if any problem is encountered while reading result.
+   */
+  protected <T extends BaseTable<T>> void readKeyValuePairs(
+      TimelineEntity entity, Result result,
+      ColumnPrefix<T> prefix, boolean isConfig) throws IOException {
+    // info and configuration are of type Map<String, Object or String>
+    Map<String, Object> columns =
+        ColumnRWHelper.readResults(result, prefix, stringKeyConverter);
+    if (isConfig) {
+      for (Map.Entry<String, Object> column : columns.entrySet()) {
+        entity.addConfig(column.getKey(), column.getValue().toString());
+      }
+    } else {
+      entity.addInfo(columns);
+    }
+  }
+}
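
A hedged sketch (not part of the patch) of how a caller could drive the
fromid pagination implemented by getResults() and parseEntity() above;
fetchPage(...), withFromId(...) and process(...) are hypothetical stand-ins
for the actual reader/REST invocation:

    static void readAllPages(TimelineReaderContext context,
        TimelineEntityFilters filters) throws IOException {
      Set<TimelineEntity> page = fetchPage(context, filters);
      while (page != null && !page.isEmpty()) {
        String fromId = null;
        for (TimelineEntity e : page) {
          process(e);
          // parseEntity() stores each entity's encoded row key under
          // FROMID_KEY in the info map.
          fromId = (String) e.getInfo().get(TimelineReaderUtils.FROMID_KEY);
        }
        // Resuming with the last entity's fromid restarts the scan at that
        // row; the PageFilter added in getResults() caps each page at the
        // configured limit.
        page = fetchPage(context, withFromId(filters, fromId));
      }
    }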




[12/50] [abbrv] hadoop git commit: YARN-7919. Refactor timelineservice-hbase module into submodules. Contributed by Haibo Chen.

Posted by ha...@apache.org.
http://git-wip-us.apache.org/repos/asf/hadoop/blob/9af30d46/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-common/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/entity/package-info.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-common/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/entity/package-info.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-common/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/entity/package-info.java
new file mode 100644
index 0000000..bb0e331
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-common/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/entity/package-info.java
@@ -0,0 +1,28 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/**
+ * Package org.apache.hadoop.yarn.server.timelineservice.storage.entity
+ * contains classes related to implementation for entity table.
+ */
+@InterfaceAudience.Private
+@InterfaceStability.Unstable
+package org.apache.hadoop.yarn.server.timelineservice.storage.entity;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/9af30d46/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-common/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/flow/AggregationCompactionDimension.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-common/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/flow/AggregationCompactionDimension.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-common/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/flow/AggregationCompactionDimension.java
new file mode 100644
index 0000000..4e2cf2d
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-common/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/flow/AggregationCompactionDimension.java
@@ -0,0 +1,63 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.yarn.server.timelineservice.storage.flow;
+
+import org.apache.hadoop.hbase.util.Bytes;
+
+/**
+ * Identifies the compaction dimensions for the data in the
+ * {@link FlowRunTable}.
+ */
+public enum AggregationCompactionDimension {
+
+  /**
+   * the application id.
+   */
+  APPLICATION_ID((byte) 101);
+
+  private byte tagType;
+  private byte[] inBytes;
+
+  private AggregationCompactionDimension(byte tagType) {
+    this.tagType = tagType;
+    this.inBytes = Bytes.toBytes(this.name());
+  }
+
+  public Attribute getAttribute(String attributeValue) {
+    return new Attribute(this.name(), Bytes.toBytes(attributeValue));
+  }
+
+  public byte getTagType() {
+    return tagType;
+  }
+
+  public byte[] getInBytes() {
+    return this.inBytes.clone();
+  }
+
+  public static AggregationCompactionDimension
+      getAggregationCompactionDimension(String aggCompactDimStr) {
+    for (AggregationCompactionDimension aggDim : AggregationCompactionDimension
+        .values()) {
+      if (aggDim.name().equals(aggCompactDimStr)) {
+        return aggDim;
+      }
+    }
+    return null;
+  }
+}
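
A hedged illustration of the round trip this enum supports: during flow-run
compaction the application id travels with cells as a named attribute, and
the attribute name resolves back to the dimension (the application id below
is illustrative only):

    Attribute attr = AggregationCompactionDimension.APPLICATION_ID
        .getAttribute("application_1518800570000_0001");
    // attr.getName() is "APPLICATION_ID"; attr.getValue() holds the id bytes.
    AggregationCompactionDimension dim = AggregationCompactionDimension
        .getAggregationCompactionDimension(attr.getName()); // APPLICATION_ID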

http://git-wip-us.apache.org/repos/asf/hadoop/blob/9af30d46/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-common/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/flow/AggregationOperation.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-common/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/flow/AggregationOperation.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-common/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/flow/AggregationOperation.java
new file mode 100644
index 0000000..40cdd2c
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-common/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/flow/AggregationOperation.java
@@ -0,0 +1,94 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.yarn.server.timelineservice.storage.flow;
+
+import org.apache.hadoop.hbase.util.Bytes;
+
+/**
+ * Identifies the attributes to be set for puts into the {@link FlowRunTable}.
+ * The numbers used for tagType are prime numbers.
+ */
+public enum AggregationOperation {
+
+  /**
+   * When the flow was started.
+   */
+  GLOBAL_MIN((byte) 71),
+
+  /**
+   * When it ended.
+   */
+  GLOBAL_MAX((byte) 73),
+
+  /**
+   * The metrics of the flow.
+   */
+  SUM((byte) 79),
+
+  /**
+   * Sum of values that are final and will not change further (e.g. metrics
+   * of applications that have completed).
+   */
+  SUM_FINAL((byte) 83),
+
+  /**
+   * Min value as per the latest timestamp
+   * seen for a given app.
+   */
+  LATEST_MIN((byte) 89),
+
+  /**
+   * Max value as per the latest timestamp
+   * seen for a given app.
+   */
+  LATEST_MAX((byte) 97);
+
+  private byte tagType;
+  private byte[] inBytes;
+
+  private AggregationOperation(byte tagType) {
+    this.tagType = tagType;
+    this.inBytes = Bytes.toBytes(this.name());
+  }
+
+  public Attribute getAttribute() {
+    return new Attribute(this.name(), this.inBytes);
+  }
+
+  public byte getTagType() {
+    return tagType;
+  }
+
+  public byte[] getInBytes() {
+    return this.inBytes.clone();
+  }
+
+  /**
+   * Returns the AggregationOperation enum that represents the given string.
+   * @param aggOpStr Aggregation operation.
+   * @return the AggregationOperation enum that represents that string
+   */
+  public static AggregationOperation getAggregationOperation(String aggOpStr) {
+    for (AggregationOperation aggOp : AggregationOperation.values()) {
+      if (aggOp.name().equals(aggOpStr)) {
+        return aggOp;
+      }
+    }
+    return null;
+  }
+
+}
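
A short, hedged example of the name round trip; as the javadoc notes, the
tag types are primes:

    AggregationOperation op =
        AggregationOperation.getAggregationOperation("SUM_FINAL"); // SUM_FINAL
    byte tag = op.getTagType();      // 83
    Attribute a = op.getAttribute(); // name "SUM_FINAL", value = name's bytes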

http://git-wip-us.apache.org/repos/asf/hadoop/blob/9af30d46/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-common/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/flow/Attribute.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-common/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/flow/Attribute.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-common/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/flow/Attribute.java
new file mode 100644
index 0000000..d3de518
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-common/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/flow/Attribute.java
@@ -0,0 +1,39 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.yarn.server.timelineservice.storage.flow;
+
+/**
+ * Defines the attribute tuple to be set for puts into the {@link FlowRunTable}.
+ */
+public class Attribute {
+  private final String name;
+  private final byte[] value;
+
+  public Attribute(String name, byte[] value) {
+    this.name = name;
+    this.value = value.clone();
+  }
+
+  public String getName() {
+    return name;
+  }
+
+  public byte[] getValue() {
+    return value.clone();
+  }
+}
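
Both the constructor and getValue() clone the byte array, so an Attribute is
effectively immutable. A minimal sketch of that guarantee:

    byte[] v = Bytes.toBytes("1");
    Attribute attr = new Attribute("FLOW_VERSION", v);
    v[0] = 0;                      // mutating the caller's array has no effect
    byte[] copy = attr.getValue(); // a fresh copy on every call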

http://git-wip-us.apache.org/repos/asf/hadoop/blob/9af30d46/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-common/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/flow/FlowActivityColumnFamily.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-common/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/flow/FlowActivityColumnFamily.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-common/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/flow/FlowActivityColumnFamily.java
new file mode 100644
index 0000000..f9eb5b4
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-common/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/flow/FlowActivityColumnFamily.java
@@ -0,0 +1,55 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.yarn.server.timelineservice.storage.flow;
+
+import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.yarn.server.timelineservice.storage.common.ColumnFamily;
+import org.apache.hadoop.yarn.server.timelineservice.storage.common.Separator;
+
+/**
+ * Represents the flow activity table column families.
+ */
+public enum FlowActivityColumnFamily
+    implements ColumnFamily<FlowActivityTable> {
+
+  /**
+   * Info column family houses known columns, specifically ones included in
+   * columnfamily filters.
+   */
+  INFO("i");
+
+  /**
+   * Byte representation of this column family.
+   */
+  private final byte[] bytes;
+
+  /**
+   * @param value
+   *          create a column family with this name. Must be lower case and
+   *          without spaces.
+   */
+  private FlowActivityColumnFamily(String value) {
+    // column families should be lower case and not contain any spaces.
+    this.bytes = Bytes.toBytes(Separator.SPACE.encode(value));
+  }
+
+  public byte[] getBytes() {
+    return Bytes.copy(bytes);
+  }
+
+}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hadoop/blob/9af30d46/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-common/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/flow/FlowActivityColumnPrefix.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-common/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/flow/FlowActivityColumnPrefix.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-common/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/flow/FlowActivityColumnPrefix.java
new file mode 100644
index 0000000..f468f0b
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-common/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/flow/FlowActivityColumnPrefix.java
@@ -0,0 +1,133 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.yarn.server.timelineservice.storage.flow;
+
+import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.yarn.server.timelineservice.storage.common.ColumnFamily;
+import org.apache.hadoop.yarn.server.timelineservice.storage.common.ColumnHelper;
+import org.apache.hadoop.yarn.server.timelineservice.storage.common.ColumnPrefix;
+import org.apache.hadoop.yarn.server.timelineservice.storage.common.GenericConverter;
+import org.apache.hadoop.yarn.server.timelineservice.storage.common.HBaseTimelineSchemaUtils;
+import org.apache.hadoop.yarn.server.timelineservice.storage.common.Separator;
+import org.apache.hadoop.yarn.server.timelineservice.storage.common.ValueConverter;
+
+/**
+ * Identifies partially qualified columns for the {@link FlowActivityTable}.
+ */
+public enum FlowActivityColumnPrefix
+    implements ColumnPrefix<FlowActivityTable> {
+
+  /**
+   * To store run ids of the flows.
+   */
+  RUN_ID(FlowActivityColumnFamily.INFO, "r", null);
+
+  private final ColumnFamily<FlowActivityTable> columnFamily;
+  private final ValueConverter valueConverter;
+
+  /**
+   * Can be null for those cases where the provided column qualifier is the
+   * entire column name.
+   */
+  private final String columnPrefix;
+  private final byte[] columnPrefixBytes;
+
+  private final AggregationOperation aggOp;
+
+  /**
+   * Private constructor, meant to be used by the enum definition.
+   *
+   * @param columnFamily
+   *          that this column is stored in.
+   * @param columnPrefix
+   *          for this column.
+   */
+  private FlowActivityColumnPrefix(
+      ColumnFamily<FlowActivityTable> columnFamily, String columnPrefix,
+      AggregationOperation aggOp) {
+    this(columnFamily, columnPrefix, aggOp, false);
+  }
+
+  private FlowActivityColumnPrefix(
+      ColumnFamily<FlowActivityTable> columnFamily, String columnPrefix,
+      AggregationOperation aggOp, boolean compoundColQual) {
+    this.valueConverter = GenericConverter.getInstance();
+    this.columnFamily = columnFamily;
+    this.columnPrefix = columnPrefix;
+    if (columnPrefix == null) {
+      this.columnPrefixBytes = null;
+    } else {
+      // Future-proof by ensuring the right column prefix hygiene.
+      this.columnPrefixBytes = Bytes.toBytes(Separator.SPACE
+          .encode(columnPrefix));
+    }
+    this.aggOp = aggOp;
+  }
+
+  /**
+   * @return the column name value
+   */
+  public String getColumnPrefix() {
+    return columnPrefix;
+  }
+
+  @Override
+  public byte[] getColumnPrefixBytes(byte[] qualifierPrefix) {
+    return ColumnHelper.getColumnQualifier(
+        this.columnPrefixBytes, qualifierPrefix);
+  }
+
+  @Override
+  public byte[] getColumnPrefixBytes(String qualifierPrefix) {
+    return ColumnHelper.getColumnQualifier(
+        this.columnPrefixBytes, qualifierPrefix);
+  }
+
+  public byte[] getColumnPrefixBytes() {
+    return columnPrefixBytes.clone();
+  }
+
+  @Override
+  public byte[] getColumnFamilyBytes() {
+    return columnFamily.getBytes();
+  }
+
+  @Override
+  public byte[] getColumnPrefixInBytes() {
+    return columnPrefixBytes != null ? columnPrefixBytes.clone() : null;
+  }
+
+  @Override
+  public ValueConverter getValueConverter() {
+    return valueConverter;
+  }
+
+  @Override
+  public Attribute[] getCombinedAttrsWithAggr(Attribute... attributes) {
+    return HBaseTimelineSchemaUtils.combineAttributes(attributes, aggOp);
+  }
+
+  @Override
+  public boolean supplementCellTimeStamp() {
+    return false;
+  }
+
+  public AggregationOperation getAttribute() {
+    return aggOp;
+  }
+}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hadoop/blob/9af30d46/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-common/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/flow/FlowActivityRowKey.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-common/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/flow/FlowActivityRowKey.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-common/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/flow/FlowActivityRowKey.java
new file mode 100644
index 0000000..747f6ab
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-common/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/flow/FlowActivityRowKey.java
@@ -0,0 +1,247 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.yarn.server.timelineservice.storage.flow;
+
+import java.util.List;
+
+import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.yarn.server.timelineservice.reader.TimelineReaderUtils;
+import org.apache.hadoop.yarn.server.timelineservice.storage.common.HBaseTimelineSchemaUtils;
+import org.apache.hadoop.yarn.server.timelineservice.storage.common.KeyConverter;
+import org.apache.hadoop.yarn.server.timelineservice.storage.common.LongConverter;
+import org.apache.hadoop.yarn.server.timelineservice.storage.common.KeyConverterToString;
+import org.apache.hadoop.yarn.server.timelineservice.storage.common.Separator;
+
+/**
+ * Represents a rowkey for the flow activity table.
+ */
+public class FlowActivityRowKey {
+
+  private final String clusterId;
+  private final Long dayTs;
+  private final String userId;
+  private final String flowName;
+  private final FlowActivityRowKeyConverter
+      flowActivityRowKeyConverter = new FlowActivityRowKeyConverter();
+
+  /**
+   * @param clusterId identifying the cluster
+   * @param dayTs to be converted to the top of the day timestamp
+   * @param userId identifying user
+   * @param flowName identifying the flow
+   */
+  public FlowActivityRowKey(String clusterId, Long dayTs, String userId,
+      String flowName) {
+    this(clusterId, dayTs, userId, flowName, true);
+  }
+
+  /**
+   * @param clusterId identifying the cluster
+   * @param timestamp when the flow activity happened. May be converted to the
+   *          top of the day depending on the convertDayTsToTopOfDay argument.
+   * @param userId identifying user
+   * @param flowName identifying the flow
+   * @param convertDayTsToTopOfDay if true and timestamp isn't null, then
+   *          timestamp will be converted to the top-of-the-day timestamp
+   */
+  protected FlowActivityRowKey(String clusterId, Long timestamp, String userId,
+      String flowName, boolean convertDayTsToTopOfDay) {
+    this.clusterId = clusterId;
+    if (convertDayTsToTopOfDay && (timestamp != null)) {
+      this.dayTs = HBaseTimelineSchemaUtils.getTopOfTheDayTimestamp(timestamp);
+    } else {
+      this.dayTs = timestamp;
+    }
+    this.userId = userId;
+    this.flowName = flowName;
+  }
+
+  public String getClusterId() {
+    return clusterId;
+  }
+
+  public Long getDayTimestamp() {
+    return dayTs;
+  }
+
+  public String getUserId() {
+    return userId;
+  }
+
+  public String getFlowName() {
+    return flowName;
+  }
+
+  /**
+   * Constructs a row key for the flow activity table as follows:
+   * {@code clusterId!dayTimestamp!user!flowName}.
+   *
+   * @return byte array for the row key
+   */
+  public byte[] getRowKey() {
+    return flowActivityRowKeyConverter.encode(this);
+  }
+
+  /**
+   * Given the raw row key as bytes, returns the row key as an object.
+   *
+   * @param rowKey Byte representation of row key.
+   * @return A <cite>FlowActivityRowKey</cite> object.
+   */
+  public static FlowActivityRowKey parseRowKey(byte[] rowKey) {
+    return new FlowActivityRowKeyConverter().decode(rowKey);
+  }
+
+  /**
+   * Constructs a row key for the flow activity table as follows:
+   * {@code clusterId!dayTimestamp!user!flowName}.
+   * @return String representation of row key
+   */
+  public String getRowKeyAsString() {
+    return flowActivityRowKeyConverter.encodeAsString(this);
+  }
+
+  /**
+   * Given the raw row key as string, returns the row key as an object.
+   * @param encodedRowKey String representation of row key.
+   * @return A <cite>FlowActivityRowKey</cite> object.
+   */
+  public static FlowActivityRowKey parseRowKeyFromString(String encodedRowKey) {
+    return new FlowActivityRowKeyConverter().decodeFromString(encodedRowKey);
+  }
+
+  /**
+   * Encodes and decodes row key for flow activity table. The row key is of the
+   * form : clusterId!dayTimestamp!user!flowName. dayTimestamp(top of the day
+   * timestamp) is a long and rest are strings.
+   * <p>
+   */
+  final private static class FlowActivityRowKeyConverter
+      implements KeyConverter<FlowActivityRowKey>,
+      KeyConverterToString<FlowActivityRowKey> {
+
+    private FlowActivityRowKeyConverter() {
+    }
+
+    /**
+     * The flow activity row key is of the form
+     * clusterId!dayTimestamp!user!flowName with each segment separated by !.
+     * The sizes below indicate the size of each of these segments in
+     * sequence. clusterId, user and flowName are strings. The top-of-the-day
+     * timestamp is a long, hence 8 bytes in size. Strings are variable in
+     * size (i.e. they end whenever the separator is encountered). This is
+     * used while
+     * decoding and helps in determining where to split.
+     */
+    private static final int[] SEGMENT_SIZES = {Separator.VARIABLE_SIZE,
+        Bytes.SIZEOF_LONG, Separator.VARIABLE_SIZE, Separator.VARIABLE_SIZE };
+
+    /*
+     * (non-Javadoc)
+     *
+     * Encodes a FlowActivityRowKey object into a byte array with each
+     * component/field in FlowActivityRowKey separated by Separator#QUALIFIERS.
+     * This yields a flow activity table row key of the form
+     * clusterId!dayTimestamp!user!flowName. If dayTimestamp in the passed
+     * FlowActivityRowKey object is null and clusterId is not null, this
+     * returns a row key prefix of clusterId!; if userId is null (and the
+     * fields preceding it, i.e. clusterId and dayTimestamp, are not null),
+     * this returns a row key prefix of clusterId!dayTimestamp!. dayTimestamp
+     * is inverted while encoding, which keeps row keys in the flow activity
+     * table in descending order by day.
+     *
+     * @see org.apache.hadoop.yarn.server.timelineservice.storage.common
+     * .KeyConverter#encode(java.lang.Object)
+     */
+    @Override
+    public byte[] encode(FlowActivityRowKey rowKey) {
+      if (rowKey.getDayTimestamp() == null) {
+        return Separator.QUALIFIERS.join(Separator.encode(
+            rowKey.getClusterId(), Separator.SPACE, Separator.TAB,
+            Separator.QUALIFIERS), Separator.EMPTY_BYTES);
+      }
+      if (rowKey.getUserId() == null) {
+        return Separator.QUALIFIERS.join(Separator.encode(
+            rowKey.getClusterId(), Separator.SPACE, Separator.TAB,
+            Separator.QUALIFIERS), Bytes.toBytes(LongConverter
+            .invertLong(rowKey.getDayTimestamp())), Separator.EMPTY_BYTES);
+      }
+      return Separator.QUALIFIERS.join(Separator.encode(rowKey.getClusterId(),
+          Separator.SPACE, Separator.TAB, Separator.QUALIFIERS), Bytes
+          .toBytes(LongConverter.invertLong(rowKey.getDayTimestamp())),
+          Separator.encode(rowKey.getUserId(), Separator.SPACE, Separator.TAB,
+              Separator.QUALIFIERS), Separator.encode(rowKey.getFlowName(),
+              Separator.SPACE, Separator.TAB, Separator.QUALIFIERS));
+    }
+
+    /*
+     * (non-Javadoc)
+     *
+     * @see
+     * org.apache.hadoop.yarn.server.timelineservice.storage.common
+     * .KeyConverter#decode(byte[])
+     */
+    @Override
+    public FlowActivityRowKey decode(byte[] rowKey) {
+      byte[][] rowKeyComponents =
+          Separator.QUALIFIERS.split(rowKey, SEGMENT_SIZES);
+      if (rowKeyComponents.length != 4) {
+        throw new IllegalArgumentException("the row key is not valid for "
+            + "a flow activity");
+      }
+      String clusterId =
+          Separator.decode(Bytes.toString(rowKeyComponents[0]),
+              Separator.QUALIFIERS, Separator.TAB, Separator.SPACE);
+      Long dayTs = LongConverter.invertLong(Bytes.toLong(rowKeyComponents[1]));
+      String userId =
+          Separator.decode(Bytes.toString(rowKeyComponents[2]),
+              Separator.QUALIFIERS, Separator.TAB, Separator.SPACE);
+      String flowName =
+          Separator.decode(Bytes.toString(rowKeyComponents[3]),
+              Separator.QUALIFIERS, Separator.TAB, Separator.SPACE);
+      return new FlowActivityRowKey(clusterId, dayTs, userId, flowName);
+    }
+
+    @Override
+    public String encodeAsString(FlowActivityRowKey key) {
+      if (key.getDayTimestamp() == null) {
+        return TimelineReaderUtils
+            .joinAndEscapeStrings(new String[] {key.clusterId});
+      } else if (key.getUserId() == null) {
+        return TimelineReaderUtils.joinAndEscapeStrings(
+            new String[] {key.clusterId, key.dayTs.toString()});
+      } else if (key.getFlowName() == null) {
+        return TimelineReaderUtils.joinAndEscapeStrings(
+            new String[] {key.clusterId, key.dayTs.toString(), key.userId});
+      }
+      return TimelineReaderUtils.joinAndEscapeStrings(new String[] {
+          key.clusterId, key.dayTs.toString(), key.userId, key.flowName});
+    }
+
+    @Override
+    public FlowActivityRowKey decodeFromString(String encodedRowKey) {
+      List<String> split = TimelineReaderUtils.split(encodedRowKey);
+      if (split == null || split.size() != 4) {
+        throw new IllegalArgumentException(
+            "Invalid row key for flow activity.");
+      }
+      Long dayTs = Long.valueOf(split.get(1));
+      return new FlowActivityRowKey(split.get(0), dayTs, split.get(2),
+          split.get(3));
+    }
+  }
+}
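
A hedged sketch of the row key round trip defined above, with illustrative
cluster/user/flow names; note that the four-argument constructor truncates
the timestamp to the top of the day:

    long eventTs = 1518800570000L;  // an arbitrary event time
    FlowActivityRowKey key =
        new FlowActivityRowKey("yarn-cluster", eventTs, "alice", "daily-etl");
    byte[] row = key.getRowKey();   // clusterId!invertedDayTs!user!flowName
    FlowActivityRowKey parsed = FlowActivityRowKey.parseRowKey(row);
    // parsed.getDayTimestamp() is the top-of-day timestamp derived from
    // eventTs, not eventTs itself.
    String encoded = key.getRowKeyAsString(); // escaped, '!'-joined form
    FlowActivityRowKey again =
        FlowActivityRowKey.parseRowKeyFromString(encoded);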

http://git-wip-us.apache.org/repos/asf/hadoop/blob/9af30d46/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-common/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/flow/FlowActivityRowKeyPrefix.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-common/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/flow/FlowActivityRowKeyPrefix.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-common/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/flow/FlowActivityRowKeyPrefix.java
new file mode 100644
index 0000000..eb88e54
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-common/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/flow/FlowActivityRowKeyPrefix.java
@@ -0,0 +1,60 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.yarn.server.timelineservice.storage.flow;
+
+import org.apache.hadoop.yarn.server.timelineservice.storage.common.RowKeyPrefix;
+
+/**
+ * A partial row key serving as a prefix for flow activity row keys.
+ */
+public class FlowActivityRowKeyPrefix extends FlowActivityRowKey implements
+    RowKeyPrefix<FlowActivityRowKey> {
+
+  /**
+   * Constructs a row key prefix for the flow activity table as follows:
+   * {@code clusterId!dayTimestamp!}.
+   *
+   * @param clusterId Cluster Id.
+   * @param dayTs Start of the day timestamp.
+   */
+  public FlowActivityRowKeyPrefix(String clusterId, Long dayTs) {
+    super(clusterId, dayTs, null, null, false);
+  }
+
+  /**
+   * Constructs a row key prefix for the flow activity table as follows:
+   * {@code clusterId!}.
+   *
+   * @param clusterId identifying the cluster
+   */
+  public FlowActivityRowKeyPrefix(String clusterId) {
+    super(clusterId, null, null, null, false);
+  }
+
+  /*
+   * (non-Javadoc)
+   *
+   * @see
+   * org.apache.hadoop.yarn.server.timelineservice.storage.application.
+   * RowKeyPrefix#getRowKeyPrefix()
+   */
+  public byte[] getRowKeyPrefix() {
+    return super.getRowKey();
+  }
+
+}
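
A hedged one-liner showing the intended use: the prefix bytes bound a scan to
every flow active on one day (topOfDayTs below is an assumed top-of-the-day
timestamp):

    byte[] dayPrefix = new FlowActivityRowKeyPrefix("yarn-cluster", topOfDayTs)
        .getRowKeyPrefix();  // usable with Scan#setRowPrefixFilter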

http://git-wip-us.apache.org/repos/asf/hadoop/blob/9af30d46/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-common/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/flow/FlowActivityTable.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-common/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/flow/FlowActivityTable.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-common/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/flow/FlowActivityTable.java
new file mode 100644
index 0000000..e88a2fc
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-common/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/flow/FlowActivityTable.java
@@ -0,0 +1,45 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.server.timelineservice.storage.flow;
+
+import org.apache.hadoop.yarn.server.timelineservice.storage.common.BaseTable;
+
+/**
+ * The flow activity table has a single column family, info. It stores the
+ * daily activity record for flows and is useful as a quick lookup of which
+ * flows were running on a given day.
+ *
+ * Example flow activity table record:
+ *
+ * <pre>
+ * |-------------------------------------------|
+ * |  Row key   | Column Family                |
+ * |            | info                         |
+ * |-------------------------------------------|
+ * | clusterId! | r!runid1:version1            |
+ * | inv Top of |                              |
+ * | Day!       | r!runid2:version7            |
+ * | userName!  |                              |
+ * | flowName   |                              |
+ * |-------------------------------------------|
+ * </pre>
+ */
+public final class FlowActivityTable extends BaseTable<FlowActivityTable> {
+}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/9af30d46/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-common/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/flow/FlowRunColumn.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-common/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/flow/FlowRunColumn.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-common/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/flow/FlowRunColumn.java
new file mode 100644
index 0000000..2132d04
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-common/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/flow/FlowRunColumn.java
@@ -0,0 +1,112 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.yarn.server.timelineservice.storage.flow;
+
+import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.yarn.server.timelineservice.storage.common.Column;
+import org.apache.hadoop.yarn.server.timelineservice.storage.common.ColumnFamily;
+import org.apache.hadoop.yarn.server.timelineservice.storage.common.GenericConverter;
+import org.apache.hadoop.yarn.server.timelineservice.storage.common.HBaseTimelineSchemaUtils;
+import org.apache.hadoop.yarn.server.timelineservice.storage.common.LongConverter;
+import org.apache.hadoop.yarn.server.timelineservice.storage.common.Separator;
+import org.apache.hadoop.yarn.server.timelineservice.storage.common.ValueConverter;
+
+/**
+ * Identifies fully qualified columns for the {@link FlowRunTable}.
+ */
+public enum FlowRunColumn implements Column<FlowRunTable> {
+
+  /**
+   * When the flow was started. This is the minimum of currently known
+   * application start times.
+   */
+  MIN_START_TIME(FlowRunColumnFamily.INFO, "min_start_time",
+      AggregationOperation.GLOBAL_MIN, new LongConverter()),
+
+  /**
+   * When the flow ended. This is the maximum of currently known application end
+   * times.
+   */
+  MAX_END_TIME(FlowRunColumnFamily.INFO, "max_end_time",
+      AggregationOperation.GLOBAL_MAX, new LongConverter()),
+
+  /**
+   * The version of the flow that this flow run belongs to.
+   */
+  FLOW_VERSION(FlowRunColumnFamily.INFO, "flow_version", null);
+
+  private final ColumnFamily<FlowRunTable> columnFamily;
+  private final String columnQualifier;
+  private final byte[] columnQualifierBytes;
+  private final AggregationOperation aggOp;
+  private final ValueConverter valueConverter;
+
+  private FlowRunColumn(ColumnFamily<FlowRunTable> columnFamily,
+      String columnQualifier, AggregationOperation aggOp) {
+    this(columnFamily, columnQualifier, aggOp,
+        GenericConverter.getInstance());
+  }
+
+  private FlowRunColumn(ColumnFamily<FlowRunTable> columnFamily,
+      String columnQualifier, AggregationOperation aggOp,
+      ValueConverter converter) {
+    this.columnFamily = columnFamily;
+    this.columnQualifier = columnQualifier;
+    this.aggOp = aggOp;
+    // Future-proof by ensuring the right column prefix hygiene.
+    this.columnQualifierBytes = Bytes.toBytes(Separator.SPACE
+        .encode(columnQualifier));
+    this.valueConverter = converter;
+  }
+
+  /**
+   * @return the column name value
+   */
+  private String getColumnQualifier() {
+    return columnQualifier;
+  }
+
+  @Override
+  public byte[] getColumnQualifierBytes() {
+    return columnQualifierBytes.clone();
+  }
+
+  @Override
+  public byte[] getColumnFamilyBytes() {
+    return columnFamily.getBytes();
+  }
+
+  public AggregationOperation getAggregationOperation() {
+    return aggOp;
+  }
+
+  @Override
+  public ValueConverter getValueConverter() {
+    return valueConverter;
+  }
+
+  @Override
+  public Attribute[] getCombinedAttrsWithAggr(Attribute... attributes) {
+    return HBaseTimelineSchemaUtils.combineAttributes(attributes, aggOp);
+  }
+
+  @Override
+  public boolean supplementCellTimestamp() {
+    return true;
+  }
+}
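
A hedged sketch of how a writer might use these columns: each column supplies
both a value encoding and the aggregation tag that the flow-run coprocessor
applies (encodeValue may throw IOException; error handling omitted):

    FlowRunColumn col = FlowRunColumn.MIN_START_TIME;
    byte[] value = col.getValueConverter().encodeValue(1518800570000L);
    Attribute[] attrs = col.getCombinedAttrsWithAggr(); // carries GLOBAL_MIN
    // A put written with these attributes lets the coprocessor keep the
    // minimum start time across all applications in the flow run.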

http://git-wip-us.apache.org/repos/asf/hadoop/blob/9af30d46/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-common/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/flow/FlowRunColumnFamily.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-common/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/flow/FlowRunColumnFamily.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-common/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/flow/FlowRunColumnFamily.java
new file mode 100644
index 0000000..8faf5f8
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-common/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/flow/FlowRunColumnFamily.java
@@ -0,0 +1,54 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.yarn.server.timelineservice.storage.flow;
+
+import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.yarn.server.timelineservice.storage.common.ColumnFamily;
+import org.apache.hadoop.yarn.server.timelineservice.storage.common.Separator;
+
+/**
+ * Represents the flow run table column families.
+ */
+public enum FlowRunColumnFamily implements ColumnFamily<FlowRunTable> {
+
+  /**
+   * Info column family houses known columns, specifically ones included in
+   * columnfamily filters.
+   */
+  INFO("i");
+
+  /**
+   * Byte representation of this column family.
+   */
+  private final byte[] bytes;
+
+  /**
+   * @param value
+   *          create a column family with this name. Must be lower case and
+   *          without spaces.
+   */
+  private FlowRunColumnFamily(String value) {
+    // column families should be lower case and not contain any spaces.
+    this.bytes = Bytes.toBytes(Separator.SPACE.encode(value));
+  }
+
+  public byte[] getBytes() {
+    return Bytes.copy(bytes);
+  }
+
+}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/9af30d46/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-common/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/flow/FlowRunColumnPrefix.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-common/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/flow/FlowRunColumnPrefix.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-common/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/flow/FlowRunColumnPrefix.java
new file mode 100644
index 0000000..cc06bb4
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-common/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/flow/FlowRunColumnPrefix.java
@@ -0,0 +1,129 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.yarn.server.timelineservice.storage.flow;
+
+import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.yarn.server.timelineservice.storage.common.ColumnFamily;
+import org.apache.hadoop.yarn.server.timelineservice.storage.common.ColumnHelper;
+import org.apache.hadoop.yarn.server.timelineservice.storage.common.ColumnPrefix;
+import org.apache.hadoop.yarn.server.timelineservice.storage.common.HBaseTimelineSchemaUtils;
+import org.apache.hadoop.yarn.server.timelineservice.storage.common.LongConverter;
+import org.apache.hadoop.yarn.server.timelineservice.storage.common.Separator;
+import org.apache.hadoop.yarn.server.timelineservice.storage.common.ValueConverter;
+
+/**
+ * Identifies partially qualified columns for the {@link FlowRunTable}.
+ */
+public enum FlowRunColumnPrefix implements ColumnPrefix<FlowRunTable> {
+
+  /**
+   * To store flow run metric values.
+   */
+  METRIC(FlowRunColumnFamily.INFO, "m", null, new LongConverter());
+
+  private final ColumnFamily<FlowRunTable> columnFamily;
+
+  /**
+   * Can be null for those cases where the provided column qualifier is the
+   * entire column name.
+   */
+  private final String columnPrefix;
+  private final byte[] columnPrefixBytes;
+  private final ValueConverter valueConverter;
+
+  private final AggregationOperation aggOp;
+
+  /**
+   * Private constructor, meant to be used by the enum definition.
+   *
+   * @param columnFamily that this column is stored in.
+   * @param columnPrefix for this column.
+   */
+  private FlowRunColumnPrefix(ColumnFamily<FlowRunTable> columnFamily,
+      String columnPrefix, AggregationOperation fra, ValueConverter converter) {
+    this(columnFamily, columnPrefix, fra, converter, false);
+  }
+
+  private FlowRunColumnPrefix(ColumnFamily<FlowRunTable> columnFamily,
+      String columnPrefix, AggregationOperation fra, ValueConverter converter,
+      boolean compoundColQual) {
+    this.valueConverter = converter;
+    this.columnFamily = columnFamily;
+    this.columnPrefix = columnPrefix;
+    if (columnPrefix == null) {
+      this.columnPrefixBytes = null;
+    } else {
+      // Future-proof by ensuring the right column prefix hygiene.
+      this.columnPrefixBytes =
+          Bytes.toBytes(Separator.SPACE.encode(columnPrefix));
+    }
+    this.aggOp = fra;
+  }
+
+  /**
+   * @return the column prefix value.
+   */
+  public String getColumnPrefix() {
+    return columnPrefix;
+  }
+
+  public byte[] getColumnPrefixBytes() {
+    return columnPrefixBytes.clone();
+  }
+
+  @Override
+  public byte[] getColumnPrefixBytes(byte[] qualifierPrefix) {
+    return ColumnHelper.getColumnQualifier(this.columnPrefixBytes,
+        qualifierPrefix);
+  }
+
+  @Override
+  public byte[] getColumnPrefixBytes(String qualifierPrefix) {
+    return ColumnHelper.getColumnQualifier(this.columnPrefixBytes,
+        qualifierPrefix);
+  }
+
+  @Override
+  public byte[] getColumnFamilyBytes() {
+    return columnFamily.getBytes();
+  }
+
+  @Override
+  public byte[] getColumnPrefixInBytes() {
+    return columnPrefixBytes != null ? columnPrefixBytes.clone() : null;
+  }
+
+  @Override
+  public Attribute[] getCombinedAttrsWithAggr(Attribute... attributes) {
+    return HBaseTimelineSchemaUtils.combineAttributes(attributes, aggOp);
+  }
+
+  @Override
+  public boolean supplementCellTimeStamp() {
+    return true;
+  }
+
+  public AggregationOperation getAttribute() {
+    return aggOp;
+  }
+
+  @Override
+  public ValueConverter getValueConverter() {
+    return valueConverter;
+  }
+}
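
For orientation, a minimal usage sketch of the enum above (illustrative
only; the metric name is made up):

  // Resolve the family and the prefixed qualifier for one metric column.
  byte[] family = FlowRunColumnPrefix.METRIC.getColumnFamilyBytes();
  byte[] qualifier =
      FlowRunColumnPrefix.METRIC.getColumnPrefixBytes("mapInputRecords");
  // The joined qualifier corresponds to the "m!mapInputRecords" layout
  // shown in the FlowRunTable javadoc later in this patch.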

http://git-wip-us.apache.org/repos/asf/hadoop/blob/9af30d46/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-common/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/flow/FlowRunRowKey.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-common/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/flow/FlowRunRowKey.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-common/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/flow/FlowRunRowKey.java
new file mode 100644
index 0000000..7ce91cf
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-common/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/flow/FlowRunRowKey.java
@@ -0,0 +1,233 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.yarn.server.timelineservice.storage.flow;
+
+import java.util.List;
+
+import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.yarn.server.timelineservice.reader.TimelineReaderUtils;
+import org.apache.hadoop.yarn.server.timelineservice.storage.common.KeyConverter;
+import org.apache.hadoop.yarn.server.timelineservice.storage.common.KeyConverterToString;
+import org.apache.hadoop.yarn.server.timelineservice.storage.common.LongConverter;
+import org.apache.hadoop.yarn.server.timelineservice.storage.common.Separator;
+
+/**
+ * Represents a rowkey for the flow run table.
+ */
+public class FlowRunRowKey {
+  private final String clusterId;
+  private final String userId;
+  private final String flowName;
+  private final Long flowRunId;
+  private final FlowRunRowKeyConverter flowRunRowKeyConverter =
+      new FlowRunRowKeyConverter();
+
+  public FlowRunRowKey(String clusterId, String userId, String flowName,
+      Long flowRunId) {
+    this.clusterId = clusterId;
+    this.userId = userId;
+    this.flowName = flowName;
+    this.flowRunId = flowRunId;
+  }
+
+  public String getClusterId() {
+    return clusterId;
+  }
+
+  public String getUserId() {
+    return userId;
+  }
+
+  public String getFlowName() {
+    return flowName;
+  }
+
+  public Long getFlowRunId() {
+    return flowRunId;
+  }
+
+  /**
+   * Constructs a row key for the flow run table as follows: {
+   * clusterId!userId!flowName!Inverted Flow Run Id}.
+   *
+   * @return byte array with the row key
+   */
+  public byte[] getRowKey() {
+    return flowRunRowKeyConverter.encode(this);
+  }
+
+
+  /**
+   * Given the raw row key as bytes, returns the row key as an object.
+   * @param rowKey Byte representation of row key.
+   * @return A <cite>FlowRunRowKey</cite> object.
+   */
+  public static FlowRunRowKey parseRowKey(byte[] rowKey) {
+    return new FlowRunRowKeyConverter().decode(rowKey);
+  }
+
+  /**
+   * Constructs a row key for the flow run table as follows:
+   * {@code clusterId!userId!flowName!Flow Run Id}.
+   * @return String representation of row key
+   */
+  public String getRowKeyAsString() {
+    return flowRunRowKeyConverter.encodeAsString(this);
+  }
+
+  /**
+   * Given the encoded row key as string, returns the row key as an object.
+   * @param encodedRowKey String representation of row key.
+   * @return A <cite>FlowRunRowKey</cite> object.
+   */
+  public static FlowRunRowKey parseRowKeyFromString(String encodedRowKey) {
+    return new FlowRunRowKeyConverter().decodeFromString(encodedRowKey);
+  }
+
+  /**
+   * Returns the flow run row key as a verbose String output.
+   * @return String
+   */
+  @Override
+  public String toString() {
+    StringBuilder flowKeyStr = new StringBuilder();
+    flowKeyStr.append("{clusterId=" + clusterId);
+    flowKeyStr.append(" userId=" + userId);
+    flowKeyStr.append(" flowName=" + flowName);
+    flowKeyStr.append(" flowRunId=");
+    flowKeyStr.append(flowRunId);
+    flowKeyStr.append("}");
+    return flowKeyStr.toString();
+  }
+
+  /**
+   * Encodes and decodes row key for flow run table.
+   * The row key is of the form: clusterId!userId!flowName!flowrunId.
+   * flowrunId is a long and the rest are strings.
+   * <p>
+   */
+  private static final class FlowRunRowKeyConverter implements
+      KeyConverter<FlowRunRowKey>, KeyConverterToString<FlowRunRowKey> {
+
+    private FlowRunRowKeyConverter() {
+    }
+
+    /**
+     * The flow run row key is of the form clusterId!userId!flowName!flowrunId
+     * with each segment separated by !. The sizes below indicate sizes of each
+     * one of these segments in sequence. clusterId, userId and flowName are
+     * strings. flowrunId is a long hence 8 bytes in size. Strings are variable
+     * in size (i.e. end whenever separator is encountered). This is used while
+     * decoding and helps in determining where to split.
+     */
+    private static final int[] SEGMENT_SIZES = {Separator.VARIABLE_SIZE,
+        Separator.VARIABLE_SIZE, Separator.VARIABLE_SIZE, Bytes.SIZEOF_LONG };
+
+    /*
+     * (non-Javadoc)
+     *
+     * Encodes FlowRunRowKey object into a byte array with each component/field
+     * in FlowRunRowKey separated by Separator#QUALIFIERS. This leads to a flow
+     * run row key of the form clusterId!userId!flowName!flowrunId. If flowRunId
+     * in the passed FlowRunRowKey object is null (and the fields preceding it,
+     * i.e. clusterId, userId and flowName, are not null), this returns a row
+     * key prefix of the form clusterId!userId!flowName!. The flowRunId is
+     * inverted while encoding, as that maintains a descending order for flow
+     * keys in the flow run table.
+     *
+     * @see
+     * org.apache.hadoop.yarn.server.timelineservice.storage.common
+     * .KeyConverter#encode(java.lang.Object)
+     */
+    @Override
+    public byte[] encode(FlowRunRowKey rowKey) {
+      byte[] first =
+          Separator.QUALIFIERS.join(Separator.encode(rowKey.getClusterId(),
+              Separator.SPACE, Separator.TAB, Separator.QUALIFIERS), Separator
+              .encode(rowKey.getUserId(), Separator.SPACE, Separator.TAB,
+                  Separator.QUALIFIERS), Separator.encode(rowKey.getFlowName(),
+              Separator.SPACE, Separator.TAB, Separator.QUALIFIERS));
+      if (rowKey.getFlowRunId() == null) {
+        return Separator.QUALIFIERS.join(first, Separator.EMPTY_BYTES);
+      } else {
+        // Note that flowRunId is a long, so we can't encode all the
+        // components in one join; the run id is inverted and converted
+        // to bytes separately below.
+        byte[] second =
+            Bytes.toBytes(LongConverter.invertLong(rowKey.getFlowRunId()));
+        return Separator.QUALIFIERS.join(first, second);
+      }
+    }
+
+    /*
+     * (non-Javadoc)
+     *
+     * Decodes a flow run row key of the form
+     * clusterId!userId!flowName!flowrunId represented in byte format and
+     * converts it into a FlowRunRowKey object. flowRunId is inverted while
+     * decoding as it was inverted while encoding.
+     *
+     * @see
+     * org.apache.hadoop.yarn.server.timelineservice.storage.common
+     * .KeyConverter#decode(byte[])
+     */
+    @Override
+    public FlowRunRowKey decode(byte[] rowKey) {
+      byte[][] rowKeyComponents =
+          Separator.QUALIFIERS.split(rowKey, SEGMENT_SIZES);
+      if (rowKeyComponents.length != 4) {
+        throw new IllegalArgumentException("the row key is not valid for "
+            + "a flow run");
+      }
+      String clusterId =
+          Separator.decode(Bytes.toString(rowKeyComponents[0]),
+              Separator.QUALIFIERS, Separator.TAB, Separator.SPACE);
+      String userId =
+          Separator.decode(Bytes.toString(rowKeyComponents[1]),
+              Separator.QUALIFIERS, Separator.TAB, Separator.SPACE);
+      String flowName =
+          Separator.decode(Bytes.toString(rowKeyComponents[2]),
+              Separator.QUALIFIERS, Separator.TAB, Separator.SPACE);
+      Long flowRunId =
+          LongConverter.invertLong(Bytes.toLong(rowKeyComponents[3]));
+      return new FlowRunRowKey(clusterId, userId, flowName, flowRunId);
+    }
+
+    @Override
+    public String encodeAsString(FlowRunRowKey key) {
+      if (key.clusterId == null || key.userId == null || key.flowName == null
+          || key.flowRunId == null) {
+        throw new IllegalArgumentException("flow run row key fields cannot be null");
+      }
+      return TimelineReaderUtils.joinAndEscapeStrings(new String[] {
+          key.clusterId, key.userId, key.flowName, key.flowRunId.toString()});
+    }
+
+    @Override
+    public FlowRunRowKey decodeFromString(String encodedRowKey) {
+      List<String> split = TimelineReaderUtils.split(encodedRowKey);
+      if (split == null || split.size() != 4) {
+        throw new IllegalArgumentException(
+            "Invalid row key for flow run table.");
+      }
+      Long flowRunId = Long.valueOf(split.get(3));
+      return new FlowRunRowKey(split.get(0), split.get(1), split.get(2),
+          flowRunId);
+    }
+  }
+}
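
A minimal round-trip sketch of the key layout above (illustrative only;
the cluster, user, flow and run id values are made up):

  // Encode a flow run row key and parse it back.
  FlowRunRowKey key = new FlowRunRowKey(
      "yarn-cluster", "alice", "word_count", 1002345678919L);
  byte[] raw = key.getRowKey();    // clusterId!userId!flowName!<inverted id>
  FlowRunRowKey parsed = FlowRunRowKey.parseRowKey(raw);
  assert parsed.getFlowRunId() == 1002345678919L; // inversion undone on decode

  // The escaped string form round-trips the same way.
  String encoded = key.getRowKeyAsString();
  FlowRunRowKey fromString = FlowRunRowKey.parseRowKeyFromString(encoded);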

http://git-wip-us.apache.org/repos/asf/hadoop/blob/9af30d46/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-common/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/flow/FlowRunRowKeyPrefix.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-common/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/flow/FlowRunRowKeyPrefix.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-common/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/flow/FlowRunRowKeyPrefix.java
new file mode 100644
index 0000000..23ebc66
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-common/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/flow/FlowRunRowKeyPrefix.java
@@ -0,0 +1,54 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.yarn.server.timelineservice.storage.flow;
+
+import org.apache.hadoop.yarn.server.timelineservice.storage.common.RowKeyPrefix;
+
+/**
+ * Represents a partial rowkey (without the flowRunId) for the flow run table.
+ */
+public class FlowRunRowKeyPrefix extends FlowRunRowKey implements
+    RowKeyPrefix<FlowRunRowKey> {
+
+  /**
+   * Constructs a row key prefix for the flow run table as follows:
+   * {@code clusterId!userId!flowName!}.
+   *
+   * @param clusterId identifying the cluster
+   * @param userId identifying the user
+   * @param flowName identifying the flow
+   */
+  public FlowRunRowKeyPrefix(String clusterId, String userId,
+      String flowName) {
+    super(clusterId, userId, flowName, null);
+  }
+
+  /*
+   * (non-Javadoc)
+   *
+   * @see
+   * org.apache.hadoop.yarn.server.timelineservice.storage.application.
+   * RowKeyPrefix#getRowKeyPrefix()
+   */
+  public byte[] getRowKeyPrefix() {
+    // We know we're a FlowRunRowKey with a null flowRunId, so we can
+    // simply delegate.
+    return super.getRowKey();
+  }
+
+}
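
A short sketch of the intended use (illustrative only; the scan wiring is
an assumption, not part of this patch):

  // Build a row-key prefix that covers every run of one flow.
  byte[] prefix =
      new FlowRunRowKeyPrefix("yarn-cluster", "alice", "word_count")
          .getRowKeyPrefix();
  // A reader would typically hand this to an HBase scan, e.g.
  // scan.setRowPrefixFilter(prefix), iterating runs in descending
  // flowRunId order since the stored run id is inverted.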

http://git-wip-us.apache.org/repos/asf/hadoop/blob/9af30d46/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-common/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/flow/FlowRunTable.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-common/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/flow/FlowRunTable.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-common/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/flow/FlowRunTable.java
new file mode 100644
index 0000000..643a102
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-common/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/flow/FlowRunTable.java
@@ -0,0 +1,77 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.server.timelineservice.storage.flow;
+
+import org.apache.hadoop.yarn.server.timelineservice.storage.common.BaseTable;
+
+/**
+ * The flow run table has a single column family, info, which
+ * stores per flow run information
+ * aggregated across applications.
+ *
+ * Metrics are also stored in the info column family.
+ *
+ * Example flow run table record:
+ *
+ * <pre>
+ * flow_run table
+ * |-------------------------------------------|
+ * |  Row key   | Column Family                |
+ * |            | info                         |
+ * |-------------------------------------------|
+ * | clusterId! | flow_version:version7        |
+ * | userName!  |                              |
+ * | flowName!  | running_apps:1               |
+ * | flowRunId  |                              |
+ * |            | min_start_time:1392995080000 |
+ * |            | #0:""                        |
+ * |            |                              |
+ * |            | min_start_time:1392995081012 |
+ * |            | #0:appId2                    |
+ * |            |                              |
+ * |            | min_start_time:1392993083210 |
+ * |            | #0:appId3                    |
+ * |            |                              |
+ * |            |                              |
+ * |            | max_end_time:1392993084018   |
+ * |            | #0:""                        |
+ * |            |                              |
+ * |            |                              |
+ * |            | m!mapInputRecords:127        |
+ * |            | #0:""                        |
+ * |            |                              |
+ * |            | m!mapInputRecords:31         |
+ * |            | #2:appId2                    |
+ * |            |                              |
+ * |            | m!mapInputRecords:37         |
+ * |            | #1:appId3                    |
+ * |            |                              |
+ * |            |                              |
+ * |            | m!mapOutputRecords:181       |
+ * |            | #0:""                        |
+ * |            |                              |
+ * |            | m!mapOutputRecords:37        |
+ * |            | #1:appId3                    |
+ * |            |                              |
+ * |            |                              |
+ * |-------------------------------------------|
+ * </pre>
+ */
+public final class FlowRunTable extends BaseTable<FlowRunTable> {
+}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/9af30d46/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-common/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/flow/package-info.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-common/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/flow/package-info.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-common/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/flow/package-info.java
new file mode 100644
index 0000000..04963f3
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-common/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/flow/package-info.java
@@ -0,0 +1,29 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/**
+ * Package org.apache.hadoop.yarn.server.timelineservice.storage.flow
+ * contains classes related to implementation for flow related tables, viz. flow
+ * run table and flow activity table.
+ */
+@InterfaceAudience.Private
+@InterfaceStability.Unstable
+package org.apache.hadoop.yarn.server.timelineservice.storage.flow;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/9af30d46/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-common/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/package-info.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-common/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/package-info.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-common/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/package-info.java
new file mode 100644
index 0000000..e78db2a
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-common/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/package-info.java
@@ -0,0 +1,28 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/**
+ * Package org.apache.hadoop.yarn.server.timelineservice.storage contains
+ * classes which define and implement reading and writing to backend storage.
+ */
+@InterfaceAudience.Private
+@InterfaceStability.Unstable
+package org.apache.hadoop.yarn.server.timelineservice.storage;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/9af30d46/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-common/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/subapplication/SubApplicationColumn.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-common/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/subapplication/SubApplicationColumn.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-common/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/subapplication/SubApplicationColumn.java
new file mode 100644
index 0000000..a011a3f
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-common/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/subapplication/SubApplicationColumn.java
@@ -0,0 +1,99 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.yarn.server.timelineservice.storage.subapplication;
+
+import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.yarn.server.timelineservice.storage.common.Column;
+import org.apache.hadoop.yarn.server.timelineservice.storage.common.ColumnFamily;
+import org.apache.hadoop.yarn.server.timelineservice.storage.common.GenericConverter;
+import org.apache.hadoop.yarn.server.timelineservice.storage.common.LongConverter;
+import org.apache.hadoop.yarn.server.timelineservice.storage.common.Separator;
+import org.apache.hadoop.yarn.server.timelineservice.storage.common.ValueConverter;
+import org.apache.hadoop.yarn.server.timelineservice.storage.flow.Attribute;
+
+/**
+ * Identifies fully qualified columns for the {@link SubApplicationTable}.
+ */
+public enum SubApplicationColumn implements Column<SubApplicationTable> {
+
+  /**
+   * Identifier for the sub application.
+   */
+  ID(SubApplicationColumnFamily.INFO, "id"),
+
+  /**
+   * The type of sub application.
+   */
+  TYPE(SubApplicationColumnFamily.INFO, "type"),
+
+  /**
+   * When the sub application was created.
+   */
+  CREATED_TIME(SubApplicationColumnFamily.INFO, "created_time",
+      new LongConverter()),
+
+  /**
+   * The version of the flow that this sub application belongs to.
+   */
+  FLOW_VERSION(SubApplicationColumnFamily.INFO, "flow_version");
+
+  private final ColumnFamily<SubApplicationTable> columnFamily;
+  private final String columnQualifier;
+  private final byte[] columnQualifierBytes;
+  private final ValueConverter valueConverter;
+
+  SubApplicationColumn(ColumnFamily<SubApplicationTable> columnFamily,
+      String columnQualifier) {
+    this(columnFamily, columnQualifier, GenericConverter.getInstance());
+  }
+
+  SubApplicationColumn(ColumnFamily<SubApplicationTable> columnFamily,
+      String columnQualifier, ValueConverter converter) {
+    this.columnFamily = columnFamily;
+    this.columnQualifier = columnQualifier;
+    // Future-proof by ensuring the right column prefix hygiene.
+    this.columnQualifierBytes =
+        Bytes.toBytes(Separator.SPACE.encode(columnQualifier));
+    this.valueConverter = converter;
+  }
+
+  @Override
+  public byte[] getColumnQualifierBytes() {
+    return columnQualifierBytes.clone();
+  }
+
+  @Override
+  public byte[] getColumnFamilyBytes() {
+    return columnFamily.getBytes();
+  }
+
+  @Override
+  public ValueConverter getValueConverter() {
+    return valueConverter;
+  }
+
+  @Override
+  public Attribute[] getCombinedAttrsWithAggr(Attribute... attributes) {
+    return attributes;
+  }
+
+  @Override
+  public boolean supplementCellTimestamp() {
+    return false;
+  }
+}
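
For orientation, a minimal sketch of addressing one of these fully
qualified columns (illustrative only):

  // Resolve family and qualifier bytes for the created_time column.
  byte[] family = SubApplicationColumn.CREATED_TIME.getColumnFamilyBytes();
  byte[] qualifier =
      SubApplicationColumn.CREATED_TIME.getColumnQualifierBytes();
  // Values under this column are encoded with the LongConverter that the
  // enum constant above is declared with.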

http://git-wip-us.apache.org/repos/asf/hadoop/blob/9af30d46/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-common/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/subapplication/SubApplicationColumnFamily.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-common/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/subapplication/SubApplicationColumnFamily.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-common/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/subapplication/SubApplicationColumnFamily.java
new file mode 100644
index 0000000..1d7f8fd
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-common/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/subapplication/SubApplicationColumnFamily.java
@@ -0,0 +1,68 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.yarn.server.timelineservice.storage.subapplication;
+
+import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.yarn.server.timelineservice.storage.common.ColumnFamily;
+import org.apache.hadoop.yarn.server.timelineservice.storage.common.Separator;
+
+/**
+ * Represents the sub application table column families.
+ */
+public enum SubApplicationColumnFamily
+    implements ColumnFamily<SubApplicationTable> {
+
+  /**
+   * Info column family houses known columns, specifically ones included in
+   * column family filters.
+   */
+  INFO("i"),
+
+  /**
+   * Configurations are in a separate column family for two reasons:
+   * a) the size of the config values can be very large and
+   * b) we expect that config values
+   * are often separately accessed from other metrics and info columns.
+   */
+  CONFIGS("c"),
+
+  /**
+   * Metrics have a separate column family, because they have a separate TTL.
+   */
+  METRICS("m");
+
+  /**
+   * Byte representation of this column family.
+   */
+  private final byte[] bytes;
+
+  /**
+   * @param value
+   *          create a column family with this name. Must be lower case and
+   *          without spaces.
+   */
+  SubApplicationColumnFamily(String value) {
+    // column families should be lower case and not contain any spaces.
+    this.bytes = Bytes.toBytes(Separator.SPACE.encode(value));
+  }
+
+  public byte[] getBytes() {
+    return Bytes.copy(bytes);
+  }
+
+}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/9af30d46/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-common/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/subapplication/SubApplicationColumnPrefix.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-common/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/subapplication/SubApplicationColumnPrefix.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-common/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/subapplication/SubApplicationColumnPrefix.java
new file mode 100644
index 0000000..1106e37
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-common/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/subapplication/SubApplicationColumnPrefix.java
@@ -0,0 +1,163 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.yarn.server.timelineservice.storage.subapplication;
+
+import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.yarn.server.timelineservice.storage.common.ColumnFamily;
+import org.apache.hadoop.yarn.server.timelineservice.storage.common.ColumnHelper;
+import org.apache.hadoop.yarn.server.timelineservice.storage.common.ColumnPrefix;
+import org.apache.hadoop.yarn.server.timelineservice.storage.common.GenericConverter;
+import org.apache.hadoop.yarn.server.timelineservice.storage.common.LongConverter;
+import org.apache.hadoop.yarn.server.timelineservice.storage.common.Separator;
+import org.apache.hadoop.yarn.server.timelineservice.storage.common.ValueConverter;
+import org.apache.hadoop.yarn.server.timelineservice.storage.flow.Attribute;
+
+/**
+ * Identifies partially qualified columns for the sub app table.
+ */
+public enum SubApplicationColumnPrefix
+    implements ColumnPrefix<SubApplicationTable> {
+
+  /**
+   * To store TimelineEntity getIsRelatedToEntities values.
+   */
+  IS_RELATED_TO(SubApplicationColumnFamily.INFO, "s"),
+
+  /**
+   * To store TimelineEntity getRelatesToEntities values.
+   */
+  RELATES_TO(SubApplicationColumnFamily.INFO, "r"),
+
+  /**
+   * To store TimelineEntity info values.
+   */
+  INFO(SubApplicationColumnFamily.INFO, "i"),
+
+  /**
+   * Lifecycle events for an entity.
+   */
+  EVENT(SubApplicationColumnFamily.INFO, "e", true),
+
+  /**
+   * Config column stores configuration with config key as the column name.
+   */
+  CONFIG(SubApplicationColumnFamily.CONFIGS, null),
+
+  /**
+   * Metrics are stored with the metric name as the column name.
+   */
+  METRIC(SubApplicationColumnFamily.METRICS, null, new LongConverter());
+
+  private final ColumnFamily<SubApplicationTable> columnFamily;
+
+  /**
+   * Can be null for those cases where the provided column qualifier is the
+   * entire column name.
+   */
+  private final String columnPrefix;
+  private final byte[] columnPrefixBytes;
+  private final ValueConverter valueConverter;
+
+  /**
+   * Private constructor, meant to be used by the enum definition.
+   *
+   * @param columnFamily that this column is stored in.
+   * @param columnPrefix for this column.
+   */
+  SubApplicationColumnPrefix(ColumnFamily<SubApplicationTable> columnFamily,
+      String columnPrefix) {
+    this(columnFamily, columnPrefix, false, GenericConverter.getInstance());
+  }
+
+  SubApplicationColumnPrefix(ColumnFamily<SubApplicationTable> columnFamily,
+      String columnPrefix, boolean compoundColQual) {
+    this(columnFamily, columnPrefix, compoundColQual,
+        GenericConverter.getInstance());
+  }
+
+  SubApplicationColumnPrefix(ColumnFamily<SubApplicationTable> columnFamily,
+      String columnPrefix, ValueConverter converter) {
+    this(columnFamily, columnPrefix, false, converter);
+  }
+
+  /**
+   * Private constructor, meant to be used by the enum definition.
+   *
+   * @param columnFamily that this column is stored in.
+   * @param columnPrefix for this column.
+   * @param converter used to encode/decode values to be stored in HBase for
+   * this column prefix.
+   */
+  SubApplicationColumnPrefix(ColumnFamily<SubApplicationTable> columnFamily,
+      String columnPrefix, boolean compoundColQual, ValueConverter converter) {
+    this.valueConverter = converter;
+    this.columnFamily = columnFamily;
+    this.columnPrefix = columnPrefix;
+    if (columnPrefix == null) {
+      this.columnPrefixBytes = null;
+    } else {
+      // Future-proof by ensuring the right column prefix hygiene.
+      this.columnPrefixBytes =
+          Bytes.toBytes(Separator.SPACE.encode(columnPrefix));
+    }
+  }
+
+  /**
+   * @return the column prefix value.
+   */
+  public String getColumnPrefix() {
+    return columnPrefix;
+  }
+
+  @Override
+  public byte[] getColumnPrefixBytes(byte[] qualifierPrefix) {
+    return ColumnHelper.getColumnQualifier(
+        this.columnPrefixBytes, qualifierPrefix);
+  }
+
+  @Override
+  public byte[] getColumnPrefixBytes(String qualifierPrefix) {
+    return ColumnHelper.getColumnQualifier(
+        this.columnPrefixBytes, qualifierPrefix);
+  }
+
+  @Override
+  public byte[] getColumnPrefixInBytes() {
+    return columnPrefixBytes != null ? columnPrefixBytes.clone() : null;
+  }
+
+  @Override
+  public byte[] getColumnFamilyBytes() {
+    return columnFamily.getBytes();
+  }
+
+  @Override
+  public ValueConverter getValueConverter() {
+    return valueConverter;
+  }
+
+  @Override
+  public Attribute[] getCombinedAttrsWithAggr(Attribute... attributes) {
+    return attributes;
+  }
+
+  @Override
+  public boolean supplementCellTimeStamp() {
+    return false;
+  }
+}
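
A minimal sketch of how the null-prefix constants behave (illustrative
only; the config key and event id are made-up values):

  // CONFIG and METRIC declare a null prefix, so the caller's qualifier
  // is the entire column name within the "c" / "m" families.
  byte[] configCol = SubApplicationColumnPrefix.CONFIG
      .getColumnPrefixBytes("mapreduce.map.memory.mb");
  // EVENT columns live in the info family under the "e" prefix.
  byte[] eventCol = SubApplicationColumnPrefix.EVENT
      .getColumnPrefixBytes("CONTAINER_LAUNCHED");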


---------------------------------------------------------------------
To unsubscribe, e-mail: common-commits-unsubscribe@hadoop.apache.org
For additional commands, e-mail: common-commits-help@hadoop.apache.org


[23/50] [abbrv] hadoop git commit: Revert "YARN-7677. Docker image cannot set HADOOP_CONF_DIR. Contributed by Jim Brennan"

Posted by ha...@apache.org.
Revert "YARN-7677. Docker image cannot set HADOOP_CONF_DIR. Contributed by Jim Brennan"

This reverts commit 8013475d447a8377b5aed858208bf8b91dd32366.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/b9a429bb
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/b9a429bb
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/b9a429bb

Branch: refs/heads/HDFS-12996
Commit: b9a429bb2854910add8d4cf787e6ee65ebdfc9cf
Parents: 83e2bb9
Author: Jason Lowe <jl...@apache.org>
Authored: Mon Feb 19 08:16:25 2018 -0600
Committer: Jason Lowe <jl...@apache.org>
Committed: Mon Feb 19 08:16:25 2018 -0600

----------------------------------------------------------------------
 .../java/org/apache/hadoop/yarn/util/Apps.java  |  22 +--
 .../yarn/util/AuxiliaryServiceHelper.java       |   2 +-
 .../server/nodemanager/ContainerExecutor.java   |  62 +++------
 .../nodemanager/LinuxContainerExecutor.java     |   8 ++
 .../launcher/ContainerLaunch.java               |  88 ++++--------
 .../runtime/DefaultLinuxContainerRuntime.java   |   6 +
 .../DelegatingLinuxContainerRuntime.java        |  11 ++
 .../runtime/DockerLinuxContainerRuntime.java    |   7 +
 .../runtime/ContainerRuntime.java               |  11 ++
 .../launcher/TestContainerLaunch.java           | 133 ++-----------------
 10 files changed, 110 insertions(+), 240 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/b9a429bb/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/Apps.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/Apps.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/Apps.java
index 1c90d55..685c6d3 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/Apps.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/Apps.java
@@ -23,7 +23,6 @@ import static org.apache.hadoop.yarn.util.StringHelper.join;
 import static org.apache.hadoop.yarn.util.StringHelper.sjoin;
 
 import java.io.File;
-import java.util.ArrayList;
 import java.util.Iterator;
 import java.util.Map;
 import java.util.regex.Matcher;
@@ -106,26 +105,7 @@ public class Apps {
       }
     }
   }
-
-  /**
-   *
-   * @param envString String containing env variable definitions
-   * @param classPathSeparator String that separates the definitions
-   * @return ArrayList of environment variable names
-   */
-  public static ArrayList<String> getEnvVarsFromInputString(String envString,
-      String classPathSeparator) {
-    ArrayList<String> envList = new ArrayList<>();
-    if (envString != null && envString.length() > 0) {
-      Matcher varValMatcher = VARVAL_SPLITTER.matcher(envString);
-      while (varValMatcher.find()) {
-        String envVar = varValMatcher.group(1);
-        envList.add(envVar);
-      }
-    }
-    return envList;
-  }
-
+  
   /**
    * This older version of this method is kept around for compatibility
    * because downstream frameworks like Spark and Tez have been using it.

http://git-wip-us.apache.org/repos/asf/hadoop/blob/b9a429bb/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/AuxiliaryServiceHelper.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/AuxiliaryServiceHelper.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/AuxiliaryServiceHelper.java
index 1374d96..cb118f5 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/AuxiliaryServiceHelper.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/AuxiliaryServiceHelper.java
@@ -45,7 +45,7 @@ public class AuxiliaryServiceHelper {
         Base64.encodeBase64String(byteData));
   }
 
-  public static String getPrefixServiceName(String serviceName) {
+  private static String getPrefixServiceName(String serviceName) {
     return NM_AUX_SERVICE + serviceName;
   }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/b9a429bb/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/ContainerExecutor.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/ContainerExecutor.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/ContainerExecutor.java
index 01cd992..f4279a3 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/ContainerExecutor.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/ContainerExecutor.java
@@ -27,7 +27,6 @@ import java.net.UnknownHostException;
 import java.util.ArrayList;
 import java.util.Arrays;
 import java.util.List;
-import java.util.LinkedHashSet;
 import java.util.Map;
 import java.util.concurrent.ConcurrentHashMap;
 import java.util.concurrent.ConcurrentMap;
@@ -317,15 +316,14 @@ public abstract class ContainerExecutor implements Configurable {
    * @param command the command that will be run
    * @param logDir the log dir to which to copy debugging information
    * @param user the username of the job owner
-   * @param nmVars the set of environment vars that are explicitly set by NM
    * @throws IOException if any errors happened writing to the OutputStream,
    * while creating symlinks
    */
   public void writeLaunchEnv(OutputStream out, Map<String, String> environment,
       Map<Path, List<String>> resources, List<String> command, Path logDir,
-      String user, LinkedHashSet<String> nmVars) throws IOException {
+      String user) throws IOException {
     this.writeLaunchEnv(out, environment, resources, command, logDir, user,
-        ContainerLaunch.CONTAINER_SCRIPT, nmVars);
+        ContainerLaunch.CONTAINER_SCRIPT);
   }
 
   /**
@@ -341,15 +339,14 @@ public abstract class ContainerExecutor implements Configurable {
    * @param logDir the log dir to which to copy debugging information
    * @param user the username of the job owner
    * @param outFilename the path to which to write the launch environment
-   * @param nmVars the set of environment vars that are explicitly set by NM
    * @throws IOException if any errors happened writing to the OutputStream,
    * while creating symlinks
    */
   @VisibleForTesting
   public void writeLaunchEnv(OutputStream out, Map<String, String> environment,
       Map<Path, List<String>> resources, List<String> command, Path logDir,
-      String user, String outFilename, LinkedHashSet<String> nmVars)
-      throws IOException {
+      String user, String outFilename) throws IOException {
+    updateEnvForWhitelistVars(environment);
 
     ContainerLaunch.ShellScriptBuilder sb =
         ContainerLaunch.ShellScriptBuilder.create();
@@ -364,40 +361,8 @@ public abstract class ContainerExecutor implements Configurable {
 
     if (environment != null) {
       sb.echo("Setting up env variables");
-      // Whitelist environment variables are treated specially.
-      // Only add them if they are not already defined in the environment.
-      // Add them using special syntax to prevent them from eclipsing
-      // variables that may be set explicitly in the container image (e.g,
-      // in a docker image).  Put these before the others to ensure the
-      // correct expansion is used.
-      for(String var : whitelistVars) {
-        if (!environment.containsKey(var)) {
-          String val = getNMEnvVar(var);
-          if (val != null) {
-            sb.whitelistedEnv(var, val);
-          }
-        }
-      }
-      // Now write vars that were set explicitly by nodemanager, preserving
-      // the order they were written in.
-      for (String nmEnvVar : nmVars) {
-        sb.env(nmEnvVar, environment.get(nmEnvVar));
-      }
-      // Now write the remaining environment variables.
       for (Map.Entry<String, String> env : environment.entrySet()) {
-        if (!nmVars.contains(env.getKey())) {
-          sb.env(env.getKey(), env.getValue());
-        }
-      }
-      // Add the whitelist vars to the environment.  Do this after writing
-      // environment variables so they are not written twice.
-      for(String var : whitelistVars) {
-        if (!environment.containsKey(var)) {
-          String val = getNMEnvVar(var);
-          if (val != null) {
-            environment.put(var, val);
-          }
-        }
+        sb.env(env.getKey(), env.getValue());
       }
     }
 
@@ -698,6 +663,23 @@ public abstract class ContainerExecutor implements Configurable {
     }
   }
 
+  /**
+   * Propagate variables from the nodemanager's environment into the
+   * container's environment if unspecified by the container.
+   * @param env the environment to update
+   * @see org.apache.hadoop.yarn.conf.YarnConfiguration#NM_ENV_WHITELIST
+   */
+  protected void updateEnvForWhitelistVars(Map<String, String> env) {
+    for(String var : whitelistVars) {
+      if (!env.containsKey(var)) {
+        String val = getNMEnvVar(var);
+        if (val != null) {
+          env.put(var, val);
+        }
+      }
+    }
+  }
+
   @VisibleForTesting
   protected String getNMEnvVar(String varname) {
     return System.getenv(varname);
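
For context on the hunks above: the deleted writeLaunchEnv logic emitted
whitelisted variables with shell default-value syntax, so a value already
set in the container image won over the nodemanager's value. A minimal
sketch of the difference (illustrative only, using the ShellScriptBuilder
API visible in this diff; the path is a made-up value):

  ContainerLaunch.ShellScriptBuilder sb =
      ContainerLaunch.ShellScriptBuilder.create();
  sb.whitelistedEnv("HADOOP_CONF_DIR", "/etc/hadoop/conf");
  //   emits: export HADOOP_CONF_DIR=${HADOOP_CONF_DIR:-"/etc/hadoop/conf"}
  //   (keeps a value the image already set)
  sb.env("HADOOP_CONF_DIR", "/etc/hadoop/conf");
  //   emits an unconditional assignment, which is the behavior this
  //   revert restores for all variables.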

http://git-wip-us.apache.org/repos/asf/hadoop/blob/b9a429bb/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/LinuxContainerExecutor.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/LinuxContainerExecutor.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/LinuxContainerExecutor.java
index 44edc21..fe54e2c 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/LinuxContainerExecutor.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/LinuxContainerExecutor.java
@@ -66,6 +66,7 @@ import java.net.InetSocketAddress;
 import java.util.ArrayList;
 import java.util.Arrays;
 import java.util.List;
+import java.util.Map;
 import java.util.regex.Pattern;
 
 import static org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.runtime.LinuxContainerRuntimeConstants.*;
@@ -472,6 +473,13 @@ public class LinuxContainerExecutor extends ContainerExecutor {
   }
 
   @Override
+  protected void updateEnvForWhitelistVars(Map<String, String> env) {
+    if (linuxContainerRuntime.useWhitelistEnv(env)) {
+      super.updateEnvForWhitelistVars(env);
+    }
+  }
+
+  @Override
   public int launchContainer(ContainerStartContext ctx)
       throws IOException, ConfigurationException {
     Container container = ctx.getContainer();

http://git-wip-us.apache.org/repos/asf/hadoop/blob/b9a429bb/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/launcher/ContainerLaunch.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/launcher/ContainerLaunch.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/launcher/ContainerLaunch.java
index ca62a5c..112f54a 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/launcher/ContainerLaunch.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/launcher/ContainerLaunch.java
@@ -33,9 +33,7 @@ import java.util.ArrayList;
 import java.util.EnumSet;
 import java.util.HashMap;
 import java.util.List;
-import java.util.LinkedHashSet;
 import java.util.Map;
-import java.util.Set;
 import java.util.Map.Entry;
 import java.util.concurrent.Callable;
 import java.util.concurrent.atomic.AtomicBoolean;
@@ -219,9 +217,6 @@ public class ContainerLaunch implements Callable<Integer> {
           launchContext, containerLogDir);
       // /////////////////////////// End of variable expansion
 
-      // Use this to track variables that are added to the environment by nm.
-      LinkedHashSet<String> nmEnvVars = new LinkedHashSet<String>();
-
       FileContext lfs = FileContext.getLocalFSFileContext();
 
       Path nmPrivateContainerScriptPath = dirsHandler.getLocalPathForWrite(
@@ -266,7 +261,7 @@ public class ContainerLaunch implements Callable<Integer> {
       }
 
       // Set the token location too.
-      addToEnvMap(environment, nmEnvVars,
+      environment.put(
           ApplicationConstants.CONTAINER_TOKEN_FILE_ENV_NAME,
           new Path(containerWorkDir,
               FINAL_CONTAINER_TOKENS_FILE).toUri().getPath());
@@ -277,15 +272,14 @@ public class ContainerLaunch implements Callable<Integer> {
                    EnumSet.of(CREATE, OVERWRITE))) {
         // Sanitize the container's environment
         sanitizeEnv(environment, containerWorkDir, appDirs, userLocalDirs,
-            containerLogDirs, localResources, nmPrivateClasspathJarDir,
-            nmEnvVars);
+            containerLogDirs, localResources, nmPrivateClasspathJarDir);
 
         prepareContainer(localResources, containerLocalDirs);
 
         // Write out the environment
         exec.writeLaunchEnv(containerScriptOutStream, environment,
             localResources, launchContext.getCommands(),
-            containerLogDir, user, nmEnvVars);
+            containerLogDir, user);
       }
       // /////////// End of writing out container-script
 
@@ -1177,9 +1171,6 @@ public class ContainerLaunch implements Callable<Integer> {
 
     public abstract void env(String key, String value) throws IOException;
 
-    public abstract void whitelistedEnv(String key, String value)
-        throws IOException;
-
     public abstract void echo(String echoStr) throws IOException;
 
     public final void symlink(Path src, Path dst) throws IOException {
@@ -1300,11 +1291,6 @@ public class ContainerLaunch implements Callable<Integer> {
     }
 
     @Override
-    public void whitelistedEnv(String key, String value) throws IOException {
-      line("export ", key, "=${", key, ":-", "\"", value, "\"}");
-    }
-
-    @Override
     public void echo(final String echoStr) throws IOException {
       line("echo \"" + echoStr + "\"");
     }
@@ -1395,11 +1381,6 @@ public class ContainerLaunch implements Callable<Integer> {
     }
 
     @Override
-    public void whitelistedEnv(String key, String value) throws IOException {
-      env(key, value);
-    }
-
-    @Override
     public void echo(final String echoStr) throws IOException {
       lineWithLenCheck("@echo \"", echoStr, "\"");
     }
@@ -1454,70 +1435,60 @@ public class ContainerLaunch implements Callable<Integer> {
       putEnvIfNotNull(environment, variable, System.getenv(variable));
     }
   }
-
-  private static void addToEnvMap(
-      Map<String, String> envMap, Set<String> envSet,
-      String envName, String envValue) {
-    envMap.put(envName, envValue);
-    envSet.add(envName);
-  }
-
+  
   public void sanitizeEnv(Map<String, String> environment, Path pwd,
       List<Path> appDirs, List<String> userLocalDirs, List<String>
-      containerLogDirs, Map<Path, List<String>> resources,
-      Path nmPrivateClasspathJarDir,
-      Set<String> nmVars) throws IOException {
+      containerLogDirs,
+      Map<Path, List<String>> resources,
+      Path nmPrivateClasspathJarDir) throws IOException {
     /**
      * Non-modifiable environment variables
      */
 
-    addToEnvMap(environment, nmVars, Environment.CONTAINER_ID.name(),
-        container.getContainerId().toString());
+    environment.put(Environment.CONTAINER_ID.name(), container
+        .getContainerId().toString());
 
-    addToEnvMap(environment, nmVars, Environment.NM_PORT.name(),
+    environment.put(Environment.NM_PORT.name(),
       String.valueOf(this.context.getNodeId().getPort()));
 
-    addToEnvMap(environment, nmVars, Environment.NM_HOST.name(),
-        this.context.getNodeId().getHost());
+    environment.put(Environment.NM_HOST.name(), this.context.getNodeId()
+      .getHost());
 
-    addToEnvMap(environment, nmVars, Environment.NM_HTTP_PORT.name(),
+    environment.put(Environment.NM_HTTP_PORT.name(),
       String.valueOf(this.context.getHttpPort()));
 
-    addToEnvMap(environment, nmVars, Environment.LOCAL_DIRS.name(),
+    environment.put(Environment.LOCAL_DIRS.name(),
         StringUtils.join(",", appDirs));
 
-    addToEnvMap(environment, nmVars, Environment.LOCAL_USER_DIRS.name(),
-        StringUtils.join(",", userLocalDirs));
+    environment.put(Environment.LOCAL_USER_DIRS.name(), StringUtils.join(",",
+        userLocalDirs));
 
-    addToEnvMap(environment, nmVars, Environment.LOG_DIRS.name(),
+    environment.put(Environment.LOG_DIRS.name(),
       StringUtils.join(",", containerLogDirs));
 
-    addToEnvMap(environment, nmVars, Environment.USER.name(),
-        container.getUser());
-
-    addToEnvMap(environment, nmVars, Environment.LOGNAME.name(),
-        container.getUser());
+    environment.put(Environment.USER.name(), container.getUser());
+    
+    environment.put(Environment.LOGNAME.name(), container.getUser());
 
-    addToEnvMap(environment, nmVars, Environment.HOME.name(),
+    environment.put(Environment.HOME.name(),
         conf.get(
             YarnConfiguration.NM_USER_HOME_DIR, 
             YarnConfiguration.DEFAULT_NM_USER_HOME_DIR
             )
         );
-
-    addToEnvMap(environment, nmVars, Environment.PWD.name(), pwd.toString());
+    
+    environment.put(Environment.PWD.name(), pwd.toString());
+    
+    putEnvIfAbsent(environment, Environment.HADOOP_CONF_DIR.name());
 
     if (!Shell.WINDOWS) {
-      addToEnvMap(environment, nmVars, "JVM_PID", "$$");
+      environment.put("JVM_PID", "$$");
     }
 
     // variables here will be forced in, even if the container has specified them.
-    String nmAdminUserEnv = conf.get(
-        YarnConfiguration.NM_ADMIN_USER_ENV,
-        YarnConfiguration.DEFAULT_NM_ADMIN_USER_ENV);
-    Apps.setEnvFromInputString(environment, nmAdminUserEnv, File.pathSeparator);
-    nmVars.addAll(Apps.getEnvVarsFromInputString(nmAdminUserEnv,
-        File.pathSeparator));
+    Apps.setEnvFromInputString(environment, conf.get(
+      YarnConfiguration.NM_ADMIN_USER_ENV,
+      YarnConfiguration.DEFAULT_NM_ADMIN_USER_ENV), File.pathSeparator);
 
     // TODO: Remove Windows check and use this approach on all platforms after
     // additional testing.  See YARN-358.
@@ -1531,7 +1502,6 @@ public class ContainerLaunch implements Callable<Integer> {
         .getAuxServiceMetaData().entrySet()) {
       AuxiliaryServiceHelper.setServiceDataIntoEnv(
           meta.getKey(), meta.getValue(), environment);
-      nmVars.add(AuxiliaryServiceHelper.getPrefixServiceName(meta.getKey()));
     }
   }
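
The net effect of the ContainerLaunch changes above is on the export form written into the launch script: the whitelisted ${VAR:-default} form is dropped in favor of a plain export (see the test expectations later in this patch). A minimal sketch of the two forms, with illustrative class and method names rather than the actual NodeManager code:

    // Minimal sketch, not part of the patch: the two export forms a launch
    // script can carry for a whitelisted variable such as HADOOP_YARN_HOME.
    public class ExportFormsSketch {
      // Form restored by this patch: the NodeManager value is forced in.
      static String plainExport(String key, String value) {
        return "export " + key + "=\"" + value + "\"";
      }
      // Form removed by this patch (whitelistedEnv): a value already present
      // in the container environment wins; the NodeManager value is only a
      // fallback.
      static String whitelistedExport(String key, String value) {
        return "export " + key + "=${" + key + ":-\"" + value + "\"}";
      }
      public static void main(String[] args) {
        // prints: export HADOOP_YARN_HOME="nodemanager_yarn_home"
        System.out.println(plainExport("HADOOP_YARN_HOME", "nodemanager_yarn_home"));
        // prints: export HADOOP_YARN_HOME=${HADOOP_YARN_HOME:-"nodemanager_yarn_home"}
        System.out.println(whitelistedExport("HADOOP_YARN_HOME", "nodemanager_yarn_home"));
      }
    }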
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/b9a429bb/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/DefaultLinuxContainerRuntime.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/DefaultLinuxContainerRuntime.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/DefaultLinuxContainerRuntime.java
index 83380ee..b50d56c 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/DefaultLinuxContainerRuntime.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/DefaultLinuxContainerRuntime.java
@@ -37,6 +37,7 @@ import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
 import java.util.List;
+import java.util.Map;
 
 import static org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.runtime.LinuxContainerRuntimeConstants.*;
 
@@ -73,6 +74,11 @@ public class DefaultLinuxContainerRuntime implements LinuxContainerRuntime {
   }
 
   @Override
+  public boolean useWhitelistEnv(Map<String, String> env) {
+    return true;
+  }
+
+  @Override
   public void prepareContainer(ContainerRuntimeContext ctx)
       throws ContainerExecutionException {
     //nothing to do here at the moment.

http://git-wip-us.apache.org/repos/asf/hadoop/blob/b9a429bb/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/DelegatingLinuxContainerRuntime.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/DelegatingLinuxContainerRuntime.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/DelegatingLinuxContainerRuntime.java
index 675bffb..dd10617 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/DelegatingLinuxContainerRuntime.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/DelegatingLinuxContainerRuntime.java
@@ -94,6 +94,17 @@ public class DelegatingLinuxContainerRuntime implements LinuxContainerRuntime {
     }
   }
 
+  @Override
+  public boolean useWhitelistEnv(Map<String, String> env) {
+    try {
+      LinuxContainerRuntime runtime = pickContainerRuntime(env);
+      return runtime.useWhitelistEnv(env);
+    } catch (ContainerExecutionException e) {
+      LOG.debug("Unable to determine runtime");
+      return false;
+    }
+  }
+
   @VisibleForTesting
   LinuxContainerRuntime pickContainerRuntime(
       Map<String, String> environment) throws ContainerExecutionException {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/b9a429bb/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/DockerLinuxContainerRuntime.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/DockerLinuxContainerRuntime.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/DockerLinuxContainerRuntime.java
index de225e6..401fc4a 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/DockerLinuxContainerRuntime.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/DockerLinuxContainerRuntime.java
@@ -371,6 +371,13 @@ public class DockerLinuxContainerRuntime implements LinuxContainerRuntime {
     return capabilities;
   }
 
+  @Override
+  public boolean useWhitelistEnv(Map<String, String> env) {
+    // Avoid propagating nodemanager environment variables into the container
+    // so those variables can be picked up from the Docker image instead.
+    return false;
+  }
+
   private String runDockerVolumeCommand(DockerVolumeCommand dockerVolumeCommand,
       Container container) throws ContainerExecutionException {
     try {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/b9a429bb/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/runtime/ContainerRuntime.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/runtime/ContainerRuntime.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/runtime/ContainerRuntime.java
index 7caa0ed..aa294fc 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/runtime/ContainerRuntime.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/runtime/ContainerRuntime.java
@@ -24,6 +24,8 @@ import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.yarn.server.nodemanager.containermanager.container.Container;
 
+import java.util.Map;
+
 /**
  * An abstraction for various container runtime implementations. Examples
  * include Process Tree, Docker, Appc runtimes etc. These implementations
@@ -83,4 +85,13 @@ public interface ContainerRuntime {
    * and hostname
    */
   String[] getIpAndHost(Container container) throws ContainerExecutionException;
+
+  /**
+   * Whether to propagate the whitelist of environment variables from the
+   * nodemanager environment into the container environment.
+   * @param env the container's environment variables
+   * @return true if whitelist variables should be propagated, false otherwise
+   * @see org.apache.hadoop.yarn.conf.YarnConfiguration#NM_ENV_WHITELIST
+   */
+  boolean useWhitelistEnv(Map<String, String> env);
 }
\ No newline at end of file
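
The javadoc above defines the contract: a runtime that returns false opts its containers out of whitelist propagation. A hedged, self-contained sketch of how a caller could act on that decision (the interface stub and helper below are stand-ins, not the actual NodeManager code):

    import java.util.HashMap;
    import java.util.Map;

    public class WhitelistDecisionSketch {
      // Stand-in for the useWhitelistEnv() portion of ContainerRuntime.
      interface EnvPolicy {
        boolean useWhitelistEnv(Map<String, String> env);
      }

      // Forward whitelisted NodeManager variables only when the runtime asks
      // for it, and never clobber values the container already set.
      static Map<String, String> buildEnv(EnvPolicy runtime,
          Map<String, String> containerEnv, Map<String, String> nmWhitelist) {
        Map<String, String> merged = new HashMap<>(containerEnv);
        if (runtime.useWhitelistEnv(containerEnv)) {
          nmWhitelist.forEach(merged::putIfAbsent);
        }
        return merged;
      }

      public static void main(String[] args) {
        Map<String, String> containerEnv = new HashMap<>();
        Map<String, String> whitelist = new HashMap<>();
        whitelist.put("HADOOP_YARN_HOME", "nodemanager_yarn_home");
        // Default (process) runtime style: the whitelist is forwarded.
        System.out.println(buildEnv(env -> true, containerEnv, whitelist));
        // Docker runtime style: the image's own values are left to win.
        System.out.println(buildEnv(env -> false, containerEnv, whitelist));
      }
    }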

http://git-wip-us.apache.org/repos/asf/hadoop/blob/b9a429bb/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/launcher/TestContainerLaunch.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/launcher/TestContainerLaunch.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/launcher/TestContainerLaunch.java
index 47e268c..5923f8e 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/launcher/TestContainerLaunch.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/launcher/TestContainerLaunch.java
@@ -41,7 +41,6 @@ import java.util.Arrays;
 import java.util.Collections;
 import java.util.HashMap;
 import java.util.LinkedHashMap;
-import java.util.LinkedHashSet;
 import java.util.List;
 import java.util.Map;
 import java.util.StringTokenizer;
@@ -186,10 +185,8 @@ public class TestContainerLaunch extends BaseContainerManagerTest {
       DefaultContainerExecutor defaultContainerExecutor =
           new DefaultContainerExecutor();
       defaultContainerExecutor.setConf(new YarnConfiguration());
-      LinkedHashSet<String> nmVars = new LinkedHashSet<>();
       defaultContainerExecutor.writeLaunchEnv(fos, env, resources, commands,
-          new Path(localLogDir.getAbsolutePath()), "user", tempFile.getName(),
-          nmVars);
+          new Path(localLogDir.getAbsolutePath()), "user", tempFile.getName());
       fos.flush();
       fos.close();
       FileUtil.setExecutable(tempFile, true);
@@ -263,9 +260,8 @@ public class TestContainerLaunch extends BaseContainerManagerTest {
       DefaultContainerExecutor defaultContainerExecutor =
           new DefaultContainerExecutor();
       defaultContainerExecutor.setConf(new YarnConfiguration());
-      LinkedHashSet<String> nmVars = new LinkedHashSet<>();
       defaultContainerExecutor.writeLaunchEnv(fos, env, resources, commands,
-          new Path(localLogDir.getAbsolutePath()), "user", nmVars);
+          new Path(localLogDir.getAbsolutePath()), "user");
       fos.flush();
       fos.close();
       FileUtil.setExecutable(tempFile, true);
@@ -327,9 +323,8 @@ public class TestContainerLaunch extends BaseContainerManagerTest {
     conf.set(YarnConfiguration.NM_ENV_WHITELIST,
         "HADOOP_MAPRED_HOME,HADOOP_YARN_HOME");
     defaultContainerExecutor.setConf(conf);
-    LinkedHashSet<String> nmVars = new LinkedHashSet<>();
     defaultContainerExecutor.writeLaunchEnv(fos, env, resources, commands,
-        new Path(localLogDir.getAbsolutePath()), "user", nmVars);
+        new Path(localLogDir.getAbsolutePath()), "user");
     String shellContent =
         new String(Files.readAllBytes(Paths.get(shellFile.getAbsolutePath())),
             StandardCharsets.UTF_8);
@@ -342,8 +337,7 @@ public class TestContainerLaunch extends BaseContainerManagerTest {
     Assert.assertFalse(shellContent.contains("HADOOP_HDFS_HOME"));
     // Available in env and in whitelist
     Assert.assertTrue(shellContent.contains(
-        "export HADOOP_YARN_HOME=${HADOOP_YARN_HOME:-\"nodemanager_yarn_home\"}"
-      ));
+        "export HADOOP_YARN_HOME=\"nodemanager_yarn_home\""));
     fos.flush();
     fos.close();
   }
@@ -378,9 +372,8 @@ public class TestContainerLaunch extends BaseContainerManagerTest {
     conf.set(YarnConfiguration.NM_ENV_WHITELIST,
         "HADOOP_MAPRED_HOME,HADOOP_YARN_HOME");
     lce.setConf(conf);
-    LinkedHashSet<String> nmVars = new LinkedHashSet<>();
     lce.writeLaunchEnv(fos, env, resources, commands,
-        new Path(localLogDir.getAbsolutePath()), "user", nmVars);
+        new Path(localLogDir.getAbsolutePath()), "user");
     String shellContent =
         new String(Files.readAllBytes(Paths.get(shellFile.getAbsolutePath())),
             StandardCharsets.UTF_8);
@@ -389,106 +382,13 @@ public class TestContainerLaunch extends BaseContainerManagerTest {
     // Whitelisted variable overridden by container
     Assert.assertTrue(shellContent.contains(
         "export HADOOP_MAPRED_HOME=\"/opt/hadoopbuild\""));
-    // Available in env but not in whitelist
+    // Verify no whitelisted variables inherited from NM env
     Assert.assertFalse(shellContent.contains("HADOOP_HDFS_HOME"));
-    // Available in env and in whitelist
-    Assert.assertTrue(shellContent.contains(
-        "export HADOOP_YARN_HOME=${HADOOP_YARN_HOME:-\"nodemanager_yarn_home\"}"
-    ));
-    fos.flush();
-    fos.close();
-  }
-
-  @Test(timeout = 20000)
-  public void testWriteEnvOrder() throws Exception {
-    // Valid only for unix
-    assumeNotWindows();
-    List<String> commands = new ArrayList<String>();
-
-    // Setup user-defined environment
-    Map<String, String> env = new HashMap<String, String>();
-    env.put("USER_VAR_1", "1");
-    env.put("USER_VAR_2", "2");
-    env.put("NM_MODIFIED_VAR_1", "nm 1");
-    env.put("NM_MODIFIED_VAR_2", "nm 2");
-
-    // These represent vars explicitly set by NM
-    LinkedHashSet<String> trackedNmVars = new LinkedHashSet<>();
-    trackedNmVars.add("NM_MODIFIED_VAR_1");
-    trackedNmVars.add("NM_MODIFIED_VAR_2");
-
-    // Setup Nodemanager environment
-    final Map<String, String> nmEnv = new HashMap<>();
-    nmEnv.put("WHITELIST_VAR_1", "wl 1");
-    nmEnv.put("WHITELIST_VAR_2", "wl 2");
-    nmEnv.put("NON_WHITELIST_VAR_1", "nwl 1");
-    nmEnv.put("NON_WHITELIST_VAR_2", "nwl 2");
-    DefaultContainerExecutor defaultContainerExecutor =
-        new DefaultContainerExecutor() {
-          @Override
-          protected String getNMEnvVar(String varname) {
-            return nmEnv.get(varname);
-          }
-        };
-
-    // Setup conf with whitelisted variables
-    ArrayList<String> whitelistVars = new ArrayList<>();
-    whitelistVars.add("WHITELIST_VAR_1");
-    whitelistVars.add("WHITELIST_VAR_2");
-    YarnConfiguration conf = new YarnConfiguration();
-    conf.set(YarnConfiguration.NM_ENV_WHITELIST,
-        whitelistVars.get(0) + "," + whitelistVars.get(1));
-
-    // These are in the NM env, but not in the whitelist.
-    ArrayList<String> nonWhiteListEnv = new ArrayList<>();
-    nonWhiteListEnv.add("NON_WHITELIST_VAR_1");
-    nonWhiteListEnv.add("NON_WHITELIST_VAR_2");
-
-    // Write the launch script
-    File shellFile = Shell.appendScriptExtension(tmpDir, "hello");
-    Map<Path, List<String>> resources = new HashMap<Path, List<String>>();
-    FileOutputStream fos = new FileOutputStream(shellFile);
-    defaultContainerExecutor.setConf(conf);
-    defaultContainerExecutor.writeLaunchEnv(fos, env, resources, commands,
-        new Path(localLogDir.getAbsolutePath()), "user", trackedNmVars);
+    Assert.assertFalse(shellContent.contains("HADOOP_YARN_HOME"));
     fos.flush();
     fos.close();
-
-    // Examine the script
-    String shellContent =
-        new String(Files.readAllBytes(Paths.get(shellFile.getAbsolutePath())),
-            StandardCharsets.UTF_8);
-    // First make sure everything is there that's supposed to be
-    for (String envVar : env.keySet()) {
-      Assert.assertTrue(shellContent.contains(envVar + "="));
-    }
-    for (String wlVar : whitelistVars) {
-      Assert.assertTrue(shellContent.contains(wlVar + "="));
-    }
-    for (String nwlVar : nonWhiteListEnv) {
-      Assert.assertFalse(shellContent.contains(nwlVar + "="));
-    }
-    // Explicitly Set NM vars should be before user vars
-    for (String nmVar : trackedNmVars) {
-      for (String userVar : env.keySet()) {
-        // Need to skip nm vars and whitelist vars
-        if (!trackedNmVars.contains(userVar) &&
-            !whitelistVars.contains(userVar)) {
-          Assert.assertTrue(shellContent.indexOf(nmVar + "=") <
-              shellContent.indexOf(userVar + "="));
-        }
-      }
-    }
-    // Whitelisted vars should be before explicitly set NM vars
-    for (String wlVar : whitelistVars) {
-      for (String nmVar : trackedNmVars) {
-        Assert.assertTrue(shellContent.indexOf(wlVar + "=") <
-            shellContent.indexOf(nmVar + "="));
-      }
-    }
   }
 
-
   @Test (timeout = 20000)
   public void testInvalidEnvSyntaxDiagnostics() throws IOException  {
 
@@ -510,9 +410,8 @@ public class TestContainerLaunch extends BaseContainerManagerTest {
       DefaultContainerExecutor defaultContainerExecutor =
           new DefaultContainerExecutor();
       defaultContainerExecutor.setConf(new YarnConfiguration());
-      LinkedHashSet<String> nmVars = new LinkedHashSet<>();
       defaultContainerExecutor.writeLaunchEnv(fos, env, resources, commands,
-          new Path(localLogDir.getAbsolutePath()), "user", nmVars);
+          new Path(localLogDir.getAbsolutePath()), "user");
       fos.flush();
       fos.close();
 
@@ -594,9 +493,8 @@ public class TestContainerLaunch extends BaseContainerManagerTest {
       commands.add(command);
       ContainerExecutor exec = new DefaultContainerExecutor();
       exec.setConf(new YarnConfiguration());
-      LinkedHashSet<String> nmVars = new LinkedHashSet<>();
       exec.writeLaunchEnv(fos, env, resources, commands,
-          new Path(localLogDir.getAbsolutePath()), "user", nmVars);
+          new Path(localLogDir.getAbsolutePath()), "user");
       fos.flush();
       fos.close();
 
@@ -687,7 +585,7 @@ public class TestContainerLaunch extends BaseContainerManagerTest {
     Path nmp = new Path(testDir);
 
     launch.sanitizeEnv(userSetEnv, pwd, appDirs, userLocalDirs, containerLogs,
-        resources, nmp, Collections.emptySet());
+        resources, nmp);
 
     List<String> result =
       getJarManifestClasspath(userSetEnv.get(Environment.CLASSPATH.name()));
@@ -706,7 +604,7 @@ public class TestContainerLaunch extends BaseContainerManagerTest {
         dispatcher, exec, null, container, dirsHandler, containerManager);
 
     launch.sanitizeEnv(userSetEnv, pwd, appDirs, userLocalDirs, containerLogs,
-        resources, nmp, Collections.emptySet());
+        resources, nmp);
 
     result =
       getJarManifestClasspath(userSetEnv.get(Environment.CLASSPATH.name()));
@@ -1630,10 +1528,9 @@ public class TestContainerLaunch extends BaseContainerManagerTest {
         FileOutputStream fos = new FileOutputStream(tempFile);
         ContainerExecutor exec = new DefaultContainerExecutor();
         exec.setConf(conf);
-        LinkedHashSet<String> nmVars = new LinkedHashSet<>();
         exec.writeLaunchEnv(fos, env, resources, commands,
             new Path(localLogDir.getAbsolutePath()), "user",
-            tempFile.getName(), nmVars);
+            tempFile.getName());
         fos.flush();
         fos.close();
         FileUtil.setExecutable(tempFile, true);
@@ -1856,9 +1753,8 @@ public class TestContainerLaunch extends BaseContainerManagerTest {
       List<String> commands = new ArrayList<String>();
       DefaultContainerExecutor executor = new DefaultContainerExecutor();
       executor.setConf(new Configuration());
-      LinkedHashSet<String> nmVars = new LinkedHashSet<>();
       executor.writeLaunchEnv(fos, env, resources, commands,
-          new Path(localLogDir.getAbsolutePath()), user, nmVars);
+          new Path(localLogDir.getAbsolutePath()), user);
       fos.flush();
       fos.close();
 
@@ -1902,9 +1798,8 @@ public class TestContainerLaunch extends BaseContainerManagerTest {
       Configuration execConf = new Configuration();
       execConf.setBoolean(YarnConfiguration.NM_LOG_CONTAINER_DEBUG_INFO, false);
       executor.setConf(execConf);
-      LinkedHashSet<String> nmVars = new LinkedHashSet<>();
       executor.writeLaunchEnv(fos, env, resources, commands,
-          new Path(localLogDir.getAbsolutePath()), user, nmVars);
+          new Path(localLogDir.getAbsolutePath()), user);
       fos.flush();
       fos.close();
 




[49/50] [abbrv] hadoop git commit: HADOOP-15007. Stabilize and document Configuration <tag> element. Contributed by Ajay Kumar.

Posted by ha...@apache.org.
HADOOP-15007. Stabilize and document Configuration <tag> element. Contributed by Ajay Kumar.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/3688e491
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/3688e491
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/3688e491

Branch: refs/heads/HDFS-12996
Commit: 3688e491d528edb9efe54c4ef110d2ded62db3e8
Parents: d1cd573
Author: Anu Engineer <ae...@apache.org>
Authored: Fri Feb 23 10:26:22 2018 -0800
Committer: Anu Engineer <ae...@apache.org>
Committed: Fri Feb 23 10:26:22 2018 -0800

----------------------------------------------------------------------
 .../org/apache/hadoop/conf/Configuration.java   | 145 +++++++++++--------
 .../org/apache/hadoop/conf/CorePropertyTag.java |  37 -----
 .../org/apache/hadoop/conf/HDFSPropertyTag.java |  41 ------
 .../org/apache/hadoop/conf/PropertyTag.java     |  30 ----
 .../org/apache/hadoop/conf/YarnPropertyTag.java |  39 -----
 .../fs/CommonConfigurationKeysPublic.java       |   2 +
 .../src/main/resources/core-default.xml         |   8 +
 .../apache/hadoop/conf/TestConfiguration.java   | 121 ++++++++--------
 8 files changed, 153 insertions(+), 270 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/3688e491/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/Configuration.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/Configuration.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/Configuration.java
index f8e4638..00b4702 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/Configuration.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/Configuration.java
@@ -83,7 +83,6 @@ import javax.xml.transform.stream.StreamResult;
 
 import com.google.common.base.Charsets;
 import org.apache.commons.collections.map.UnmodifiableMap;
-import org.apache.commons.io.FilenameUtils;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
@@ -195,6 +194,30 @@ import static org.apache.commons.lang3.StringUtils.isNotBlank;
  * parameters and these are suppressible by configuring
  * <tt>log4j.logger.org.apache.hadoop.conf.Configuration.deprecation</tt> in
  * log4j.properties file.
+ *
+ * <h4 id="Tags">Tags</h4>
+ *
+ * <p>Optionally we can tag related properties together by using tag
+ * attributes. System tags are defined by the hadoop.system.tags property.
+ * Users can define their own custom tags in the hadoop.custom.tags property.
+ *
+ * <p>For example, we can tag existing property as:
+ * <tt><pre>
+ *  &lt;property&gt;
+ *    &lt;name&gt;dfs.replication&lt;/name&gt;
+ *    &lt;value&gt;3&lt;/value&gt;
+ *    &lt;tag&gt;HDFS,REQUIRED&lt;/tag&gt;
+ *  &lt;/property&gt;
+ *
+ *  &lt;property&gt;
+ *    &lt;name&gt;dfs.data.transfer.protection&lt;/name&gt;
+ *    &lt;value&gt;3&lt;/value&gt;
+ *    &lt;tag&gt;HDFS,SECURITY&lt;/tag&gt;
+ *  &lt;/property&gt;
+ * </pre></tt>
+ * <p>Properties marked with tags can be retrieved with
+ * <tt>conf.getAllPropertiesByTag("HDFS")</tt> or
+ * <tt>conf.getAllPropertiesByTags(Arrays.asList("YARN","SECURITY"))</tt>.</p>
  */
 @InterfaceAudience.Public
 @InterfaceStability.Stable
@@ -206,6 +229,7 @@ public class Configuration implements Iterable<Map.Entry<String,String>>,
   private static final Logger LOG_DEPRECATION =
       LoggerFactory.getLogger(
           "org.apache.hadoop.conf.Configuration.deprecation");
+  private static final Set<String> TAGS = new HashSet<>();
 
   private boolean quietmode = true;
 
@@ -297,14 +321,9 @@ public class Configuration implements Iterable<Map.Entry<String,String>>,
     new WeakHashMap<Configuration,Object>();
 
   /**
-   * Map to register all classes holding property tag enums.
-   */
-  private static final Map<String, Class>
-      REGISTERED_TAG_CLASS = new HashMap<>();
-  /**
   * Map to hold properties by their tag groupings.
    */
-  private final Map<PropertyTag, Properties> propertyTagsMap =
+  private final Map<String, Properties> propertyTagsMap =
       new ConcurrentHashMap<>();
 
   /**
@@ -785,11 +804,6 @@ public class Configuration implements Iterable<Map.Entry<String,String>>,
   public Configuration(boolean loadDefaults) {
     this.loadDefaults = loadDefaults;
 
-    // Register all classes holding property tags with
-    REGISTERED_TAG_CLASS.put("core", CorePropertyTag.class);
-    REGISTERED_TAG_CLASS.put("hdfs", HDFSPropertyTag.class);
-    REGISTERED_TAG_CLASS.put("yarn", YarnPropertyTag.class);
-
     synchronized(Configuration.class) {
       REGISTRY.put(this, null);
     }
@@ -820,7 +834,6 @@ public class Configuration implements Iterable<Map.Entry<String,String>>,
       this.finalParameters = Collections.newSetFromMap(
           new ConcurrentHashMap<String, Boolean>());
       this.finalParameters.addAll(other.finalParameters);
-      this.REGISTERED_TAG_CLASS.putAll(other.REGISTERED_TAG_CLASS);
       this.propertyTagsMap.putAll(other.propertyTagsMap);
     }
 
@@ -2919,6 +2932,7 @@ public class Configuration implements Iterable<Map.Entry<String,String>>,
         resources.set(i, ret);
       }
     }
+    this.removeUndeclaredTags(properties);
   }
   
   private Resource loadResource(Properties properties,
@@ -3123,7 +3137,7 @@ public class Configuration implements Iterable<Map.Entry<String,String>>,
               break;
             }
             confSource.add(name);
-            //Read tags and put them in propertyTagsMap
+            // Read tags and put them in propertyTagsMap
             if (confTag != null) {
               readTagFromConfig(confTag, confName, confValue, confSource);
             }
@@ -3165,48 +3179,61 @@ public class Configuration implements Iterable<Map.Entry<String,String>>,
     }
   }
 
+  /**
+   * Removes undeclared tags and related properties from propertyTagsMap.
+   * It is required because the ordering of properties in XML config files
+   * is not guaranteed.
+   * @param prop
+   */
+  private void removeUndeclaredTags(Properties prop) {
+    // Get all system tags
+    if (prop.containsKey(CommonConfigurationKeys.HADOOP_SYSTEM_TAGS)){
+      String systemTags = prop.getProperty(CommonConfigurationKeys
+              .HADOOP_SYSTEM_TAGS);
+      Arrays.stream(systemTags.split(",")).forEach(tag -> TAGS.add(tag));
+    }
+    // Get all custom tags
+    if (prop.containsKey(CommonConfigurationKeys.HADOOP_CUSTOM_TAGS)) {
+      String customTags = prop.getProperty(CommonConfigurationKeys
+          .HADOOP_CUSTOM_TAGS);
+      Arrays.stream(customTags.split(",")).forEach(tag -> TAGS.add(tag));
+    }
+
+    Set undeclaredTags = propertyTagsMap.keySet();
+    if (undeclaredTags.retainAll(TAGS)) {
+      LOG.info("Removed undeclared tags:");
+    }
+  }
+
+  /**
+   * Read the values passed as tags and store them in a
+   * map for later retrieval.
+   * @param attributeValue
+   * @param confName
+   * @param confValue
+   * @param confSource
+   */
   private void readTagFromConfig(String attributeValue, String confName, String
       confValue, List<String> confSource) {
     for (String tagStr : attributeValue.split(",")) {
       tagStr = tagStr.trim();
       try {
-        if (confSource.size() > 0) {
-          for (String source : confSource) {
-            PropertyTag tag1 = this.getPropertyTag(tagStr,
-                FilenameUtils.getName(source).split("-")[0]);
-            if (tag1 != null) {
-              //Handle property with no/null value
-              if (confValue == null) {
-                confValue = "";
-              }
-              if (propertyTagsMap.containsKey(tag1)) {
-                propertyTagsMap.get(tag1).setProperty(confName, confValue);
-              } else {
-                Properties props = new Properties();
-                props.setProperty(confName, confValue);
-                propertyTagsMap.put(tag1, props);
-              }
-            }
-          }
+        // Handle property with no/null value
+        if (confValue == null) {
+          confValue = "";
+        }
+        if (propertyTagsMap.containsKey(tagStr)) {
+          propertyTagsMap.get(tagStr).setProperty(confName, confValue);
         } else {
-          // If no source is set try to find tag in CorePropertyTag
-          if (propertyTagsMap.containsKey(CorePropertyTag.valueOf(tagStr))) {
-            propertyTagsMap.get(CorePropertyTag.valueOf(tagStr))
-                .setProperty(confName, confValue);
-          } else {
-            Properties props = new Properties();
-            props.setProperty(confName, confValue);
-            propertyTagsMap.put(CorePropertyTag.valueOf(tagStr),
-                props);
-          }
+          Properties props = new Properties();
+          props.setProperty(confName, confValue);
+          propertyTagsMap.put(tagStr, props);
         }
       } catch (Exception ex) {
-        // Log the invalid tag and continue to parse rest of the properties.
-        LOG.info("Invalid tag '" + tagStr + "' found for "
-            + "property:" + confName + " Source:" + Arrays
-            .toString(confSource.toArray()), ex);
+        // Log the exception at trace level.
+        LOG.trace("Tag '{}' for property:{} Source:{}", tagStr, confName,
+            Arrays.toString(confSource.toArray()), ex);
       }
-
     }
   }
 
@@ -3690,9 +3717,10 @@ public class Configuration implements Iterable<Map.Entry<String,String>>,
 
   /**
    * Get all properties belonging to tag.
-   * @return Properties with matching properties
+   * @param tag tag
+   * @return Properties with matching tag
    */
-  public Properties getAllPropertiesByTag(final PropertyTag tag) {
+  public Properties getAllPropertiesByTag(final String tag) {
     Properties props = new Properties();
     if (propertyTagsMap.containsKey(tag)) {
       props.putAll(propertyTagsMap.get(tag));
@@ -3703,12 +3731,12 @@ public class Configuration implements Iterable<Map.Entry<String,String>>,
   /**
    * Get all properties belonging to list of input tags. Calls
    * getAllPropertiesByTag internally.
-   *
-   * @return Properties with all matching properties
+   * @param tagList list of input tags
+   * @return Properties with matching tags
    */
-  public Properties getAllPropertiesByTags(final List<PropertyTag> tagList) {
+  public Properties getAllPropertiesByTags(final List<String> tagList) {
     Properties prop = new Properties();
-    for (PropertyTag tag : tagList) {
+    for (String tag : tagList) {
       prop.putAll(this.getAllPropertiesByTag(tag));
     }
     return prop;
@@ -3718,15 +3746,10 @@ public class Configuration implements Iterable<Map.Entry<String,String>>,
    * Get Property tag Enum corresponding to given source.
    *
    * @param tagStr String representation of Enum
-   * @param group Group to which enum belongs.Ex hdfs,yarn
-   * @return Properties with all matching properties
+   * @return true if tagStr is a valid tag
    */
-  private PropertyTag getPropertyTag(String tagStr, String group) {
-    PropertyTag tag = null;
-    if (REGISTERED_TAG_CLASS.containsKey(group)) {
-      tag = (PropertyTag) Enum.valueOf(REGISTERED_TAG_CLASS.get(group), tagStr);
-    }
-    return tag;
+  public boolean isPropertyTag(String tagStr) {
+    return this.TAGS.contains(tagStr);
   }
 
   private void putIntoUpdatingResource(String key, String[] value) {
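
Taken together, the reworked API is plain-String end to end. A short usage sketch against the methods changed above, assuming a site file whose properties carry <tag> elements as in the class javadoc (the printed counts depend entirely on the loaded resources):

    import java.util.Arrays;
    import java.util.Properties;
    import org.apache.hadoop.conf.Configuration;

    public class TagLookupSketch {
      public static void main(String[] args) {
        Configuration conf = new Configuration();
        // Tags are read while resources load; only tags declared via
        // hadoop.system.tags or hadoop.custom.tags are retained.
        Properties hdfsProps = conf.getAllPropertiesByTag("HDFS");
        Properties props = conf.getAllPropertiesByTags(
            Arrays.asList("YARN", "SECURITY"));
        boolean known = conf.isPropertyTag("HDFS");
        System.out.println(hdfsProps.size() + " " + props.size() + " " + known);
      }
    }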

http://git-wip-us.apache.org/repos/asf/hadoop/blob/3688e491/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/CorePropertyTag.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/CorePropertyTag.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/CorePropertyTag.java
deleted file mode 100644
index 54a75b8..0000000
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/CorePropertyTag.java
+++ /dev/null
@@ -1,37 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with this
- * work for additional information regarding copyright ownership.  The ASF
- * licenses this file to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations under
- * the License.
- */
-package org.apache.hadoop.conf;
-
-import org.apache.hadoop.classification.InterfaceAudience;
-import org.apache.hadoop.classification.InterfaceStability;
-
-/***************************************************************************
- * Enum for tagging hadoop core properties according to there usage.
- * CorePropertyTag implements the
- * {@link org.apache.hadoop.conf.PropertyTag} interface,
- ***************************************************************************/
-@InterfaceAudience.Private
-@InterfaceStability.Evolving
-public enum CorePropertyTag implements PropertyTag {
-  CORE,
-  REQUIRED,
-  PERFORMANCE,
-  CLIENT,
-  SERVER,
-  SECURITY,
-  DEBUG
-}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hadoop/blob/3688e491/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/HDFSPropertyTag.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/HDFSPropertyTag.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/HDFSPropertyTag.java
deleted file mode 100644
index 02dfb86..0000000
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/HDFSPropertyTag.java
+++ /dev/null
@@ -1,41 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with this
- * work for additional information regarding copyright ownership.  The ASF
- * licenses this file to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations under
- * the License.
- */
-package org.apache.hadoop.conf;
-
-import org.apache.hadoop.classification.InterfaceAudience;
-import org.apache.hadoop.classification.InterfaceStability;
-
-/***************************************************************************
- * Enum for tagging hdfs properties according to there usage or application.
- * HDFSPropertyTag implements the
- * {@link org.apache.hadoop.conf.PropertyTag} interface,
- ***************************************************************************/
-@InterfaceAudience.Private
-@InterfaceStability.Evolving
-public enum HDFSPropertyTag implements PropertyTag {
-  HDFS,
-  NAMENODE,
-  DATANODE,
-  REQUIRED,
-  SECURITY,
-  KERBEROS,
-  PERFORMANCE,
-  CLIENT,
-  SERVER,
-  DEBUG,
-  DEPRICATED
-}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/3688e491/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/PropertyTag.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/PropertyTag.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/PropertyTag.java
deleted file mode 100644
index df8d4f9..0000000
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/PropertyTag.java
+++ /dev/null
@@ -1,30 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with this
- * work for additional information regarding copyright ownership.  The ASF
- * licenses this file to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations under
- * the License.
- */
-package org.apache.hadoop.conf;
-
-import org.apache.hadoop.classification.InterfaceAudience;
-import org.apache.hadoop.classification.InterfaceStability;
-
-/***********************************************************
- * PropertyTag is used for creating extendable property tag Enums.
- * Property tags will group related properties together.
- ***********************************************************/
-@InterfaceAudience.Private
-@InterfaceStability.Evolving
-public interface PropertyTag {
-
-}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hadoop/blob/3688e491/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/YarnPropertyTag.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/YarnPropertyTag.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/YarnPropertyTag.java
deleted file mode 100644
index e7a9c79..0000000
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/YarnPropertyTag.java
+++ /dev/null
@@ -1,39 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with this
- * work for additional information regarding copyright ownership.  The ASF
- * licenses this file to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations under
- * the License.
- */
-package org.apache.hadoop.conf;
-
-import org.apache.hadoop.classification.InterfaceAudience;
-import org.apache.hadoop.classification.InterfaceStability;
-
-/***************************************************************************
- * Enum for tagging yarn properties according to there usage or application.
- * YarnPropertyTag implements the
- * {@link org.apache.hadoop.conf.PropertyTag} interface,
- ***************************************************************************/
-@InterfaceAudience.Private
-@InterfaceStability.Evolving
-public enum YarnPropertyTag implements PropertyTag {
-  YARN,
-  RESOURCEMANAGER,
-  SECURITY,
-  KERBEROS,
-  PERFORMANCE,
-  CLIENT,
-  REQUIRED,
-  SERVER,
-  DEBUG
-}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/3688e491/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CommonConfigurationKeysPublic.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CommonConfigurationKeysPublic.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CommonConfigurationKeysPublic.java
index 3c8628c..bbc892c 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CommonConfigurationKeysPublic.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CommonConfigurationKeysPublic.java
@@ -860,5 +860,7 @@ public class CommonConfigurationKeysPublic {
           "credential$",
           "oauth.*token$",
           HADOOP_SECURITY_SENSITIVE_CONFIG_KEYS);
+  public static final String HADOOP_SYSTEM_TAGS = "hadoop.system.tags";
+  public static final String HADOOP_CUSTOM_TAGS = "hadoop.custom.tags";
 }
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/3688e491/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml b/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml
index ece54c4..b5163a1 100644
--- a/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml
+++ b/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml
@@ -2959,4 +2959,12 @@
       HADOOP-13805,HADOOP-13558.
     </description>
   </property>
+  <property>
+    <name>hadoop.system.tags</name>
+    <value>YARN,HDFS,NAMENODE,DATANODE,REQUIRED,SECURITY,KERBEROS,PERFORMANCE,CLIENT
+      ,SERVER,DEBUG,DEPRICATED,COMMON,OPTIONAL</value>
+    <description>
+      System tags to group related properties together.
+    </description>
+  </property>
 </configuration>
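
A site file can extend this list through hadoop.custom.tags; a hedged example mirroring the new TestConfiguration case (property names and values here are illustrative):

    <property>
      <name>hadoop.custom.tags</name>
      <value>MYCUSTOMTAG</value>
    </property>
    <property>
      <name>dfs.random.key</name>
      <value>XYZ</value>
      <tag>MYCUSTOMTAG</tag>
    </property>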

http://git-wip-us.apache.org/repos/asf/hadoop/blob/3688e491/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestConfiguration.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestConfiguration.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestConfiguration.java
index 24ec4fc..c9dd7cc 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestConfiguration.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestConfiguration.java
@@ -26,11 +26,14 @@ import java.io.FileWriter;
 import java.io.IOException;
 import java.io.InputStream;
 import java.io.OutputStreamWriter;
+import java.io.PrintStream;
 import java.io.StringWriter;
 import java.net.InetAddress;
 import java.net.InetSocketAddress;
 import java.net.URI;
 import java.nio.charset.StandardCharsets;
+import java.nio.file.Files;
+import java.nio.file.Paths;
 import java.util.ArrayList;
 import java.util.Arrays;
 import java.util.Collection;
@@ -2339,22 +2342,29 @@ public class TestConfiguration {
   @Test
   public void testGetAllPropertiesByTags() throws Exception {
 
-    out = new BufferedWriter(new FileWriter(CONFIG_CORE));
-    startConfig();
-    appendPropertyByTag("dfs.cblock.trace.io", "false", "DEBUG");
-    appendPropertyByTag("dfs.replication", "1", "PERFORMANCE,REQUIRED");
-    appendPropertyByTag("dfs.namenode.logging.level", "INFO", "CLIENT,DEBUG");
-    endConfig();
+    try{
+      out = new BufferedWriter(new FileWriter(CONFIG_CORE));
+      startConfig();
+      appendProperty("hadoop.system.tags", "YARN,HDFS,NAMENODE");
+      appendProperty("hadoop.custom.tags", "MYCUSTOMTAG");
+      appendPropertyByTag("dfs.cblock.trace.io", "false", "YARN");
+      appendPropertyByTag("dfs.replication", "1", "HDFS");
+      appendPropertyByTag("dfs.namenode.logging.level", "INFO", "NAMENODE");
+      appendPropertyByTag("dfs.random.key", "XYZ", "MYCUSTOMTAG");
+      endConfig();
 
-    Path fileResource = new Path(CONFIG_CORE);
-    conf.addResource(fileResource);
-    conf.getProps();
+      Path fileResource = new Path(CONFIG_CORE);
+      conf.addResource(fileResource);
+      conf.getProps();
 
-    List<PropertyTag> tagList = new ArrayList<>();
-    tagList.add(CorePropertyTag.REQUIRED);
-    tagList.add(CorePropertyTag.PERFORMANCE);
-    tagList.add(CorePropertyTag.DEBUG);
-    tagList.add(CorePropertyTag.CLIENT);
+    } finally {
+      out.close();
+    }
+    System.out.println(Files.readAllLines(Paths.get(CONFIG_CORE)));
+    List<String> tagList = new ArrayList<>();
+    tagList.add("YARN");
+    tagList.add("HDFS");
+    tagList.add("NAMENODE");
 
     Properties properties = conf.getAllPropertiesByTags(tagList);
     String[] sources = conf.getPropertySources("dfs.replication");
@@ -2366,58 +2376,45 @@ public class TestConfiguration {
     assertEq(true, properties.containsKey("dfs.replication"));
     assertEq(true, properties.containsKey("dfs.cblock.trace.io"));
     assertEq(false, properties.containsKey("namenode.host"));
+
+    properties = conf.getAllPropertiesByTag("DEBUG");
+    assertEq(0, properties.size());
+    assertEq(false, properties.containsKey("dfs.namenode.logging.level"));
+    assertEq(true, conf.isPropertyTag("YARN"));
+    assertEq(true, conf.isPropertyTag("HDFS"));
+    assertEq(true, conf.isPropertyTag("NAMENODE"));
+    assertEq(true, conf.isPropertyTag("MYCUSTOMTAG"));
+    assertEq(false, conf.isPropertyTag("CMYCUSTOMTAG2"));
   }
 
   @Test
-  public void testGetAllPropertiesWithSourceByTags() throws Exception {
-
-    out = new BufferedWriter(new FileWriter(CONFIG));
-    startConfig();
-    appendPropertyByTag("dfs.cblock.trace.io", "false", "DEBUG",
-        "hdfs-default.xml", "core-site.xml");
-    appendPropertyByTag("dfs.replication", "1", "PERFORMANCE,HDFS",
-        "hdfs-default.xml");
-    appendPropertyByTag("yarn.resourcemanager.work-preserving-recovery"
-        + ".enabled", "INFO", "CLIENT,DEBUG", "yarn-default.xml", "yarn-site"
-        + ".xml");
-    endConfig();
-
-    Path fileResource = new Path(CONFIG);
-    conf.addResource(fileResource);
-    conf.getProps();
-
-    List<PropertyTag> tagList = new ArrayList<>();
-    tagList.add(CorePropertyTag.REQUIRED);
-
-    Properties properties;
-    properties = conf.getAllPropertiesByTags(tagList);
-    assertNotEquals(3, properties.size());
-
-    tagList.add(HDFSPropertyTag.DEBUG);
-    tagList.add(YarnPropertyTag.CLIENT);
-    tagList.add(HDFSPropertyTag.PERFORMANCE);
-    tagList.add(HDFSPropertyTag.HDFS);
-    properties = conf.getAllPropertiesByTags(tagList);
-    assertEq(3, properties.size());
+  public void testInvalidTags() throws Exception {
+    PrintStream output = System.out;
+    try {
+      ByteArrayOutputStream bytes = new ByteArrayOutputStream();
+      System.setOut(new PrintStream(bytes));
 
-    assertEq(true, properties.containsKey("dfs.cblock.trace.io"));
-    assertEq(true, properties.containsKey("dfs.replication"));
-    assertEq(true, properties
-        .containsKey("yarn.resourcemanager.work-preserving-recovery.enabled"));
-    assertEq(false, properties.containsKey("namenode.host"));
+      out = new BufferedWriter(new FileWriter(CONFIG));
+      startConfig();
+      appendPropertyByTag("dfs.cblock.trace.io", "false", "MYOWNTAG,TAG2");
+      endConfig();
 
-    tagList.clear();
-    tagList.add(HDFSPropertyTag.DEBUG);
-    properties = conf.getAllPropertiesByTags(tagList);
-    assertEq(true, properties.containsKey("dfs.cblock.trace.io"));
-    assertEq(false, properties.containsKey("yarn.resourcemanager"
-        + ".work-preserving-recovery"));
-
-    tagList.clear();
-    tagList.add(YarnPropertyTag.DEBUG);
-    properties = conf.getAllPropertiesByTags(tagList);
-    assertEq(false, properties.containsKey("dfs.cblock.trace.io"));
-    assertEq(true, properties.containsKey("yarn.resourcemanager"
-        + ".work-preserving-recovery.enabled"));
+      Path fileResource = new Path(CONFIG);
+      conf.addResource(fileResource);
+      conf.getProps();
+
+      List<String> tagList = new ArrayList<>();
+      tagList.add("REQUIRED");
+      tagList.add("MYOWNTAG");
+      tagList.add("TAG2");
+
+      Properties properties = conf.getAllPropertiesByTags(tagList);
+      assertEq(0, properties.size());
+      assertFalse(properties.containsKey("dfs.cblock.trace.io"));
+      assertFalse(bytes.toString().contains("Invalid tag "));
+      assertFalse(bytes.toString().contains("Tag"));
+    } finally {
+      System.setOut(output);
+    }
   }
 }

