Posted to common-commits@hadoop.apache.org by su...@apache.org on 2018/10/31 18:23:12 UTC

[01/50] [abbrv] hadoop git commit: HDDS-703. Ozone docs does not render correctly on a Mobile Device. Contributed by Elek, Marton.

Repository: hadoop
Updated Branches:
  refs/heads/HDFS-12943 ddca0cf68 -> 8b5277fd1


HDDS-703. Ozone docs does not render correctly on a Mobile Device. Contributed by Elek, Marton.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/c7f349bc
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/c7f349bc
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/c7f349bc

Branch: refs/heads/HDFS-12943
Commit: c7f349bc3f9640c6beddc3ea25f35d1297190587
Parents: 2388712
Author: Márton Elek <el...@apache.org>
Authored: Thu Oct 25 15:00:39 2018 +0200
Committer: Márton Elek <el...@apache.org>
Committed: Thu Oct 25 15:00:39 2018 +0200

----------------------------------------------------------------------
 .../docs/themes/ozonedoc/layouts/partials/navbar.html        | 5 +++--
 .../docs/themes/ozonedoc/layouts/partials/sidebar.html       | 8 +++++++-
 2 files changed, 10 insertions(+), 3 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/c7f349bc/hadoop-ozone/docs/themes/ozonedoc/layouts/partials/navbar.html
----------------------------------------------------------------------
diff --git a/hadoop-ozone/docs/themes/ozonedoc/layouts/partials/navbar.html b/hadoop-ozone/docs/themes/ozonedoc/layouts/partials/navbar.html
index 3cd8609..316f2cc 100644
--- a/hadoop-ozone/docs/themes/ozonedoc/layouts/partials/navbar.html
+++ b/hadoop-ozone/docs/themes/ozonedoc/layouts/partials/navbar.html
@@ -17,13 +17,14 @@
 <nav class="navbar navbar-inverse navbar-fixed-top">
   <div class="container-fluid">
     <div class="navbar-header">
-      <button type="button" class="navbar-toggle collapsed" data-toggle="collapse" data-target="#navbar" aria-expanded="false" aria-controls="navbar">
+      <button type="button" class="navbar-toggle collapsed" data-toggle="collapse" data-target="#sidebar" aria-expanded="false" aria-controls="navbar">
         <span class="sr-only">Toggle navigation</span>
         <span class="icon-bar"></span>
         <span class="icon-bar"></span>
         <span class="icon-bar"></span>
       </button>
-      <a class="navbar-brand" href="#">Apache Hadoop Ozone/HDDS documentation</a>
+      <a class="navbar-brand hidden-xs" href="#">Apache Hadoop Ozone/HDDS documentation</a>
+      <a class="navbar-brand visible-xs-inline" href="#">Hadoop Ozone</a>
     </div>
     <div id="navbar" class="navbar-collapse collapse">
       <ul class="nav navbar-nav navbar-right">

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c7f349bc/hadoop-ozone/docs/themes/ozonedoc/layouts/partials/sidebar.html
----------------------------------------------------------------------
diff --git a/hadoop-ozone/docs/themes/ozonedoc/layouts/partials/sidebar.html b/hadoop-ozone/docs/themes/ozonedoc/layouts/partials/sidebar.html
index 7fae50d..b8471b9 100644
--- a/hadoop-ozone/docs/themes/ozonedoc/layouts/partials/sidebar.html
+++ b/hadoop-ozone/docs/themes/ozonedoc/layouts/partials/sidebar.html
@@ -14,7 +14,7 @@
   See the License for the specific language governing permissions and
   limitations under the License.
 -->
-<div class="col-sm-3 col-md-2 sidebar">
+<div class="col-sm-3 col-md-2 sidebar" id="sidebar">
   <img src="ozone-logo.png" style="max-width: 100%;"/>
   <ul class="nav nav-sidebar">
     {{ $currentPage := . }}
@@ -47,6 +47,12 @@
             </li>
         {{ end }}
     {{ end }}
+    <li class="visible-xs"><a href="#">References</a>
+    <ul class="nav">
+        <li><a href="https://github.com/apache/hadoop"><span class="glyphicon glyphicon-new-window" aria-hidden="true"></span> Source</a></li>
+        <li><a href="https://hadoop.apache.org"><span class="glyphicon glyphicon-new-window" aria-hidden="true"></span> Apache Hadoop</a></li>
+        <li><a href="https://apache.org"><span class="glyphicon glyphicon-new-window" aria-hidden="true"></span> ASF</a></li>
+    </ul></li>
   </ul>
 
 </div>


---------------------------------------------------------------------
To unsubscribe, e-mail: common-commits-unsubscribe@hadoop.apache.org
For additional commands, e-mail: common-commits-help@hadoop.apache.org


[43/50] [abbrv] hadoop git commit: HDFS-13942. [JDK10] Fix javadoc errors in hadoop-hdfs module. Contributed by Dinesh Chitlangia.

Posted by su...@apache.org.
http://git-wip-us.apache.org/repos/asf/hadoop/blob/fac9f91b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/Quota.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/Quota.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/Quota.java
index 5e708be..a195bf1 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/Quota.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/Quota.java
@@ -47,7 +47,7 @@ public enum Quota {
 
   /**
    * Is quota violated?
-   * The quota is violated if quota is set and usage > quota. 
+   * The quota is violated if quota is set and usage &gt; quota.
    */
   public static boolean isViolated(final long quota, final long usage) {
     return quota >= 0 && usage > quota;
@@ -55,7 +55,8 @@ public enum Quota {
 
   /**
    * Is quota violated?
-   * The quota is violated if quota is set, delta > 0 and usage + delta > quota.
+   * The quota is violated if quota is set, delta &gt; 0 and
+   * usage + delta &gt; quota.
    */
   static boolean isViolated(final long quota, final long usage,
       final long delta) {

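The corrected javadoc states the rule the method body already implements: a negative quota means "not set", so nothing can be violated. A small worked illustration of that rule (sample values are made up, and the helper below is a standalone sketch, not the Quota enum itself):

    // Sketch of the documented rule: quota < 0 means "no quota configured".
    static boolean violated(long quota, long usage) {
      return quota >= 0 && usage > quota;
    }

    // violated(100, 101) -> true   (usage exceeds the quota)
    // violated(100, 100) -> false  (equal usage is still within quota)
    // violated(-1, 5000) -> false  (no quota set)

The delta variant in the same hunk follows the same pattern, additionally requiring delta > 0 before usage + delta is compared against the quota.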
http://git-wip-us.apache.org/repos/asf/hadoop/blob/fac9f91b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ReencryptionHandler.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ReencryptionHandler.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ReencryptionHandler.java
index a8acccd..2e13df5 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ReencryptionHandler.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ReencryptionHandler.java
@@ -319,7 +319,7 @@ public class ReencryptionHandler implements Runnable {
   /**
    * Main loop. It takes at most 1 zone per scan, and executes until the zone
    * is completed.
-   * {@see #reencryptEncryptionZoneInt(Long)}.
+   * {@link #reencryptEncryptionZone(long)}.
    */
   @Override
   public void run() {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/fac9f91b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/XAttrPermissionFilter.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/XAttrPermissionFilter.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/XAttrPermissionFilter.java
index e1bf027..b6f4f64 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/XAttrPermissionFilter.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/XAttrPermissionFilter.java
@@ -31,7 +31,7 @@ import com.google.common.base.Preconditions;
 import static org.apache.hadoop.hdfs.server.common.HdfsServerConstants.SECURITY_XATTR_UNREADABLE_BY_SUPERUSER;
 
 /**
- * There are four types of extended attributes <XAttr> defined by the
+ * There are four types of extended attributes &lt;XAttr&gt; defined by the
  * following namespaces:
  * <br>
  * USER - extended user attributes: these can be assigned to files and
@@ -56,7 +56,7 @@ import static org.apache.hadoop.hdfs.server.common.HdfsServerConstants.SECURITY_
  *   is called on a file or directory in the /.reserved/raw HDFS directory
  *   hierarchy. These attributes can only be accessed by the user who have
  *   read access.
- * </br>
+ * <br>
  */
 @InterfaceAudience.Private
 public class XAttrPermissionFilter {

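The namespaces described in this class comment surface to clients as name prefixes on the public FileSystem xattr API, subject to the permission rules this filter enforces. A minimal sketch of reading and writing a USER-namespace attribute (the path and attribute name below are invented for illustration):

    import java.nio.charset.StandardCharsets;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    public class UserXAttrSketch {
      public static void main(String[] args) throws Exception {
        FileSystem fs = FileSystem.get(new Configuration());
        Path file = new Path("/tmp/example.txt");   // made-up path
        // "user." prefix selects the USER namespace described in the javadoc.
        fs.setXAttr(file, "user.review-note",
            "checked 2018-10-25".getBytes(StandardCharsets.UTF_8));
        byte[] value = fs.getXAttr(file, "user.review-note");
        System.out.println(new String(value, StandardCharsets.UTF_8));
      }
    }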
http://git-wip-us.apache.org/repos/asf/hadoop/blob/fac9f91b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/XAttrStorage.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/XAttrStorage.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/XAttrStorage.java
index 1dab69c..d856f6d 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/XAttrStorage.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/XAttrStorage.java
@@ -33,7 +33,7 @@ public class XAttrStorage {
 
   /**
    * Reads the extended attribute of an inode by name with prefix.
-   * <p/>
+   * <p>
    *
    * @param inode INode to read
    * @param snapshotId the snapshotId of the requested path
@@ -48,11 +48,11 @@ public class XAttrStorage {
 
   /**
    * Reads the existing extended attributes of an inode.
-   * <p/>
+   * <p>
    * Must be called while holding the FSDirectory read lock.
    *
    * @param inodeAttr INodeAttributes to read.
-   * @return List<XAttr> <code>XAttr</code> list.
+   * @return {@code XAttr} list.
    */
   public static List<XAttr> readINodeXAttrs(INodeAttributes inodeAttr) {
     XAttrFeature f = inodeAttr.getXAttrFeature();
@@ -61,7 +61,7 @@ public class XAttrStorage {
   
   /**
    * Update xattrs of inode.
-   * <p/>
+   * <p>
    * Must be called while holding the FSDirectory write lock.
    * 
    * @param inode INode to update

http://git-wip-us.apache.org/repos/asf/hadoop/blob/fac9f91b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/AbstractINodeDiffList.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/AbstractINodeDiffList.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/AbstractINodeDiffList.java
index d115656..1ba59a2 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/AbstractINodeDiffList.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/AbstractINodeDiffList.java
@@ -157,10 +157,10 @@ abstract class AbstractINodeDiffList<N extends INode,
   
   /**
    * Find the latest snapshot before a given snapshot.
-   * @param anchorId The returned snapshot's id must be <= or < this given 
-   *                 snapshot id.
-   * @param exclusive True means the returned snapshot's id must be < the given
-   *                  id, otherwise <=.
+   * @param anchorId The returned snapshot's id must be &lt;= or &lt; this
+   *                 given snapshot id.
+   * @param exclusive True means the returned snapshot's id must be &lt; the
+   *                  given id, otherwise &lt;=.
    * @return The id of the latest snapshot before the given snapshot.
    */
   public final int getPrior(int anchorId, boolean exclusive) {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/fac9f91b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/DiffListBySkipList.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/DiffListBySkipList.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/DiffListBySkipList.java
index 85d9a6d..705b8d6 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/DiffListBySkipList.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/DiffListBySkipList.java
@@ -44,15 +44,15 @@ import java.util.Objects;
  * and created after a particular snapshot and before the next snapshot. The
  * sequence will look like this:
  * <p>
- * s0->s1->s2->s3->s4->s5->s6->s7->s8->s9.
+ * {@literal s0->s1->s2->s3->s4->s5->s6->s7->s8->s9}.
  * <p>
  * Assuming a skip interval of 3, which means a new diff will be added at a
 * level higher than the current level after we have more than 3 snapshots.
  * Next level promotion happens after 9 snapshots and so on.
  * <p>
- * level 2:   s08------------------------------->s9
- * level 1:   S02------->s35-------->s68-------->s9
- * level 0:  s0->s1->s2->s3->s4->s5->s6->s7->s8->s9
+ * level 2:   {@literal s08------------------------------->s9}
+ * level 1:   {@literal S02------->s35-------->s68-------->s9}
+ * level 0:  {@literal s0->s1->s2->s3->s4->s5->s6->s7->s8->s9}
  * <p>
  * s02 will be created by combining diffs for s0, s1, s2 once s3 gets created.
  * Similarly, s08 will be created by combining s02, s35 and s68 once s9 gets
@@ -143,6 +143,7 @@ public class DiffListBySkipList implements DiffList<DirectoryDiff> {
      * and level.
      *
      * @param diff The element to be stored in the node.
+     * @param level
      */
     SkipListNode(DirectoryDiff diff, int level) {
       this.diff = diff;

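The level-0/1/2 diagram in the class comment is the essential idea: every skip-interval-th diff also gets a higher-level summary entry combining the diffs beneath it, so long runs of snapshots can be skipped during lookup. A rough, hypothetical calculation of the tallest summary created when snapshot sN appears, matching the example above (interval 3: level 1 at s3, s6, s9 and level 2 at s9); this is only an illustration of the layout, not the DiffListBySkipList implementation:

    // Illustration only: height of the tallest summary created when snapshot sN
    // is added, for a given skip interval.
    static int summaryLevel(int n, int skipInterval) {
      if (n <= 0) {
        return 0;
      }
      int level = 0;
      long span = skipInterval;
      while (n % span == 0) {      // n = 3, 6 -> 1;  n = 9 -> 2;  n = 27 -> 3
        level++;
        span *= skipInterval;
      }
      return level;
    }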
http://git-wip-us.apache.org/repos/asf/hadoop/blob/fac9f91b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/sps/BlockStorageMovementNeeded.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/sps/BlockStorageMovementNeeded.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/sps/BlockStorageMovementNeeded.java
index 02b9cff..b3f8de9 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/sps/BlockStorageMovementNeeded.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/sps/BlockStorageMovementNeeded.java
@@ -106,8 +106,8 @@ public class BlockStorageMovementNeeded {
    * Add the itemInfo to tracking list for which storage movement expected if
    * necessary.
    *
-   * @param itemInfoList
-   *          - List of child in the directory
+   * @param itemInfo
+   *          - child in the directory
    * @param scanCompleted
    *          -Indicates whether the ItemInfo start id directory has no more
    *          elements to scan.
@@ -191,7 +191,6 @@ public class BlockStorageMovementNeeded {
   /**
    * Clean all the movements in spsDirsToBeTraveresed/storageMovementNeeded
    * and notify to clean up required resources.
-   * @throws IOException
    */
   public synchronized void clearQueuesWithNotification() {
     // Remove xAttr from directories

http://git-wip-us.apache.org/repos/asf/hadoop/blob/fac9f91b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/sps/DatanodeCacheManager.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/sps/DatanodeCacheManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/sps/DatanodeCacheManager.java
index d4e514b..be8d01f 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/sps/DatanodeCacheManager.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/sps/DatanodeCacheManager.java
@@ -69,7 +69,7 @@ public class DatanodeCacheManager {
 
   /**
    * Returns the live datanodes and its storage details, which has available
-   * space (> 0) to schedule block moves. This will return array of datanodes
+   * space (&gt; 0) to schedule block moves. This will return array of datanodes
    * from its local cache. It has a configurable refresh interval in millis and
    * periodically refresh the datanode cache by fetching latest
    * {@link Context#getLiveDatanodeStorageReport()} once it elapsed refresh

http://git-wip-us.apache.org/repos/asf/hadoop/blob/fac9f91b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/sps/StoragePolicySatisfyManager.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/sps/StoragePolicySatisfyManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/sps/StoragePolicySatisfyManager.java
index 074eab6..14cf05d 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/sps/StoragePolicySatisfyManager.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/sps/StoragePolicySatisfyManager.java
@@ -39,7 +39,7 @@ import com.google.common.annotations.VisibleForTesting;
  * configured by the administrator.
  *
  * <p>
- * If the configured mode is {@link StoragePolicySatisfierMode.EXTERNAL}, then
+ * If the configured mode is {@link StoragePolicySatisfierMode#EXTERNAL}, then
  * it won't do anything, just maintains the sps invoked path ids. Administrator
  * requires to start external sps service explicitly, to fetch the sps invoked
  * path ids from namenode, then do necessary computations and block movement in
@@ -48,7 +48,7 @@ import com.google.common.annotations.VisibleForTesting;
  * external sps service functionality.
  *
  * <p>
- * If the configured mode is {@link StoragePolicySatisfierMode.NONE}, then it
+ * If the configured mode is {@link StoragePolicySatisfierMode#NONE}, then it
  * will disable the sps feature completely by clearing all queued up sps path's
  * hint.
  *
@@ -88,12 +88,12 @@ public class StoragePolicySatisfyManager {
    * This function will do following logic based on the configured sps mode:
    *
    * <p>
-   * If the configured mode is {@link StoragePolicySatisfierMode.EXTERNAL}, then
+   * If the configured mode is {@link StoragePolicySatisfierMode#EXTERNAL}, then
    * it won't do anything. Administrator requires to start external sps service
    * explicitly.
    *
    * <p>
-   * If the configured mode is {@link StoragePolicySatisfierMode.NONE}, then the
+   * If the configured mode is {@link StoragePolicySatisfierMode#NONE}, then the
    * service is disabled and won't do any action.
    */
   public void start() {
@@ -121,12 +121,12 @@ public class StoragePolicySatisfyManager {
    * This function will do following logic based on the configured sps mode:
    *
    * <p>
-   * If the configured mode is {@link StoragePolicySatisfierMode.EXTERNAL}, then
+   * If the configured mode is {@link StoragePolicySatisfierMode#EXTERNAL}, then
    * it won't do anything. Administrator requires to stop external sps service
    * explicitly, if needed.
    *
    * <p>
-   * If the configured mode is {@link StoragePolicySatisfierMode.NONE}, then the
+   * If the configured mode is {@link StoragePolicySatisfierMode#NONE}, then the
    * service is disabled and won't do any action.
    */
   public void stop() {
@@ -225,6 +225,7 @@ public class StoragePolicySatisfyManager {
 
   /**
    * Verify that satisfier queue limit exceeds allowed outstanding limit.
+   * @throws IOException
    */
   public void verifyOutstandingPathQLimit() throws IOException {
     long size = pathsToBeTraveresed.size();
@@ -269,6 +270,7 @@ public class StoragePolicySatisfyManager {
 
   /**
    * Adds the sps path to SPSPathIds list.
+   * @param id
    */
   public void addPathId(long id) {
     synchronized (pathsToBeTraveresed) {

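The corrected {@link} targets also summarize the control flow: every public entry point first branches on the configured mode, doing nothing locally for EXTERNAL (an external SPS service is expected to fetch and act on the queued path ids) and clearing queued hints for NONE. A minimal hypothetical sketch of that dispatch, using only the two mode names that appear in the javadoc (class and method names are invented, not the StoragePolicySatisfyManager code):

    // Hypothetical mode-based dispatch sketch.
    enum Mode { EXTERNAL, NONE }

    final class SatisfyManagerSketch {
      private final Mode mode;

      SatisfyManagerSketch(Mode mode) {
        this.mode = mode;
      }

      void start() {
        switch (mode) {
          case EXTERNAL:
            // Keep queued path ids only; an external service fetches and acts on them.
            System.out.println("external mode: waiting for external SPS service");
            break;
          case NONE:
          default:
            // Feature disabled: clear queued hints and do nothing.
            System.out.println("sps disabled");
            break;
        }
      }
    }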
http://git-wip-us.apache.org/repos/asf/hadoop/blob/fac9f91b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/startupprogress/StartupProgressView.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/startupprogress/StartupProgressView.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/startupprogress/StartupProgressView.java
index 3101741..a4453a5 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/startupprogress/StartupProgressView.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/startupprogress/StartupProgressView.java
@@ -179,7 +179,7 @@ public class StartupProgressView {
   /**
    * Returns all phases.
    * 
-   * @return Iterable<Phase> containing all phases
+   * @return {@code Iterable<Phase>} containing all phases
    */
   public Iterable<Phase> getPhases() {
     return EnumSet.allOf(Phase.class);
@@ -189,7 +189,7 @@ public class StartupProgressView {
    * Returns all steps within a phase.
    * 
    * @param phase Phase to get
-   * @return Iterable<Step> all steps
+   * @return {@code Iterable<Step>} all steps
    */
   public Iterable<Step> getSteps(Phase phase) {
     return new TreeSet<Step>(phases.get(phase).steps.keySet());

http://git-wip-us.apache.org/repos/asf/hadoop/blob/fac9f91b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/top/metrics/TopMetrics.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/top/metrics/TopMetrics.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/top/metrics/TopMetrics.java
index 4d61d0f..72ec9f2 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/top/metrics/TopMetrics.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/top/metrics/TopMetrics.java
@@ -47,22 +47,22 @@ import static org.apache.hadoop.hdfs.server.namenode.top.window.RollingWindowMan
 
 /**
  * The interface to the top metrics.
- * <p/>
+ * <p>
  * Metrics are collected by a custom audit logger, {@link org.apache.hadoop
  * .hdfs.server.namenode.top.TopAuditLogger}, which calls TopMetrics to
  * increment per-operation, per-user counts on every audit log call. These
  * counts are used to show the top users by NameNode operation as well as
  * across all operations.
- * <p/>
+ * <p>
  * TopMetrics maintains these counts for a configurable number of time
  * intervals, e.g. 1min, 5min, 25min. Each interval is tracked by a
  * RollingWindowManager.
- * <p/>
+ * <p>
  * These metrics are published as a JSON string via {@link org.apache.hadoop
  * .hdfs.server .namenode.metrics.FSNamesystemMBean#getTopWindows}. This is
  * done by calling {@link org.apache.hadoop.hdfs.server.namenode.top.window
  * .RollingWindowManager#snapshot} on each RollingWindowManager.
- * <p/>
+ * <p>
  * Thread-safe: relies on thread-safety of RollingWindowManager
  */
 @InterfaceAudience.Private
@@ -119,6 +119,13 @@ public class TopMetrics implements MetricsSource {
    * log file. This is to be consistent when {@link TopMetrics} is charged with
    * data read back from log files instead of being invoked directly by the
    * FsNamesystem
+   * @param succeeded
+   * @param userName
+   * @param addr
+   * @param cmd
+   * @param src
+   * @param dst
+   * @param status
    */
   public void report(boolean succeeded, String userName, InetAddress addr,
       String cmd, String src, String dst, FileStatus status) {
@@ -147,6 +154,8 @@ public class TopMetrics implements MetricsSource {
    * {@link org.apache.hadoop.metrics2.MetricsRecord}s for consumption by
    * external metrics systems. Each metrics record added corresponds to the
    * reporting period a.k.a window length of the configured rolling windows.
+   * @param collector
+   * @param all
    */
   @Override
   public void getMetrics(MetricsCollector collector, boolean all) {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/fac9f91b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/top/window/RollingWindow.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/top/window/RollingWindow.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/top/window/RollingWindow.java
index 63ff125..f927106 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/top/window/RollingWindow.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/top/window/RollingWindow.java
@@ -29,23 +29,24 @@ import org.slf4j.LoggerFactory;
  * Events are reported based on occurrence time. The total number of events in
  * the last period covered by the rolling window can be retrieved by the
  * {@link #getSum(long)} method.
- * <p/>
+ * <p>
  *
  * Assumptions:
- * <p/>
+ * <p>
  *
  * (1) Concurrent invocation of {@link #incAt} method are possible
- * <p/>
+ * <p>
  *
  * (2) The time parameter of two consecutive invocation of {@link #incAt} could
  * be in any given order
- * <p/>
+ * <p>
  *
  * (3) The buffering delays are not more than the window length, i.e., after two
  * consecutive invocation {@link #incAt(long time1, long)} and
- * {@link #incAt(long time2, long)}, time1 < time2 || time1 - time2 < windowLenMs.
+ * {@link #incAt(long time2, long)}, time1 &lt; time2 || time1 - time2 &lt;
+ * windowLenMs.
  * This assumption helps avoiding unnecessary synchronizations.
- * <p/>
+ * <p>
  *
  * Thread-safety is built in the {@link RollingWindow.Bucket}
  */
@@ -85,7 +86,7 @@ public class RollingWindow {
   /**
    * When an event occurs at the specified time, this method reflects that in
    * the rolling window.
-   * <p/>
+   * <p>
    *
    * @param time the time at which the event occurred
    * @param delta the delta that will be added to the window
@@ -153,6 +154,7 @@ public class RollingWindow {
      * performed. We do not need to update the {@link #updateTime} because as
      * long as the {@link #updateTime} belongs to the current view of the
      * rolling window, the algorithm works fine.
+     * @param delta
      */
     void inc(long delta) {
       value.addAndGet(delta);
@@ -161,7 +163,7 @@ public class RollingWindow {
 
   /**
    * Get value represented by this window at the specified time
-   * <p/>
+   * <p>
    *
    * If time lags behind the latest update time, the new updates are still
    * included in the sum

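The assumptions spelled out above (concurrent incAt calls, timestamps that may arrive slightly out of order but never lag by more than the window length) are what let a rolling window be built from a fixed ring of buckets, each holding an atomic counter plus the start time of the interval it currently represents. A generic sketch of that idea (not the HDFS class; the reset below is simplified and not fully race-free the way the real Bucket is):

    import java.util.concurrent.atomic.AtomicLong;

    // Generic bucketed rolling window: windowLenMs split into a ring of buckets.
    final class RollingWindowSketch {
      private final long bucketLenMs;
      private final AtomicLong[] counts;
      private final AtomicLong[] bucketStart;

      RollingWindowSketch(long windowLenMs, int buckets) {
        this.bucketLenMs = windowLenMs / buckets;
        this.counts = new AtomicLong[buckets];
        this.bucketStart = new AtomicLong[buckets];
        for (int i = 0; i < buckets; i++) {
          counts[i] = new AtomicLong();
          bucketStart[i] = new AtomicLong(Long.MIN_VALUE);
        }
      }

      void incAt(long timeMs, long delta) {
        long start = (timeMs / bucketLenMs) * bucketLenMs;
        int i = (int) ((timeMs / bucketLenMs) % counts.length);
        // A stale bucket (from a previous trip around the ring) is reset first.
        if (bucketStart[i].get() != start) {
          bucketStart[i].set(start);
          counts[i].set(0);
        }
        counts[i].addAndGet(delta);
      }

      long getSum(long nowMs) {
        long sum = 0;
        long windowLenMs = bucketLenMs * counts.length;
        for (int i = 0; i < counts.length; i++) {
          // Count only buckets whose interval still overlaps the window ending at nowMs.
          if (nowMs - bucketStart[i].get() < windowLenMs) {
            sum += counts[i].get();
          }
        }
        return sum;
      }
    }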
http://git-wip-us.apache.org/repos/asf/hadoop/blob/fac9f91b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/top/window/RollingWindowManager.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/top/window/RollingWindowManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/top/window/RollingWindowManager.java
index bdd0ab0..095294e 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/top/window/RollingWindowManager.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/top/window/RollingWindowManager.java
@@ -38,7 +38,7 @@ import org.slf4j.LoggerFactory;
  * A class to manage the set of {@link RollingWindow}s. This class is the
  * interface of metrics system to the {@link RollingWindow}s to retrieve the
  * current top metrics.
- * <p/>
+ * <p>
  * Thread-safety is provided by each {@link RollingWindow} being thread-safe as
  * well as {@link ConcurrentHashMap} for the collection of them.
  */

http://git-wip-us.apache.org/repos/asf/hadoop/blob/fac9f91b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/BlockStorageMovementCommand.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/BlockStorageMovementCommand.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/BlockStorageMovementCommand.java
index e90317d..6b0b261 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/BlockStorageMovementCommand.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/BlockStorageMovementCommand.java
@@ -30,11 +30,12 @@ import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
  *
  * Upon receiving this command, this DataNode pass the array of block movement
  * details to
- * {@link org.apache.hadoop.hdfs.server.datanode.StoragePolicySatisfyWorker}
- * service. Later, StoragePolicySatisfyWorker will schedule block movement tasks
- * for these blocks and monitors the completion of each task. After the block
- * movement attempt is finished(with success or failure) this DataNode will send
- * response back to NameNode about the block movement attempt finished details.
+ * {@link org.apache.hadoop.hdfs.server.sps.ExternalSPSBlockMoveTaskHandler}
+ * service. Later, ExternalSPSBlockMoveTaskHandler will schedule block movement
+ * tasks for these blocks and monitors the completion of each task. After the
+ * block movement attempt is finished(with success or failure) this DataNode
+ * will send response back to NameNode about the block movement attempt
+ * finished details.
  */
 public class BlockStorageMovementCommand extends DatanodeCommand {
   private final String blockPoolId;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/fac9f91b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/DatanodeProtocol.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/DatanodeProtocol.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/DatanodeProtocol.java
index 311b68f..5680ef3 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/DatanodeProtocol.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/DatanodeProtocol.java
@@ -158,7 +158,7 @@ public interface DatanodeProtocol {
    * {@link #blockReport(DatanodeRegistration, String, StorageBlockReport[], BlockReportContext)},
    * which is used to communicated blocks stored on disk.
    *
-   * @param            The datanode registration.
+   * @param registration The datanode registration.
    * @param poolId     The block pool ID for the blocks.
    * @param blockIds   A list of block IDs.
    * @return           The DatanodeCommand.

http://git-wip-us.apache.org/repos/asf/hadoop/blob/fac9f91b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/NamenodeProtocol.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/NamenodeProtocol.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/NamenodeProtocol.java
index f80477b..5d609de 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/NamenodeProtocol.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/NamenodeProtocol.java
@@ -73,7 +73,7 @@ public interface NamenodeProtocol {
    * @param datanode  a data node
    * @param size      requested size
    * @param minBlockSize each block should be of this minimum Block Size
-   * @return          a list of blocks & their locations
+   * @return BlocksWithLocations a list of blocks &amp; their locations
    * @throws IOException if size is less than or equal to 0 or
   datanode does not exist
    */
@@ -183,7 +183,8 @@ public interface NamenodeProtocol {
   /**
    * Return a structure containing details about all edit logs
    * available to be fetched from the NameNode.
-   * @param sinceTxId return only logs that contain transactions >= sinceTxId
+   * @param sinceTxId return only logs that contain transactions {@literal >=}
+   * sinceTxId
    */
   @Idempotent
   public RemoteEditLogManifest getEditLogManifest(long sinceTxId)

http://git-wip-us.apache.org/repos/asf/hadoop/blob/fac9f91b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/sps/ExternalSPSBlockMoveTaskHandler.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/sps/ExternalSPSBlockMoveTaskHandler.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/sps/ExternalSPSBlockMoveTaskHandler.java
index 3ea0294..64dec8b 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/sps/ExternalSPSBlockMoveTaskHandler.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/sps/ExternalSPSBlockMoveTaskHandler.java
@@ -36,8 +36,10 @@ import org.apache.hadoop.fs.StorageType;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.DFSUtilClient;
 import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys;
+import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
 import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants;
+import org.apache.hadoop.hdfs.protocol.datatransfer.Sender;
 import org.apache.hadoop.hdfs.protocol.datatransfer.TrustedChannelResolver;
 import org.apache.hadoop.hdfs.protocol.datatransfer.sasl.DataTransferSaslUtil;
 import org.apache.hadoop.hdfs.protocol.datatransfer.sasl.SaslDataTransferClient;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/fac9f91b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSck.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSck.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSck.java
index 3171497..2afc97c 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSck.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSck.java
@@ -50,14 +50,17 @@ import org.apache.hadoop.util.ToolRunner;
  * <p>The tool scans all files and directories, starting from an indicated
  *  root path. The following abnormal conditions are detected and handled:</p>
  * <ul>
- * <li>files with blocks that are completely missing from all datanodes.<br/>
+ * <li>files with blocks that are completely missing from all datanodes.<br>
  * In this case the tool can perform one of the following actions:
  *  <ul>
- *      <li>none ({@link org.apache.hadoop.hdfs.server.namenode.NamenodeFsck#FIXING_NONE})</li>
  *      <li>move corrupted files to /lost+found directory on DFS
- *      ({@link org.apache.hadoop.hdfs.server.namenode.NamenodeFsck#FIXING_MOVE}). Remaining data blocks are saved as a
- *      block chains, representing longest consecutive series of valid blocks.</li>
- *      <li>delete corrupted files ({@link org.apache.hadoop.hdfs.server.namenode.NamenodeFsck#FIXING_DELETE})</li>
+ *      ({@link org.apache.hadoop.hdfs.server.namenode.NamenodeFsck#doMove}).
+ *      Remaining data blocks are saved as a
+ *      block chains, representing longest consecutive series of valid blocks.
+ *      </li>
+ *      <li>delete corrupted files
+ *      ({@link org.apache.hadoop.hdfs.server.namenode.NamenodeFsck#doDelete})
+ *      </li>
  *  </ul>
  *  </li>
  *  <li>detect files with under-replicated or over-replicated blocks</li>

http://git-wip-us.apache.org/repos/asf/hadoop/blob/fac9f91b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineEditsViewer/OfflineEditsViewer.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineEditsViewer/OfflineEditsViewer.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineEditsViewer/OfflineEditsViewer.java
index f075ed2..43eedf8 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineEditsViewer/OfflineEditsViewer.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineEditsViewer/OfflineEditsViewer.java
@@ -121,8 +121,8 @@ public class OfflineEditsViewer extends Configured implements Tool {
 
   /** Process an edit log using the chosen processor or visitor.
    * 
-   * @param inputFilename   The file to process
-   * @param outputFilename  The output file name
+   * @param inputFileName   The file to process
+   * @param outputFileName  The output file name
    * @param processor       If visitor is null, the processor to use
    * @param visitor         If non-null, the visitor to use.
    * 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/fac9f91b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineEditsViewer/OfflineEditsVisitor.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineEditsViewer/OfflineEditsVisitor.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineEditsViewer/OfflineEditsVisitor.java
index eb477e1..1383f4e 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineEditsViewer/OfflineEditsVisitor.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineEditsViewer/OfflineEditsVisitor.java
@@ -52,7 +52,7 @@ abstract public interface OfflineEditsVisitor {
    * Begin visiting an element that encloses another element, such as
    * the beginning of the list of blocks that comprise a file.
    *
-   * @param value Token being visited
+   * @param op Token being visited
    */
   abstract void visitOp(FSEditLogOp op)
      throws IOException;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/fac9f91b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineEditsViewer/StatisticsEditsVisitor.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineEditsViewer/StatisticsEditsVisitor.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineEditsViewer/StatisticsEditsVisitor.java
index c84e2ed..cc97ea7 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineEditsViewer/StatisticsEditsVisitor.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineEditsViewer/StatisticsEditsVisitor.java
@@ -50,9 +50,7 @@ public class StatisticsEditsVisitor implements OfflineEditsVisitor {
    * Create a processor that writes to the file named and may or may not
    * also output to the screen, as specified.
    *
-   * @param filename Name of file to write output to
-   * @param tokenizer Input tokenizer
-   * @param printToScreen Mirror output to screen?
+   * @param out Name of file to write output to
    */
   public StatisticsEditsVisitor(OutputStream out) throws IOException {
     this.out = new PrintWriter(new OutputStreamWriter(out, Charsets.UTF_8));

http://git-wip-us.apache.org/repos/asf/hadoop/blob/fac9f91b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/NameDistributionVisitor.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/NameDistributionVisitor.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/NameDistributionVisitor.java
index 28bcf10..9c7b7f0 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/NameDistributionVisitor.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/NameDistributionVisitor.java
@@ -26,11 +26,13 @@ import org.apache.hadoop.classification.InterfaceAudience;
 /**
  * File name distribution visitor. 
  * <p>
- * It analyzes file names in fsimage and prints the following information: 
+ * It analyzes file names in fsimage and prints the following information:
+ * <ul>
  * <li>Number of unique file names</li> 
  * <li>Number file names and the corresponding number range of files that use 
  * these same names</li>
  * <li>Heap saved if the file name objects are reused</li>
+ * </ul>
  */
 @InterfaceAudience.Private
 public class NameDistributionVisitor extends TextWriterImageVisitor {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/fac9f91b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/util/Diff.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/util/Diff.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/util/Diff.java
index 1f87a7a..188537b 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/util/Diff.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/util/Diff.java
@@ -470,23 +470,23 @@ public class Diff<K, E extends Diff.Element<K>> {
    * <pre>
    * 1. For (c, 0) in the posterior diff, check the element in this diff:
    * 1.1 (c', 0)  in this diff: impossible
-   * 1.2 (0, d')  in this diff: put in c-list --> (c, d')
+   * 1.2 (0, d')  in this diff: put in c-list --&gt; (c, d')
    * 1.3 (c', d') in this diff: impossible
-   * 1.4 (0, 0)   in this diff: put in c-list --> (c, 0)
+   * 1.4 (0, 0)   in this diff: put in c-list --&gt; (c, 0)
    * This is the same logic as create(E).
    * 
    * 2. For (0, d) in the posterior diff,
-   * 2.1 (c', 0)  in this diff: remove from c-list --> (0, 0)
+   * 2.1 (c', 0)  in this diff: remove from c-list --&gt; (0, 0)
    * 2.2 (0, d')  in this diff: impossible
-   * 2.3 (c', d') in this diff: remove from c-list --> (0, d')
-   * 2.4 (0, 0)   in this diff: put in d-list --> (0, d)
+   * 2.3 (c', d') in this diff: remove from c-list --&gt; (0, d')
+   * 2.4 (0, 0)   in this diff: put in d-list --&gt; (0, d)
    * This is the same logic as delete(E).
    * 
    * 3. For (c, d) in the posterior diff,
-   * 3.1 (c', 0)  in this diff: replace the element in c-list --> (c, 0)
+   * 3.1 (c', 0)  in this diff: replace the element in c-list --&gt; (c, 0)
    * 3.2 (0, d')  in this diff: impossible
-   * 3.3 (c', d') in this diff: replace the element in c-list --> (c, d')
-   * 3.4 (0, 0)   in this diff: put in c-list and d-list --> (c, d)
+   * 3.3 (c', d') in this diff: replace the element in c-list --&gt; (c, d')
+   * 3.4 (0, 0)   in this diff: put in c-list and d-list --&gt; (c, d)
    * This is the same logic as modify(E, E).
    * </pre>
    * 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/fac9f91b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/util/XMLUtils.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/util/XMLUtils.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/util/XMLUtils.java
index f23b021..8c783d8 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/util/XMLUtils.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/util/XMLUtils.java
@@ -116,8 +116,8 @@ public class XMLUtils {
    * 
    * There are three kinds of code points in XML:
    * - Those that can be represented normally,
-   * - Those that have to be escaped (for example, & must be represented 
-   *     as &amp;)
+   * - Those that have to be escaped (for example, &amp; must be represented
+   *     as {@literal &amp;})
    * - Those that cannot be represented at all in XML.
    *
    * The built-in SAX functions will handle the first two types for us just

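The escaping described here for the second category is a character-by-character substitution of entity references. A minimal sketch of that step alone, covering the five standard entities (this is not the XMLUtils code, which also has to deal with the third category of unrepresentable code points):

    // Escape the characters XML reserves; everything else is copied through.
    static String escapeXml(String in) {
      StringBuilder out = new StringBuilder(in.length());
      for (int i = 0; i < in.length(); i++) {
        char c = in.charAt(i);
        switch (c) {
          case '&':  out.append("&amp;");  break;
          case '<':  out.append("&lt;");   break;
          case '>':  out.append("&gt;");   break;
          case '"':  out.append("&quot;"); break;
          case '\'': out.append("&apos;"); break;
          default:   out.append(c);
        }
      }
      return out.toString();
    }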



[41/50] [abbrv] hadoop git commit: HDDS-754. VolumeInfo#getScmUsed throws NPE. Contributed by Hanisha Koneru.

Posted by su...@apache.org.
HDDS-754. VolumeInfo#getScmUsed throws NPE.
Contributed by Hanisha Koneru.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/773f0d15
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/773f0d15
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/773f0d15

Branch: refs/heads/HDFS-12943
Commit: 773f0d1519715e3ddf77c139998cc12d7447da66
Parents: e33b61f
Author: Anu Engineer <ae...@apache.org>
Authored: Tue Oct 30 19:17:57 2018 -0700
Committer: Anu Engineer <ae...@apache.org>
Committed: Tue Oct 30 19:17:57 2018 -0700

----------------------------------------------------------------------
 .../container/common/volume/VolumeInfo.java      | 19 +++++++++++++++++--
 .../ozone/container/common/volume/VolumeSet.java | 11 +++++++----
 .../container/common/volume/TestHddsVolume.java  |  9 ++++++---
 .../container/common/volume/TestVolumeSet.java   |  4 +++-
 .../hdds/scm/pipeline/TestNodeFailure.java       |  3 ++-
 .../apache/hadoop/ozone/MiniOzoneCluster.java    |  8 ++++++++
 .../hadoop/ozone/MiniOzoneClusterImpl.java       | 16 +++++++++++++++-
 7 files changed, 58 insertions(+), 12 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/773f0d15/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/VolumeInfo.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/VolumeInfo.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/VolumeInfo.java
index 62fca63..0de9f18 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/VolumeInfo.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/VolumeInfo.java
@@ -95,15 +95,30 @@ public class VolumeInfo {
     this.usage = new VolumeUsage(root, b.conf);
   }
 
-  public long getCapacity() {
-    return configuredCapacity < 0 ? usage.getCapacity() : configuredCapacity;
+  public long getCapacity() throws IOException {
+    if (configuredCapacity < 0) {
+      if (usage == null) {
+        throw new IOException("Volume Usage thread is not running. This error" +
+            " is usually seen during DataNode shutdown.");
+      }
+      return usage.getCapacity();
+    }
+    return configuredCapacity;
   }
 
   public long getAvailable() throws IOException {
+    if (usage == null) {
+      throw new IOException("Volume Usage thread is not running. This error " +
+          "is usually seen during DataNode shutdown.");
+    }
     return usage.getAvailable();
   }
 
   public long getScmUsed() throws IOException {
+    if (usage == null) {
+      throw new IOException("Volume Usage thread is not running. This error " +
+          "is usually seen during DataNode shutdown.");
+    }
     return usage.getScmUsed();
   }
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/773f0d15/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/VolumeSet.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/VolumeSet.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/VolumeSet.java
index 5b6b823..d30dd89 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/VolumeSet.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/VolumeSet.java
@@ -372,18 +372,21 @@ public class VolumeSet {
       for (Map.Entry<String, HddsVolume> entry : volumeMap.entrySet()) {
         hddsVolume = entry.getValue();
         VolumeInfo volumeInfo = hddsVolume.getVolumeInfo();
-        long scmUsed = 0;
-        long remaining = 0;
+        long scmUsed;
+        long remaining;
+        long capacity;
         failed = false;
         try {
           scmUsed = volumeInfo.getScmUsed();
           remaining = volumeInfo.getAvailable();
+          capacity = volumeInfo.getCapacity();
         } catch (IOException ex) {
           LOG.warn("Failed to get scmUsed and remaining for container " +
-              "storage location {}", volumeInfo.getRootDir());
+              "storage location {}", volumeInfo.getRootDir(), ex);
           // reset scmUsed and remaining if df/du failed.
           scmUsed = 0;
           remaining = 0;
+          capacity = 0;
           failed = true;
         }
 
@@ -392,7 +395,7 @@ public class VolumeSet {
         builder.setStorageLocation(volumeInfo.getRootDir())
             .setId(hddsVolume.getStorageID())
             .setFailed(failed)
-            .setCapacity(hddsVolume.getCapacity())
+            .setCapacity(capacity)
             .setRemaining(remaining)
             .setScmUsed(scmUsed)
             .setStorageType(hddsVolume.getStorageType());

http://git-wip-us.apache.org/repos/asf/hadoop/blob/773f0d15/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/volume/TestHddsVolume.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/volume/TestHddsVolume.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/volume/TestHddsVolume.java
index 7755345..6b46762 100644
--- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/volume/TestHddsVolume.java
+++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/volume/TestHddsVolume.java
@@ -31,6 +31,7 @@ import org.junit.rules.TemporaryFolder;
 import org.mockito.Mockito;
 
 import java.io.File;
+import java.io.IOException;
 import java.util.Properties;
 import java.util.UUID;
 
@@ -134,12 +135,14 @@ public class TestHddsVolume {
         scmUsedFile.exists());
 
     try {
-      // Volume.getAvailable() should fail with NullPointerException as usage
-      // is shutdown.
+      // Volume.getAvailable() should fail with IOException
+      // as usage thread is shutdown.
       volume.getAvailable();
       fail("HddsVolume#shutdown test failed");
     } catch (Exception ex){
-      assertTrue(ex instanceof NullPointerException);
+      assertTrue(ex instanceof IOException);
+      assertTrue(ex.getMessage().contains(
+          "Volume Usage thread is not running."));
     }
   }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/773f0d15/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/volume/TestVolumeSet.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/volume/TestVolumeSet.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/volume/TestVolumeSet.java
index fca68b1..7bb8a43 100644
--- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/volume/TestVolumeSet.java
+++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/volume/TestVolumeSet.java
@@ -222,8 +222,10 @@ public class TestVolumeSet {
         // getAvailable() should throw null pointer exception as usage is null.
         volume.getAvailable();
         fail("Volume shutdown failed.");
-      } catch (NullPointerException ex) {
+      } catch (IOException ex) {
         // Do Nothing. Exception is expected.
+        assertTrue(ex.getMessage().contains(
+            "Volume Usage thread is not running."));
       }
     }
   }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/773f0d15/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestNodeFailure.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestNodeFailure.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestNodeFailure.java
index 9a1c705..618cd8e 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestNodeFailure.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestNodeFailure.java
@@ -99,7 +99,7 @@ public class TestNodeFailure {
     }
   }
 
-  @Test
+  @Test(timeout = 300_000L)
   public void testPipelineFail() throws InterruptedException, IOException,
       TimeoutException {
     Assert.assertEquals(ratisContainer1.getPipeline().getPipelineState(),
@@ -118,6 +118,7 @@ public class TestNodeFailure {
         pipelineManager.getPipeline(ratisContainer2.getPipeline().getId())
             .getPipelineState());
     // Now restart the datanode and make sure that a new pipeline is created.
+    cluster.setWaitForClusterToBeReadyTimeout(300000);
     cluster.restartHddsDatanode(dnToFail, true);
     ContainerWithPipeline ratisContainer3 =
         containerManager.allocateContainer(RATIS, THREE, "testOwner");

http://git-wip-us.apache.org/repos/asf/hadoop/blob/773f0d15/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/MiniOzoneCluster.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/MiniOzoneCluster.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/MiniOzoneCluster.java
index 3aad7f7..15bf8d0 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/MiniOzoneCluster.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/MiniOzoneCluster.java
@@ -67,6 +67,14 @@ public interface MiniOzoneCluster {
   void waitForClusterToBeReady() throws TimeoutException, InterruptedException;
 
   /**
+   * Sets the timeout value after which
+   * {@link MiniOzoneCluster#waitForClusterToBeReady} times out.
+   *
+   * @param timeoutInMs timeout value in milliseconds
+   */
+  void setWaitForClusterToBeReadyTimeout(int timeoutInMs);
+
+  /**
    * Waits/blocks till the cluster is out of chill mode.
    *
    * @throws TimeoutException TimeoutException In case of timeout

http://git-wip-us.apache.org/repos/asf/hadoop/blob/773f0d15/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/MiniOzoneClusterImpl.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/MiniOzoneClusterImpl.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/MiniOzoneClusterImpl.java
index 11bc0e0..6c0f408 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/MiniOzoneClusterImpl.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/MiniOzoneClusterImpl.java
@@ -90,6 +90,9 @@ public final class MiniOzoneClusterImpl implements MiniOzoneCluster {
   private final OzoneManager ozoneManager;
   private final List<HddsDatanodeService> hddsDatanodes;
 
+  // Timeout for the cluster to be ready
+  private int waitForClusterToBeReadyTimeout = 60000; // 1 min
+
   /**
    * Creates a new MiniOzoneCluster.
    *
@@ -122,7 +125,18 @@ public final class MiniOzoneClusterImpl implements MiniOzoneCluster {
           isReady? "Cluster is ready" : "Waiting for cluster to be ready",
           healthy, hddsDatanodes.size());
       return isReady;
-    }, 1000, 60 * 1000); //wait for 1 min.
+    }, 1000, waitForClusterToBeReadyTimeout);
+  }
+
+  /**
+   * Sets the timeout value after which
+   * {@link MiniOzoneClusterImpl#waitForClusterToBeReady} times out.
+   *
+   * @param timeoutInMs timeout value in milliseconds
+   */
+  @Override
+  public void setWaitForClusterToBeReadyTimeout(int timeoutInMs) {
+    waitForClusterToBeReadyTimeout = timeoutInMs;
   }
 
   /**

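A minimal usage sketch of the new setter (not part of the commit; it assumes the hadoop-ozone integration-test classes such as MiniOzoneCluster are on the classpath). A test that expects a slow recovery raises the readiness window before waiting, instead of relying on the 60s default shown above.

import org.apache.hadoop.hdds.conf.OzoneConfiguration;
import org.apache.hadoop.ozone.MiniOzoneCluster;

public class ReadyTimeoutSketch {
  public static void main(String[] args) throws Exception {
    OzoneConfiguration conf = new OzoneConfiguration();
    MiniOzoneCluster cluster = MiniOzoneCluster.newBuilder(conf)
        .setNumDatanodes(3)
        .build();
    try {
      // Raise the readiness window to 5 minutes instead of the 60s default.
      cluster.setWaitForClusterToBeReadyTimeout(300_000);
      // The readiness poll now runs with the larger timeout.
      cluster.waitForClusterToBeReady();
    } finally {
      cluster.shutdown();
    }
  }
}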


[26/50] [abbrv] hadoop git commit: HDDS-743. S3 multi delete request should return XML header in quiet mode. Contributed by Elek Marton.

Posted by su...@apache.org.
HDDS-743. S3 multi delete request should return XML header in quiet mode. Contributed by Elek Marton.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/3655e573
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/3655e573
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/3655e573

Branch: refs/heads/HDFS-12943
Commit: 3655e573e28eea79e46936d348a852158b2fc48a
Parents: a58048e
Author: Bharat Viswanadham <bh...@apache.org>
Authored: Mon Oct 29 12:59:41 2018 -0700
Committer: Bharat Viswanadham <bh...@apache.org>
Committed: Mon Oct 29 12:59:41 2018 -0700

----------------------------------------------------------------------
 .../ozone/s3/endpoint/BucketEndpoint.java       |  9 +--
 .../s3/endpoint/TestObjectMultiDelete.java      | 61 +++++++++++++++-----
 2 files changed, 48 insertions(+), 22 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/3655e573/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/BucketEndpoint.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/BucketEndpoint.java b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/BucketEndpoint.java
index fca251d..8f554ed 100644
--- a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/BucketEndpoint.java
+++ b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/BucketEndpoint.java
@@ -223,7 +223,7 @@ public class BucketEndpoint extends EndpointBase {
    */
   @POST
   @Produces(MediaType.APPLICATION_XML)
-  public Response multiDelete(@PathParam("bucket") String bucketName,
+  public MultiDeleteResponse multiDelete(@PathParam("bucket") String bucketName,
       @QueryParam("delete") String delete,
       MultiDeleteRequest request) throws OS3Exception, IOException {
     OzoneBucket bucket = getBucket(bucketName);
@@ -251,11 +251,6 @@ public class BucketEndpoint extends EndpointBase {
         }
       }
     }
-    ResponseBuilder response = Response.ok();
-    if (!request.isQuiet() || result.getErrors().size() > 0) {
-      response = response.entity(result);
-    }
-    return response.build();
-
+    return result;
   }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/3655e573/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestObjectMultiDelete.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestObjectMultiDelete.java b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestObjectMultiDelete.java
index 12a14a0..dabbd22 100644
--- a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestObjectMultiDelete.java
+++ b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestObjectMultiDelete.java
@@ -46,16 +46,7 @@ public class TestObjectMultiDelete {
   public void delete() throws IOException, OS3Exception, JAXBException {
     //GIVEN
     OzoneClient client = new OzoneClientStub();
-    client.getObjectStore().createS3Bucket("bilbo", "b1");
-
-    String volumeName = client.getObjectStore().getOzoneVolumeName("b1");
-
-    OzoneBucket bucket =
-        client.getObjectStore().getVolume(volumeName).getBucket("b1");
-
-    bucket.createKey("key1", 0).close();
-    bucket.createKey("key2", 0).close();
-    bucket.createKey("key3", 0).close();
+    OzoneBucket bucket = initTestData(client);
 
     BucketEndpoint rest = new BucketEndpoint();
     rest.setClient(client);
@@ -66,11 +57,9 @@ public class TestObjectMultiDelete {
     mdr.getObjects().add(new DeleteObject("key4"));
 
     //WHEN
-    Response response = rest.multiDelete("b1", "", mdr);
+    MultiDeleteResponse response = rest.multiDelete("b1", "", mdr);
 
     //THEN
-    MultiDeleteResponse mdresponse = (MultiDeleteResponse) response.getEntity();
-
     Set<String> keysAtTheEnd = Sets.newHashSet(bucket.listKeys("")).stream()
         .map(OzoneKey::getName)
         .collect(Collectors.toSet());
@@ -80,7 +69,49 @@ public class TestObjectMultiDelete {
 
     //THEN
     Assert.assertEquals(expectedResult, keysAtTheEnd);
-    Assert.assertEquals(3, mdresponse.getDeletedObjects().size());
-    Assert.assertEquals(0, mdresponse.getErrors().size());
+    Assert.assertEquals(3, response.getDeletedObjects().size());
+    Assert.assertEquals(0, response.getErrors().size());
+  }
+
+  @Test
+  public void deleteQuiet() throws IOException, OS3Exception, JAXBException {
+    //GIVEN
+    OzoneClient client = new OzoneClientStub();
+    OzoneBucket bucket = initTestData(client);
+
+    BucketEndpoint rest = new BucketEndpoint();
+    rest.setClient(client);
+
+    MultiDeleteRequest mdr = new MultiDeleteRequest();
+    mdr.setQuiet(true);
+    mdr.getObjects().add(new DeleteObject("key1"));
+    mdr.getObjects().add(new DeleteObject("key2"));
+    mdr.getObjects().add(new DeleteObject("key4"));
+
+    //WHEN
+    MultiDeleteResponse response = rest.multiDelete("b1", "", mdr);
+
+    //THEN
+    Set<String> keysAtTheEnd = Sets.newHashSet(bucket.listKeys("")).stream()
+        .map(OzoneKey::getName)
+        .collect(Collectors.toSet());
+
+    //THEN
+    Assert.assertEquals(0, response.getDeletedObjects().size());
+    Assert.assertEquals(0, response.getErrors().size());
+  }
+
+  private OzoneBucket initTestData(OzoneClient client) throws IOException {
+    client.getObjectStore().createS3Bucket("bilbo", "b1");
+
+    String volumeName = client.getObjectStore().getOzoneVolumeName("b1");
+
+    OzoneBucket bucket =
+        client.getObjectStore().getVolume(volumeName).getBucket("b1");
+
+    bucket.createKey("key1", 0).close();
+    bucket.createKey("key2", 0).close();
+    bucket.createKey("key3", 0).close();
+    return bucket;
   }
 }
\ No newline at end of file

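A hedged sketch of why returning the JAXB-annotated MultiDeleteResponse directly restores the XML header in quiet mode (illustrative only; it assumes MultiDeleteResponse exposes the usual JAXB no-arg constructor). Marshalling an empty response still yields a complete DeleteResult document, whereas the old ResponseBuilder path produced a bodyless 200 when quiet mode had nothing to report.

import java.io.StringWriter;
import javax.xml.bind.JAXBContext;
import org.apache.hadoop.ozone.s3.endpoint.MultiDeleteResponse;

public class QuietModeSketch {
  public static void main(String[] args) throws Exception {
    MultiDeleteResponse empty = new MultiDeleteResponse(); // quiet mode, nothing failed
    StringWriter out = new StringWriter();
    JAXBContext.newInstance(MultiDeleteResponse.class)
        .createMarshaller()
        .marshal(empty, out);
    // A well-formed document (XML declaration plus an empty DeleteResult)
    // is produced even when no deletions or errors are reported.
    System.out.println(out);
  }
}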


[39/50] [abbrv] hadoop git commit: HADOOP-15886. Fix findbugs warnings in RegistryDNS.java.

Posted by su...@apache.org.
HADOOP-15886. Fix findbugs warnings in RegistryDNS.java.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/f747f5b0
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/f747f5b0
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/f747f5b0

Branch: refs/heads/HDFS-12943
Commit: f747f5b06cb0da59c7c20b9f0e46d3eec9622eed
Parents: 277a3d8
Author: Akira Ajisaka <aa...@apache.org>
Authored: Tue Oct 30 11:43:36 2018 +0900
Committer: Akira Ajisaka <aa...@apache.org>
Committed: Wed Oct 31 10:01:31 2018 +0900

----------------------------------------------------------------------
 .../dev-support/findbugs-exclude.xml            | 33 ++++++++++++++++++++
 hadoop-common-project/hadoop-registry/pom.xml   | 10 ++++++
 .../dev-support/findbugs-exclude.xml            | 16 ----------
 3 files changed, 43 insertions(+), 16 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/f747f5b0/hadoop-common-project/hadoop-registry/dev-support/findbugs-exclude.xml
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-registry/dev-support/findbugs-exclude.xml b/hadoop-common-project/hadoop-registry/dev-support/findbugs-exclude.xml
new file mode 100644
index 0000000..dc7b139
--- /dev/null
+++ b/hadoop-common-project/hadoop-registry/dev-support/findbugs-exclude.xml
@@ -0,0 +1,33 @@
+<!--
+   Licensed to the Apache Software Foundation (ASF) under one or more
+   contributor license agreements.  See the NOTICE file distributed with
+   this work for additional information regarding copyright ownership.
+   The ASF licenses this file to You under the Apache License, Version 2.0
+   (the "License"); you may not use this file except in compliance with
+   the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+-->
+<FindBugsFilter>
+  <Match>
+    <Class name="org.apache.hadoop.registry.server.dns.RegistryDNS" />
+    <Method name="addNIOTCP" />
+    <Bug pattern="RV_RETURN_VALUE_IGNORED_BAD_PRACTICE" />
+  </Match>
+  <Match>
+    <Class name="org.apache.hadoop.registry.server.dns.RegistryDNS" />
+    <Method name="addNIOUDP" />
+    <Bug pattern="RV_RETURN_VALUE_IGNORED_BAD_PRACTICE" />
+  </Match>
+  <Match>
+    <Class name="org.apache.hadoop.registry.server.dns.RegistryDNS" />
+    <Method name="serveNIOTCP" />
+    <Bug pattern="RV_RETURN_VALUE_IGNORED_BAD_PRACTICE" />
+  </Match>
+</FindBugsFilter>

http://git-wip-us.apache.org/repos/asf/hadoop/blob/f747f5b0/hadoop-common-project/hadoop-registry/pom.xml
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-registry/pom.xml b/hadoop-common-project/hadoop-registry/pom.xml
index ef9f3ef..7ca1c9e 100644
--- a/hadoop-common-project/hadoop-registry/pom.xml
+++ b/hadoop-common-project/hadoop-registry/pom.xml
@@ -163,6 +163,16 @@
     </resources>
     <plugins>
       <plugin>
+        <groupId>org.codehaus.mojo</groupId>
+        <artifactId>findbugs-maven-plugin</artifactId>
+        <configuration>
+          <findbugsXmlOutput>true</findbugsXmlOutput>
+          <xmlOutput>true</xmlOutput>
+          <excludeFilterFile>${project.basedir}/dev-support/findbugs-exclude.xml</excludeFilterFile>
+          <effort>Max</effort>
+        </configuration>
+      </plugin>
+      <plugin>
         <groupId>org.apache.rat</groupId>
         <artifactId>apache-rat-plugin</artifactId>
         <configuration>

http://git-wip-us.apache.org/repos/asf/hadoop/blob/f747f5b0/hadoop-yarn-project/hadoop-yarn/dev-support/findbugs-exclude.xml
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/dev-support/findbugs-exclude.xml b/hadoop-yarn-project/hadoop-yarn/dev-support/findbugs-exclude.xml
index 216c3bd..dd42129 100644
--- a/hadoop-yarn-project/hadoop-yarn/dev-support/findbugs-exclude.xml
+++ b/hadoop-yarn-project/hadoop-yarn/dev-support/findbugs-exclude.xml
@@ -639,22 +639,6 @@
     <Bug pattern="MS_EXPOSE_REP" />
   </Match>
 
-  <Match>
-    <Class name="org.apache.hadoop.registry.server.dns.RegistryDNS" />
-    <Method name="addNIOTCP" />
-    <Bug pattern="RV_RETURN_VALUE_IGNORED_BAD_PRACTICE" />
-  </Match>
-  <Match>
-    <Class name="org.apache.hadoop.registry.server.dns.RegistryDNS" />
-    <Method name="addNIOUDP" />
-    <Bug pattern="RV_RETURN_VALUE_IGNORED_BAD_PRACTICE" />
-  </Match>
-  <Match>
-    <Class name="org.apache.hadoop.registry.server.dns.RegistryDNS" />
-    <Method name="serveNIOTCP" />
-    <Bug pattern="RV_RETURN_VALUE_IGNORED_BAD_PRACTICE" />
-  </Match>
-
   <!-- EQ_OVERRIDING_EQUALS_NOT_SYMMETRIC -->
   <Match>
     <Class name="org.apache.hadoop.yarn.server.nodemanager.containermanager.resourceplugin.gpu.AssignedGpuDevice" />

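For reference, an illustration of the excluded pattern (not code from RegistryDNS): RV_RETURN_VALUE_IGNORED_BAD_PRACTICE fires when the return value of a method such as java.io.File#delete is silently dropped.

import java.io.File;
import java.io.IOException;

public class FindbugsRvExample {
  public static void main(String[] args) throws IOException {
    File a = File.createTempFile("registry-dns", ".tmp");
    a.delete();                   // result ignored -> RV_RETURN_VALUE_IGNORED_BAD_PRACTICE
    File b = File.createTempFile("registry-dns", ".tmp");
    boolean deleted = b.delete(); // checking the result avoids the warning
    System.out.println("deleted=" + deleted);
  }
}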


[23/50] [abbrv] hadoop git commit: HDDS-573. Make VirtualHostStyleFilter port agnostic. Contributed by Danilo Perez.

Posted by su...@apache.org.
HDDS-573. Make VirtualHostStyleFilter port agnostic. Contributed by Danilo Perez.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/bfb720eb
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/bfb720eb
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/bfb720eb

Branch: refs/heads/HDFS-12943
Commit: bfb720ebc84c976e264971fa655515093a695515
Parents: 78ea897
Author: Márton Elek <el...@apache.org>
Authored: Mon Oct 29 14:45:01 2018 +0100
Committer: Márton Elek <el...@apache.org>
Committed: Mon Oct 29 14:45:01 2018 +0100

----------------------------------------------------------------------
 .../org/apache/hadoop/ozone/s3/VirtualHostStyleFilter.java  | 9 +++++++++
 .../apache/hadoop/ozone/s3/TestVirtualHostStyleFilter.java  | 4 +++-
 2 files changed, 12 insertions(+), 1 deletion(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/bfb720eb/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/VirtualHostStyleFilter.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/VirtualHostStyleFilter.java b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/VirtualHostStyleFilter.java
index d4e7547..50014fe 100644
--- a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/VirtualHostStyleFilter.java
+++ b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/VirtualHostStyleFilter.java
@@ -74,6 +74,7 @@ public class VirtualHostStyleFilter implements ContainerRequestFilter {
     }
     //Get the value of the host
     String host = requestContext.getHeaderString(HttpHeaders.HOST);
+    host = checkHostWithoutPort(host);
     String domain = getDomainName(host);
 
     if (domain == null) {
@@ -148,6 +149,14 @@ public class VirtualHostStyleFilter implements ContainerRequestFilter {
     return match;
   }
 
+  private String checkHostWithoutPort(String host) {
+    if (host.contains(":")){
+      return host.substring(0, host.lastIndexOf(":"));
+    } else {
+      return host;
+    }
+  }
+
   @VisibleForTesting
   public void setAuthenticationHeaderParser(AuthenticationHeaderParser parser) {
     this.authenticationHeaderParser = parser;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/bfb720eb/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/TestVirtualHostStyleFilter.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/TestVirtualHostStyleFilter.java b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/TestVirtualHostStyleFilter.java
index 1b706da..eead447 100644
--- a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/TestVirtualHostStyleFilter.java
+++ b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/TestVirtualHostStyleFilter.java
@@ -49,6 +49,7 @@ public class TestVirtualHostStyleFilter {
     conf = new OzoneConfiguration();
     s3HttpAddr = "localhost:9878";
     conf.set(S3GatewayConfigKeys.OZONE_S3G_HTTP_ADDRESS_KEY, s3HttpAddr);
+    s3HttpAddr = s3HttpAddr.substring(0, s3HttpAddr.lastIndexOf(":"));
     conf.set(S3GatewayConfigKeys.OZONE_S3G_DOMAIN_NAME, s3HttpAddr);
     authenticationHeaderParser = new AuthenticationHeaderParser();
     authenticationHeaderParser.setAuthHeader("AWS ozone:scret");
@@ -185,9 +186,10 @@ public class TestVirtualHostStyleFilter {
         authenticationHeaderParser);
 
     ContainerRequest containerRequest = createContainerRequest("mybucket" +
-        ".localhost:9999", null, null, true);
+        ".myhost:9999", null, null, true);
     try {
       virtualHostStyleFilter.filter(containerRequest);
+      fail("testVirtualHostStyleWithNoMatchingDomain");
     } catch (InvalidRequestException ex) {
       GenericTestUtils.assertExceptionContains("No matching domain", ex);
     }

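A standalone sketch of the port-stripping idea above (illustrative only; the production logic is VirtualHostStyleFilter#checkHostWithoutPort). Everything after the last ':' is dropped so the configured domain matches whether or not the Host header carries a port.

public class HostWithoutPortSketch {
  static String withoutPort(String host) {
    int idx = host.lastIndexOf(':');
    return idx >= 0 ? host.substring(0, idx) : host;
  }

  public static void main(String[] args) {
    System.out.println(withoutPort("mybucket.localhost:9878")); // mybucket.localhost
    System.out.println(withoutPort("mybucket.localhost"));      // unchanged
  }
}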


[35/50] [abbrv] hadoop git commit: YARN-8854. Upgrade jquery datatable version references to v1.10.19. Contributed by Akhil PB.

Posted by su...@apache.org.
http://git-wip-us.apache.org/repos/asf/hadoop/blob/d36012b6/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/webapps/static/dt-1.10.7/css/demo_table.css
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/webapps/static/dt-1.10.7/css/demo_table.css b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/webapps/static/dt-1.10.7/css/demo_table.css
deleted file mode 100644
index 37b9203..0000000
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/webapps/static/dt-1.10.7/css/demo_table.css
+++ /dev/null
@@ -1,538 +0,0 @@
-/*
- *  File:         demo_table.css
- *  CVS:          $Id$
- *  Description:  CSS descriptions for DataTables demo pages
- *  Author:       Allan Jardine
- *  Created:      Tue May 12 06:47:22 BST 2009
- *  Modified:     $Date$ by $Author$
- *  Language:     CSS
- *  Project:      DataTables
- *
- *  Copyright 2009 Allan Jardine. All Rights Reserved.
- *
- * ***************************************************************************
- * DESCRIPTION
- *
- * The styles given here are suitable for the demos that are used with the standard DataTables
- * distribution (see www.datatables.net). You will most likely wish to modify these styles to
- * meet the layout requirements of your site.
- *
- * Common issues:
- *   'full_numbers' pagination - I use an extra selector on the body tag to ensure that there is
- *     no conflict between the two pagination types. If you want to use full_numbers pagination
- *     ensure that you either have "example_alt_pagination" as a body class name, or better yet,
- *     modify that selector.
- *   Note that the path used for Images is relative. All images are by default located in
- *     ../images/ - relative to this CSS file.
- */
-
-/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
- * DataTables features
- */
-
-.dataTables_wrapper {
-	position: relative;
-	min-height: 302px;
-	clear: both;
-	_height: 302px;
-	zoom: 1; /* Feeling sorry for IE */
-}
-
-.dataTables_processing {
-	position: absolute;
-	top: 50%;
-	left: 50%;
-	width: 250px;
-	height: 30px;
-	margin-left: -125px;
-	margin-top: -15px;
-	padding: 14px 0 2px 0;
-	border: 1px solid #ddd;
-	text-align: center;
-	color: #999;
-	font-size: 14px;
-	background-color: white;
-}
-
-.dataTables_length {
-	width: 40%;
-	float: left;
-}
-
-.dataTables_filter {
-	width: 50%;
-	float: right;
-	text-align: right;
-}
-
-.dataTables_info {
-	width: 60%;
-	float: left;
-}
-
-.dataTables_paginate {
-	width: 44px;
-	* width: 50px;
-	float: right;
-	text-align: right;
-}
-
-/* Pagination nested */
-.paginate_disabled_previous, .paginate_enabled_previous, .paginate_disabled_next, .paginate_enabled_next {
-	height: 19px;
-	width: 19px;
-	margin-left: 3px;
-	float: left;
-}
-
-.paginate_disabled_previous {
-	background-image: url('../images/back_disabled.jpg');
-}
-
-.paginate_enabled_previous {
-	background-image: url('../images/back_enabled.jpg');
-}
-
-.paginate_disabled_next {
-	background-image: url('../images/forward_disabled.jpg');
-}
-
-.paginate_enabled_next {
-	background-image: url('../images/forward_enabled.jpg');
-}
-
-
-
-/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
- * DataTables display
- */
-table.display {
-	margin: 0 auto;
-	clear: both;
-	width: 100%;
-
-	/* Note Firefox 3.5 and before have a bug with border-collapse
-	 * ( https://bugzilla.mozilla.org/show%5Fbug.cgi?id=155955 )
-	 * border-spacing: 0; is one possible option. Conditional-css.com is
-	 * useful for this kind of thing
-	 *
-	 * Further note IE 6/7 has problems when calculating widths with border width.
-	 * It subtracts one px relative to the other browsers from the first column, and
-	 * adds one to the end...
-	 *
-	 * If you want that effect I'd suggest setting a border-top/left on th/td's and
-	 * then filling in the gaps with other borders.
-	 */
-}
-
-table.display thead th {
-	padding: 3px 18px 3px 10px;
-	border-bottom: 1px solid black;
-	font-weight: bold;
-	cursor: pointer;
-	* cursor: hand;
-}
-
-table.display tfoot th {
-	padding: 3px 18px 3px 10px;
-	border-top: 1px solid black;
-	font-weight: bold;
-}
-
-table.display tr.heading2 td {
-	border-bottom: 1px solid #aaa;
-}
-
-table.display td {
-	padding: 3px 10px;
-}
-
-table.display td.center {
-	text-align: center;
-}
-
-
-
-/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
- * DataTables sorting
- */
-
-.sorting_asc {
-	background: url('../images/sort_asc.png') no-repeat center right;
-}
-
-.sorting_desc {
-	background: url('../images/sort_desc.png') no-repeat center right;
-}
-
-.sorting {
-	background: url('../images/sort_both.png') no-repeat center right;
-}
-
-.sorting_asc_disabled {
-	background: url('../images/sort_asc_disabled.png') no-repeat center right;
-}
-
-.sorting_desc_disabled {
-	background: url('../images/sort_desc_disabled.png') no-repeat center right;
-}
-
-
-
-
-
-/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
- * DataTables row classes
- */
-table.display tr.odd.gradeA {
-	background-color: #ddffdd;
-}
-
-table.display tr.even.gradeA {
-	background-color: #eeffee;
-}
-
-table.display tr.odd.gradeC {
-	background-color: #ddddff;
-}
-
-table.display tr.even.gradeC {
-	background-color: #eeeeff;
-}
-
-table.display tr.odd.gradeX {
-	background-color: #ffdddd;
-}
-
-table.display tr.even.gradeX {
-	background-color: #ffeeee;
-}
-
-table.display tr.odd.gradeU {
-	background-color: #ddd;
-}
-
-table.display tr.even.gradeU {
-	background-color: #eee;
-}
-
-
-tr.odd {
-	background-color: #E2E4FF;
-}
-
-tr.even {
-	background-color: white;
-}
-
-
-
-
-
-/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
- * Misc
- */
-.dataTables_scroll {
-	clear: both;
-}
-
-.dataTables_scrollBody {
-	*margin-top: -1px;
-}
-
-.top, .bottom {
-	padding: 15px;
-	background-color: #F5F5F5;
-	border: 1px solid #CCCCCC;
-}
-
-.top .dataTables_info {
-	float: none;
-}
-
-.clear {
-	clear: both;
-}
-
-.dataTables_empty {
-	text-align: center;
-}
-
-tfoot input {
-	margin: 0.5em 0;
-	width: 100%;
-	color: #444;
-}
-
-tfoot input.search_init {
-	color: #999;
-}
-
-td.group {
-	background-color: #d1cfd0;
-	border-bottom: 2px solid #A19B9E;
-	border-top: 2px solid #A19B9E;
-}
-
-td.details {
-	background-color: #d1cfd0;
-	border: 2px solid #A19B9E;
-}
-
-
-.example_alt_pagination div.dataTables_info {
-	width: 40%;
-}
-
-.paging_full_numbers {
-	width: 400px;
-	height: 22px;
-	line-height: 22px;
-}
-
-.paging_full_numbers span.paginate_button,
- 	.paging_full_numbers span.paginate_active {
-	border: 1px solid #aaa;
-	-webkit-border-radius: 5px;
-	-moz-border-radius: 5px;
-	padding: 2px 5px;
-	margin: 0 3px;
-	cursor: pointer;
-	*cursor: hand;
-}
-
-.paging_full_numbers span.paginate_button {
-	background-color: #ddd;
-}
-
-.paging_full_numbers span.paginate_button:hover {
-	background-color: #ccc;
-}
-
-.paging_full_numbers span.paginate_active {
-	background-color: #99B3FF;
-}
-
-table.display tr.even.row_selected td {
-	background-color: #B0BED9;
-}
-
-table.display tr.odd.row_selected td {
-	background-color: #9FAFD1;
-}
-
-
-/*
- * Sorting classes for columns
- */
-/* For the standard odd/even */
-tr.odd td.sorting_1 {
-	background-color: #D3D6FF;
-}
-
-tr.odd td.sorting_2 {
-	background-color: #DADCFF;
-}
-
-tr.odd td.sorting_3 {
-	background-color: #E0E2FF;
-}
-
-tr.even td.sorting_1 {
-	background-color: #EAEBFF;
-}
-
-tr.even td.sorting_2 {
-	background-color: #F2F3FF;
-}
-
-tr.even td.sorting_3 {
-	background-color: #F9F9FF;
-}
-
-
-/* For the Conditional-CSS grading rows */
-/*
- 	Colour calculations (based off the main row colours)
-  Level 1:
-		dd > c4
-		ee > d5
-	Level 2:
-	  dd > d1
-	  ee > e2
- */
-tr.odd.gradeA td.sorting_1 {
-	background-color: #c4ffc4;
-}
-
-tr.odd.gradeA td.sorting_2 {
-	background-color: #d1ffd1;
-}
-
-tr.odd.gradeA td.sorting_3 {
-	background-color: #d1ffd1;
-}
-
-tr.even.gradeA td.sorting_1 {
-	background-color: #d5ffd5;
-}
-
-tr.even.gradeA td.sorting_2 {
-	background-color: #e2ffe2;
-}
-
-tr.even.gradeA td.sorting_3 {
-	background-color: #e2ffe2;
-}
-
-tr.odd.gradeC td.sorting_1 {
-	background-color: #c4c4ff;
-}
-
-tr.odd.gradeC td.sorting_2 {
-	background-color: #d1d1ff;
-}
-
-tr.odd.gradeC td.sorting_3 {
-	background-color: #d1d1ff;
-}
-
-tr.even.gradeC td.sorting_1 {
-	background-color: #d5d5ff;
-}
-
-tr.even.gradeC td.sorting_2 {
-	background-color: #e2e2ff;
-}
-
-tr.even.gradeC td.sorting_3 {
-	background-color: #e2e2ff;
-}
-
-tr.odd.gradeX td.sorting_1 {
-	background-color: #ffc4c4;
-}
-
-tr.odd.gradeX td.sorting_2 {
-	background-color: #ffd1d1;
-}
-
-tr.odd.gradeX td.sorting_3 {
-	background-color: #ffd1d1;
-}
-
-tr.even.gradeX td.sorting_1 {
-	background-color: #ffd5d5;
-}
-
-tr.even.gradeX td.sorting_2 {
-	background-color: #ffe2e2;
-}
-
-tr.even.gradeX td.sorting_3 {
-	background-color: #ffe2e2;
-}
-
-tr.odd.gradeU td.sorting_1 {
-	background-color: #c4c4c4;
-}
-
-tr.odd.gradeU td.sorting_2 {
-	background-color: #d1d1d1;
-}
-
-tr.odd.gradeU td.sorting_3 {
-	background-color: #d1d1d1;
-}
-
-tr.even.gradeU td.sorting_1 {
-	background-color: #d5d5d5;
-}
-
-tr.even.gradeU td.sorting_2 {
-	background-color: #e2e2e2;
-}
-
-tr.even.gradeU td.sorting_3 {
-	background-color: #e2e2e2;
-}
-
-
-/*
- * Row highlighting example
- */
-.ex_highlight #example tbody tr.even:hover, #example tbody tr.even td.highlighted {
-	background-color: #ECFFB3;
-}
-
-.ex_highlight #example tbody tr.odd:hover, #example tbody tr.odd td.highlighted {
-	background-color: #E6FF99;
-}
-
-.ex_highlight_row #example tr.even:hover {
-	background-color: #ECFFB3;
-}
-
-.ex_highlight_row #example tr.even:hover td.sorting_1 {
-	background-color: #DDFF75;
-}
-
-.ex_highlight_row #example tr.even:hover td.sorting_2 {
-	background-color: #E7FF9E;
-}
-
-.ex_highlight_row #example tr.even:hover td.sorting_3 {
-	background-color: #E2FF89;
-}
-
-.ex_highlight_row #example tr.odd:hover {
-	background-color: #E6FF99;
-}
-
-.ex_highlight_row #example tr.odd:hover td.sorting_1 {
-	background-color: #D6FF5C;
-}
-
-.ex_highlight_row #example tr.odd:hover td.sorting_2 {
-	background-color: #E0FF84;
-}
-
-.ex_highlight_row #example tr.odd:hover td.sorting_3 {
-	background-color: #DBFF70;
-}
-
-
-/*
- * KeyTable
- */
-table.KeyTable td {
-	border: 3px solid transparent;
-}
-
-table.KeyTable td.focus {
-	border: 3px solid #3366FF;
-}
-
-table.display tr.gradeA {
-	background-color: #eeffee;
-}
-
-table.display tr.gradeC {
-	background-color: #ddddff;
-}
-
-table.display tr.gradeX {
-	background-color: #ffdddd;
-}
-
-table.display tr.gradeU {
-	background-color: #ddd;
-}
-
-div.box {
-	height: 100px;
-	padding: 10px;
-	overflow: auto;
-	border: 1px solid #8080FF;
-	background-color: #E5E5FF;
-}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d36012b6/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/webapps/static/dt-1.10.7/css/jui-dt.css
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/webapps/static/dt-1.10.7/css/jui-dt.css b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/webapps/static/dt-1.10.7/css/jui-dt.css
deleted file mode 100644
index de8faea..0000000
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/webapps/static/dt-1.10.7/css/jui-dt.css
+++ /dev/null
@@ -1,322 +0,0 @@
-/*
- *  File:         demo_table_jui.css
- *  CVS:          $Id$
- *  Description:  CSS descriptions for DataTables demo pages
- *  Author:       Allan Jardine
- *  Created:      Tue May 12 06:47:22 BST 2009
- *  Modified:     $Date$ by $Author$
- *  Language:     CSS
- *  Project:      DataTables
- *
- *  Copyright 2009 Allan Jardine. All Rights Reserved.
- *
- * ***************************************************************************
- * DESCRIPTION
- *
- * The styles given here are suitable for the demos that are used with the standard DataTables
- * distribution (see www.datatables.net). You will most likely wish to modify these styles to
- * meet the layout requirements of your site.
- *
- * Common issues:
- *   'full_numbers' pagination - I use an extra selector on the body tag to ensure that there is
- *     no conflict between the two pagination types. If you want to use full_numbers pagination
- *     ensure that you either have "example_alt_pagination" as a body class name, or better yet,
- *     modify that selector.
- *   Note that the path used for Images is relative. All images are by default located in
- *     ../images/ - relative to this CSS file.
- */
-
-
-/*
- * jQuery UI specific styling
- */
-
-.paging_two_button .ui-button {
-	float: left;
-	cursor: pointer;
-	* cursor: hand;
-}
-
-.paging_full_numbers .ui-button {
-	padding: 2px 6px;
-	margin: 0;
-	cursor: pointer;
-	* cursor: hand;
-}
-
-.ui-buttonset .ui-button {
-	margin-right: -0.1em !important;
-}
-
-.paging_full_numbers {
-	width: 350px !important;
-}
-
-.ui-toolbar {
-	padding: 5px;
-}
-
-.dataTables_paginate {
-	width: auto;
-}
-
-.dataTables_info {
-	padding-top: 3px;
-}
-
-table.display thead th {
-	padding: 3px 0px 3px 10px;
-	cursor: pointer;
-	* cursor: hand;
-}
-
-div.dataTables_wrapper .ui-widget-header {
-	font-weight: normal;
-}
-
-
-/*
- * Sort arrow icon positioning
- */
-table.display thead th div.DataTables_sort_wrapper {
-	position: relative;
-	padding-right: 20px;
-	padding-right: 20px;
-}
-
-table.display thead th div.DataTables_sort_wrapper span {
-	position: absolute;
-	top: 50%;
-	margin-top: -8px;
-	right: 0;
-}
-
-
-
-
-/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
- *
- * Everything below this line is the same as demo_table.css. This file is
- * required for 'cleanliness' of the markup
- *
- * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
-
-
-
-/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
- * DataTables features
- */
-
-.dataTables_wrapper {
-	position: relative;
-	min-height: 35px;
-	_height: 35px;
-	clear: both;
-}
-
-.dataTables_processing {
-	position: absolute;
-	top: 0px;
-	left: 50%;
-	width: 250px;
-	margin-left: -125px;
-	border: 1px solid #ddd;
-	text-align: center;
-	color: #999;
-	font-size: 11px;
-	padding: 2px 0;
-}
-
-.dataTables_length {
-	width: 40%;
-	float: left;
-}
-
-.dataTables_filter {
-	width: 50%;
-	float: right;
-	text-align: right;
-}
-
-.dataTables_info {
-	width: 50%;
-	float: left;
-}
-
-.dataTables_paginate {
-	float: right;
-	text-align: right;
-}
-
-/* Pagination nested */
-.paginate_disabled_previous, .paginate_enabled_previous, .paginate_disabled_next, .paginate_enabled_next {
-	height: 19px;
-	width: 19px;
-	margin-left: 3px;
-	float: left;
-}
-
-.paginate_disabled_previous {
-	background-image: url('../images/back_disabled.jpg');
-}
-
-.paginate_enabled_previous {
-	background-image: url('../images/back_enabled.jpg');
-}
-
-.paginate_disabled_next {
-	background-image: url('../images/forward_disabled.jpg');
-}
-
-.paginate_enabled_next {
-	background-image: url('../images/forward_enabled.jpg');
-}
-
-
-
-/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
- * DataTables display
- */
-table.display {
-	margin: 0 auto;
-	width: 100%;
-	clear: both;
-	border-collapse: collapse;
-}
-
-table.display tfoot th {
-	padding: 3px 0px 3px 10px;
-	font-weight: bold;
-	font-weight: normal;
-}
-
-table.display tr.heading2 td {
-	border-bottom: 1px solid #aaa;
-}
-
-table.display td {
-	padding: 3px 10px;
-}
-
-table.display td.center {
-	text-align: center;
-}
-
-
-
-/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
- * DataTables sorting
- */
-
-.sorting_asc {
-	background: url('../images/sort_asc.jpg') no-repeat center right;
-}
-
-.sorting_desc {
-	background: url('../images/sort_desc.jpg') no-repeat center right;
-}
-
-.sorting {
-	background: url('../images/sort_both.jpg') no-repeat center right;
-}
-
-
-/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
- * Misc
- */
-.dataTables_scroll {
-	clear: both;
-}
-
-.top, .bottom {
-	padding: 15px;
-	background-color: #F5F5F5;
-	border: 1px solid #CCCCCC;
-}
-
-.top .dataTables_info {
-	float: none;
-}
-
-.clear {
-	clear: both;
-}
-
-.dataTables_empty {
-	text-align: center;
-}
-
-tfoot input {
-	margin: 0.5em 0;
-	width: 100%;
-	color: #444;
-}
-
-tfoot input.search_init {
-	color: #999;
-}
-
-td.group {
-	background-color: #d1cfd0;
-	border-bottom: 2px solid #A19B9E;
-	border-top: 2px solid #A19B9E;
-}
-
-td.details {
-	background-color: #d1cfd0;
-	border: 2px solid #A19B9E;
-}
-
-
-.example_alt_pagination div.dataTables_info {
-	width: 40%;
-}
-
-.paging_full_numbers span.paginate_button,
- 	.paging_full_numbers span.paginate_active {
-	border: 1px solid #aaa;
-	-webkit-border-radius: 5px;
-	-moz-border-radius: 5px;
-	padding: 2px 5px;
-	margin: 0 3px;
-	cursor: pointer;
-	*cursor: hand;
-}
-
-.paging_full_numbers span.paginate_button {
-	background-color: #ddd;
-}
-
-.paging_full_numbers span.paginate_button:hover {
-	background-color: #ccc;
-}
-
-.paging_full_numbers span.paginate_active {
-	background-color: #99B3FF;
-}
-
-table.display tr.even.row_selected td {
-	background-color: #B0BED9;
-}
-
-table.display tr.odd.row_selected td {
-	background-color: #9FAFD1;
-}
-
-/* Striping */
-tr.odd { background: rgba(255, 255, 255, 0.1); }
-tr.even { background: rgba(0, 0, 255, 0.05); }
-
-
-/*
- * Sorting classes for columns
- */
-tr.odd td.sorting_1 { background: rgba(0, 0, 0, 0.03); }
-tr.odd td.sorting_2 { background: rgba(0, 0, 0, 0.02); }
-tr.odd td.sorting_3 { background: rgba(0, 0, 0, 0.02); }
-tr.even td.sorting_1 { background: rgba(0, 0, 0, 0.08); }
-tr.even td.sorting_2 { background: rgba(0, 0, 0, 0.06); }
-tr.even td.sorting_3 { background: rgba(0, 0, 0, 0.06); }
-
-.css_left { position: relative; float: left; }
-.css_right { position: relative; float: right; }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d36012b6/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/webapps/static/dt-1.10.7/images/Sorting icons.psd
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/webapps/static/dt-1.10.7/images/Sorting icons.psd b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/webapps/static/dt-1.10.7/images/Sorting icons.psd
deleted file mode 100644
index 53b2e06..0000000
Binary files a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/webapps/static/dt-1.10.7/images/Sorting icons.psd and /dev/null differ

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d36012b6/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/webapps/static/dt-1.10.7/images/back_disabled.jpg
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/webapps/static/dt-1.10.7/images/back_disabled.jpg b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/webapps/static/dt-1.10.7/images/back_disabled.jpg
deleted file mode 100644
index 1e73a54..0000000
Binary files a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/webapps/static/dt-1.10.7/images/back_disabled.jpg and /dev/null differ

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d36012b6/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/webapps/static/dt-1.10.7/images/back_enabled.jpg
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/webapps/static/dt-1.10.7/images/back_enabled.jpg b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/webapps/static/dt-1.10.7/images/back_enabled.jpg
deleted file mode 100644
index a6d764c..0000000
Binary files a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/webapps/static/dt-1.10.7/images/back_enabled.jpg and /dev/null differ

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d36012b6/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/webapps/static/dt-1.10.7/images/favicon.ico
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/webapps/static/dt-1.10.7/images/favicon.ico b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/webapps/static/dt-1.10.7/images/favicon.ico
deleted file mode 100644
index 6eeaa2a..0000000
Binary files a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/webapps/static/dt-1.10.7/images/favicon.ico and /dev/null differ

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d36012b6/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/webapps/static/dt-1.10.7/images/forward_disabled.jpg
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/webapps/static/dt-1.10.7/images/forward_disabled.jpg b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/webapps/static/dt-1.10.7/images/forward_disabled.jpg
deleted file mode 100644
index 28a9dc5..0000000
Binary files a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/webapps/static/dt-1.10.7/images/forward_disabled.jpg and /dev/null differ

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d36012b6/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/webapps/static/dt-1.10.7/images/forward_enabled.jpg
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/webapps/static/dt-1.10.7/images/forward_enabled.jpg b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/webapps/static/dt-1.10.7/images/forward_enabled.jpg
deleted file mode 100644
index 598c075..0000000
Binary files a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/webapps/static/dt-1.10.7/images/forward_enabled.jpg and /dev/null differ

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d36012b6/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/webapps/static/dt-1.10.7/images/sort_asc.png
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/webapps/static/dt-1.10.7/images/sort_asc.png b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/webapps/static/dt-1.10.7/images/sort_asc.png
deleted file mode 100644
index a56d0e2..0000000
Binary files a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/webapps/static/dt-1.10.7/images/sort_asc.png and /dev/null differ

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d36012b6/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/webapps/static/dt-1.10.7/images/sort_asc_disabled.png
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/webapps/static/dt-1.10.7/images/sort_asc_disabled.png b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/webapps/static/dt-1.10.7/images/sort_asc_disabled.png
deleted file mode 100644
index b7e621e..0000000
Binary files a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/webapps/static/dt-1.10.7/images/sort_asc_disabled.png and /dev/null differ

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d36012b6/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/webapps/static/dt-1.10.7/images/sort_both.png
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/webapps/static/dt-1.10.7/images/sort_both.png b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/webapps/static/dt-1.10.7/images/sort_both.png
deleted file mode 100644
index 839ac4b..0000000
Binary files a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/webapps/static/dt-1.10.7/images/sort_both.png and /dev/null differ

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d36012b6/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/webapps/static/dt-1.10.7/images/sort_desc.png
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/webapps/static/dt-1.10.7/images/sort_desc.png b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/webapps/static/dt-1.10.7/images/sort_desc.png
deleted file mode 100644
index 90b2951..0000000
Binary files a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/webapps/static/dt-1.10.7/images/sort_desc.png and /dev/null differ

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d36012b6/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/webapps/static/dt-1.10.7/images/sort_desc_disabled.png
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/webapps/static/dt-1.10.7/images/sort_desc_disabled.png b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/webapps/static/dt-1.10.7/images/sort_desc_disabled.png
deleted file mode 100644
index 2409653..0000000
Binary files a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/webapps/static/dt-1.10.7/images/sort_desc_disabled.png and /dev/null differ



[42/50] [abbrv] hadoop git commit: HDDS-755. ContainerInfo and ContainerReplica protobuf changes. Contributed by Nanda kumar.

Posted by su...@apache.org.
HDDS-755. ContainerInfo and ContainerReplica protobuf changes.
Contributed by Nanda kumar.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/e4f22b08
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/e4f22b08
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/e4f22b08

Branch: refs/heads/HDFS-12943
Commit: e4f22b08e0d1074c315680ba20d8666be21a25db
Parents: 773f0d1
Author: Nanda kumar <na...@apache.org>
Authored: Wed Oct 31 10:29:35 2018 +0530
Committer: Nanda kumar <na...@apache.org>
Committed: Wed Oct 31 10:29:35 2018 +0530

----------------------------------------------------------------------
 .../scm/client/ContainerOperationClient.java    |  6 +--
 .../hadoop/hdds/scm/client/ScmClient.java       |  6 +--
 .../hdds/scm/container/ContainerInfo.java       |  8 +--
 ...rLocationProtocolClientSideTranslatorPB.java |  2 +-
 .../main/proto/DatanodeContainerProtocol.proto  | 27 +++++-----
 .../StorageContainerLocationProtocol.proto      |  4 +-
 hadoop-hdds/common/src/main/proto/hdds.proto    |  4 +-
 .../container/common/impl/ContainerData.java    | 22 ++++----
 .../common/impl/ContainerDataYaml.java          |  6 +--
 .../container/common/impl/HddsDispatcher.java   | 13 ++---
 .../container/common/interfaces/Container.java  |  9 ++--
 .../container/keyvalue/KeyValueContainer.java   | 31 ++++++-----
 .../keyvalue/KeyValueContainerData.java         |  9 ++--
 .../container/keyvalue/KeyValueHandler.java     | 16 +++---
 .../StorageContainerDatanodeProtocol.proto      | 57 +++++++++++---------
 .../ozone/container/common/ScmTestMock.java     | 14 ++---
 .../common/TestKeyValueContainerData.java       |  6 +--
 .../common/impl/TestContainerDataYaml.java      |  8 +--
 .../container/common/impl/TestContainerSet.java | 16 +++---
 .../keyvalue/TestKeyValueContainer.java         | 15 +++---
 .../container/keyvalue/TestKeyValueHandler.java |  2 +-
 .../scm/container/ContainerReportHandler.java   |  2 +-
 .../hdds/scm/container/SCMContainerManager.java | 23 ++++----
 .../apache/hadoop/hdds/scm/HddsTestUtils.java   |  2 +-
 .../org/apache/hadoop/hdds/scm/TestUtils.java   | 20 +++----
 .../container/TestContainerReportHandler.java   |  6 ++-
 .../scm/container/TestSCMContainerManager.java  | 22 ++++----
 .../hdds/scm/cli/container/InfoSubcommand.java  |  8 ++-
 .../rpc/TestCloseContainerHandlingByClient.java |  2 +-
 .../rpc/TestContainerStateMachineFailures.java  |  2 +-
 .../common/impl/TestContainerPersistence.java   |  4 +-
 .../commandhandler/TestBlockDeletion.java       | 10 ++--
 .../org/apache/hadoop/ozone/scm/cli/SQLCLI.java |  2 +-
 33 files changed, 199 insertions(+), 185 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/e4f22b08/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/client/ContainerOperationClient.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/client/ContainerOperationClient.java b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/client/ContainerOperationClient.java
index 25a71df..8c96164 100644
--- a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/client/ContainerOperationClient.java
+++ b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/client/ContainerOperationClient.java
@@ -27,7 +27,7 @@ import org.apache.hadoop.hdds.scm.protocolPB
     .StorageContainerLocationProtocolClientSideTranslatorPB;
 import org.apache.hadoop.hdds.scm.storage.ContainerProtocolCalls;
 import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos
-    .ContainerData;
+    .ContainerDataProto;
 import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos
     .ReadContainerResponseProto;
 import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
@@ -309,7 +309,7 @@ public class ContainerOperationClient implements ScmClient {
    * @throws IOException
    */
   @Override
-  public ContainerData readContainer(long containerID,
+  public ContainerDataProto readContainer(long containerID,
       Pipeline pipeline) throws IOException {
     XceiverClientSpi client = null;
     try {
@@ -337,7 +337,7 @@ public class ContainerOperationClient implements ScmClient {
    * @throws IOException
    */
   @Override
-  public ContainerData readContainer(long containerID) throws IOException {
+  public ContainerDataProto readContainer(long containerID) throws IOException {
     ContainerWithPipeline info = getContainerWithPipeline(containerID);
     return readContainer(containerID, info.getPipeline());
   }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/e4f22b08/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/client/ScmClient.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/client/ScmClient.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/client/ScmClient.java
index 3d5d56c..b3c0c94 100644
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/client/ScmClient.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/client/ScmClient.java
@@ -22,7 +22,7 @@ import org.apache.hadoop.hdds.scm.container.common.helpers.ContainerWithPipeline
 import org.apache.hadoop.hdds.scm.container.ContainerInfo;
 import org.apache.hadoop.hdds.scm.pipeline.Pipeline;
 import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos
-    .ContainerData;
+    .ContainerDataProto;
 import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
 
 import java.io.Closeable;
@@ -119,7 +119,7 @@ public interface ScmClient extends Closeable {
    * @return ContainerInfo
    * @throws IOException
    */
-  ContainerData readContainer(long containerID, Pipeline pipeline)
+  ContainerDataProto readContainer(long containerID, Pipeline pipeline)
       throws IOException;
 
   /**
@@ -128,7 +128,7 @@ public interface ScmClient extends Closeable {
    * @return ContainerInfo
    * @throws IOException
    */
-  ContainerData readContainer(long containerID)
+  ContainerDataProto readContainer(long containerID)
       throws IOException;
 
   /**

http://git-wip-us.apache.org/repos/asf/hadoop/blob/e4f22b08/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerInfo.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerInfo.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerInfo.java
index 64407a7..5a9484a 100644
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerInfo.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerInfo.java
@@ -114,7 +114,7 @@ public class ContainerInfo implements Comparator<ContainerInfo>,
   public ContainerInfo() {
   }
 
-  public static ContainerInfo fromProtobuf(HddsProtos.SCMContainerInfo info) {
+  public static ContainerInfo fromProtobuf(HddsProtos.ContainerInfoProto info) {
     ContainerInfo.Builder builder = new ContainerInfo.Builder();
     return builder.setPipelineID(
         PipelineID.getFromProtobuf(info.getPipelineID()))
@@ -191,9 +191,9 @@ public class ContainerInfo implements Comparator<ContainerInfo>,
     lastUsed = Time.monotonicNow();
   }
 
-  public HddsProtos.SCMContainerInfo getProtobuf() {
-    HddsProtos.SCMContainerInfo.Builder builder =
-        HddsProtos.SCMContainerInfo.newBuilder();
+  public HddsProtos.ContainerInfoProto getProtobuf() {
+    HddsProtos.ContainerInfoProto.Builder builder =
+        HddsProtos.ContainerInfoProto.newBuilder();
     Preconditions.checkState(containerID > 0);
     return builder.setContainerID(getContainerID())
         .setUsedBytes(getUsedBytes())

http://git-wip-us.apache.org/repos/asf/hadoop/blob/e4f22b08/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/protocolPB/StorageContainerLocationProtocolClientSideTranslatorPB.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/protocolPB/StorageContainerLocationProtocolClientSideTranslatorPB.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/protocolPB/StorageContainerLocationProtocolClientSideTranslatorPB.java
index d19efc1..3a4fa46 100644
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/protocolPB/StorageContainerLocationProtocolClientSideTranslatorPB.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/protocolPB/StorageContainerLocationProtocolClientSideTranslatorPB.java
@@ -181,7 +181,7 @@ public final class StorageContainerLocationProtocolClientSideTranslatorPB
       SCMListContainerResponseProto response =
           rpcProxy.listContainer(NULL_RPC_CONTROLLER, request);
       List<ContainerInfo> containerList = new ArrayList<>();
-      for (HddsProtos.SCMContainerInfo containerInfoProto : response
+      for (HddsProtos.ContainerInfoProto containerInfoProto : response
           .getContainersList()) {
         containerList.add(ContainerInfo.fromProtobuf(containerInfoProto));
       }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/e4f22b08/hadoop-hdds/common/src/main/proto/DatanodeContainerProtocol.proto
----------------------------------------------------------------------
diff --git a/hadoop-hdds/common/src/main/proto/DatanodeContainerProtocol.proto b/hadoop-hdds/common/src/main/proto/DatanodeContainerProtocol.proto
index 318ec09..1700e23 100644
--- a/hadoop-hdds/common/src/main/proto/DatanodeContainerProtocol.proto
+++ b/hadoop-hdds/common/src/main/proto/DatanodeContainerProtocol.proto
@@ -158,17 +158,6 @@ message KeyValue {
   optional string value = 2;
 }
 
-/**
- * Lifecycle states of a container in Datanode.
- */
-enum ContainerLifeCycleState {
-    OPEN = 1;
-    CLOSING = 2;
-    CLOSED = 3;
-    UNHEALTHY = 4;
-    INVALID = 5;
-}
-
 message ContainerCommandRequestProto {
   required   Type cmdType = 1; // Type of the command
 
@@ -235,14 +224,22 @@ message ContainerCommandResponseProto {
   optional GetCommittedBlockLengthResponseProto getCommittedBlockLength = 21;
 }
 
-message ContainerData {
+message ContainerDataProto {
+  enum State {
+    OPEN = 1;
+    CLOSING = 2;
+    CLOSED = 3;
+    QUASI_CLOSED = 4;
+    UNHEALTHY = 5;
+    INVALID = 6;
+  }
   required int64 containerID = 1;
   repeated KeyValue metadata = 2;
   optional string containerPath = 4;
   optional int64 bytesUsed = 6;
   optional int64 size = 7;
   optional int64 blockCount = 8;
-  optional ContainerLifeCycleState state = 9 [default = OPEN];
+  optional State state = 9 [default = OPEN];
   optional ContainerType containerType = 10 [default = KeyValueContainer];
 }
 
@@ -264,7 +261,7 @@ message  ReadContainerRequestProto {
 }
 
 message  ReadContainerResponseProto {
-  optional ContainerData containerData = 1;
+  optional ContainerDataProto containerData = 1;
 }
 
 message  UpdateContainerRequestProto {
@@ -287,7 +284,7 @@ message  ListContainerRequestProto {
 }
 
 message  ListContainerResponseProto {
-  repeated ContainerData containerData = 1;
+  repeated ContainerDataProto containerData = 1;
 }
 
 message CloseContainerRequestProto {

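A small call-site sketch of the rename defined in the hunk above (illustrative only; it assumes the regenerated ContainerProtos classes). Datanode container state is now the nested ContainerDataProto.State enum instead of the removed top-level ContainerLifeCycleState.

import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.ContainerDataProto;

public class ContainerStateSketch {
  static boolean isOpen(ContainerDataProto.State state) {
    return state == ContainerDataProto.State.OPEN;
  }

  public static void main(String[] args) {
    System.out.println(isOpen(ContainerDataProto.State.OPEN));         // true
    System.out.println(isOpen(ContainerDataProto.State.QUASI_CLOSED)); // false
  }
}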
http://git-wip-us.apache.org/repos/asf/hadoop/blob/e4f22b08/hadoop-hdds/common/src/main/proto/StorageContainerLocationProtocol.proto
----------------------------------------------------------------------
diff --git a/hadoop-hdds/common/src/main/proto/StorageContainerLocationProtocol.proto b/hadoop-hdds/common/src/main/proto/StorageContainerLocationProtocol.proto
index 49d1975..71190ac 100644
--- a/hadoop-hdds/common/src/main/proto/StorageContainerLocationProtocol.proto
+++ b/hadoop-hdds/common/src/main/proto/StorageContainerLocationProtocol.proto
@@ -60,7 +60,7 @@ message GetContainerRequestProto {
 }
 
 message GetContainerResponseProto {
-  required SCMContainerInfo containerInfo = 1;
+  required ContainerInfoProto containerInfo = 1;
 }
 
 message GetContainerWithPipelineRequestProto {
@@ -77,7 +77,7 @@ message SCMListContainerRequestProto {
  }
 
 message SCMListContainerResponseProto {
-  repeated SCMContainerInfo containers = 1;
+  repeated ContainerInfoProto containers = 1;
 }
 
 message SCMDeleteContainerRequestProto {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/e4f22b08/hadoop-hdds/common/src/main/proto/hdds.proto
----------------------------------------------------------------------
diff --git a/hadoop-hdds/common/src/main/proto/hdds.proto b/hadoop-hdds/common/src/main/proto/hdds.proto
index 62b4833..a0c6f16 100644
--- a/hadoop-hdds/common/src/main/proto/hdds.proto
+++ b/hadoop-hdds/common/src/main/proto/hdds.proto
@@ -131,7 +131,7 @@ enum LifeCycleEvent {
     CLEANUP = 8;
 }
 
-message SCMContainerInfo {
+message ContainerInfoProto {
     required int64 containerID = 1;
     required LifeCycleState state = 2;
     optional PipelineID pipelineID = 3;
@@ -145,7 +145,7 @@ message SCMContainerInfo {
 }
 
 message ContainerWithPipeline {
-  required SCMContainerInfo containerInfo = 1;
+  required ContainerInfoProto containerInfo = 1;
   required Pipeline pipeline = 2;
 }
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/e4f22b08/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/ContainerData.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/ContainerData.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/ContainerData.java
index efea20b..ad199f0 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/ContainerData.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/ContainerData.java
@@ -25,8 +25,8 @@ import java.util.List;
 import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos;
 import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.
     ContainerType;
-import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.
-    ContainerLifeCycleState;
+import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos
+    .ContainerDataProto;
 import org.apache.hadoop.ozone.container.common.helpers.ContainerUtils;
 import org.apache.hadoop.ozone.container.common.volume.HddsVolume;
 
@@ -65,7 +65,7 @@ public abstract class ContainerData {
   private final Map<String, String> metadata;
 
   // State of the Container
-  private ContainerLifeCycleState state;
+  private ContainerDataProto.State state;
 
   private final long maxSize;
 
@@ -121,7 +121,7 @@ public abstract class ContainerData {
     this.containerID = containerId;
     this.layOutVersion = layOutVersion;
     this.metadata = new TreeMap<>();
-    this.state = ContainerLifeCycleState.OPEN;
+    this.state = ContainerDataProto.State.OPEN;
     this.readCount = new AtomicLong(0L);
     this.readBytes =  new AtomicLong(0L);
     this.writeCount =  new AtomicLong(0L);
@@ -158,7 +158,7 @@ public abstract class ContainerData {
    * Returns the state of the container.
    * @return ContainerLifeCycleState
    */
-  public synchronized ContainerLifeCycleState getState() {
+  public synchronized ContainerDataProto.State getState() {
     return state;
   }
 
@@ -166,7 +166,7 @@ public abstract class ContainerData {
    * Set the state of the container.
    * @param state
    */
-  public synchronized void setState(ContainerLifeCycleState state) {
+  public synchronized void setState(ContainerDataProto.State state) {
     this.state = state;
   }
 
@@ -222,7 +222,7 @@ public abstract class ContainerData {
    * @return - boolean
    */
   public synchronized  boolean isOpen() {
-    return ContainerLifeCycleState.OPEN == state;
+    return ContainerDataProto.State.OPEN == state;
   }
 
   /**
@@ -230,7 +230,7 @@ public abstract class ContainerData {
    * @return - boolean
    */
   public synchronized boolean isValid() {
-    return !(ContainerLifeCycleState.INVALID == state);
+    return !(ContainerDataProto.State.INVALID == state);
   }
 
   /**
@@ -238,14 +238,14 @@ public abstract class ContainerData {
    * @return - boolean
    */
   public synchronized  boolean isClosed() {
-    return ContainerLifeCycleState.CLOSED == state;
+    return ContainerDataProto.State.CLOSED == state;
   }
 
   /**
    * Marks this container as closed.
    */
   public synchronized void closeContainer() {
-    setState(ContainerLifeCycleState.CLOSED);
+    setState(ContainerDataProto.State.CLOSED);
   }
 
   /**
@@ -431,5 +431,5 @@ public abstract class ContainerData {
    *
    * @return Protocol Buffer Message
    */
-  public abstract ContainerProtos.ContainerData getProtoBufMessage();
+  public abstract ContainerProtos.ContainerDataProto getProtoBufMessage();
 }
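
For illustration only (not part of this patch): the renamed state API on
ContainerData can be exercised the same way the unit tests later in this patch
do. A minimal sketch, assuming the KeyValueContainerData(long, long)
constructor those tests use; the class name ContainerStateExample is
hypothetical:

    import org.apache.hadoop.conf.StorageUnit;
    import org.apache.hadoop.ozone.container.keyvalue.KeyValueContainerData;

    public class ContainerStateExample {
      public static void main(String[] args) {
        KeyValueContainerData data =
            new KeyValueContainerData(100L, (long) StorageUnit.GB.toBytes(5));
        // New containers start out as ContainerDataProto.State.OPEN.
        System.out.println("open?   " + data.isOpen());
        data.closeContainer();   // moves the state to ContainerDataProto.State.CLOSED
        System.out.println("closed? " + data.isClosed());
      }
    }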

http://git-wip-us.apache.org/repos/asf/hadoop/blob/e4f22b08/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/ContainerDataYaml.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/ContainerDataYaml.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/ContainerDataYaml.java
index 65262d4..af705c4 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/ContainerDataYaml.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/ContainerDataYaml.java
@@ -253,13 +253,13 @@ public final class ContainerDataYaml {
         String state = (String) nodes.get(OzoneConsts.STATE);
         switch (state) {
         case "OPEN":
-          kvData.setState(ContainerProtos.ContainerLifeCycleState.OPEN);
+          kvData.setState(ContainerProtos.ContainerDataProto.State.OPEN);
           break;
         case "CLOSING":
-          kvData.setState(ContainerProtos.ContainerLifeCycleState.CLOSING);
+          kvData.setState(ContainerProtos.ContainerDataProto.State.CLOSING);
           break;
         case "CLOSED":
-          kvData.setState(ContainerProtos.ContainerLifeCycleState.CLOSED);
+          kvData.setState(ContainerProtos.ContainerDataProto.State.CLOSED);
           break;
         default:
           throw new IllegalStateException("Unexpected " +
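
A side note on the switch above, for illustration only (not part of this
patch): because protoc emits real Java enums, the string-to-state translation
could also go through Enum.valueOf, at the cost of accepting every constant of
the enum (including QUASI_CLOSED and UNHEALTHY) rather than only the three
states the YAML reader currently allows. A hedged sketch with a hypothetical
helper class:

    import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.ContainerDataProto;

    public final class StateParseExample {
      private StateParseExample() { }

      // Hypothetical helper, not in the patch: map the YAML "state" string to
      // the generated enum, rejecting anything that is not a known constant.
      public static ContainerDataProto.State parse(String state) {
        try {
          return ContainerDataProto.State.valueOf(state);
        } catch (IllegalArgumentException e) {
          throw new IllegalStateException("Unexpected container state: " + state, e);
        }
      }

      public static void main(String[] args) {
        System.out.println(parse("CLOSED"));
      }
    }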

http://git-wip-us.apache.org/repos/asf/hadoop/blob/e4f22b08/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/HddsDispatcher.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/HddsDispatcher.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/HddsDispatcher.java
index 1849841..6ab5b28 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/HddsDispatcher.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/HddsDispatcher.java
@@ -24,6 +24,8 @@ import com.google.common.collect.Maps;
 import org.apache.hadoop.hdds.HddsConfigKeys;
 import org.apache.hadoop.hdds.HddsUtils;
 import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos;
+import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos
+    .ContainerDataProto;
 import org.apache.hadoop.hdds.protocol.proto
     .StorageContainerDatanodeProtocolProtos.ContainerAction;
 import org.apache.hadoop.hdds.scm.container.common.helpers.StorageContainerException;
@@ -40,8 +42,6 @@ import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos
     .ContainerCommandResponseProto;
 import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos
     .ContainerType;
-import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos
-    .ContainerLifeCycleState;
 import org.apache.hadoop.ozone.container.common.interfaces.ContainerDispatcher;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
@@ -156,9 +156,9 @@ public class HddsDispatcher implements ContainerDispatcher {
         // which has failed, so the container is marked unhealthy right here.
         // Once container is marked unhealthy, all the subsequent write
         // transactions will fail with UNHEALTHY_CONTAINER exception.
-        if (container.getContainerState() == ContainerLifeCycleState.OPEN) {
+        if (container.getContainerState() == ContainerDataProto.State.OPEN) {
           container.getContainerData()
-              .setState(ContainerLifeCycleState.UNHEALTHY);
+              .setState(ContainerDataProto.State.UNHEALTHY);
           sendCloseContainerActionIfNeeded(container);
         }
       }
@@ -191,7 +191,7 @@ public class HddsDispatcher implements ContainerDispatcher {
 
   private boolean isContainerFull(Container container) {
     boolean isOpen = Optional.ofNullable(container)
-        .map(cont -> cont.getContainerState() == ContainerLifeCycleState.OPEN)
+        .map(cont -> cont.getContainerState() == ContainerDataProto.State.OPEN)
         .orElse(Boolean.FALSE);
     if (isOpen) {
       ContainerData containerData = container.getContainerData();
@@ -205,7 +205,8 @@ public class HddsDispatcher implements ContainerDispatcher {
 
   private boolean isContainerUnhealthy(Container container) {
     return Optional.ofNullable(container).map(
-        cont -> (cont.getContainerState() == ContainerLifeCycleState.UNHEALTHY))
+        cont -> (cont.getContainerState() ==
+            ContainerDataProto.State.UNHEALTHY))
         .orElse(Boolean.FALSE);
   }
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/e4f22b08/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/interfaces/Container.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/interfaces/Container.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/interfaces/Container.java
index dbef74c..65147cc 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/interfaces/Container.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/interfaces/Container.java
@@ -25,11 +25,10 @@ import java.io.OutputStream;
 import java.util.Map;
 
 import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos;
-import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos
-    .ContainerLifeCycleState;
+import org.apache.hadoop.hdds.protocol.proto
+    .StorageContainerDatanodeProtocolProtos.ContainerReplicaProto;
 import org.apache.hadoop.hdds.scm.container.common.helpers
     .StorageContainerException;
-import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos;
 
 import org.apache.hadoop.hdfs.util.RwLock;
 import org.apache.hadoop.ozone.container.common.impl.ContainerData;
@@ -80,7 +79,7 @@ public interface Container<CONTAINERDATA extends ContainerData> extends RwLock {
    * @return ContainerLifeCycleState - Container State.
    * @throws StorageContainerException
    */
-  ContainerLifeCycleState getContainerState();
+  ContainerProtos.ContainerDataProto.State getContainerState();
 
   /**
    * Closes a open container, if it is already closed or does not exist a
@@ -130,7 +129,7 @@ public interface Container<CONTAINERDATA extends ContainerData> extends RwLock {
   /**
    * Returns containerReport for the container.
    */
-  StorageContainerDatanodeProtocolProtos.ContainerInfo getContainerReport()
+  ContainerReplicaProto getContainerReport()
       throws StorageContainerException;
 
   /**

http://git-wip-us.apache.org/repos/asf/hadoop/blob/e4f22b08/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueContainer.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueContainer.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueContainer.java
index 9a5c94c..b82c12f 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueContainer.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueContainer.java
@@ -30,13 +30,11 @@ import java.util.concurrent.locks.ReentrantReadWriteLock;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileAlreadyExistsException;
 import org.apache.hadoop.fs.FileUtil;
-import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos
-    .ContainerLifeCycleState;
+import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos;
 import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos
     .ContainerType;
-import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
 import org.apache.hadoop.hdds.protocol.proto
-    .StorageContainerDatanodeProtocolProtos;
+    .StorageContainerDatanodeProtocolProtos.ContainerReplicaProto;
 import org.apache.hadoop.hdds.scm.container.common.helpers
     .StorageContainerException;
 import org.apache.hadoop.io.nativeio.NativeIO;
@@ -284,7 +282,7 @@ public class KeyValueContainer implements Container<KeyValueContainerData> {
 
     } catch (StorageContainerException ex) {
       // Failed to update .container file. Reset the state to CLOSING
-      containerData.setState(ContainerLifeCycleState.CLOSING);
+      containerData.setState(ContainerProtos.ContainerDataProto.State.CLOSING);
       throw ex;
     } finally {
       writeUnlock();
@@ -309,7 +307,7 @@ public class KeyValueContainer implements Container<KeyValueContainerData> {
   }
 
   @Override
-  public ContainerLifeCycleState getContainerState() {
+  public ContainerProtos.ContainerDataProto.State getContainerState() {
     return containerData.getState();
   }
 
@@ -427,7 +425,8 @@ public class KeyValueContainer implements Container<KeyValueContainerData> {
   @Override
   public void exportContainerData(OutputStream destination,
       ContainerPacker<KeyValueContainerData> packer) throws IOException {
-    if (getContainerData().getState() != ContainerLifeCycleState.CLOSED) {
+    if (getContainerData().getState() !=
+        ContainerProtos.ContainerDataProto.State.CLOSED) {
       throw new IllegalStateException(
           "Only closed containers could be exported: ContainerId="
               + getContainerData().getContainerID());
@@ -518,10 +517,10 @@ public class KeyValueContainer implements Container<KeyValueContainerData> {
    * Returns KeyValueContainerReport for the KeyValueContainer.
    */
   @Override
-  public StorageContainerDatanodeProtocolProtos.ContainerInfo
-      getContainerReport() throws StorageContainerException{
-    StorageContainerDatanodeProtocolProtos.ContainerInfo.Builder ciBuilder =
-        StorageContainerDatanodeProtocolProtos.ContainerInfo.newBuilder();
+  public ContainerReplicaProto getContainerReport()
+      throws StorageContainerException {
+    ContainerReplicaProto.Builder ciBuilder =
+        ContainerReplicaProto.newBuilder();
     ciBuilder.setContainerID(containerData.getContainerID())
         .setReadCount(containerData.getReadCount())
         .setWriteCount(containerData.getWriteCount())
@@ -540,18 +539,18 @@ public class KeyValueContainer implements Container<KeyValueContainerData> {
    * @return LifeCycle State of the container in HddsProtos format
    * @throws StorageContainerException
    */
-  private HddsProtos.LifeCycleState getHddsState()
+  private ContainerReplicaProto.State getHddsState()
       throws StorageContainerException {
-    HddsProtos.LifeCycleState state;
+    ContainerReplicaProto.State state;
     switch (containerData.getState()) {
     case OPEN:
-      state = HddsProtos.LifeCycleState.OPEN;
+      state = ContainerReplicaProto.State.OPEN;
       break;
     case CLOSING:
-      state = HddsProtos.LifeCycleState.CLOSING;
+      state = ContainerReplicaProto.State.CLOSING;
       break;
     case CLOSED:
-      state = HddsProtos.LifeCycleState.CLOSED;
+      state = ContainerReplicaProto.State.CLOSED;
       break;
     default:
       throw new StorageContainerException("Invalid Container state found: " +
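
For illustration only (not part of this patch): the switch in getHddsState now
maps the datanode-local ContainerDataProto.State onto the report-side
ContainerReplicaProto.State. The same mapping could also be kept as a lookup
table; a minimal sketch with a hypothetical ReplicaStateMapping class (note the
patch's switch throws for unmapped states, while this sketch returns null):

    import java.util.EnumMap;
    import java.util.Map;

    import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.ContainerDataProto;
    import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.ContainerReplicaProto;

    public final class ReplicaStateMapping {
      private static final Map<ContainerDataProto.State, ContainerReplicaProto.State>
          MAPPING = new EnumMap<>(ContainerDataProto.State.class);

      static {
        MAPPING.put(ContainerDataProto.State.OPEN, ContainerReplicaProto.State.OPEN);
        MAPPING.put(ContainerDataProto.State.CLOSING, ContainerReplicaProto.State.CLOSING);
        MAPPING.put(ContainerDataProto.State.CLOSED, ContainerReplicaProto.State.CLOSED);
      }

      private ReplicaStateMapping() { }

      // Returns the report-side state, or null for local states this sketch
      // does not map.
      public static ContainerReplicaProto.State toReplicaState(
          ContainerDataProto.State local) {
        return MAPPING.get(local);
      }
    }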

http://git-wip-us.apache.org/repos/asf/hadoop/blob/e4f22b08/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueContainerData.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueContainerData.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueContainerData.java
index 9ea84c2..f114d34 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueContainerData.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueContainerData.java
@@ -24,6 +24,8 @@ import java.util.Collections;
 
 import org.apache.hadoop.conf.StorageSize;
 import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos;
+import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos
+    .ContainerDataProto;
 import org.apache.hadoop.hdds.scm.ScmConfigKeys;
 import org.apache.hadoop.ozone.OzoneConsts;
 import org.apache.hadoop.ozone.container.common.impl.ContainerData;
@@ -245,9 +247,8 @@ public class KeyValueContainerData extends ContainerData {
    *
    * @return Protocol Buffer Message
    */
-  public ContainerProtos.ContainerData getProtoBufMessage() {
-    ContainerProtos.ContainerData.Builder builder = ContainerProtos
-        .ContainerData.newBuilder();
+  public ContainerDataProto getProtoBufMessage() {
+    ContainerDataProto.Builder builder = ContainerDataProto.newBuilder();
     builder.setContainerID(this.getContainerID());
     builder.setContainerPath(this.getMetadataPath());
     builder.setState(this.getState());
@@ -282,7 +283,7 @@ public class KeyValueContainerData extends ContainerData {
    */
   @VisibleForTesting
   public static KeyValueContainerData getFromProtoBuf(
-      ContainerProtos.ContainerData protoData) throws IOException {
+      ContainerDataProto protoData) throws IOException {
     // TODO: Add containerMaxSize to ContainerProtos.ContainerData
     StorageSize storageSize = StorageSize.parse(
         ScmConfigKeys.OZONE_SCM_CONTAINER_SIZE_DEFAULT);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/e4f22b08/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueHandler.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueHandler.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueHandler.java
index b0bc08b..7aaa5e6 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueHandler.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueHandler.java
@@ -32,12 +32,12 @@ import org.apache.hadoop.conf.StorageUnit;
 import org.apache.hadoop.hdds.client.BlockID;
 import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos;
 import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos
+    .ContainerDataProto;
+import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos
     .ContainerCommandRequestProto;
 import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos
     .ContainerCommandResponseProto;
 import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos
-    .ContainerLifeCycleState;
-import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos
     .ContainerType;
 import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos
     .GetSmallFileRequestProto;
@@ -385,13 +385,13 @@ public class KeyValueHandler extends Handler {
     }
 
     long containerID = kvContainer.getContainerData().getContainerID();
-    ContainerLifeCycleState containerState = kvContainer.getContainerState();
+    ContainerDataProto.State containerState = kvContainer.getContainerState();
 
     try {
-      if (containerState == ContainerLifeCycleState.CLOSED) {
+      if (containerState == ContainerDataProto.State.CLOSED) {

         LOG.debug("Container {} is already closed.", containerID);
         return ContainerUtils.getSuccessResponse(request);
-      } else if (containerState == ContainerLifeCycleState.INVALID) {
+      } else if (containerState == ContainerDataProto.State.INVALID) {
         LOG.debug("Invalid container data. ContainerID: {}", containerID);
         throw new StorageContainerException("Invalid container data. " +
             "ContainerID: " + containerID, INVALID_CONTAINER_STATE);
@@ -401,7 +401,7 @@ public class KeyValueHandler extends Handler {
 
       // remove the container from open block map once, all the blocks
       // have been committed and the container is closed
-      kvData.setState(ContainerProtos.ContainerLifeCycleState.CLOSING);
+      kvData.setState(ContainerDataProto.State.CLOSING);
       commitPendingBlocks(kvContainer);
       kvContainer.close();
       // make sure the the container open keys from BlockMap gets removed
@@ -798,9 +798,9 @@ public class KeyValueHandler extends Handler {
   private void checkContainerOpen(KeyValueContainer kvContainer)
       throws StorageContainerException {
 
-    ContainerLifeCycleState containerState = kvContainer.getContainerState();
+    ContainerDataProto.State containerState = kvContainer.getContainerState();
 
-    if (containerState == ContainerLifeCycleState.OPEN) {
+    if (containerState == ContainerDataProto.State.OPEN) {
       return;
     } else {
       String msg = "Requested operation not allowed as ContainerState is " +

http://git-wip-us.apache.org/repos/asf/hadoop/blob/e4f22b08/hadoop-hdds/container-service/src/main/proto/StorageContainerDatanodeProtocol.proto
----------------------------------------------------------------------
diff --git a/hadoop-hdds/container-service/src/main/proto/StorageContainerDatanodeProtocol.proto b/hadoop-hdds/container-service/src/main/proto/StorageContainerDatanodeProtocol.proto
index 72d48a6..4ddb7b2 100644
--- a/hadoop-hdds/container-service/src/main/proto/StorageContainerDatanodeProtocol.proto
+++ b/hadoop-hdds/container-service/src/main/proto/StorageContainerDatanodeProtocol.proto
@@ -80,10 +80,11 @@ message SCMHeartbeatRequestProto {
   required DatanodeDetailsProto datanodeDetails = 1;
   optional NodeReportProto nodeReport = 2;
   optional ContainerReportsProto containerReport = 3;
-  repeated CommandStatusReportsProto commandStatusReports = 4;
-  optional ContainerActionsProto containerActions = 5;
-  optional PipelineActionsProto pipelineActions = 6;
-  optional PipelineReportsProto pipelineReports = 7;
+  optional IncrementalContainerReportProto incrementalContainerReport = 4;
+  repeated CommandStatusReportsProto commandStatusReports = 5;
+  optional ContainerActionsProto containerActions = 6;
+  optional PipelineActionsProto pipelineActions = 7;
+  optional PipelineReportsProto pipelineReports = 8;
 }
 
 /*
@@ -128,7 +129,34 @@ enum StorageTypeProto {
 }
 
 message ContainerReportsProto {
-  repeated ContainerInfo reports = 1;
+  repeated ContainerReplicaProto reports = 1;
+}
+
+message IncrementalContainerReportProto {
+  repeated ContainerReplicaProto report = 1;
+}
+
+message ContainerReplicaProto {
+  enum State {
+    OPEN = 1;
+    CLOSING = 2;
+    CLOSED = 3;
+    QUASI_CLOSED = 4;
+    UNHEALTHY = 5;
+    INVALID = 6;
+  }
+  required int64 containerID = 1;
+  required State state = 2;
+  optional int64 size = 3;
+  optional int64 used = 4;
+  optional int64 keyCount = 5;
+  optional int64 readCount = 6;
+  optional int64 writeCount = 7;
+  optional int64 readBytes = 8;
+  optional int64 writeBytes = 9;
+  optional string finalhash = 10;
+  optional int64 deleteTransactionId = 11;
+  optional uint64 blockCommitSequenceId = 12;
 }
 
 message CommandStatusReportsProto {
@@ -200,25 +228,6 @@ message PipelineAction {
   optional ClosePipelineInfo closePipeline = 2;
 }
 
-/**
-A container report contains the following information.
-*/
-message ContainerInfo {
-  required int64 containerID = 1;
-  optional int64 size = 2;
-  optional int64 used = 3;
-  optional int64 keyCount = 4;
-  // TODO: move the io count to separate message
-  optional int64 readCount = 5;
-  optional int64 writeCount = 6;
-  optional int64 readBytes = 7;
-  optional int64 writeBytes = 8;
-  optional string finalhash = 9;
-  optional hadoop.hdds.LifeCycleState state = 10;
-  optional int64 deleteTransactionId = 11;
-  optional uint64 blockCommitSequenceId = 12;
-}
-
 /*
  * These are commands returned by SCM for to the datanode to execute.
  */
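
For illustration only (not part of this patch): ContainerReplicaProto replaces
the per-datanode ContainerInfo report, and state is now a required field, so
every report has to carry one of the six states (including the new
QUASI_CLOSED). A minimal sketch, assuming the usual protoc-generated Java
bindings; the class name ReplicaReportExample is hypothetical:

    import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.ContainerReplicaProto;
    import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.ContainerReportsProto;

    public class ReplicaReportExample {
      public static void main(String[] args) {
        // containerID and state are the only required fields; the rest are optional.
        ContainerReplicaProto replica = ContainerReplicaProto.newBuilder()
            .setContainerID(1L)
            .setState(ContainerReplicaProto.State.QUASI_CLOSED)
            .setUsed(1024L)
            .build();
        ContainerReportsProto report = ContainerReportsProto.newBuilder()
            .addReports(replica)
            .build();
        System.out.println("reports: " + report.getReportsCount());
      }
    }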

http://git-wip-us.apache.org/repos/asf/hadoop/blob/e4f22b08/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/ScmTestMock.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/ScmTestMock.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/ScmTestMock.java
index 3e45596..55fcf26 100644
--- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/ScmTestMock.java
+++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/ScmTestMock.java
@@ -19,6 +19,8 @@ package org.apache.hadoop.ozone.container.common;
 import com.google.common.base.Preconditions;
 import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos;
 import org.apache.hadoop.hdds.protocol.proto
+    .StorageContainerDatanodeProtocolProtos.ContainerReplicaProto;
+import org.apache.hadoop.hdds.protocol.proto
         .StorageContainerDatanodeProtocolProtos.ContainerReportsProto;
 import org.apache.hadoop.hdds.protocol.proto
         .StorageContainerDatanodeProtocolProtos.PipelineReportsProto;
@@ -32,8 +34,6 @@ import org.apache.hadoop.hdds.protocol.proto.HddsProtos.DatanodeDetailsProto;
 import org.apache.hadoop.hdds.protocol.proto
     .StorageContainerDatanodeProtocolProtos.SCMHeartbeatRequestProto;
 import org.apache.hadoop.hdds.protocol.proto
-    .StorageContainerDatanodeProtocolProtos.ContainerInfo;
-import org.apache.hadoop.hdds.protocol.proto
     .StorageContainerDatanodeProtocolProtos.SCMCommandProto;
 import org.apache.hadoop.hdds.protocol.proto
     .StorageContainerDatanodeProtocolProtos.SCMHeartbeatResponseProto;
@@ -66,8 +66,9 @@ public class ScmTestMock implements StorageContainerDatanodeProtocol {
   }
 
   // Map of datanode to containers
-  private Map<DatanodeDetails, Map<String, ContainerInfo>> nodeContainers =
-      new HashMap();
+  private Map<DatanodeDetails,
+      Map<String, ContainerReplicaProto>> nodeContainers =
+      new HashMap<>();
   private Map<DatanodeDetails, NodeReportProto> nodeReports = new HashMap<>();
   private AtomicInteger commandStatusReport = new AtomicInteger(0);
   private List<CommandStatus> cmdStatusList = new LinkedList<>();
@@ -274,7 +275,7 @@ public class ScmTestMock implements StorageContainerDatanodeProtocol {
         nodeContainers.put(datanode, containers);
       }
 
-      for (StorageContainerDatanodeProtocolProtos.ContainerInfo report : reports
+      for (ContainerReplicaProto report : reports
           .getReportsList()) {
         containers.put(report.getContainerID(), report);
       }
@@ -297,7 +298,8 @@ public class ScmTestMock implements StorageContainerDatanodeProtocol {
    * @return count of storage reports of a datanode
    */
   public int getContainerCountsForDatanode(DatanodeDetails datanodeDetails) {
-    Map<String, ContainerInfo> cr = nodeContainers.get(datanodeDetails);
+    Map<String, ContainerReplicaProto> cr =
+        nodeContainers.get(datanodeDetails);
     if(cr != null) {
       return cr.size();
     }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/e4f22b08/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/TestKeyValueContainerData.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/TestKeyValueContainerData.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/TestKeyValueContainerData.java
index f991520..824b770 100644
--- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/TestKeyValueContainerData.java
+++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/TestKeyValueContainerData.java
@@ -40,8 +40,8 @@ public class TestKeyValueContainerData {
         .ContainerType.KeyValueContainer;
     String path = "/tmp";
     String containerDBType = "RocksDB";
-    ContainerProtos.ContainerLifeCycleState state = ContainerProtos
-        .ContainerLifeCycleState.CLOSED;
+    ContainerProtos.ContainerDataProto.State state =
+        ContainerProtos.ContainerDataProto.State.CLOSED;
     AtomicLong val = new AtomicLong(0);
 
     KeyValueContainerData kvData = new KeyValueContainerData(containerId,
@@ -49,7 +49,7 @@ public class TestKeyValueContainerData {
 
     assertEquals(containerType, kvData.getContainerType());
     assertEquals(containerId, kvData.getContainerID());
-    assertEquals(ContainerProtos.ContainerLifeCycleState.OPEN, kvData
+    assertEquals(ContainerProtos.ContainerDataProto.State.OPEN, kvData
         .getState());
     assertEquals(0, kvData.getMetadata().size());
     assertEquals(0, kvData.getNumPendingDeletionBlocks());

http://git-wip-us.apache.org/repos/asf/hadoop/blob/e4f22b08/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/impl/TestContainerDataYaml.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/impl/TestContainerDataYaml.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/impl/TestContainerDataYaml.java
index c7b9e0a..087d627 100644
--- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/impl/TestContainerDataYaml.java
+++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/impl/TestContainerDataYaml.java
@@ -91,7 +91,7 @@ public class TestContainerDataYaml {
     assertEquals("RocksDB", kvData.getContainerDBType());
     assertEquals(containerFile.getParent(), kvData.getMetadataPath());
     assertEquals(containerFile.getParent(), kvData.getChunksPath());
-    assertEquals(ContainerProtos.ContainerLifeCycleState.OPEN, kvData
+    assertEquals(ContainerProtos.ContainerDataProto.State.OPEN, kvData
         .getState());
     assertEquals(1, kvData.getLayOutVersion());
     assertEquals(0, kvData.getMetadata().size());
@@ -100,7 +100,7 @@ public class TestContainerDataYaml {
     // Update ContainerData.
     kvData.addMetadata("VOLUME", "hdfs");
     kvData.addMetadata("OWNER", "ozone");
-    kvData.setState(ContainerProtos.ContainerLifeCycleState.CLOSED);
+    kvData.setState(ContainerProtos.ContainerDataProto.State.CLOSED);
 
 
     ContainerDataYaml.createContainerFile(ContainerProtos.ContainerType
@@ -117,7 +117,7 @@ public class TestContainerDataYaml {
     assertEquals("RocksDB", kvData.getContainerDBType());
     assertEquals(containerFile.getParent(), kvData.getMetadataPath());
     assertEquals(containerFile.getParent(), kvData.getChunksPath());
-    assertEquals(ContainerProtos.ContainerLifeCycleState.CLOSED, kvData
+    assertEquals(ContainerProtos.ContainerDataProto.State.CLOSED, kvData
         .getState());
     assertEquals(1, kvData.getLayOutVersion());
     assertEquals(2, kvData.getMetadata().size());
@@ -161,7 +161,7 @@ public class TestContainerDataYaml {
       ContainerUtils.verifyChecksum(kvData);
 
       //Checking the Container file data is consistent or not
-      assertEquals(ContainerProtos.ContainerLifeCycleState.CLOSED, kvData
+      assertEquals(ContainerProtos.ContainerDataProto.State.CLOSED, kvData
           .getState());
       assertEquals("RocksDB", kvData.getContainerDBType());
       assertEquals(ContainerProtos.ContainerType.KeyValueContainer, kvData

http://git-wip-us.apache.org/repos/asf/hadoop/blob/e4f22b08/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/impl/TestContainerSet.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/impl/TestContainerSet.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/impl/TestContainerSet.java
index af322ea..7d8e438 100644
--- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/impl/TestContainerSet.java
+++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/impl/TestContainerSet.java
@@ -51,8 +51,8 @@ public class TestContainerSet {
   public void testAddGetRemoveContainer() throws StorageContainerException {
     ContainerSet containerSet = new ContainerSet();
     long containerId = 100L;
-    ContainerProtos.ContainerLifeCycleState state = ContainerProtos
-        .ContainerLifeCycleState.CLOSED;
+    ContainerProtos.ContainerDataProto.State state = ContainerProtos
+        .ContainerDataProto.State.CLOSED;
 
     KeyValueContainerData kvData = new KeyValueContainerData(containerId,
         (long) StorageUnit.GB.toBytes(5));
@@ -101,10 +101,10 @@ public class TestContainerSet {
       ContainerData containerData = kv.getContainerData();
       long containerId = containerData.getContainerID();
       if (containerId%2 == 0) {
-        assertEquals(ContainerProtos.ContainerLifeCycleState.CLOSED,
+        assertEquals(ContainerProtos.ContainerDataProto.State.CLOSED,
             containerData.getState());
       } else {
-        assertEquals(ContainerProtos.ContainerLifeCycleState.OPEN,
+        assertEquals(ContainerProtos.ContainerDataProto.State.OPEN,
             containerData.getState());
       }
       count++;
@@ -121,10 +121,10 @@ public class TestContainerSet {
       ContainerData containerData = kv.getContainerData();
       long containerId = containerData.getContainerID();
       if (containerId%2 == 0) {
-        assertEquals(ContainerProtos.ContainerLifeCycleState.CLOSED,
+        assertEquals(ContainerProtos.ContainerDataProto.State.CLOSED,
             containerData.getState());
       } else {
-        assertEquals(ContainerProtos.ContainerLifeCycleState.OPEN,
+        assertEquals(ContainerProtos.ContainerDataProto.State.OPEN,
             containerData.getState());
       }
       count++;
@@ -168,9 +168,9 @@ public class TestContainerSet {
       KeyValueContainerData kvData = new KeyValueContainerData(i,
           (long) StorageUnit.GB.toBytes(5));
       if (i%2 == 0) {
-        kvData.setState(ContainerProtos.ContainerLifeCycleState.CLOSED);
+        kvData.setState(ContainerProtos.ContainerDataProto.State.CLOSED);
       } else {
-        kvData.setState(ContainerProtos.ContainerLifeCycleState.OPEN);
+        kvData.setState(ContainerProtos.ContainerDataProto.State.OPEN);
       }
       KeyValueContainer kv = new KeyValueContainer(kvData, new
           OzoneConfiguration());

http://git-wip-us.apache.org/repos/asf/hadoop/blob/e4f22b08/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueContainer.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueContainer.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueContainer.java
index bf6b8b0..8c0db4a 100644
--- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueContainer.java
+++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueContainer.java
@@ -24,8 +24,6 @@ import org.apache.hadoop.hdds.client.BlockID;
 import org.apache.hadoop.hdds.conf.OzoneConfiguration;
 import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos;
 
-import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos
-    .ContainerLifeCycleState;
 import org.apache.hadoop.hdds.scm.container.common.helpers
     .StorageContainerException;
 import org.apache.hadoop.ozone.container.common.helpers.BlockData;
@@ -185,7 +183,8 @@ public class TestKeyValueContainer {
     keyValueContainerData = keyValueContainer
         .getContainerData();
 
-    keyValueContainerData.setState(ContainerLifeCycleState.CLOSED);
+    keyValueContainerData.setState(
+        ContainerProtos.ContainerDataProto.State.CLOSED);
 
     int numberOfKeysToWrite = 12;
     //write one few keys to check the key count after import
@@ -286,7 +285,7 @@ public class TestKeyValueContainer {
 
   @Test
   public void testDeleteContainer() throws Exception {
-    keyValueContainerData.setState(ContainerProtos.ContainerLifeCycleState
+    keyValueContainerData.setState(ContainerProtos.ContainerDataProto.State
         .CLOSED);
     keyValueContainer = new KeyValueContainer(
         keyValueContainerData, conf);
@@ -315,7 +314,7 @@ public class TestKeyValueContainer {
     keyValueContainerData = keyValueContainer
         .getContainerData();
 
-    assertEquals(ContainerProtos.ContainerLifeCycleState.CLOSED,
+    assertEquals(ContainerProtos.ContainerDataProto.State.CLOSED,
         keyValueContainerData.getState());
 
     //Check state in the .container file
@@ -325,7 +324,7 @@ public class TestKeyValueContainer {
 
     keyValueContainerData = (KeyValueContainerData) ContainerDataYaml
         .readContainerFile(containerFile);
-    assertEquals(ContainerProtos.ContainerLifeCycleState.CLOSED,
+    assertEquals(ContainerProtos.ContainerDataProto.State.CLOSED,
         keyValueContainerData.getState());
   }
 
@@ -354,8 +353,8 @@ public class TestKeyValueContainer {
   @Test
   public void testUpdateContainerUnsupportedRequest() throws Exception {
     try {
-      keyValueContainerData.setState(ContainerProtos.ContainerLifeCycleState
-          .CLOSED);
+      keyValueContainerData.setState(
+          ContainerProtos.ContainerDataProto.State.CLOSED);
       keyValueContainer = new KeyValueContainer(keyValueContainerData, conf);
       keyValueContainer.create(volumeSet, volumeChoosingPolicy, scmId);
       Map<String, String> metadata = new HashMap<>();

http://git-wip-us.apache.org/repos/asf/hadoop/blob/e4f22b08/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueHandler.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueHandler.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueHandler.java
index e1904c1..dcda10b 100644
--- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueHandler.java
+++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueHandler.java
@@ -266,7 +266,7 @@ public class TestKeyValueHandler {
     KeyValueContainerData kvData = new KeyValueContainerData(containerID,
         (long) StorageUnit.GB.toBytes(1));
     KeyValueContainer container = new KeyValueContainer(kvData, conf);
-    kvData.setState(ContainerProtos.ContainerLifeCycleState.INVALID);
+    kvData.setState(ContainerProtos.ContainerDataProto.State.INVALID);
 
     // Create Close container request
     ContainerCommandRequestProto closeContainerRequest =

http://git-wip-us.apache.org/repos/asf/hadoop/blob/e4f22b08/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerReportHandler.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerReportHandler.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerReportHandler.java
index 5885d959..0cb2f81 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerReportHandler.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerReportHandler.java
@@ -78,7 +78,7 @@ public class ContainerReportHandler implements
 
       Set<ContainerID> containerIds = containerReport.getReportsList().stream()
           .map(StorageContainerDatanodeProtocolProtos
-              .ContainerInfo::getContainerID)
+              .ContainerReplicaProto::getContainerID)
           .map(ContainerID::new)
           .collect(Collectors.toSet());
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/e4f22b08/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/SCMContainerManager.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/SCMContainerManager.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/SCMContainerManager.java
index 0f980dc1..4e6f09e 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/SCMContainerManager.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/SCMContainerManager.java
@@ -22,6 +22,7 @@ import com.google.common.primitives.Longs;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.conf.StorageUnit;
 import org.apache.hadoop.hdds.protocol.DatanodeDetails;
+import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ContainerInfoProto;
 import org.apache.hadoop.hdds.protocol.proto.HddsProtos.LifeCycleEvent;
 import org.apache.hadoop.hdds.protocol.proto.HddsProtos.LifeCycleState;
 import org.apache.hadoop.hdds.scm.block.PendingDeleteStatusList;
@@ -37,7 +38,7 @@ import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
 import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor;
 import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationType;
 import org.apache.hadoop.hdds.protocol.proto
-    .StorageContainerDatanodeProtocolProtos;
+    .StorageContainerDatanodeProtocolProtos.ContainerReplicaProto;
 import org.apache.hadoop.hdds.protocol.proto
     .StorageContainerDatanodeProtocolProtos.ContainerReportsProto;
 import org.apache.hadoop.hdds.server.events.EventPublisher;
@@ -144,7 +145,7 @@ public class SCMContainerManager implements ContainerManager {
         .getSequentialRangeKVs(null, Integer.MAX_VALUE, null);
     for (Map.Entry<byte[], byte[]> entry : range) {
       ContainerInfo container = ContainerInfo.fromProtobuf(
-          HddsProtos.SCMContainerInfo.PARSER.parseFrom(entry.getValue()));
+          ContainerInfoProto.PARSER.parseFrom(entry.getValue()));
       Preconditions.checkNotNull(container);
       containerStateManager.loadContainer(container);
       if (container.isOpen()) {
@@ -452,7 +453,7 @@ public class SCMContainerManager implements ContainerManager {
               SCMException.ResultCodes.FAILED_TO_FIND_CONTAINER);
         }
         ContainerInfo containerInfo = ContainerInfo.fromProtobuf(
-            HddsProtos.SCMContainerInfo.parseFrom(containerBytes));
+            HddsProtos.ContainerInfoProto.parseFrom(containerBytes));
         containerInfo.updateDeleteTransactionId(entry.getValue());
         batch.put(dbKey, containerInfo.getProtobuf().toByteArray());
       }
@@ -507,11 +508,11 @@ public class SCMContainerManager implements ContainerManager {
   @Override
   public void processContainerReports(DatanodeDetails datanodeDetails,
       ContainerReportsProto reports) throws IOException {
-    List<StorageContainerDatanodeProtocolProtos.ContainerInfo>
+    List<ContainerReplicaProto>
         containerInfos = reports.getReportsList();
     PendingDeleteStatusList pendingDeleteStatusList =
         new PendingDeleteStatusList(datanodeDetails);
-    for (StorageContainerDatanodeProtocolProtos.ContainerInfo newInfo :
+    for (ContainerReplicaProto newInfo :
         containerInfos) {
       ContainerID id = ContainerID.valueof(newInfo.getContainerID());
       ContainerReplica replica = ContainerReplica.newBuilder()
@@ -523,7 +524,7 @@ public class SCMContainerManager implements ContainerManager {
       try {
         containerStateManager.updateContainerReplica(id, replica);
         ContainerInfo currentInfo = containerStateManager.getContainer(id);
-        if (newInfo.getState() == LifeCycleState.CLOSED
+        if (newInfo.getState() == ContainerReplicaProto.State.CLOSED
             && currentInfo.getState() == LifeCycleState.CLOSING) {
           currentInfo = updateContainerStateInternal(id, LifeCycleEvent.CLOSE);
           if (!currentInfo.isOpen()) {
@@ -532,7 +533,7 @@ public class SCMContainerManager implements ContainerManager {
           }
         }
 
-        HddsProtos.SCMContainerInfo newState =
+        ContainerInfoProto newState =
             reconcileState(newInfo, currentInfo);
 
         if (currentInfo.getDeleteTransactionId() >
@@ -567,11 +568,11 @@ public class SCMContainerManager implements ContainerManager {
    * @param knownState - State inside SCM.
    * @return new SCM State for this container.
    */
-  private HddsProtos.SCMContainerInfo reconcileState(
-      StorageContainerDatanodeProtocolProtos.ContainerInfo datanodeState,
+  private HddsProtos.ContainerInfoProto reconcileState(
+      ContainerReplicaProto datanodeState,
       ContainerInfo knownState) {
-    HddsProtos.SCMContainerInfo.Builder builder =
-        HddsProtos.SCMContainerInfo.newBuilder();
+    HddsProtos.ContainerInfoProto.Builder builder =
+        HddsProtos.ContainerInfoProto.newBuilder();
     builder.setContainerID(knownState.getContainerID())
         .setPipelineID(knownState.getPipelineID().getProtobuf())
         .setState(knownState.getState())

http://git-wip-us.apache.org/repos/asf/hadoop/blob/e4f22b08/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/HddsTestUtils.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/HddsTestUtils.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/HddsTestUtils.java
index 9b28e1e..0c79238 100644
--- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/HddsTestUtils.java
+++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/HddsTestUtils.java
@@ -55,7 +55,7 @@ public final class HddsTestUtils {
    */
   public static NodeRegistrationContainerReport
       createNodeRegistrationContainerReport(List<ContainerInfo> dnContainers) {
-    List<StorageContainerDatanodeProtocolProtos.ContainerInfo>
+    List<StorageContainerDatanodeProtocolProtos.ContainerReplicaProto>
         containers = new ArrayList<>();
     dnContainers.forEach(c -> {
       containers.add(TestUtils.getRandomContainerInfo(c.getContainerID()));

http://git-wip-us.apache.org/repos/asf/hadoop/blob/e4f22b08/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/TestUtils.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/TestUtils.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/TestUtils.java
index 9d3ec10..66ae682 100644
--- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/TestUtils.java
+++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/TestUtils.java
@@ -18,6 +18,8 @@ package org.apache.hadoop.hdds.scm;
 
 import com.google.common.base.Preconditions;
 import org.apache.hadoop.hdds.protocol.proto
+    .StorageContainerDatanodeProtocolProtos.ContainerReplicaProto;
+import org.apache.hadoop.hdds.protocol.proto
     .StorageContainerDatanodeProtocolProtos.PipelineReport;
 import org.apache.hadoop.hdds.protocol.proto
         .StorageContainerDatanodeProtocolProtos.PipelineReportsProto;
@@ -28,8 +30,6 @@ import org.apache.hadoop.hdds.scm.container.ContainerManager;
 
 import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
 import org.apache.hadoop.hdds.protocol.proto
-    .StorageContainerDatanodeProtocolProtos.ContainerInfo;
-import org.apache.hadoop.hdds.protocol.proto
     .StorageContainerDatanodeProtocolProtos.ContainerReportsProto;
 import org.apache.hadoop.hdds.protocol
     .proto.StorageContainerDatanodeProtocolProtos.NodeReportProto;
@@ -293,7 +293,7 @@ public final class TestUtils {
    */
   public static ContainerReportsProto getRandomContainerReports(
       int numberOfContainers) {
-    List<ContainerInfo> containerInfos = new ArrayList<>();
+    List<ContainerReplicaProto> containerInfos = new ArrayList<>();
     for (int i = 0; i < numberOfContainers; i++) {
       containerInfos.add(getRandomContainerInfo(i));
     }
@@ -326,7 +326,7 @@ public final class TestUtils {
    * @return ContainerReportsProto
    */
   public static ContainerReportsProto getContainerReports(
-      ContainerInfo... containerInfos) {
+      ContainerReplicaProto... containerInfos) {
     return getContainerReports(Arrays.asList(containerInfos));
   }
 
@@ -338,10 +338,10 @@ public final class TestUtils {
    * @return ContainerReportsProto
    */
   public static ContainerReportsProto getContainerReports(
-      List<ContainerInfo> containerInfos) {
+      List<ContainerReplicaProto> containerInfos) {
     ContainerReportsProto.Builder
         reportsBuilder = ContainerReportsProto.newBuilder();
-    for (ContainerInfo containerInfo : containerInfos) {
+    for (ContainerReplicaProto containerInfo : containerInfos) {
       reportsBuilder.addReports(containerInfo);
     }
     return reportsBuilder.build();
@@ -354,7 +354,8 @@ public final class TestUtils {
    *
    * @return ContainerInfo
    */
-  public static ContainerInfo getRandomContainerInfo(long containerId) {
+  public static ContainerReplicaProto getRandomContainerInfo(
+      long containerId) {
     return createContainerInfo(containerId,
         OzoneConsts.GB * 5,
         random.nextLong(1000),
@@ -379,11 +380,12 @@ public final class TestUtils {
    *
    * @return ContainerInfo
    */
-  public static ContainerInfo createContainerInfo(
+  public static ContainerReplicaProto createContainerInfo(
       long containerId, long size, long keyCount, long bytesUsed,
       long readCount, long readBytes, long writeCount, long writeBytes) {
-    return ContainerInfo.newBuilder()
+    return ContainerReplicaProto.newBuilder()
         .setContainerID(containerId)
+        .setState(ContainerReplicaProto.State.OPEN)
         .setSize(size)
         .setKeyCount(keyCount)
         .setUsed(bytesUsed)

http://git-wip-us.apache.org/repos/asf/hadoop/blob/e4f22b08/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/TestContainerReportHandler.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/TestContainerReportHandler.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/TestContainerReportHandler.java
index 861d241..a5475e2 100644
--- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/TestContainerReportHandler.java
+++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/TestContainerReportHandler.java
@@ -191,9 +191,11 @@ public class TestContainerReportHandler implements EventPublisher {
 
     for (long containerId : containerIds) {
       org.apache.hadoop.hdds.protocol.proto
-          .StorageContainerDatanodeProtocolProtos.ContainerInfo.Builder
+          .StorageContainerDatanodeProtocolProtos
+          .ContainerReplicaProto.Builder
           ciBuilder = org.apache.hadoop.hdds.protocol.proto
-          .StorageContainerDatanodeProtocolProtos.ContainerInfo.newBuilder();
+          .StorageContainerDatanodeProtocolProtos
+          .ContainerReplicaProto.newBuilder();
       ciBuilder.setFinalhash("e16cc9d6024365750ed8dbd194ea46d2")
           .setSize(5368709120L)
           .setUsed(2000000000L)

http://git-wip-us.apache.org/repos/asf/hadoop/blob/e4f22b08/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/TestSCMContainerManager.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/TestSCMContainerManager.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/TestSCMContainerManager.java
index fa0f084..02c292c 100644
--- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/TestSCMContainerManager.java
+++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/TestSCMContainerManager.java
@@ -28,7 +28,7 @@ import org.apache.hadoop.hdds.scm.pipeline.Pipeline;
 import org.apache.hadoop.hdds.protocol.DatanodeDetails;
 import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
 import org.apache.hadoop.hdds.protocol.proto
-    .StorageContainerDatanodeProtocolProtos;
+    .StorageContainerDatanodeProtocolProtos.ContainerReplicaProto;
 import org.apache.hadoop.hdds.protocol.proto
     .StorageContainerDatanodeProtocolProtos.ContainerReportsProto;
 import org.apache.hadoop.hdds.scm.pipeline.PipelineManager;
@@ -244,10 +244,10 @@ public class TestSCMContainerManager {
   public void testFullContainerReport() throws Exception {
     ContainerInfo info = createContainer();
     DatanodeDetails datanodeDetails = TestUtils.randomDatanodeDetails();
-    List<StorageContainerDatanodeProtocolProtos.ContainerInfo> reports =
+    List<ContainerReplicaProto> reports =
         new ArrayList<>();
-    StorageContainerDatanodeProtocolProtos.ContainerInfo.Builder ciBuilder =
-        StorageContainerDatanodeProtocolProtos.ContainerInfo.newBuilder();
+    ContainerReplicaProto.Builder ciBuilder =
+        ContainerReplicaProto.newBuilder();
     ciBuilder.setFinalhash("e16cc9d6024365750ed8dbd194ea46d2")
         .setSize(5368709120L)
         .setUsed(2000000000L)
@@ -257,6 +257,7 @@ public class TestSCMContainerManager {
         .setReadBytes(2000000000L)
         .setWriteBytes(2000000000L)
         .setContainerID(info.getContainerID())
+        .setState(ContainerReplicaProto.State.CLOSED)
         .setDeleteTransactionId(0);
 
     reports.add(ciBuilder.build());
@@ -274,14 +275,14 @@ public class TestSCMContainerManager {
         updatedContainer.getNumberOfKeys());
     Assert.assertEquals(2000000000L, updatedContainer.getUsedBytes());
 
-    for (StorageContainerDatanodeProtocolProtos.ContainerInfo c : reports) {
+    for (ContainerReplicaProto c : reports) {
      Assert.assertEquals(containerManager.getContainerReplicas(
          ContainerID.valueof(c.getContainerID())).size(), 1);
     }
 
     containerManager.processContainerReports(TestUtils.randomDatanodeDetails(),
         crBuilder.build());
-    for (StorageContainerDatanodeProtocolProtos.ContainerInfo c : reports) {
+    for (ContainerReplicaProto c : reports) {
       Assert.assertEquals(containerManager.getContainerReplicas(
               ContainerID.valueof(c.getContainerID())).size(), 2);
     }
@@ -292,10 +293,10 @@ public class TestSCMContainerManager {
     ContainerInfo info1 = createContainer();
     ContainerInfo info2 = createContainer();
     DatanodeDetails datanodeDetails = TestUtils.randomDatanodeDetails();
-    List<StorageContainerDatanodeProtocolProtos.ContainerInfo> reports =
+    List<ContainerReplicaProto> reports =
         new ArrayList<>();
-    StorageContainerDatanodeProtocolProtos.ContainerInfo.Builder ciBuilder =
-        StorageContainerDatanodeProtocolProtos.ContainerInfo.newBuilder();
+    ContainerReplicaProto.Builder ciBuilder =
+        ContainerReplicaProto.newBuilder();
     long cID1 = info1.getContainerID();
     long cID2 = info2.getContainerID();
     ciBuilder.setFinalhash("e16cc9d6024365750ed8dbd194ea46d2")
@@ -304,7 +305,8 @@ public class TestSCMContainerManager {
         .setKeyCount(100000000L)
         .setReadBytes(1000000000L)
         .setWriteBytes(1000000000L)
-        .setContainerID(cID1);
+        .setContainerID(cID1)
+        .setState(ContainerReplicaProto.State.CLOSED);
     reports.add(ciBuilder.build());
 
     ciBuilder.setFinalhash("e16cc9d6024365750ed8dbd194ea54a9")

http://git-wip-us.apache.org/repos/asf/hadoop/blob/e4f22b08/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/container/InfoSubcommand.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/container/InfoSubcommand.java b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/container/InfoSubcommand.java
index bf37718..f202254 100644
--- a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/container/InfoSubcommand.java
+++ b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/container/InfoSubcommand.java
@@ -23,9 +23,7 @@ import java.util.stream.Collectors;
 import org.apache.hadoop.hdds.cli.HddsVersionProvider;
 import org.apache.hadoop.hdds.protocol.DatanodeDetails;
 import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos
-    .ContainerData;
-import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos
-    .ContainerLifeCycleState;
+    .ContainerDataProto;
 import org.apache.hadoop.hdds.scm.cli.SCMCLI;
 import org.apache.hadoop.hdds.scm.client.ScmClient;
 import org.apache.hadoop.hdds.scm.container.common.helpers
@@ -64,13 +62,13 @@ public class InfoSubcommand implements Callable<Void> {
           getContainerWithPipeline(containerID);
       Preconditions.checkNotNull(container, "Container cannot be null");
 
-      ContainerData containerData = scmClient.readContainer(container
+      ContainerDataProto containerData = scmClient.readContainer(container
           .getContainerInfo().getContainerID(), container.getPipeline());
 
       // Print container report info.
       LOG.info("Container id: {}", containerID);
       String openStatus =
-          containerData.getState() == ContainerLifeCycleState.OPEN ? "OPEN" :
+          containerData.getState() == ContainerDataProto.State.OPEN ? "OPEN" :
               "CLOSED";
       LOG.info("Container State: {}", openStatus);
       LOG.info("Container Path: {}", containerData.getContainerPath());

http://git-wip-us.apache.org/repos/asf/hadoop/blob/e4f22b08/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestCloseContainerHandlingByClient.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestCloseContainerHandlingByClient.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestCloseContainerHandlingByClient.java
index 76f6f8c..d06a0bc 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestCloseContainerHandlingByClient.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestCloseContainerHandlingByClient.java
@@ -464,7 +464,7 @@ public class TestCloseContainerHandlingByClient {
       if (datanodes.get(0).equals(datanodeService.getDatanodeDetails())) {
         datanodeService.getDatanodeStateMachine().getContainer()
             .getContainerSet().getContainer(containerID).getContainerData()
-            .setState(ContainerProtos.ContainerLifeCycleState.CLOSING);
+            .setState(ContainerProtos.ContainerDataProto.State.CLOSING);
       }
     }
     dataString = fixedLengthString(keyString, (chunkSize * 1 / 2));

http://git-wip-us.apache.org/repos/asf/hadoop/blob/e4f22b08/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestContainerStateMachineFailures.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestContainerStateMachineFailures.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestContainerStateMachineFailures.java
index 0e593fb..b3f0be7 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestContainerStateMachineFailures.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestContainerStateMachineFailures.java
@@ -161,7 +161,7 @@ public class TestContainerStateMachineFailures {
             .getContainer().getContainerSet()
             .getContainer(omKeyLocationInfo.getContainerID())
             .getContainerState()
-            == ContainerProtos.ContainerLifeCycleState.UNHEALTHY);
+            == ContainerProtos.ContainerDataProto.State.UNHEALTHY);
     try {
       // subsequent requests will fail with unhealthy container exception
       key.close();

http://git-wip-us.apache.org/repos/asf/hadoop/blob/e4f22b08/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/impl/TestContainerPersistence.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/impl/TestContainerPersistence.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/impl/TestContainerPersistence.java
index f81ee57..c2941ed 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/impl/TestContainerPersistence.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/impl/TestContainerPersistence.java
@@ -268,10 +268,10 @@ public class TestContainerPersistence {
 
     // ContainerSet#getContainerReport currently returns all containers (open
     // and closed) reports.
-    List<StorageContainerDatanodeProtocolProtos.ContainerInfo> reports =
+    List<StorageContainerDatanodeProtocolProtos.ContainerReplicaProto> reports =
         containerSet.getContainerReport().getReportsList();
     Assert.assertEquals(10, reports.size());
-    for (StorageContainerDatanodeProtocolProtos.ContainerInfo report :
+    for (StorageContainerDatanodeProtocolProtos.ContainerReplicaProto report :
         reports) {
       long actualContainerID = report.getContainerID();
       Assert.assertTrue(containerIDs.remove(actualContainerID));

http://git-wip-us.apache.org/repos/asf/hadoop/blob/e4f22b08/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestBlockDeletion.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestBlockDeletion.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestBlockDeletion.java
index 63346d2..c49a98b 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestBlockDeletion.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestBlockDeletion.java
@@ -24,7 +24,7 @@ import org.apache.hadoop.hdds.client.ReplicationType;
 import org.apache.hadoop.hdds.conf.OzoneConfiguration;
 import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
 import org.apache.hadoop.hdds.protocol.proto
-    .StorageContainerDatanodeProtocolProtos.ContainerInfo;
+    .StorageContainerDatanodeProtocolProtos.ContainerReplicaProto;
 import org.apache.hadoop.hdds.protocol.proto
     .StorageContainerDatanodeProtocolProtos.ContainerReportsProto;
 import org.apache.hadoop.hdds.scm.block.DeletedBlockLogImpl;
@@ -233,9 +233,11 @@ public class TestBlockDeletion {
     ContainerReportsProto containerReport = dnContainerSet.getContainerReport();
     ContainerReportsProto.Builder dummyReportsBuilder =
         ContainerReportsProto.newBuilder();
-    for (ContainerInfo containerInfo : containerReport.getReportsList()) {
+    for (ContainerReplicaProto containerInfo :
+        containerReport.getReportsList()) {
       dummyReportsBuilder.addReports(
-          ContainerInfo.newBuilder(containerInfo).setDeleteTransactionId(0)
+          ContainerReplicaProto.newBuilder(containerInfo)
+              .setDeleteTransactionId(0)
               .build());
     }
     ContainerReportsProto dummyReport = dummyReportsBuilder.build();
@@ -246,7 +248,7 @@ public class TestBlockDeletion {
     // wait for event to be handled by event handler
     Thread.sleep(1000);
     String output = logCapturer.getOutput();
-    for (ContainerInfo containerInfo : dummyReport.getReportsList()) {
+    for (ContainerReplicaProto containerInfo : dummyReport.getReportsList()) {
       long containerId = containerInfo.getContainerID();
       // Event should be triggered only for containers which have deleted blocks
       if (containerIdsWithDeletedBlocks.contains(containerId)) {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/e4f22b08/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/scm/cli/SQLCLI.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/scm/cli/SQLCLI.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/scm/cli/SQLCLI.java
index c03128d..0c7b7ed 100644
--- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/scm/cli/SQLCLI.java
+++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/scm/cli/SQLCLI.java
@@ -469,7 +469,7 @@ public class SQLCLI  extends Configured implements Tool {
         long containerID = Longs.fromByteArray(key);
         ContainerInfo containerInfo = null;
         containerInfo = ContainerInfo.fromProtobuf(
-            HddsProtos.SCMContainerInfo.PARSER.parseFrom(value));
+            HddsProtos.ContainerInfoProto.PARSER.parseFrom(value));
         Preconditions.checkNotNull(containerInfo);
         try {
           //TODO: include container state to sqllite schema




[11/50] [abbrv] hadoop git commit: HDDS-694. Plugin new Pipeline management code in SCM. Contributed by Lokesh Jain.

Posted by su...@apache.org.
http://git-wip-us.apache.org/repos/asf/hadoop/blob/dce4ebe8/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/StorageContainerManager.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/StorageContainerManager.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/StorageContainerManager.java
index fc9afd6..d8b9958 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/StorageContainerManager.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/StorageContainerManager.java
@@ -63,9 +63,10 @@ import org.apache.hadoop.hdds.scm.node.NodeManager;
 import org.apache.hadoop.hdds.scm.node.NodeReportHandler;
 import org.apache.hadoop.hdds.scm.node.SCMNodeManager;
 import org.apache.hadoop.hdds.scm.node.StaleNodeHandler;
-import org.apache.hadoop.hdds.scm.pipelines.PipelineCloseHandler;
-import org.apache.hadoop.hdds.scm.pipelines.PipelineActionEventHandler;
-import org.apache.hadoop.hdds.scm.pipelines.PipelineReportHandler;
+import org.apache.hadoop.hdds.scm.pipeline.PipelineActionHandler;
+import org.apache.hadoop.hdds.scm.pipeline.SCMPipelineManager;
+import org.apache.hadoop.hdds.scm.pipeline.PipelineManager;
+import org.apache.hadoop.hdds.scm.pipeline.PipelineReportHandler;
 import org.apache.hadoop.hdds.server.ServiceRuntimeInfoImpl;
 import org.apache.hadoop.hdds.server.events.EventPublisher;
 import org.apache.hadoop.hdds.server.events.EventQueue;
@@ -149,6 +150,7 @@ public final class StorageContainerManager extends ServiceRuntimeInfoImpl
    * State Managers of SCM.
    */
   private final NodeManager scmNodeManager;
+  private final PipelineManager pipelineManager;
   private final ContainerManager containerManager;
   private final BlockManager scmBlockManager;
   private final SCMStorage scmStorage;
@@ -201,8 +203,9 @@ public final class StorageContainerManager extends ServiceRuntimeInfoImpl
 
     scmNodeManager = new SCMNodeManager(
         conf, scmStorage.getClusterID(), this, eventQueue);
+    pipelineManager = new SCMPipelineManager(conf, scmNodeManager, eventQueue);
     containerManager = new SCMContainerManager(
-        conf, scmNodeManager, eventQueue);
+        conf, scmNodeManager, pipelineManager, eventQueue);
     scmBlockManager = new BlockManagerImpl(
         conf, scmNodeManager, containerManager, eventQueue);
 
@@ -213,14 +216,13 @@ public final class StorageContainerManager extends ServiceRuntimeInfoImpl
     NodeReportHandler nodeReportHandler =
         new NodeReportHandler(scmNodeManager);
     PipelineReportHandler pipelineReportHandler =
-            new PipelineReportHandler(
-                    containerManager.getPipelineSelector());
+            new PipelineReportHandler(pipelineManager, conf);
     CommandStatusReportHandler cmdStatusReportHandler =
         new CommandStatusReportHandler();
 
     NewNodeHandler newNodeHandler = new NewNodeHandler(scmNodeManager);
     StaleNodeHandler staleNodeHandler =
-        new StaleNodeHandler(containerManager.getPipelineSelector());
+        new StaleNodeHandler(scmNodeManager, pipelineManager);
     DeadNodeHandler deadNodeHandler = new DeadNodeHandler(scmNodeManager,
         containerManager);
     ContainerActionsHandler actionsHandler = new ContainerActionsHandler();
@@ -231,11 +233,8 @@ public final class StorageContainerManager extends ServiceRuntimeInfoImpl
         new ContainerReportHandler(containerManager, scmNodeManager,
             replicationStatus);
 
-    PipelineActionEventHandler pipelineActionEventHandler =
-        new PipelineActionEventHandler();
-
-    PipelineCloseHandler pipelineCloseHandler =
-        new PipelineCloseHandler(containerManager.getPipelineSelector());
+    PipelineActionHandler pipelineActionHandler =
+        new PipelineActionHandler(pipelineManager);
 
     long watcherTimeout =
         conf.getTimeDuration(ScmConfigKeys.HDDS_SCM_WATCHER_TIMEOUT,
@@ -294,10 +293,7 @@ public final class StorageContainerManager extends ServiceRuntimeInfoImpl
         .addHandler(SCMEvents.PENDING_DELETE_STATUS, pendingDeleteHandler);
     eventQueue.addHandler(SCMEvents.DELETE_BLOCK_STATUS,
         (DeletedBlockLogImpl) scmBlockManager.getDeletedBlockLog());
-    eventQueue.addHandler(SCMEvents.PIPELINE_ACTIONS,
-        pipelineActionEventHandler);
-    eventQueue.addHandler(SCMEvents.PIPELINE_CLOSE, pipelineCloseHandler);
-
+    eventQueue.addHandler(SCMEvents.PIPELINE_ACTIONS, pipelineActionHandler);
     eventQueue.addHandler(SCMEvents.CHILL_MODE_STATUS, clientProtocolServer);
     eventQueue.addHandler(SCMEvents.PIPELINE_REPORT, pipelineReportHandler);
 
@@ -771,6 +767,7 @@ public final class StorageContainerManager extends ServiceRuntimeInfoImpl
       LOG.error("SCM Event Queue stop failed", ex);
     }
     IOUtils.cleanupWithLogger(LOG, containerManager);
+    IOUtils.cleanupWithLogger(LOG, pipelineManager);
   }
 
   /**
@@ -815,6 +812,16 @@ public final class StorageContainerManager extends ServiceRuntimeInfoImpl
     return scmNodeManager;
   }
 
+  /**
+   * Returns pipeline manager.
+   *
+   * @return - Pipeline Manager
+   */
+  @VisibleForTesting
+  public PipelineManager getPipelineManager() {
+    return pipelineManager;
+  }
+
   @VisibleForTesting
   public BlockManager getScmBlockManager() {
     return scmBlockManager;

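Condensing the plus-side of the constructor changes above: the pipeline manager is created first and then threaded through the container manager, the block manager, and the pipeline event handlers. The following is a hedged excerpt reassembled from those additions, not the full constructor; field declarations, chill-mode and watcher wiring are omitted.

  scmNodeManager = new SCMNodeManager(
      conf, scmStorage.getClusterID(), this, eventQueue);
  pipelineManager = new SCMPipelineManager(conf, scmNodeManager, eventQueue);
  containerManager = new SCMContainerManager(
      conf, scmNodeManager, pipelineManager, eventQueue);
  scmBlockManager = new BlockManagerImpl(
      conf, scmNodeManager, containerManager, eventQueue);

  // Pipeline events are now routed to handlers built on PipelineManager,
  // replacing PipelineActionEventHandler and PipelineCloseHandler.
  PipelineReportHandler pipelineReportHandler =
      new PipelineReportHandler(pipelineManager, conf);
  PipelineActionHandler pipelineActionHandler =
      new PipelineActionHandler(pipelineManager);
  eventQueue.addHandler(SCMEvents.PIPELINE_ACTIONS, pipelineActionHandler);
  eventQueue.addHandler(SCMEvents.PIPELINE_REPORT, pipelineReportHandler);

On stop(), the pipeline manager is cleaned up alongside the container manager, and tests can reach it through the new getPipelineManager() accessor.
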
http://git-wip-us.apache.org/repos/asf/hadoop/blob/dce4ebe8/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/block/TestBlockManager.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/block/TestBlockManager.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/block/TestBlockManager.java
index a9c6906..32e8640 100644
--- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/block/TestBlockManager.java
+++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/block/TestBlockManager.java
@@ -27,6 +27,8 @@ import org.apache.hadoop.hdds.scm.container.MockNodeManager;
 import org.apache.hadoop.hdds.scm.container.common.helpers.AllocatedBlock;
 import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
 import org.apache.hadoop.hdds.scm.events.SCMEvents;
+import org.apache.hadoop.hdds.scm.pipeline.PipelineManager;
+import org.apache.hadoop.hdds.scm.pipeline.SCMPipelineManager;
 import org.apache.hadoop.hdds.scm.server.SCMStorage;
 import org.apache.hadoop.hdds.scm.server.StorageContainerManager;
 import org.apache.hadoop.hdds.server.events.EventHandler;
@@ -58,6 +60,7 @@ import static org.apache.hadoop.ozone.OzoneConsts.MB;
 public class TestBlockManager implements EventHandler<Boolean> {
   private static SCMContainerManager mapping;
   private static MockNodeManager nodeManager;
+  private static PipelineManager pipelineManager;
   private static BlockManagerImpl blockManager;
   private static File testDir;
   private final static long DEFAULT_BLOCK_SIZE = 128 * MB;
@@ -83,7 +86,10 @@ public class TestBlockManager implements EventHandler<Boolean> {
       throw new IOException("Unable to create test directory path");
     }
     nodeManager = new MockNodeManager(true, 10);
-    mapping = new SCMContainerManager(conf, nodeManager, eventQueue);
+    pipelineManager =
+        new SCMPipelineManager(conf, nodeManager, eventQueue);
+    mapping = new SCMContainerManager(conf, nodeManager, pipelineManager,
+        eventQueue);
     blockManager = new BlockManagerImpl(conf,
         nodeManager, mapping, eventQueue);
     eventQueue.addHandler(SCMEvents.CHILL_MODE_STATUS, blockManager);
@@ -101,6 +107,7 @@ public class TestBlockManager implements EventHandler<Boolean> {
   @After
   public void cleanup() throws IOException {
     blockManager.close();
+    pipelineManager.close();
     mapping.close();
     FileUtil.fullyDelete(testDir);
   }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/dce4ebe8/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/block/TestDeletedBlockLog.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/block/TestDeletedBlockLog.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/block/TestDeletedBlockLog.java
index 5b76137..06f4f5e 100644
--- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/block/TestDeletedBlockLog.java
+++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/block/TestDeletedBlockLog.java
@@ -24,12 +24,11 @@ import org.apache.hadoop.hdds.scm.container.SCMContainerManager;
 import org.apache.hadoop.hdds.scm.container.ContainerManager;
 import org.apache.hadoop.hdds.scm.container.ContainerInfo;
 import org.apache.hadoop.hdds.scm.container.common.helpers.ContainerWithPipeline;
-import org.apache.hadoop.hdds.scm.container.common.helpers.Pipeline;
-import org.apache.hadoop.hdds.scm.container.common.helpers.PipelineID;
+import org.apache.hadoop.hdds.scm.pipeline.Pipeline;
+import org.apache.hadoop.hdds.scm.pipeline.PipelineID;
 import org.apache.hadoop.hdfs.DFSUtil;
 import org.apache.hadoop.hdds.conf.OzoneConfiguration;
 import org.apache.hadoop.hdds.protocol.DatanodeDetails;
-import org.apache.hadoop.hdds.protocol.proto.HddsProtos.LifeCycleState;
 import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor;
 import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationType;
 import org.apache.hadoop.hdds.protocol.proto
@@ -102,12 +101,13 @@ public class TestDeletedBlockLog {
 
     ContainerInfo containerInfo =
         new ContainerInfo.Builder().setContainerID(1).build();
-    Pipeline pipeline =
-        new Pipeline(null, LifeCycleState.CLOSED,
-            ReplicationType.RATIS, ReplicationFactor.THREE, null);
-    pipeline.addMember(dnList.get(0));
-    pipeline.addMember(dnList.get(1));
-    pipeline.addMember(dnList.get(2));
+    Pipeline pipeline = Pipeline.newBuilder()
+        .setType(ReplicationType.RATIS)
+        .setFactor(ReplicationFactor.THREE)
+        .setState(Pipeline.PipelineState.CLOSED)
+        .setId(PipelineID.randomId())
+        .setNodes(dnList)
+        .build();
     ContainerWithPipeline containerWithPipeline =
         new ContainerWithPipeline(containerInfo, pipeline);
     when(containerManager.getContainerWithPipeline(anyObject()))
@@ -383,11 +383,15 @@ public class TestDeletedBlockLog {
 
   private void mockContainerInfo(long containerID, DatanodeDetails dd)
       throws IOException {
-    Pipeline pipeline =
-        new Pipeline("fake", LifeCycleState.OPEN,
-            ReplicationType.STAND_ALONE, ReplicationFactor.ONE,
-            PipelineID.randomId());
-    pipeline.addMember(dd);
+    List<DatanodeDetails> dns = new ArrayList<>();
+    dns.add(dd);
+    Pipeline pipeline = Pipeline.newBuilder()
+            .setType(ReplicationType.STAND_ALONE)
+            .setFactor(ReplicationFactor.ONE)
+            .setState(Pipeline.PipelineState.OPEN)
+            .setId(PipelineID.randomId())
+            .setNodes(dns)
+            .build();
 
     ContainerInfo.Builder builder = new ContainerInfo.Builder();
     builder.setPipelineID(pipeline.getId())

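As the hunks above show, a Pipeline is no longer built with the leader-centric constructor plus addMember() calls; it is assembled immutably through Pipeline.newBuilder(). A minimal sketch of the new pattern follows, using the test helper TestUtils.randomDatanodeDetails() as the tests above do; the class name PipelineSketch and the three-node RATIS choice are illustrative, not mandated by the API.

  import java.util.ArrayList;
  import java.util.List;

  import org.apache.hadoop.hdds.protocol.DatanodeDetails;
  import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor;
  import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationType;
  import org.apache.hadoop.hdds.scm.TestUtils;
  import org.apache.hadoop.hdds.scm.pipeline.Pipeline;
  import org.apache.hadoop.hdds.scm.pipeline.PipelineID;

  public final class PipelineSketch {

    public static Pipeline openRatisPipeline() {
      List<DatanodeDetails> nodes = new ArrayList<>();
      for (int i = 0; i < 3; i++) {
        nodes.add(TestUtils.randomDatanodeDetails());
      }
      // Id, replication settings, state and membership are all fixed at
      // build time; there is no addMember() on the resulting object.
      return Pipeline.newBuilder()
          .setId(PipelineID.randomId())
          .setType(ReplicationType.RATIS)
          .setFactor(ReplicationFactor.THREE)
          .setState(Pipeline.PipelineState.OPEN)
          .setNodes(nodes)
          .build();
    }
  }

Callers read membership back with getNodes() or getFirstNode(), which replace getMachines() and getLeader() throughout these diffs; an existing pipeline can be adapted with the Pipeline.newBuilder(pipeline) copy-builder, as the ChunkGroupInputStream change further down does for the STAND_ALONE read path.
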
http://git-wip-us.apache.org/repos/asf/hadoop/blob/dce4ebe8/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/MockNodeManager.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/MockNodeManager.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/MockNodeManager.java
index f4ce102..8d36d29 100644
--- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/MockNodeManager.java
+++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/MockNodeManager.java
@@ -19,8 +19,8 @@ package org.apache.hadoop.hdds.scm.container;
 import org.apache.hadoop.hdds.protocol.proto
         .StorageContainerDatanodeProtocolProtos.PipelineReportsProto;
 import org.apache.hadoop.hdds.scm.TestUtils;
-import org.apache.hadoop.hdds.scm.container.common.helpers.Pipeline;
-import org.apache.hadoop.hdds.scm.container.common.helpers.PipelineID;
+import org.apache.hadoop.hdds.scm.pipeline.Pipeline;
+import org.apache.hadoop.hdds.scm.pipeline.PipelineID;
 import org.apache.hadoop.hdds.scm.container.placement.metrics.SCMNodeMetric;
 import org.apache.hadoop.hdds.scm.container.placement.metrics.SCMNodeStat;
 import org.apache.hadoop.hdds.scm.exceptions.SCMException;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/dce4ebe8/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/TestCloseContainerEventHandler.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/TestCloseContainerEventHandler.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/TestCloseContainerEventHandler.java
index 517bc67..44a8deb 100644
--- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/TestCloseContainerEventHandler.java
+++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/TestCloseContainerEventHandler.java
@@ -25,6 +25,8 @@ import org.apache.hadoop.hdds.protocol.DatanodeDetails;
 import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
 import org.apache.hadoop.hdds.scm.container.common.helpers
     .ContainerWithPipeline;
+import org.apache.hadoop.hdds.scm.pipeline.PipelineManager;
+import org.apache.hadoop.hdds.scm.pipeline.SCMPipelineManager;
 import org.apache.hadoop.hdds.server.events.EventQueue;
 import org.apache.hadoop.ozone.OzoneConfigKeys;
 import org.apache.hadoop.ozone.container.common.SCMTestUtils;
@@ -65,8 +67,11 @@ public class TestCloseContainerEventHandler {
     configuration
         .set(OzoneConfigKeys.OZONE_METADATA_DIRS, testDir.getAbsolutePath());
     nodeManager = new MockNodeManager(true, 10);
-    containerManager = new SCMContainerManager(configuration, nodeManager,
-        new EventQueue());
+    PipelineManager pipelineManager =
+        new SCMPipelineManager(configuration, nodeManager, eventQueue);
+    containerManager = new
+        SCMContainerManager(configuration, nodeManager,
+        pipelineManager, new EventQueue());
     eventQueue = new EventQueue();
     eventQueue.addHandler(CLOSE_CONTAINER,
         new CloseContainerEventHandler(containerManager));
@@ -110,11 +115,12 @@ public class TestCloseContainerEventHandler {
     GenericTestUtils.LogCapturer logCapturer = GenericTestUtils.LogCapturer
         .captureLogs(CloseContainerEventHandler.LOG);
     ContainerWithPipeline containerWithPipeline = containerManager
-        .allocateContainer(HddsProtos.ReplicationType.STAND_ALONE,
+        .allocateContainer(HddsProtos.ReplicationType.RATIS,
             HddsProtos.ReplicationFactor.ONE, "ozone");
     ContainerID id = new ContainerID(
         containerWithPipeline.getContainerInfo().getContainerID());
-    DatanodeDetails datanode = containerWithPipeline.getPipeline().getLeader();
+    DatanodeDetails datanode =
+        containerWithPipeline.getPipeline().getFirstNode();
     int closeCount = nodeManager.getCommandCount(datanode);
     eventQueue.fireEvent(CLOSE_CONTAINER, id);
     eventQueue.processAll(1000);
@@ -149,13 +155,13 @@ public class TestCloseContainerEventHandler {
     eventQueue.processAll(1000);
     int i = 0;
     for (DatanodeDetails details : containerWithPipeline.getPipeline()
-        .getMachines()) {
+        .getNodes()) {
       closeCount[i] = nodeManager.getCommandCount(details);
       i++;
     }
     i = 0;
     for (DatanodeDetails details : containerWithPipeline.getPipeline()
-        .getMachines()) {
+        .getNodes()) {
       Assert.assertEquals(closeCount[i], nodeManager.getCommandCount(details));
       i++;
     }
@@ -166,7 +172,7 @@ public class TestCloseContainerEventHandler {
     i = 0;
     // Make sure close is queued for each datanode on the pipeline
     for (DatanodeDetails details : containerWithPipeline.getPipeline()
-        .getMachines()) {
+        .getNodes()) {
       Assert.assertEquals(closeCount[i] + 1,
           nodeManager.getCommandCount(details));
       Assert.assertEquals(HddsProtos.LifeCycleState.CLOSING,

http://git-wip-us.apache.org/repos/asf/hadoop/blob/dce4ebe8/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/TestContainerReportHandler.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/TestContainerReportHandler.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/TestContainerReportHandler.java
index 7135173..861d241 100644
--- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/TestContainerReportHandler.java
+++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/TestContainerReportHandler.java
@@ -32,6 +32,8 @@ import org.apache.hadoop.hdds.scm.container.replication
     .ReplicationActivityStatus;
 import org.apache.hadoop.hdds.scm.container.replication.ReplicationRequest;
 import org.apache.hadoop.hdds.scm.node.NodeManager;
+import org.apache.hadoop.hdds.scm.pipeline.PipelineManager;
+import org.apache.hadoop.hdds.scm.pipeline.SCMPipelineManager;
 import org.apache.hadoop.hdds.scm.server.SCMDatanodeHeartbeatDispatcher
     .ContainerReportFromDatanode;
 import org.apache.hadoop.hdds.server.events.Event;
@@ -73,8 +75,11 @@ public class TestContainerReportHandler implements EventPublisher {
     //GIVEN
     OzoneConfiguration conf = new OzoneConfiguration();
     conf.set(OzoneConfigKeys.OZONE_METADATA_DIRS, testDir);
+    EventQueue eventQueue = new EventQueue();
+    PipelineManager pipelineManager =
+        new SCMPipelineManager(conf, nodeManager, eventQueue);
     SCMContainerManager containerManager = new SCMContainerManager(
-        conf, nodeManager, new EventQueue());
+        conf, nodeManager, pipelineManager, eventQueue);
 
     ReplicationActivityStatus replicationActivityStatus =
         new ReplicationActivityStatus();

http://git-wip-us.apache.org/repos/asf/hadoop/blob/dce4ebe8/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/TestContainerStateManager.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/TestContainerStateManager.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/TestContainerStateManager.java
index 69a3b31..446eb58 100644
--- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/TestContainerStateManager.java
+++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/TestContainerStateManager.java
@@ -18,6 +18,7 @@
 package org.apache.hadoop.hdds.scm.container;
 
 import java.io.IOException;
+import java.util.ArrayList;
 import java.util.Set;
 
 import org.apache.hadoop.hdds.conf.OzoneConfiguration;
@@ -25,9 +26,10 @@ import org.apache.hadoop.hdds.protocol.DatanodeDetails;
 import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
 import org.apache.hadoop.hdds.scm.TestUtils;
 
-import org.apache.hadoop.hdds.scm.container.common.helpers.Pipeline;
-import org.apache.hadoop.hdds.scm.container.common.helpers.PipelineID;
-import org.apache.hadoop.hdds.scm.pipelines.PipelineSelector;
+import org.apache.hadoop.hdds.scm.pipeline.Pipeline;
+import org.apache.hadoop.hdds.scm.pipeline.PipelineID;
+import org.apache.hadoop.hdds.scm.pipeline.PipelineManager;
+import org.apache.hadoop.hdds.scm.pipeline.SCMPipelineManager;
 import org.junit.Assert;
 import org.junit.Before;
 import org.junit.Test;
@@ -102,19 +104,20 @@ public class TestContainerStateManager {
 
   private ContainerInfo allocateContainer() throws IOException {
 
-    PipelineSelector pipelineSelector = Mockito.mock(PipelineSelector.class);
+    PipelineManager pipelineManager = Mockito.mock(SCMPipelineManager.class);
 
-    Pipeline pipeline = new Pipeline("leader", HddsProtos.LifeCycleState.CLOSED,
-        HddsProtos.ReplicationType.STAND_ALONE,
-        HddsProtos.ReplicationFactor.THREE,
-        PipelineID.randomId());
+    Pipeline pipeline =
+        Pipeline.newBuilder().setState(Pipeline.PipelineState.CLOSED)
+            .setId(PipelineID.randomId())
+            .setType(HddsProtos.ReplicationType.STAND_ALONE)
+            .setFactor(HddsProtos.ReplicationFactor.THREE)
+            .setNodes(new ArrayList<>()).build();
 
-    when(pipelineSelector
-        .getReplicationPipeline(HddsProtos.ReplicationType.STAND_ALONE,
-            HddsProtos.ReplicationFactor.THREE)).thenReturn(pipeline);
+    when(pipelineManager.createPipeline(HddsProtos.ReplicationType.STAND_ALONE,
+        HddsProtos.ReplicationFactor.THREE)).thenReturn(pipeline);
 
-    return containerStateManager.allocateContainer(
-        pipelineSelector, HddsProtos.ReplicationType.STAND_ALONE,
+    return containerStateManager.allocateContainer(pipelineManager,
+        HddsProtos.ReplicationType.STAND_ALONE,
         HddsProtos.ReplicationFactor.THREE, "root");
 
   }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/dce4ebe8/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/TestSCMContainerManager.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/TestSCMContainerManager.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/TestSCMContainerManager.java
index 75f8b8c..fa0f084 100644
--- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/TestSCMContainerManager.java
+++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/TestSCMContainerManager.java
@@ -24,13 +24,15 @@ import org.apache.hadoop.hdds.scm.ScmConfigKeys;
 import org.apache.hadoop.hdds.scm.TestUtils;
 import org.apache.hadoop.hdds.scm.XceiverClientManager;
 import org.apache.hadoop.hdds.scm.container.common.helpers.ContainerWithPipeline;
-import org.apache.hadoop.hdds.scm.container.common.helpers.Pipeline;
+import org.apache.hadoop.hdds.scm.pipeline.Pipeline;
 import org.apache.hadoop.hdds.protocol.DatanodeDetails;
 import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
 import org.apache.hadoop.hdds.protocol.proto
     .StorageContainerDatanodeProtocolProtos;
 import org.apache.hadoop.hdds.protocol.proto
     .StorageContainerDatanodeProtocolProtos.ContainerReportsProto;
+import org.apache.hadoop.hdds.scm.pipeline.PipelineManager;
+import org.apache.hadoop.hdds.scm.pipeline.SCMPipelineManager;
 import org.apache.hadoop.hdds.server.events.EventQueue;
 import org.apache.hadoop.ozone.OzoneConfigKeys;
 import org.apache.hadoop.ozone.container.common.SCMTestUtils;
@@ -59,6 +61,7 @@ import java.util.concurrent.TimeUnit;
 public class TestSCMContainerManager {
   private static SCMContainerManager containerManager;
   private static MockNodeManager nodeManager;
+  private static PipelineManager pipelineManager;
   private static File testDir;
   private static XceiverClientManager xceiverClientManager;
   private static String containerOwner = "OZONE";
@@ -85,8 +88,10 @@ public class TestSCMContainerManager {
       throw new IOException("Unable to create test directory path");
     }
     nodeManager = new MockNodeManager(true, 10);
+    pipelineManager =
+        new SCMPipelineManager(conf, nodeManager, new EventQueue());
     containerManager = new SCMContainerManager(conf, nodeManager,
-        new EventQueue());
+        pipelineManager, new EventQueue());
     xceiverClientManager = new XceiverClientManager(conf);
     random = new Random();
   }
@@ -96,6 +101,9 @@ public class TestSCMContainerManager {
     if(containerManager != null) {
       containerManager.close();
     }
+    if (pipelineManager != null) {
+      pipelineManager.close();
+    }
     FileUtil.fullyDelete(testDir);
   }
 
@@ -130,7 +138,7 @@ public class TestSCMContainerManager {
 
       Assert.assertNotNull(containerInfo);
       Assert.assertNotNull(containerInfo.getPipeline());
-      pipelineList.add(containerInfo.getPipeline().getLeader()
+      pipelineList.add(containerInfo.getPipeline().getFirstNode()
           .getUuid());
     }
     Assert.assertTrue(pipelineList.size() > 5);
@@ -145,8 +153,8 @@ public class TestSCMContainerManager {
     Pipeline pipeline  = containerInfo.getPipeline();
     Assert.assertNotNull(pipeline);
     Pipeline newPipeline = containerInfo.getPipeline();
-    Assert.assertEquals(pipeline.getLeader().getUuid(),
-        newPipeline.getLeader().getUuid());
+    Assert.assertEquals(pipeline.getFirstNode().getUuid(),
+        newPipeline.getFirstNode().getUuid());
   }
 
   @Test
@@ -191,15 +199,15 @@ public class TestSCMContainerManager {
     contInfo = containerManager.getContainer(contInfo.containerID());
     Assert.assertEquals(contInfo.getState(), LifeCycleState.CLOSED);
     Pipeline pipeline = containerWithPipeline.getPipeline();
-    containerManager.getPipelineSelector().finalizePipeline(pipeline);
+    pipelineManager.finalizePipeline(pipeline.getId());
 
     ContainerWithPipeline containerWithPipeline2 = containerManager
         .getContainerWithPipeline(contInfo.containerID());
     pipeline = containerWithPipeline2.getPipeline();
     Assert.assertNotEquals(containerWithPipeline, containerWithPipeline2);
     Assert.assertNotNull("Pipeline should not be null", pipeline);
-    Assert.assertTrue(pipeline.getDatanodeHosts().contains(dn1.getHostName()));
-    Assert.assertTrue(pipeline.getDatanodeHosts().contains(dn2.getHostName()));
+    Assert.assertTrue(pipeline.getNodes().contains(dn1));
+    Assert.assertTrue(pipeline.getNodes().contains(dn2));
   }
 
   @Test

http://git-wip-us.apache.org/repos/asf/hadoop/blob/dce4ebe8/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/replication/TestReplicationManager.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/replication/TestReplicationManager.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/replication/TestReplicationManager.java
index b0951c8..571a5fb 100644
--- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/replication/TestReplicationManager.java
+++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/replication/TestReplicationManager.java
@@ -20,23 +20,22 @@ import java.io.IOException;
 import java.util.ArrayList;
 import java.util.Arrays;
 import java.util.HashSet;
-import java.util.Iterator;
 import java.util.List;
 import java.util.Objects;
 import java.util.stream.IntStream;
 
 import org.apache.hadoop.hdds.protocol.DatanodeDetails;
+import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
 import org.apache.hadoop.hdds.protocol.proto.HddsProtos.LifeCycleState;
 import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor;
-import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationType;
 import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.ReplicateContainerCommandProto;
 import org.apache.hadoop.hdds.scm.TestUtils;
 import org.apache.hadoop.hdds.scm.container.ContainerID;
 import org.apache.hadoop.hdds.scm.container.ContainerManager;
 import org.apache.hadoop.hdds.scm.container.ContainerReplica;
 import org.apache.hadoop.hdds.scm.container.ContainerInfo;
-import org.apache.hadoop.hdds.scm.container.common.helpers.Pipeline;
-import org.apache.hadoop.hdds.scm.container.common.helpers.PipelineID;
+import org.apache.hadoop.hdds.scm.pipeline.Pipeline;
+import org.apache.hadoop.hdds.scm.pipeline.PipelineID;
 import org.apache.hadoop.hdds.scm.container.placement.algorithms.ContainerPlacementPolicy;
 import org.apache.hadoop.hdds.scm.container.replication.ReplicationManager.ReplicationRequestToRepeat;
 import org.apache.hadoop.hdds.scm.events.SCMEvents;
@@ -227,18 +226,16 @@ public class TestReplicationManager {
   public static Pipeline createPipeline(Iterable<DatanodeDetails> ids)
       throws IOException {
     Objects.requireNonNull(ids, "ids == null");
-    final Iterator<DatanodeDetails> i = ids.iterator();
-    Preconditions.checkArgument(i.hasNext());
-    final DatanodeDetails leader = i.next();
-    final Pipeline pipeline =
-        new Pipeline(leader.getUuidString(), LifeCycleState.OPEN,
-            ReplicationType.STAND_ALONE, ReplicationFactor.ONE,
-            PipelineID.randomId());
-    pipeline.addMember(leader);
-    while (i.hasNext()) {
-      pipeline.addMember(i.next());
-    }
-    return pipeline;
+    Preconditions.checkArgument(ids.iterator().hasNext());
+    List<DatanodeDetails> dns = new ArrayList<>();
+    ids.forEach(dns::add);
+    return Pipeline.newBuilder()
+        .setState(Pipeline.PipelineState.OPEN)
+        .setId(PipelineID.randomId())
+        .setType(HddsProtos.ReplicationType.STAND_ALONE)
+        .setFactor(ReplicationFactor.ONE)
+        .setNodes(dns)
+        .build();
   }
 
 }
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hadoop/blob/dce4ebe8/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestContainerPlacement.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestContainerPlacement.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestContainerPlacement.java
index fb08ad2..e283732 100644
--- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestContainerPlacement.java
+++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestContainerPlacement.java
@@ -33,6 +33,8 @@ import org.apache.hadoop.hdds.scm.container.placement.algorithms
 import org.apache.hadoop.hdds.conf.OzoneConfiguration;
 import org.apache.hadoop.hdds.protocol.DatanodeDetails;
 import org.apache.hadoop.hdds.scm.events.SCMEvents;
+import org.apache.hadoop.hdds.scm.pipeline.PipelineManager;
+import org.apache.hadoop.hdds.scm.pipeline.SCMPipelineManager;
 import org.apache.hadoop.hdds.server.events.EventQueue;
 import org.apache.hadoop.ozone.OzoneConfigKeys;
 import org.apache.hadoop.ozone.OzoneConsts;
@@ -102,7 +104,9 @@ public class TestContainerPlacement {
     EventQueue eventQueue = new EventQueue();
     final int cacheSize = config.getInt(OZONE_SCM_DB_CACHE_SIZE_MB,
         OZONE_SCM_DB_CACHE_SIZE_DEFAULT);
-    return new SCMContainerManager(config, scmNodeManager,
+    PipelineManager pipelineManager =
+        new SCMPipelineManager(config, scmNodeManager, eventQueue);
+    return new SCMContainerManager(config, scmNodeManager, pipelineManager,
         eventQueue);
 
   }
@@ -156,7 +160,7 @@ public class TestContainerPlacement {
           xceiverClientManager.getType(),
           xceiverClientManager.getFactor(), "OZONE");
       assertEquals(xceiverClientManager.getFactor().getNumber(),
-          containerWithPipeline.getPipeline().getMachines().size());
+          containerWithPipeline.getPipeline().getNodes().size());
     } finally {
       IOUtils.closeQuietly(containerManager);
       IOUtils.closeQuietly(nodeManager);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/dce4ebe8/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestDeadNodeHandler.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestDeadNodeHandler.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestDeadNodeHandler.java
index d971e68..985fa2c 100644
--- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestDeadNodeHandler.java
+++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestDeadNodeHandler.java
@@ -42,6 +42,8 @@ import org.apache.hadoop.hdds.scm.container.placement.metrics.SCMNodeMetric;
 import org.apache.hadoop.hdds.scm.events.SCMEvents;
 import org.apache.hadoop.hdds.scm.container.placement.metrics.SCMNodeStat;
 import org.apache.hadoop.hdds.scm.exceptions.SCMException;
+import org.apache.hadoop.hdds.scm.pipeline.PipelineManager;
+import org.apache.hadoop.hdds.scm.pipeline.SCMPipelineManager;
 import org.apache.hadoop.hdds.scm.server.SCMDatanodeHeartbeatDispatcher.NodeReportFromDatanode;
 import org.apache.hadoop.hdds.server.events.EventPublisher;
 
@@ -75,7 +77,10 @@ public class TestDeadNodeHandler {
     conf.set(OzoneConfigKeys.OZONE_METADATA_DIRS, storageDir);
     eventQueue = new EventQueue();
     nodeManager = new SCMNodeManager(conf, "cluster1", null, eventQueue);
-    containerManager = new SCMContainerManager(conf, nodeManager, eventQueue);
+    PipelineManager pipelineManager =
+        new SCMPipelineManager(conf, nodeManager, eventQueue);
+    containerManager = new SCMContainerManager(conf, nodeManager,
+        pipelineManager, eventQueue);
     deadNodeHandler = new DeadNodeHandler(nodeManager, containerManager);
     eventQueue.addHandler(SCMEvents.DEAD_NODE, deadNodeHandler);
     publisher = Mockito.mock(EventPublisher.class);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/dce4ebe8/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestNodeManager.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestNodeManager.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestNodeManager.java
index ed95709..c899bda 100644
--- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestNodeManager.java
+++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestNodeManager.java
@@ -21,7 +21,7 @@ import org.apache.hadoop.fs.FileUtil;
 import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationType;
 import org.apache.hadoop.hdds.scm.ScmConfigKeys;
 import org.apache.hadoop.hdds.scm.TestUtils;
-import org.apache.hadoop.hdds.scm.container.common.helpers.PipelineID;
+import org.apache.hadoop.hdds.scm.pipeline.PipelineID;
 import org.apache.hadoop.hdds.scm.container.placement.metrics.SCMNodeStat;
 import org.apache.hadoop.hdds.conf.OzoneConfiguration;
 import org.apache.hadoop.hdds.protocol.DatanodeDetails;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/dce4ebe8/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/ozone/container/common/TestEndPoint.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/ozone/container/common/TestEndPoint.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/ozone/container/common/TestEndPoint.java
index 390746f..b2ddf39 100644
--- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/ozone/container/common/TestEndPoint.java
+++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/ozone/container/common/TestEndPoint.java
@@ -51,7 +51,7 @@ import org.apache.hadoop.hdds.protocol.proto
     .StorageContainerDatanodeProtocolProtos.StorageReportProto;
 import org.apache.hadoop.hdds.protocol.proto
     .StorageContainerDatanodeProtocolProtos.SCMVersionResponseProto;
-import org.apache.hadoop.hdds.scm.container.common.helpers.PipelineID;
+import org.apache.hadoop.hdds.scm.pipeline.PipelineID;
 import org.apache.hadoop.ipc.RPC;
 import org.apache.hadoop.ozone.OzoneConfigKeys;
 import org.apache.hadoop.ozone.container.common.statemachine

http://git-wip-us.apache.org/repos/asf/hadoop/blob/dce4ebe8/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/ozone/container/testutils/ReplicationNodeManagerMock.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/ozone/container/testutils/ReplicationNodeManagerMock.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/ozone/container/testutils/ReplicationNodeManagerMock.java
index 069f1af..fbc3420 100644
--- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/ozone/container/testutils/ReplicationNodeManagerMock.java
+++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/ozone/container/testutils/ReplicationNodeManagerMock.java
@@ -20,8 +20,8 @@ import com.google.common.base.Preconditions;
 import org.apache.hadoop.hdds.protocol.proto
         .StorageContainerDatanodeProtocolProtos.PipelineReportsProto;
 import org.apache.hadoop.hdds.scm.container.ContainerID;
-import org.apache.hadoop.hdds.scm.container.common.helpers.Pipeline;
-import org.apache.hadoop.hdds.scm.container.common.helpers.PipelineID;
+import org.apache.hadoop.hdds.scm.pipeline.Pipeline;
+import org.apache.hadoop.hdds.scm.pipeline.PipelineID;
 import org.apache.hadoop.hdds.scm.container.placement.metrics.SCMNodeMetric;
 import org.apache.hadoop.hdds.scm.container.placement.metrics.SCMNodeStat;
 import org.apache.hadoop.hdds.scm.exceptions.SCMException;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/dce4ebe8/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/container/InfoSubcommand.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/container/InfoSubcommand.java b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/container/InfoSubcommand.java
index 0135df3..bf37718 100644
--- a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/container/InfoSubcommand.java
+++ b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/container/InfoSubcommand.java
@@ -82,10 +82,7 @@ public class InfoSubcommand implements Callable<Void> {
       LOG.info("Container Metadata: {}", metadataStr);
 
       // Print pipeline of an existing container.
-      LOG.info("LeaderID: {}", container.getPipeline()
-          .getLeader().getHostName());
-      String machinesStr = container.getPipeline()
-          .getMachines().stream().map(
+      String machinesStr = container.getPipeline().getNodes().stream().map(
               DatanodeDetails::getHostName).collect(Collectors.joining(","));
       LOG.info("Datanodes: [{}]", machinesStr);
       return null;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/dce4ebe8/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/ChunkGroupInputStream.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/ChunkGroupInputStream.java b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/ChunkGroupInputStream.java
index 3772c59..0c09fc8 100644
--- a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/ChunkGroupInputStream.java
+++ b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/ChunkGroupInputStream.java
@@ -24,7 +24,7 @@ import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos;
 import org.apache.hadoop.hdds.client.BlockID;
 import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
 import org.apache.hadoop.hdds.scm.container.common.helpers.ContainerWithPipeline;
-import org.apache.hadoop.hdds.scm.container.common.helpers.Pipeline;
+import org.apache.hadoop.hdds.scm.pipeline.Pipeline;
 import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;
 import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfo;
 import org.apache.hadoop.hdds.scm.XceiverClientManager;
@@ -282,7 +282,10 @@ public class ChunkGroupInputStream extends InputStream implements Seekable {
 
       // irrespective of the container state, we will always read via Standalone
       // protocol.
-      pipeline.setType(HddsProtos.ReplicationType.STAND_ALONE);
+      if (pipeline.getType() != HddsProtos.ReplicationType.STAND_ALONE) {
+        pipeline = Pipeline.newBuilder(pipeline)
+            .setType(HddsProtos.ReplicationType.STAND_ALONE).build();
+      }
       XceiverClientSpi xceiverClient = xceiverClientManager
           .acquireClient(pipeline);
       boolean success = false;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/dce4ebe8/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestNode2PipelineMap.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestNode2PipelineMap.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestNode2PipelineMap.java
index 7a0fa5c..74cbea4 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestNode2PipelineMap.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestNode2PipelineMap.java
@@ -24,9 +24,6 @@ import org.apache.hadoop.hdds.scm.container.ContainerID;
 import org.apache.hadoop.hdds.scm.container.ContainerManager;
 import org.apache.hadoop.hdds.scm.container.common.helpers
     .ContainerWithPipeline;
-import org.apache.hadoop.hdds.scm.container.common.helpers.PipelineID;
-import org.apache.hadoop.hdds.scm.exceptions.SCMException;
-import org.apache.hadoop.hdds.scm.pipelines.PipelineSelector;
 import org.apache.hadoop.hdds.scm.server.StorageContainerManager;
 import org.apache.hadoop.ozone.MiniOzoneCluster;
 import org.junit.After;
@@ -50,7 +47,7 @@ public class TestNode2PipelineMap {
   private static StorageContainerManager scm;
   private static ContainerWithPipeline ratisContainer;
   private static ContainerManager containerManager;
-  private static PipelineSelector pipelineSelector;
+  private static PipelineManager pipelineManager;
 
   /**
    * Create a MiniDFSCluster for testing.
@@ -66,7 +63,7 @@ public class TestNode2PipelineMap {
     containerManager = scm.getContainerManager();
     ratisContainer = containerManager.allocateContainer(
         RATIS, THREE, "testOwner");
-    pipelineSelector = containerManager.getPipelineSelector();
+    pipelineManager = scm.getPipelineManager();
   }
 
   /**
@@ -83,15 +80,15 @@ public class TestNode2PipelineMap {
   @Test
   public void testPipelineMap() throws IOException {
 
-    Set<ContainerID> set = pipelineSelector.getOpenContainerIDsByPipeline(
-        ratisContainer.getPipeline().getId());
+    Set<ContainerID> set = pipelineManager
+        .getContainersInPipeline(ratisContainer.getPipeline().getId());
 
     ContainerID cId = ratisContainer.getContainerInfo().containerID();
     Assert.assertEquals(1, set.size());
     set.forEach(containerID ->
             Assert.assertEquals(containerID, cId));
 
-    List<DatanodeDetails> dns = ratisContainer.getPipeline().getMachines();
+    List<DatanodeDetails> dns = ratisContainer.getPipeline().getNodes();
     Assert.assertEquals(3, dns.size());
 
     // get pipeline details by dnid
@@ -112,18 +109,14 @@ public class TestNode2PipelineMap {
         .updateContainerState(cId, HddsProtos.LifeCycleEvent.FINALIZE);
     containerManager
         .updateContainerState(cId, HddsProtos.LifeCycleEvent.CLOSE);
-    Set<ContainerID> set2 = pipelineSelector.getOpenContainerIDsByPipeline(
+    Set<ContainerID> set2 = pipelineManager.getContainersInPipeline(
         ratisContainer.getPipeline().getId());
     Assert.assertEquals(0, set2.size());
 
-    try {
-      pipelineSelector.updatePipelineState(ratisContainer.getPipeline(),
-          HddsProtos.LifeCycleEvent.CLOSE);
-      Assert.fail("closing of pipeline without finalize should fail");
-    } catch (Exception e) {
-      Assert.assertTrue(e instanceof SCMException);
-      Assert.assertEquals(((SCMException)e).getResult(),
-          SCMException.ResultCodes.FAILED_TO_CHANGE_PIPELINE_STATE);
-    }
+    pipelineManager.finalizePipeline(ratisContainer.getPipeline().getId());
+    pipelineManager.removePipeline(ratisContainer.getPipeline().getId());
+    pipelines = scm.getScmNodeManager()
+        .getPipelineByDnID(dns.get(0).getUuid());
+    Assert.assertEquals(0, pipelines.size());
   }
 }

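The rewritten assertions above go through the PipelineManager API instead of PipelineSelector state transitions. The fragment below is an illustrative sketch of that lifecycle; PipelineTeardownSketch is a hypothetical helper, and it assumes a running StorageContainerManager and an open pipeline obtained as in the test.

  import java.io.IOException;
  import java.util.Set;

  import org.apache.hadoop.hdds.scm.container.ContainerID;
  import org.apache.hadoop.hdds.scm.pipeline.Pipeline;
  import org.apache.hadoop.hdds.scm.pipeline.PipelineManager;
  import org.apache.hadoop.hdds.scm.server.StorageContainerManager;

  public final class PipelineTeardownSketch {

    static void tearDownPipeline(StorageContainerManager scm,
        Pipeline pipeline) throws IOException {
      PipelineManager pipelineManager = scm.getPipelineManager();

      // Containers mapped to a pipeline are queried from the manager
      // directly (queried here only for illustration).
      Set<ContainerID> containers =
          pipelineManager.getContainersInPipeline(pipeline.getId());

      // Teardown is a two-step call on the manager: finalize, then remove.
      pipelineManager.finalizePipeline(pipeline.getId());
      pipelineManager.removePipeline(pipeline.getId());
    }
  }
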
http://git-wip-us.apache.org/repos/asf/hadoop/blob/dce4ebe8/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestNodeFailure.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestNodeFailure.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestNodeFailure.java
index f3e1ece..45886c6 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestNodeFailure.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestNodeFailure.java
@@ -20,12 +20,10 @@ package org.apache.hadoop.hdds.scm.pipeline;
 
 import org.apache.hadoop.hdds.conf.OzoneConfiguration;
 import org.apache.hadoop.hdds.protocol.DatanodeDetails;
-import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
 import org.apache.hadoop.hdds.scm.ScmConfigKeys;
 import org.apache.hadoop.hdds.scm.container.ContainerManager;
 import org.apache.hadoop.hdds.scm.container.common.helpers
     .ContainerWithPipeline;
-import org.apache.hadoop.hdds.scm.container.common.helpers.Pipeline;
 import org.apache.hadoop.hdds.scm.server.StorageContainerManager;
 import org.apache.hadoop.ozone.MiniOzoneCluster;
 import org.apache.hadoop.ozone.OzoneConfigKeys;
@@ -53,6 +51,7 @@ public class TestNodeFailure {
   private static ContainerWithPipeline ratisContainer1;
   private static ContainerWithPipeline ratisContainer2;
   private static ContainerManager containerManager;
+  private static PipelineManager pipelineManager;
   private static long timeForFailure;
 
   /**
@@ -76,6 +75,7 @@ public class TestNodeFailure {
     cluster.waitForClusterToBeReady();
     StorageContainerManager scm = cluster.getStorageContainerManager();
     containerManager = scm.getContainerManager();
+    pipelineManager = scm.getPipelineManager();
     ratisContainer1 = containerManager.allocateContainer(
         RATIS, THREE, "testOwner");
     ratisContainer2 = containerManager.allocateContainer(
@@ -102,21 +102,21 @@ public class TestNodeFailure {
   @Test
   public void testPipelineFail() throws InterruptedException, IOException,
       TimeoutException {
-    Assert.assertEquals(ratisContainer1.getPipeline().getLifeCycleState(),
-        HddsProtos.LifeCycleState.OPEN);
+    Assert.assertEquals(ratisContainer1.getPipeline().getPipelineState(),
+        Pipeline.PipelineState.OPEN);
     Pipeline pipelineToFail = ratisContainer1.getPipeline();
-    DatanodeDetails dnToFail = pipelineToFail.getMachines().get(0);
+    DatanodeDetails dnToFail = pipelineToFail.getFirstNode();
     cluster.shutdownHddsDatanode(dnToFail);
 
     // wait for sufficient time for the callback to be triggered
     Thread.sleep(3 * timeForFailure);
 
-    Assert.assertEquals(HddsProtos.LifeCycleState.CLOSED,
-        ratisContainer1.getPipeline().getLifeCycleState());
-    Assert.assertEquals(HddsProtos.LifeCycleState.OPEN,
-        ratisContainer2.getPipeline().getLifeCycleState());
-    Assert.assertNull(containerManager.getPipelineSelector()
-        .getPipeline(pipelineToFail.getId()));
+    Assert.assertEquals(Pipeline.PipelineState.CLOSED,
+        pipelineManager.getPipeline(ratisContainer1.getPipeline().getId())
+            .getPipelineState());
+    Assert.assertEquals(Pipeline.PipelineState.OPEN,
+        pipelineManager.getPipeline(ratisContainer2.getPipeline().getId())
+            .getPipelineState());
     // Now restart the datanode and make sure that a new pipeline is created.
     cluster.restartHddsDatanode(dnToFail);
     ContainerWithPipeline ratisContainer3 =

http://git-wip-us.apache.org/repos/asf/hadoop/blob/dce4ebe8/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestPipelineClose.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestPipelineClose.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestPipelineClose.java
index 52a493d..42d3063 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestPipelineClose.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestPipelineClose.java
@@ -24,8 +24,6 @@ import org.apache.hadoop.hdds.scm.container.ContainerID;
 import org.apache.hadoop.hdds.scm.container.ContainerManager;
 import org.apache.hadoop.hdds.scm.container.common.helpers
     .ContainerWithPipeline;
-import org.apache.hadoop.hdds.scm.container.common.helpers.Pipeline;
-import org.apache.hadoop.hdds.scm.pipelines.PipelineSelector;
 import org.apache.hadoop.hdds.scm.server.StorageContainerManager;
 import org.apache.hadoop.ozone.MiniOzoneCluster;
 import org.junit.AfterClass;
@@ -50,7 +48,7 @@ public class TestPipelineClose {
   private static ContainerWithPipeline ratisContainer1;
   private static ContainerWithPipeline ratisContainer2;
   private static ContainerManager containerManager;
-  private static PipelineSelector pipelineSelector;
+  private static PipelineManager pipelineManager;
 
   /**
    * Create a MiniDFSCluster for testing.
@@ -68,7 +66,7 @@ public class TestPipelineClose {
         .allocateContainer(RATIS, THREE, "testOwner");
     ratisContainer2 = containerManager
         .allocateContainer(RATIS, THREE, "testOwner");
-    pipelineSelector = containerManager.getPipelineSelector();
+    pipelineManager = scm.getPipelineManager();
     // At this stage, there should be 2 pipeline one with 1 open container each.
     // Try closing the both the pipelines, one with a closed container and
     // the other with an open container.
@@ -87,8 +85,8 @@ public class TestPipelineClose {
 
   @Test
   public void testPipelineCloseWithClosedContainer() throws IOException {
-    Set<ContainerID> set = pipelineSelector.getOpenContainerIDsByPipeline(
-        ratisContainer1.getPipeline().getId());
+    Set<ContainerID> set = pipelineManager
+        .getContainersInPipeline(ratisContainer1.getPipeline().getId());
 
     ContainerID cId = ratisContainer1.getContainerInfo().containerID();
     Assert.assertEquals(1, set.size());
@@ -105,17 +103,17 @@ public class TestPipelineClose {
     containerManager
         .updateContainerState(cId, HddsProtos.LifeCycleEvent.CLOSE);
 
-    Set<ContainerID> setClosed = pipelineSelector.getOpenContainerIDsByPipeline(
-        ratisContainer1.getPipeline().getId());
+    Set<ContainerID> setClosed = pipelineManager
+        .getContainersInPipeline(ratisContainer1.getPipeline().getId());
     Assert.assertEquals(0, setClosed.size());
 
-    pipelineSelector.finalizePipeline(ratisContainer1.getPipeline());
-    Pipeline pipeline1 = pipelineSelector
+    pipelineManager.finalizePipeline(ratisContainer1.getPipeline().getId());
+    Pipeline pipeline1 = pipelineManager
         .getPipeline(ratisContainer1.getPipeline().getId());
-    Assert.assertNull(pipeline1);
-    Assert.assertEquals(ratisContainer1.getPipeline().getLifeCycleState(),
-        HddsProtos.LifeCycleState.CLOSED);
-    for (DatanodeDetails dn : ratisContainer1.getPipeline().getMachines()) {
+    Assert.assertEquals(pipeline1.getPipelineState(),
+        Pipeline.PipelineState.CLOSED);
+    pipelineManager.removePipeline(pipeline1.getId());
+    for (DatanodeDetails dn : ratisContainer1.getPipeline().getNodes()) {
       // Assert that the pipeline has been removed from Node2PipelineMap as well
       Assert.assertEquals(scm.getScmNodeManager().getPipelineByDnID(
           dn.getUuid()).size(), 0);
@@ -125,7 +123,7 @@ public class TestPipelineClose {
   @Test
   public void testPipelineCloseWithOpenContainer() throws IOException,
       TimeoutException, InterruptedException {
-    Set<ContainerID> setOpen = pipelineSelector.getOpenContainerIDsByPipeline(
+    Set<ContainerID> setOpen = pipelineManager.getContainersInPipeline(
         ratisContainer2.getPipeline().getId());
     Assert.assertEquals(1, setOpen.size());
 
@@ -134,12 +132,13 @@ public class TestPipelineClose {
         .updateContainerState(cId2, HddsProtos.LifeCycleEvent.CREATE);
     containerManager
         .updateContainerState(cId2, HddsProtos.LifeCycleEvent.CREATED);
-    pipelineSelector.finalizePipeline(ratisContainer2.getPipeline());
-    Assert.assertEquals(ratisContainer2.getPipeline().getLifeCycleState(),
-        HddsProtos.LifeCycleState.CLOSING);
-    Pipeline pipeline2 = pipelineSelector
+    pipelineManager.finalizePipeline(ratisContainer2.getPipeline().getId());
+    Assert.assertEquals(
+        pipelineManager.getPipeline(ratisContainer2.getPipeline().getId())
+            .getPipelineState(), Pipeline.PipelineState.CLOSED);
+    Pipeline pipeline2 = pipelineManager
         .getPipeline(ratisContainer2.getPipeline().getId());
-    Assert.assertEquals(pipeline2.getLifeCycleState(),
-        HddsProtos.LifeCycleState.CLOSING);
+    Assert.assertEquals(pipeline2.getPipelineState(),
+        Pipeline.PipelineState.CLOSED);
   }
 }
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hadoop/blob/dce4ebe8/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestPipelineStateManager.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestPipelineStateManager.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestPipelineStateManager.java
index 49fb2bc..fd6f76b 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestPipelineStateManager.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestPipelineStateManager.java
@@ -91,7 +91,7 @@ public class TestPipelineStateManager {
     }
 
     // verify pipeline returned is same
-    Pipeline pipeline1 = stateManager.getPipeline(pipeline.getID());
+    Pipeline pipeline1 = stateManager.getPipeline(pipeline.getId());
     Assert.assertTrue(pipeline == pipeline1);
 
     // clean up
@@ -102,15 +102,17 @@ public class TestPipelineStateManager {
   public void testGetPipelines() throws IOException {
     Set<Pipeline> pipelines = new HashSet<>();
     Pipeline pipeline = createDummyPipeline(1);
-    pipelines.add(pipeline);
     stateManager.addPipeline(pipeline);
-    pipeline = createDummyPipeline(1);
+    stateManager.openPipeline(pipeline.getId());
     pipelines.add(pipeline);
+    pipeline = createDummyPipeline(1);
     stateManager.addPipeline(pipeline);
+    stateManager.openPipeline(pipeline.getId());
+    pipelines.add(pipeline);
 
-    Set<Pipeline> pipelines1 = new HashSet<>(stateManager.getPipelinesByType(
+    Set<Pipeline> pipelines1 = new HashSet<>(stateManager.getPipelines(
         HddsProtos.ReplicationType.RATIS));
-    Assert.assertEquals(pipelines, pipelines1);
+    Assert.assertEquals(pipelines1.size(), pipelines.size());
     // clean up
     for (Pipeline pipeline1 : pipelines) {
       removePipeline(pipeline1);
@@ -131,16 +133,16 @@ public class TestPipelineStateManager {
           stateManager.addPipeline(pipeline);
           pipelines.add(pipeline);
 
-          // 5 pipelines in allocated state for each type and factor
+          // 5 pipelines in open state for each type and factor
           pipeline = createDummyPipeline(type, factor, factor.getNumber());
           stateManager.addPipeline(pipeline);
-          stateManager.openPipeline(pipeline.getID());
+          stateManager.openPipeline(pipeline.getId());
           pipelines.add(pipeline);
 
-          // 5 pipelines in allocated state for each type and factor
+          // 5 pipelines in closed state for each type and factor
           pipeline = createDummyPipeline(type, factor, factor.getNumber());
           stateManager.addPipeline(pipeline);
-          stateManager.finalizePipeline(pipeline.getID());
+          stateManager.finalizePipeline(pipeline.getId());
           pipelines.add(pipeline);
         }
       }
@@ -152,8 +154,8 @@ public class TestPipelineStateManager {
           .values()) {
         // verify pipelines received
         List<Pipeline> pipelines1 =
-            stateManager.getPipelinesByTypeAndFactor(type, factor);
-        Assert.assertEquals(5, pipelines1.size());
+            stateManager.getPipelines(type, factor);
+        Assert.assertEquals(15, pipelines1.size());
         pipelines1.stream().forEach(p -> {
           Assert.assertEquals(p.getType(), type);
           Assert.assertEquals(p.getFactor(), factor);
@@ -168,40 +170,79 @@ public class TestPipelineStateManager {
   }
 
   @Test
+  public void testGetPipelinesByTypeAndState() throws IOException {
+    Set<Pipeline> pipelines = new HashSet<>();
+    for (HddsProtos.ReplicationType type : HddsProtos.ReplicationType
+        .values()) {
+      HddsProtos.ReplicationFactor factor = HddsProtos.ReplicationFactor.THREE;
+      for (int i = 0; i < 5; i++) {
+        // 5 pipelines in allocated state for each type and factor
+        Pipeline pipeline =
+            createDummyPipeline(type, factor, factor.getNumber());
+        stateManager.addPipeline(pipeline);
+        pipelines.add(pipeline);
+
+        // 5 pipelines in open state for each type and factor
+        pipeline = createDummyPipeline(type, factor, factor.getNumber());
+        stateManager.addPipeline(pipeline);
+        stateManager.openPipeline(pipeline.getId());
+        pipelines.add(pipeline);
+
+        // 5 pipelines in closed state for each type and factor
+        pipeline = createDummyPipeline(type, factor, factor.getNumber());
+        stateManager.addPipeline(pipeline);
+        stateManager.finalizePipeline(pipeline.getId());
+        pipelines.add(pipeline);
+      }
+    }
+
+    for (HddsProtos.ReplicationType type : HddsProtos.ReplicationType
+        .values()) {
+      // verify pipelines received
+      List<Pipeline> pipelines1 = stateManager
+          .getPipelines(type, Pipeline.PipelineState.OPEN);
+      Assert.assertEquals(5, pipelines1.size());
+      pipelines1.forEach(p -> {
+        Assert.assertEquals(p.getType(), type);
+        Assert.assertEquals(p.getPipelineState(), Pipeline.PipelineState.OPEN);
+      });
+
+      pipelines1 = stateManager
+          .getPipelines(type, Pipeline.PipelineState.OPEN,
+              Pipeline.PipelineState.CLOSED, Pipeline.PipelineState.ALLOCATED);
+      Assert.assertEquals(15, pipelines1.size());
+    }
+
+    //clean up
+    for (Pipeline pipeline : pipelines) {
+      removePipeline(pipeline);
+    }
+  }
+
+  @Test
   public void testAddAndGetContainer() throws IOException {
     long containerID = 0;
     Pipeline pipeline = createDummyPipeline(1);
     stateManager.addPipeline(pipeline);
-    pipeline = stateManager.getPipeline(pipeline.getID());
-
-    try {
-      stateManager.addContainerToPipeline(pipeline.getID(),
-          ContainerID.valueof(++containerID));
-      Assert.fail("Container should not have been added");
-    } catch (IOException e) {
-      // add container possible only in container with open state
-      Assert.assertTrue(e.getMessage().contains("is not in open state"));
-    }
+    pipeline = stateManager.getPipeline(pipeline.getId());
+    stateManager.addContainerToPipeline(pipeline.getId(),
+        ContainerID.valueof(++containerID));
 
     // move pipeline to open state
-    stateManager.openPipeline(pipeline.getID());
-
-    // add three containers
-    stateManager.addContainerToPipeline(pipeline.getID(),
-        ContainerID.valueof(containerID));
-    stateManager.addContainerToPipeline(pipeline.getID(),
+    stateManager.openPipeline(pipeline.getId());
+    stateManager.addContainerToPipeline(pipeline.getId(),
         ContainerID.valueof(++containerID));
-    stateManager.addContainerToPipeline(pipeline.getID(),
+    stateManager.addContainerToPipeline(pipeline.getId(),
         ContainerID.valueof(++containerID));
 
     //verify the number of containers returned
     Set<ContainerID> containerIDs =
-        stateManager.getContainers(pipeline.getID());
+        stateManager.getContainers(pipeline.getId());
     Assert.assertEquals(containerIDs.size(), containerID);
 
     removePipeline(pipeline);
     try {
-      stateManager.addContainerToPipeline(pipeline.getID(),
+      stateManager.addContainerToPipeline(pipeline.getId(),
           ContainerID.valueof(++containerID));
       Assert.fail("Container should not have been added");
     } catch (IOException e) {
@@ -215,12 +256,12 @@ public class TestPipelineStateManager {
     Pipeline pipeline = createDummyPipeline(1);
     stateManager.addPipeline(pipeline);
     // close the pipeline
-    stateManager.openPipeline(pipeline.getID());
+    stateManager.openPipeline(pipeline.getId());
     stateManager
-        .addContainerToPipeline(pipeline.getID(), ContainerID.valueof(1));
+        .addContainerToPipeline(pipeline.getId(), ContainerID.valueof(1));
 
     try {
-      stateManager.removePipeline(pipeline.getID());
+      stateManager.removePipeline(pipeline.getId());
       Assert.fail("Pipeline should not have been removed");
     } catch (IOException e) {
       // can not remove a pipeline which already has containers
@@ -228,10 +269,10 @@ public class TestPipelineStateManager {
     }
 
     // close the pipeline
-    stateManager.finalizePipeline(pipeline.getID());
+    stateManager.finalizePipeline(pipeline.getId());
 
     try {
-      stateManager.removePipeline(pipeline.getID());
+      stateManager.removePipeline(pipeline.getId());
       Assert.fail("Pipeline should not have been removed");
     } catch (IOException e) {
       // can not remove a pipeline which already has containers
@@ -248,33 +289,33 @@ public class TestPipelineStateManager {
     Pipeline pipeline = createDummyPipeline(1);
     // create an open pipeline in stateMap
     stateManager.addPipeline(pipeline);
-    stateManager.openPipeline(pipeline.getID());
+    stateManager.openPipeline(pipeline.getId());
 
-    stateManager.addContainerToPipeline(pipeline.getID(),
+    stateManager.addContainerToPipeline(pipeline.getId(),
         ContainerID.valueof(containerID));
-    Assert.assertEquals(1, stateManager.getContainers(pipeline.getID()).size());
-    stateManager.removeContainerFromPipeline(pipeline.getID(),
+    Assert.assertEquals(1, stateManager.getContainers(pipeline.getId()).size());
+    stateManager.removeContainerFromPipeline(pipeline.getId(),
         ContainerID.valueof(containerID));
-    Assert.assertEquals(0, stateManager.getContainers(pipeline.getID()).size());
+    Assert.assertEquals(0, stateManager.getContainers(pipeline.getId()).size());
 
     // add two containers in the pipeline
-    stateManager.addContainerToPipeline(pipeline.getID(),
+    stateManager.addContainerToPipeline(pipeline.getId(),
         ContainerID.valueof(++containerID));
-    stateManager.addContainerToPipeline(pipeline.getID(),
+    stateManager.addContainerToPipeline(pipeline.getId(),
         ContainerID.valueof(++containerID));
-    Assert.assertEquals(2, stateManager.getContainers(pipeline.getID()).size());
+    Assert.assertEquals(2, stateManager.getContainers(pipeline.getId()).size());
 
     // move pipeline to closing state
-    stateManager.finalizePipeline(pipeline.getID());
+    stateManager.finalizePipeline(pipeline.getId());
 
-    stateManager.removeContainerFromPipeline(pipeline.getID(),
+    stateManager.removeContainerFromPipeline(pipeline.getId(),
         ContainerID.valueof(containerID));
-    stateManager.removeContainerFromPipeline(pipeline.getID(),
+    stateManager.removeContainerFromPipeline(pipeline.getId(),
         ContainerID.valueof(--containerID));
-    Assert.assertEquals(0, stateManager.getContainers(pipeline.getID()).size());
+    Assert.assertEquals(0, stateManager.getContainers(pipeline.getId()).size());
 
     // clean up
-    stateManager.removePipeline(pipeline.getID());
+    stateManager.removePipeline(pipeline.getId());
   }
 
   @Test
@@ -282,30 +323,30 @@ public class TestPipelineStateManager {
     Pipeline pipeline = createDummyPipeline(1);
     stateManager.addPipeline(pipeline);
     // finalize on ALLOCATED pipeline
-    stateManager.finalizePipeline(pipeline.getID());
+    stateManager.finalizePipeline(pipeline.getId());
     Assert.assertEquals(Pipeline.PipelineState.CLOSED,
-        stateManager.getPipeline(pipeline.getID()).getPipelineState());
+        stateManager.getPipeline(pipeline.getId()).getPipelineState());
     // clean up
     removePipeline(pipeline);
 
     pipeline = createDummyPipeline(1);
     stateManager.addPipeline(pipeline);
-    stateManager.openPipeline(pipeline.getID());
+    stateManager.openPipeline(pipeline.getId());
     // finalize on OPEN pipeline
-    stateManager.finalizePipeline(pipeline.getID());
+    stateManager.finalizePipeline(pipeline.getId());
     Assert.assertEquals(Pipeline.PipelineState.CLOSED,
-        stateManager.getPipeline(pipeline.getID()).getPipelineState());
+        stateManager.getPipeline(pipeline.getId()).getPipelineState());
     // clean up
     removePipeline(pipeline);
 
     pipeline = createDummyPipeline(1);
     stateManager.addPipeline(pipeline);
-    stateManager.openPipeline(pipeline.getID());
-    stateManager.finalizePipeline(pipeline.getID());
+    stateManager.openPipeline(pipeline.getId());
+    stateManager.finalizePipeline(pipeline.getId());
     // finalize should work on already closed pipeline
-    stateManager.finalizePipeline(pipeline.getID());
+    stateManager.finalizePipeline(pipeline.getId());
     Assert.assertEquals(Pipeline.PipelineState.CLOSED,
-        stateManager.getPipeline(pipeline.getID()).getPipelineState());
+        stateManager.getPipeline(pipeline.getId()).getPipelineState());
     // clean up
     removePipeline(pipeline);
   }
@@ -315,25 +356,25 @@ public class TestPipelineStateManager {
     Pipeline pipeline = createDummyPipeline(1);
     stateManager.addPipeline(pipeline);
     // open on ALLOCATED pipeline
-    stateManager.openPipeline(pipeline.getID());
+    stateManager.openPipeline(pipeline.getId());
     Assert.assertEquals(Pipeline.PipelineState.OPEN,
-        stateManager.getPipeline(pipeline.getID()).getPipelineState());
+        stateManager.getPipeline(pipeline.getId()).getPipelineState());
 
-    stateManager.openPipeline(pipeline.getID());
+    stateManager.openPipeline(pipeline.getId());
     // open should work on already open pipeline
     Assert.assertEquals(Pipeline.PipelineState.OPEN,
-        stateManager.getPipeline(pipeline.getID()).getPipelineState());
+        stateManager.getPipeline(pipeline.getId()).getPipelineState());
     // clean up
     removePipeline(pipeline);
   }
 
   private void removePipeline(Pipeline pipeline) throws IOException {
-    stateManager.finalizePipeline(pipeline.getID());
+    stateManager.finalizePipeline(pipeline.getId());
     Set<ContainerID> containerIDs =
-        stateManager.getContainers(pipeline.getID());
+        stateManager.getContainers(pipeline.getId());
     for (ContainerID containerID : containerIDs) {
-      stateManager.removeContainerFromPipeline(pipeline.getID(), containerID);
+      stateManager.removeContainerFromPipeline(pipeline.getId(), containerID);
     }
-    stateManager.removePipeline(pipeline.getID());
+    stateManager.removePipeline(pipeline.getId());
   }
 }
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hadoop/blob/dce4ebe8/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestRatisPipelineProvider.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestRatisPipelineProvider.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestRatisPipelineProvider.java
index 184143a..0025c2e 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestRatisPipelineProvider.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestRatisPipelineProvider.java
@@ -58,7 +58,7 @@ public class TestRatisPipelineProvider {
     Assert.assertEquals(pipeline.getType(), HddsProtos.ReplicationType.RATIS);
     Assert.assertEquals(pipeline.getFactor(), factor);
     Assert.assertEquals(pipeline.getPipelineState(),
-        Pipeline.PipelineState.ALLOCATED);
+        Pipeline.PipelineState.OPEN);
     Assert.assertEquals(pipeline.getNodes().size(), factor.getNumber());
 
     factor = HddsProtos.ReplicationFactor.ONE;
@@ -71,7 +71,7 @@ public class TestRatisPipelineProvider {
     Assert.assertEquals(pipeline1.getType(), HddsProtos.ReplicationType.RATIS);
     Assert.assertEquals(pipeline1.getFactor(), factor);
     Assert.assertEquals(pipeline1.getPipelineState(),
-        Pipeline.PipelineState.ALLOCATED);
+        Pipeline.PipelineState.OPEN);
     Assert.assertEquals(pipeline1.getNodes().size(), factor.getNumber());
   }
 
@@ -86,19 +86,20 @@ public class TestRatisPipelineProvider {
   @Test
   public void testCreatePipelineWithNodes() throws IOException {
     HddsProtos.ReplicationFactor factor = HddsProtos.ReplicationFactor.THREE;
-    Pipeline pipeline = provider.create(createListOfNodes(factor.getNumber()));
+    Pipeline pipeline =
+        provider.create(factor, createListOfNodes(factor.getNumber()));
     Assert.assertEquals(pipeline.getType(), HddsProtos.ReplicationType.RATIS);
     Assert.assertEquals(pipeline.getFactor(), factor);
     Assert.assertEquals(
-        pipeline.getPipelineState(), Pipeline.PipelineState.ALLOCATED);
+        pipeline.getPipelineState(), Pipeline.PipelineState.OPEN);
     Assert.assertEquals(pipeline.getNodes().size(), factor.getNumber());
 
     factor = HddsProtos.ReplicationFactor.ONE;
-    pipeline = provider.create(createListOfNodes(factor.getNumber()));
+    pipeline = provider.create(factor, createListOfNodes(factor.getNumber()));
     Assert.assertEquals(pipeline.getType(), HddsProtos.ReplicationType.RATIS);
     Assert.assertEquals(pipeline.getFactor(), factor);
     Assert.assertEquals(pipeline.getPipelineState(),
-        Pipeline.PipelineState.ALLOCATED);
+        Pipeline.PipelineState.OPEN);
     Assert.assertEquals(pipeline.getNodes().size(), factor.getNumber());
   }
 }
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hadoop/blob/dce4ebe8/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestSCMPipelineManager.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestSCMPipelineManager.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestSCMPipelineManager.java
index 0f9ad55..dab7fb6 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestSCMPipelineManager.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestSCMPipelineManager.java
@@ -26,7 +26,6 @@ import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
 import org.apache.hadoop.hdds.scm.TestUtils;
 import org.apache.hadoop.hdds.scm.container.ContainerID;
 import org.apache.hadoop.hdds.scm.container.MockNodeManager;
-import org.apache.hadoop.hdds.scm.container.TestSCMContainerManager;
 import org.apache.hadoop.hdds.scm.server.SCMDatanodeHeartbeatDispatcher.PipelineReportFromDatanode;
 import org.apache.hadoop.hdds.server.events.EventQueue;
 import org.apache.hadoop.ozone.OzoneConfigKeys;
@@ -54,7 +53,7 @@ public class TestSCMPipelineManager {
   public static void setUp() throws Exception {
     conf = new OzoneConfiguration();
     testDir = GenericTestUtils
-        .getTestDir(TestSCMContainerManager.class.getSimpleName());
+        .getTestDir(TestSCMPipelineManager.class.getSimpleName());
     conf.set(OzoneConfigKeys.OZONE_METADATA_DIRS, testDir.getAbsolutePath());
     boolean folderExisted = testDir.exists() || testDir.mkdirs();
     if (!folderExisted) {
@@ -83,16 +82,18 @@ public class TestSCMPipelineManager {
 
     // new pipeline manager should be able to load the pipelines from the db
     pipelineManager =
-        new SCMPipelineManager(conf, nodeManager,
-            new EventQueue());
+        new SCMPipelineManager(conf, nodeManager, new EventQueue());
+    for (Pipeline p : pipelines) {
+      pipelineManager.openPipeline(p.getId());
+    }
     List<Pipeline> pipelineList =
-        pipelineManager.getPipelinesByType(HddsProtos.ReplicationType.RATIS);
+        pipelineManager.getPipelines(HddsProtos.ReplicationType.RATIS);
     Assert.assertEquals(pipelines, new HashSet<>(pipelineList));
 
     // clean up
     for (Pipeline pipeline : pipelines) {
-      pipelineManager.finalizePipeline(pipeline.getID());
-      pipelineManager.removePipeline(pipeline.getID());
+      pipelineManager.finalizePipeline(pipeline.getId());
+      pipelineManager.removePipeline(pipeline.getId());
     }
     pipelineManager.close();
   }
@@ -104,13 +105,13 @@ public class TestSCMPipelineManager {
     Pipeline pipeline = pipelineManager
         .createPipeline(HddsProtos.ReplicationType.RATIS,
             HddsProtos.ReplicationFactor.THREE);
-    pipelineManager.openPipeline(pipeline.getID());
+    pipelineManager.openPipeline(pipeline.getId());
     pipelineManager
-        .addContainerToPipeline(pipeline.getID(), ContainerID.valueof(1));
-    pipelineManager.finalizePipeline(pipeline.getID());
+        .addContainerToPipeline(pipeline.getId(), ContainerID.valueof(1));
+    pipelineManager.finalizePipeline(pipeline.getId());
     pipelineManager
-        .removeContainerFromPipeline(pipeline.getID(), ContainerID.valueof(1));
-    pipelineManager.removePipeline(pipeline.getID());
+        .removeContainerFromPipeline(pipeline.getId(), ContainerID.valueof(1));
+    pipelineManager.removePipeline(pipeline.getId());
     pipelineManager.close();
 
     // new pipeline manager should not be able to load removed pipelines
@@ -118,7 +119,7 @@ public class TestSCMPipelineManager {
         new SCMPipelineManager(conf, nodeManager,
             new EventQueue());
     try {
-      pipelineManager.getPipeline(pipeline.getID());
+      pipelineManager.getPipeline(pipeline.getId());
       Assert.fail("Pipeline should not have been retrieved");
     } catch (IOException e) {
       Assert.assertTrue(e.getMessage().contains("not found"));
@@ -138,36 +139,36 @@ public class TestSCMPipelineManager {
         .createPipeline(HddsProtos.ReplicationType.RATIS,
             HddsProtos.ReplicationFactor.THREE);
     Assert
-        .assertFalse(pipelineManager.getPipeline(pipeline.getID()).isHealthy());
+        .assertFalse(pipelineManager.getPipeline(pipeline.getId()).isHealthy());
     Assert
-        .assertFalse(pipelineManager.getPipeline(pipeline.getID()).isOpen());
+        .assertTrue(pipelineManager.getPipeline(pipeline.getId()).isOpen());
 
     // get pipeline report from each dn in the pipeline
     PipelineReportHandler pipelineReportHandler =
         new PipelineReportHandler(pipelineManager, conf);
     for (DatanodeDetails dn: pipeline.getNodes()) {
       PipelineReportFromDatanode pipelineReportFromDatanode =
-          TestUtils.getRandomPipelineReportFromDatanode(dn, pipeline.getID());
+          TestUtils.getRandomPipelineReportFromDatanode(dn, pipeline.getId());
       // pipeline is not healthy until all dns report
       Assert.assertFalse(
-          pipelineManager.getPipeline(pipeline.getID()).isHealthy());
+          pipelineManager.getPipeline(pipeline.getId()).isHealthy());
       pipelineReportHandler
           .onMessage(pipelineReportFromDatanode, new EventQueue());
     }
 
     // pipeline is healthy when all dns report
     Assert
-        .assertTrue(pipelineManager.getPipeline(pipeline.getID()).isHealthy());
+        .assertTrue(pipelineManager.getPipeline(pipeline.getId()).isHealthy());
     // pipeline should now move to open state
     Assert
-        .assertTrue(pipelineManager.getPipeline(pipeline.getID()).isOpen());
+        .assertTrue(pipelineManager.getPipeline(pipeline.getId()).isOpen());
 
     // close the pipeline
-    pipelineManager.finalizePipeline(pipeline.getID());
+    pipelineManager.finalizePipeline(pipeline.getId());
 
     for (DatanodeDetails dn: pipeline.getNodes()) {
       PipelineReportFromDatanode pipelineReportFromDatanode =
-          TestUtils.getRandomPipelineReportFromDatanode(dn, pipeline.getID());
+          TestUtils.getRandomPipelineReportFromDatanode(dn, pipeline.getId());
       // pipeline report for a closed pipeline should destroy the pipeline
       // and remove it from the pipeline manager
       pipelineReportHandler
@@ -175,7 +176,7 @@ public class TestSCMPipelineManager {
     }
 
     try {
-      pipelineManager.getPipeline(pipeline.getID());
+      pipelineManager.getPipeline(pipeline.getId());
       Assert.fail("Pipeline should not have been retrieved");
     } catch (IOException e) {
       Assert.assertTrue(e.getMessage().contains("not found"));

http://git-wip-us.apache.org/repos/asf/hadoop/blob/dce4ebe8/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestSCMRestart.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestSCMRestart.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestSCMRestart.java
index bac4022..0fa8649 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestSCMRestart.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestSCMRestart.java
@@ -19,9 +19,7 @@
 package org.apache.hadoop.hdds.scm.pipeline;
 
 import org.apache.hadoop.hdds.conf.OzoneConfiguration;
-import org.apache.hadoop.hdds.protocol.DatanodeDetails;
 import org.apache.hadoop.hdds.scm.container.ContainerManager;
-import org.apache.hadoop.hdds.scm.container.common.helpers.Pipeline;
 import org.apache.hadoop.hdds.scm.server.StorageContainerManager;
 import org.apache.hadoop.ozone.MiniOzoneCluster;
 import org.junit.AfterClass;
@@ -48,6 +46,7 @@ public class TestSCMRestart {
   private static Pipeline ratisPipeline2;
   private static ContainerManager containerManager;
   private static ContainerManager newContainerManager;
+  private static PipelineManager pipelineManager;
 
   /**
    * Create a MiniDFSCluster for testing.
@@ -65,6 +64,7 @@ public class TestSCMRestart {
     cluster.waitForClusterToBeReady();
     StorageContainerManager scm = cluster.getStorageContainerManager();
     containerManager = scm.getContainerManager();
+    pipelineManager = scm.getPipelineManager();
     ratisPipeline1 = containerManager.allocateContainer(
         RATIS, THREE, "Owner1").getPipeline();
     ratisPipeline2 = containerManager.allocateContainer(
@@ -75,6 +75,7 @@ public class TestSCMRestart {
     cluster.restartStorageContainerManager();
     newContainerManager = cluster.getStorageContainerManager()
         .getContainerManager();
+    pipelineManager = cluster.getStorageContainerManager().getPipelineManager();
   }
 
   /**
@@ -90,25 +91,15 @@ public class TestSCMRestart {
   @Test
   public void testPipelineWithScmRestart() throws IOException {
     // After restart make sure that the pipeline are still present
-    Pipeline ratisPipeline1AfterRestart = newContainerManager
-        .getPipelineSelector().getPipeline(ratisPipeline1.getId());
-    Pipeline ratisPipeline2AfterRestart = newContainerManager
-        .getPipelineSelector().getPipeline(ratisPipeline2.getId());
+    Pipeline ratisPipeline1AfterRestart =
+        pipelineManager.getPipeline(ratisPipeline1.getId());
+    Pipeline ratisPipeline2AfterRestart =
+        pipelineManager.getPipeline(ratisPipeline2.getId());
     Assert.assertNotSame(ratisPipeline1AfterRestart, ratisPipeline1);
     Assert.assertNotSame(ratisPipeline2AfterRestart, ratisPipeline2);
     Assert.assertEquals(ratisPipeline1AfterRestart, ratisPipeline1);
     Assert.assertEquals(ratisPipeline2AfterRestart, ratisPipeline2);
 
-    for (DatanodeDetails dn : ratisPipeline1.getMachines()) {
-      Assert.assertEquals(dn, ratisPipeline1AfterRestart.getDatanodes()
-              .get(dn.getUuidString()));
-    }
-
-    for (DatanodeDetails dn : ratisPipeline2.getMachines()) {
-      Assert.assertEquals(dn, ratisPipeline2AfterRestart.getDatanodes()
-              .get(dn.getUuidString()));
-    }
-
     // Try creating a new ratis pipeline, it should be from the same pipeline
     // as was before restart
     Pipeline newRatisPipeline = newContainerManager

http://git-wip-us.apache.org/repos/asf/hadoop/blob/dce4ebe8/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestSimplePipelineProvider.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestSimplePipelineProvider.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestSimplePipelineProvider.java
index b44dbef..22fd95b 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestSimplePipelineProvider.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestSimplePipelineProvider.java
@@ -57,7 +57,7 @@ public class TestSimplePipelineProvider {
         HddsProtos.ReplicationType.STAND_ALONE);
     Assert.assertEquals(pipeline.getFactor(), factor);
     Assert.assertEquals(pipeline.getPipelineState(),
-        Pipeline.PipelineState.ALLOCATED);
+        Pipeline.PipelineState.OPEN);
     Assert.assertEquals(pipeline.getNodes().size(), factor.getNumber());
 
     factor = HddsProtos.ReplicationFactor.ONE;
@@ -67,7 +67,7 @@ public class TestSimplePipelineProvider {
         HddsProtos.ReplicationType.STAND_ALONE);
     Assert.assertEquals(pipeline1.getFactor(), factor);
     Assert.assertEquals(pipeline1.getPipelineState(),
-        Pipeline.PipelineState.ALLOCATED);
+        Pipeline.PipelineState.OPEN);
     Assert.assertEquals(pipeline1.getNodes().size(), factor.getNumber());
   }
 
@@ -82,21 +82,22 @@ public class TestSimplePipelineProvider {
   @Test
   public void testCreatePipelineWithNodes() throws IOException {
     HddsProtos.ReplicationFactor factor = HddsProtos.ReplicationFactor.THREE;
-    Pipeline pipeline = provider.create(createListOfNodes(factor.getNumber()));
+    Pipeline pipeline =
+        provider.create(factor, createListOfNodes(factor.getNumber()));
     Assert.assertEquals(pipeline.getType(),
         HddsProtos.ReplicationType.STAND_ALONE);
     Assert.assertEquals(pipeline.getFactor(), factor);
     Assert.assertEquals(pipeline.getPipelineState(),
-        Pipeline.PipelineState.ALLOCATED);
+        Pipeline.PipelineState.OPEN);
     Assert.assertEquals(pipeline.getNodes().size(), factor.getNumber());
 
     factor = HddsProtos.ReplicationFactor.ONE;
-    pipeline = provider.create(createListOfNodes(factor.getNumber()));
+    pipeline = provider.create(factor, createListOfNodes(factor.getNumber()));
     Assert.assertEquals(pipeline.getType(),
         HddsProtos.ReplicationType.STAND_ALONE);
     Assert.assertEquals(pipeline.getFactor(), factor);
     Assert.assertEquals(pipeline.getPipelineState(),
-        Pipeline.PipelineState.ALLOCATED);
+        Pipeline.PipelineState.OPEN);
     Assert.assertEquals(pipeline.getNodes().size(), factor.getNumber());
   }
 }
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hadoop/blob/dce4ebe8/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/RatisTestHelper.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/RatisTestHelper.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/RatisTestHelper.java
index a83c16e..871f389 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/RatisTestHelper.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/RatisTestHelper.java
@@ -20,7 +20,7 @@ package org.apache.hadoop.ozone;
 
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hdds.conf.OzoneConfiguration;
-import org.apache.hadoop.hdds.scm.container.common.helpers.Pipeline;
+import org.apache.hadoop.hdds.scm.pipeline.Pipeline;
 import org.apache.hadoop.ozone.client.protocol.ClientProtocol;
 import org.apache.hadoop.ozone.client.rpc.RpcClient;
 import org.apache.hadoop.hdds.protocol.DatanodeDetails;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/dce4ebe8/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestContainerStateMachineIdempotency.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestContainerStateMachineIdempotency.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestContainerStateMachineIdempotency.java
index c69a94c..78a8511 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestContainerStateMachineIdempotency.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestContainerStateMachineIdempotency.java
@@ -28,7 +28,7 @@ import org.apache.hadoop.hdds.scm.ScmConfigKeys;
 import org.apache.hadoop.hdds.scm.XceiverClientManager;
 import org.apache.hadoop.hdds.scm.XceiverClientSpi;
 import org.apache.hadoop.hdds.scm.container.common.helpers.ContainerWithPipeline;
-import org.apache.hadoop.hdds.scm.container.common.helpers.Pipeline;
+import org.apache.hadoop.hdds.scm.pipeline.Pipeline;
 import org.apache.hadoop.hdds.scm.container.placement.algorithms.
     ContainerPlacementPolicy;
 import org.apache.hadoop.hdds.scm.container.placement.algorithms.

http://git-wip-us.apache.org/repos/asf/hadoop/blob/dce4ebe8/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestMiniOzoneCluster.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestMiniOzoneCluster.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestMiniOzoneCluster.java
index bf6a189..e616eef 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestMiniOzoneCluster.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestMiniOzoneCluster.java
@@ -21,7 +21,7 @@ package org.apache.hadoop.ozone;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hdds.conf.OzoneConfiguration;
 import org.apache.hadoop.hdds.protocol.DatanodeDetails;
-import org.apache.hadoop.hdds.scm.container.common.helpers.PipelineID;
+import org.apache.hadoop.hdds.scm.pipeline.PipelineID;
 import org.apache.hadoop.ozone.container.common.SCMTestUtils;
 import org.apache.hadoop.ozone.container.common.helpers.ContainerUtils;
 import org.apache.hadoop.ozone.container.common.statemachine.DatanodeStateMachine;
@@ -29,7 +29,7 @@ import org.apache.hadoop.ozone.container.ozoneimpl.TestOzoneContainer;
 import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
 import org.apache.hadoop.hdds.scm.TestUtils;
 import org.apache.hadoop.hdds.scm.XceiverClientGrpc;
-import org.apache.hadoop.hdds.scm.container.common.helpers.Pipeline;
+import org.apache.hadoop.hdds.scm.pipeline.Pipeline;
 import org.apache.hadoop.test.PathUtils;
 import org.apache.hadoop.test.TestGenericTestUtils;
 import org.junit.AfterClass;
@@ -40,6 +40,7 @@ import org.junit.Test;
 import java.io.File;
 import java.io.FileOutputStream;
 import java.io.IOException;
+import java.util.ArrayList;
 import java.util.HashSet;
 import java.util.List;
 
@@ -91,18 +92,20 @@ public class TestMiniOzoneCluster {
     assertEquals(numberOfNodes, datanodes.size());
     for(HddsDatanodeService dn : datanodes) {
       // Create a single member pipe line
-      DatanodeDetails datanodeDetails = dn.getDatanodeDetails();
-      final Pipeline pipeline =
-          new Pipeline(datanodeDetails.getUuidString(),
-              HddsProtos.LifeCycleState.OPEN,
-              HddsProtos.ReplicationType.STAND_ALONE,
-              HddsProtos.ReplicationFactor.ONE, PipelineID.randomId());
-      pipeline.addMember(datanodeDetails);
+      List<DatanodeDetails> dns = new ArrayList<>();
+      dns.add(dn.getDatanodeDetails());
+      Pipeline pipeline = Pipeline.newBuilder()
+          .setState(Pipeline.PipelineState.OPEN)
+          .setId(PipelineID.randomId())
+          .setType(HddsProtos.ReplicationType.STAND_ALONE)
+          .setFactor(HddsProtos.ReplicationFactor.ONE)
+          .setNodes(dns)
+          .build();
 
       // Verify client is able to connect to the container
       try (XceiverClientGrpc client = new XceiverClientGrpc(pipeline, conf)){
         client.connect();
-        assertTrue(client.isConnected(pipeline.getLeader()));
+        assertTrue(client.isConnected(pipeline.getFirstNode()));
       }
     }
   }
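
The hunks above replace the old PipelineSelector/LifeCycleState model with PipelineManager and Pipeline.PipelineState, and pipelines are now torn down in two explicit steps. A minimal sketch of that teardown sequence, assuming PipelineManager and Pipeline live in org.apache.hadoop.hdds.scm.pipeline as the updated imports indicate:

import java.io.IOException;

import org.apache.hadoop.hdds.scm.pipeline.Pipeline;
import org.apache.hadoop.hdds.scm.pipeline.PipelineManager;

/**
 * Sketch of the two-step teardown exercised by the updated tests: finalize
 * the pipeline first, then remove it from the manager.
 */
final class PipelineTeardownSketch {

  private PipelineTeardownSketch() {
  }

  static void closeAndRemove(PipelineManager pipelineManager, Pipeline pipeline)
      throws IOException {
    // Moves the pipeline to Pipeline.PipelineState.CLOSED.
    pipelineManager.finalizePipeline(pipeline.getId());
    // Removal is expected to succeed only once the pipeline no longer has
    // containers mapped to it.
    pipelineManager.removePipeline(pipeline.getId());
  }
}

The split mirrors what the tests check: finalizePipeline moves the pipeline to CLOSED, while removePipeline only succeeds after the pipeline's containers have been removed.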




[05/50] [abbrv] hadoop git commit: HDFS-14029. Sleep in TestLazyPersistFiles should be put into a loop. Contributed by Adam Antal.

Posted by su...@apache.org.
HDFS-14029. Sleep in TestLazyPersistFiles should be put into a loop. Contributed by Adam Antal.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/9b899f1e
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/9b899f1e
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/9b899f1e

Branch: refs/heads/HDFS-12943
Commit: 9b899f1ebd5126a756fceb43459164165488b203
Parents: 34b2521
Author: Arpit Agarwal <ar...@apache.org>
Authored: Thu Oct 25 15:14:13 2018 -0700
Committer: Arpit Agarwal <ar...@apache.org>
Committed: Thu Oct 25 15:14:13 2018 -0700

----------------------------------------------------------------------
 .../fsdataset/impl/TestLazyPersistFiles.java         | 15 +++++++--------
 1 file changed, 7 insertions(+), 8 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/9b899f1e/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestLazyPersistFiles.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestLazyPersistFiles.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestLazyPersistFiles.java
index 71a9f6f..04f8127 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestLazyPersistFiles.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestLazyPersistFiles.java
@@ -151,7 +151,7 @@ public class TestLazyPersistFiles extends LazyPersistTestCase {
  /**
   * If NN restarted then lazyPersist files should not deleted
   */
-  @Test
+  @Test(timeout = 20000)
   public void testFileShouldNotDiscardedIfNNRestarted()
       throws IOException, InterruptedException, TimeoutException {
     getClusterBuilder().setRamDiskReplicaCapacity(2).build();
@@ -165,13 +165,12 @@ public class TestLazyPersistFiles extends LazyPersistTestCase {
     cluster.restartNameNodes();
 
     // wait for the redundancy monitor to mark the file as corrupt.
-    Thread.sleep(2 * DFS_NAMENODE_REDUNDANCY_INTERVAL_SECONDS_DEFAULT * 1000);
-
-    Long corruptBlkCount = (long) Iterators.size(cluster.getNameNode()
-        .getNamesystem().getBlockManager().getCorruptReplicaBlockIterator());
-
-    // Check block detected as corrupted
-    assertThat(corruptBlkCount, is(1L));
+    Long corruptBlkCount;
+    do {
+      Thread.sleep(DFS_NAMENODE_REDUNDANCY_INTERVAL_SECONDS_DEFAULT * 1000);
+      corruptBlkCount = (long) Iterators.size(cluster.getNameNode()
+          .getNamesystem().getBlockManager().getCorruptReplicaBlockIterator());
+    } while (corruptBlkCount != 1L);
 
     // Ensure path1 exist.
     Assert.assertTrue(fs.exists(path1));




[02/50] [abbrv] hadoop git commit: HADOOP-15864. Job submitter / executor fail when SBN domain name cannot be resolved. Contributed by He Xiaoqiao.

Posted by su...@apache.org.
HADOOP-15864. Job submitter / executor fail when SBN domain name cannot be resolved. Contributed by He Xiaoqiao.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/fb2b72e6
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/fb2b72e6
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/fb2b72e6

Branch: refs/heads/HDFS-12943
Commit: fb2b72e6fce019130e10964a644b94cddbab1c06
Parents: c7f349b
Author: Wei-Chiu Chuang <we...@apache.org>
Authored: Thu Oct 25 09:33:31 2018 -0700
Committer: Wei-Chiu Chuang <we...@apache.org>
Committed: Thu Oct 25 09:33:31 2018 -0700

----------------------------------------------------------------------
 .../apache/hadoop/security/SecurityUtil.java    |  9 ++++----
 .../namenode/ha/TestDelegationTokensWithHA.java | 23 ++++++++++++++++++++
 2 files changed, 28 insertions(+), 4 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/fb2b72e6/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/SecurityUtil.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/SecurityUtil.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/SecurityUtil.java
index aa12b93..b573234 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/SecurityUtil.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/SecurityUtil.java
@@ -424,7 +424,7 @@ public final class SecurityUtil {
    */
   public static void setTokenService(Token<?> token, InetSocketAddress addr) {
     Text service = buildTokenService(addr);
-    if (token != null) {
+    if (token != null && service != null) {
       token.setService(service);
       if (LOG.isDebugEnabled()) {
         LOG.debug("Acquired token "+token);  // Token#toString() prints service
@@ -444,9 +444,10 @@ public final class SecurityUtil {
     String host = null;
     if (useIpForTokenService) {
       if (addr.isUnresolved()) { // host has no ip address
-        throw new IllegalArgumentException(
-            new UnknownHostException(addr.getHostName())
-        );
+        LOG.warn("unable to resolve host name " + addr
+            + ". Failure to construct a correct token service "
+            + "name may result in operation failures");
+        return null;
       }
       host = addr.getAddress().getHostAddress();
     } else {
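
The change above is easiest to see from the caller's side: with IP-based token service resolution (the hadoop.security.token.service.use_ip default), an unresolvable host no longer raises IllegalArgumentException; buildTokenService returns null and setTokenService simply skips the token. A minimal sketch, with a placeholder host name:

import java.net.InetSocketAddress;

import org.apache.hadoop.io.Text;
import org.apache.hadoop.security.SecurityUtil;
import org.apache.hadoop.security.token.Token;

/**
 * Sketch of the caller-visible effect: an unresolved address now yields a
 * null service instead of an exception, so the token is left without a
 * service name for that NameNode rather than failing the whole operation.
 */
final class TokenServiceSketch {

  private TokenServiceSketch() {
  }

  static void attachServiceIfResolvable(Token<?> token, int port) {
    // "standby.name.does.not.resolve" is a placeholder for an SBN host
    // that DNS cannot resolve.
    InetSocketAddress addr = InetSocketAddress.createUnresolved(
        "standby.name.does.not.resolve", port);
    Text service = SecurityUtil.buildTokenService(addr);
    if (service != null) {
      token.setService(service);
    }
  }
}

The new TestDelegationTokensWithHA case below exercises the same path through HAUtilClient.cloneDelegationTokenForLogicalUri with one unresolvable and one resolvable NameNode address.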

http://git-wip-us.apache.org/repos/asf/hadoop/blob/fb2b72e6/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestDelegationTokensWithHA.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestDelegationTokensWithHA.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestDelegationTokensWithHA.java
index 7076ec6..e78cee9 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestDelegationTokensWithHA.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestDelegationTokensWithHA.java
@@ -332,6 +332,29 @@ public class TestDelegationTokensWithHA {
     }    
   }
 
+  @Test(timeout = 300000)
+  public void testHAUtilClonesDTsDomainNameResolvedFail() throws Exception {
+    final Token<DelegationTokenIdentifier> token =
+        getDelegationToken(fs, "JobTracker");
+
+    UserGroupInformation ugi = UserGroupInformation.createRemoteUser("test");
+
+    URI haUri = new URI("hdfs://my-ha-uri/");
+    token.setService(HAUtilClient.buildTokenServiceForLogicalUri(haUri,
+        HdfsConstants.HDFS_URI_SCHEME));
+    ugi.addToken(token);
+
+    Collection<InetSocketAddress> nnAddrs = new HashSet<InetSocketAddress>();
+    nnAddrs.add(new InetSocketAddress("domainname.doesnot.exist",
+        nn0.getNameNodeAddress().getPort()));
+    nnAddrs.add(new InetSocketAddress("localhost",
+        nn1.getNameNodeAddress().getPort()));
+    HAUtilClient.cloneDelegationTokenForLogicalUri(ugi, haUri, nnAddrs);
+
+    Collection<Token<? extends TokenIdentifier>> tokens = ugi.getTokens();
+    assertEquals(3, tokens.size());
+  }
+
   /**
    * HDFS-3062: DistributedFileSystem.getCanonicalServiceName() throws an
    * exception if the URI is a logical URI. This bug fails the combination of




[31/50] [abbrv] hadoop git commit: YARN-7754. [Atsv2] Update document for running v1 and v2 TS. Contributed by Suma Shivaprasad.

Posted by su...@apache.org.
YARN-7754. [Atsv2] Update document for running v1 and v2 TS. Contributed by Suma Shivaprasad.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/486b9a4a
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/486b9a4a
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/486b9a4a

Branch: refs/heads/HDFS-12943
Commit: 486b9a4a75f413aa542338b0d866c3b490381d93
Parents: a283da2
Author: Rohith Sharma K S <ro...@apache.org>
Authored: Tue Oct 30 11:35:01 2018 +0530
Committer: Rohith Sharma K S <ro...@apache.org>
Committed: Tue Oct 30 11:35:01 2018 +0530

----------------------------------------------------------------------
 .../src/site/markdown/TimelineServiceV2.md              | 12 ++++++++++++
 1 file changed, 12 insertions(+)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/486b9a4a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/TimelineServiceV2.md
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/TimelineServiceV2.md b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/TimelineServiceV2.md
index 2314f30..86faf6c 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/TimelineServiceV2.md
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/TimelineServiceV2.md
@@ -333,6 +333,18 @@ that it can write data to the Apache HBase cluster you are using, or set
 </property>
 ```
 
+To configure both Timeline Service 1.5 and v.2, add the following property
+
+ ```
+ <property>
+   <name>yarn.timeline-service.versions</name>
+   <value>1.5f,2.0f</value>
+ </property>
+```
+
+If the above is not configured, then it defaults to the version set in `yarn.timeline-service.version`
+
+
 #### Running Timeline Service v.2
 Restart the resource manager as well as the node managers to pick up the new configuration. The
 collectors start within the resource manager and the node managers in an embedded manner.
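
The documented lookup order (yarn.timeline-service.versions first, then yarn.timeline-service.version) can be mirrored in client code roughly as follows. This is a sketch of the documented semantics, not the ResourceManager's actual implementation, and the "1.0f" fallback simply reflects the stock yarn-default.xml value:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.yarn.conf.YarnConfiguration;

/**
 * Sketch of the documented precedence: the multi-version setting wins when
 * present, otherwise the single-version setting (or its default) applies.
 */
final class TimelineVersionsSketch {

  private TimelineVersionsSketch() {
  }

  static String effectiveVersions(Configuration conf) {
    String versions = conf.getTrimmed("yarn.timeline-service.versions");
    if (versions == null || versions.isEmpty()) {
      // Falls back to the single-version knob, e.g. "1.5f" or "2.0f".
      versions = conf.getTrimmed("yarn.timeline-service.version", "1.0f");
    }
    return versions;
  }

  public static void main(String[] args) {
    Configuration conf = new YarnConfiguration();
    conf.set("yarn.timeline-service.versions", "1.5f,2.0f");
    System.out.println(effectiveVersions(conf));  // prints 1.5f,2.0f
  }
}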




[27/50] [abbrv] hadoop git commit: HDDS-620. ozone.scm.client.address should be an optional setting. Contributed by chencan and Arpit Agarwal.

Posted by su...@apache.org.
HDDS-620. ozone.scm.client.address should be an optional setting. Contributed by chencan and Arpit Agarwal.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/496f0ffe
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/496f0ffe
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/496f0ffe

Branch: refs/heads/HDFS-12943
Commit: 496f0ffe9017b11d0d7c071bad259d132687c656
Parents: 3655e57
Author: Arpit Agarwal <ar...@apache.org>
Authored: Mon Oct 29 17:14:15 2018 -0700
Committer: Arpit Agarwal <ar...@apache.org>
Committed: Mon Oct 29 17:14:18 2018 -0700

----------------------------------------------------------------------
 .../java/org/apache/hadoop/hdds/HddsUtils.java  |  40 ++++-
 .../apache/hadoop/hdds/scm/HddsServerUtil.java  |  16 +-
 .../hadoop/hdds/scm/TestHddsServerUtils.java    | 153 +++++++++++++++++++
 .../ozone/client/TestHddsClientUtils.java       | 137 +++++++++++++++--
 4 files changed, 325 insertions(+), 21 deletions(-)
----------------------------------------------------------------------
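
As the diffs below show, the SCM client and block-client endpoints now fall back to a single-host ozone.scm.names entry when their dedicated address keys are unset, while a multi-host ozone.scm.names is still rejected. A minimal usage sketch, with a placeholder host name:

import java.net.InetSocketAddress;

import org.apache.hadoop.hdds.HddsUtils;
import org.apache.hadoop.hdds.conf.OzoneConfiguration;
import org.apache.hadoop.hdds.scm.ScmConfigKeys;

/**
 * Sketch of the new fallback: only ozone.scm.names is configured, yet the
 * client endpoint can still be resolved.
 */
final class ScmClientAddressSketch {

  private ScmClientAddressSketch() {
  }

  static InetSocketAddress clientAddressFromScmNames() {
    OzoneConfiguration conf = new OzoneConfiguration();
    // "scm.example.com" is a placeholder; ozone.scm.client.address is
    // deliberately left unset.
    conf.set(ScmConfigKeys.OZONE_SCM_NAMES, "scm.example.com");
    // The host comes from ozone.scm.names; the port falls back to the
    // regular client-port configuration.
    return HddsUtils.getScmAddressForClients(conf);
  }
}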


http://git-wip-us.apache.org/repos/asf/hadoop/blob/496f0ffe/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/HddsUtils.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/HddsUtils.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/HddsUtils.java
index 7a42a10..09fc75b 100644
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/HddsUtils.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/HddsUtils.java
@@ -87,10 +87,22 @@ public final class HddsUtils {
    * @return Target InetSocketAddress for the SCM client endpoint.
    */
   public static InetSocketAddress getScmAddressForClients(Configuration conf) {
-    final Optional<String> host = getHostNameFromConfigKeys(conf,
+    Optional<String> host = getHostNameFromConfigKeys(conf,
         ScmConfigKeys.OZONE_SCM_CLIENT_ADDRESS_KEY);
 
     if (!host.isPresent()) {
+      // Fallback to Ozone SCM names.
+      Collection<InetSocketAddress> scmAddresses = getSCMAddresses(conf);
+      if (scmAddresses.size() > 1) {
+        throw new IllegalArgumentException(
+            ScmConfigKeys.OZONE_SCM_NAMES +
+                " must contain a single hostname. Multiple SCM hosts are " +
+                "currently unsupported");
+      }
+      host = Optional.of(scmAddresses.iterator().next().getHostName());
+    }
+
+    if (!host.isPresent()) {
       throw new IllegalArgumentException(
           ScmConfigKeys.OZONE_SCM_CLIENT_ADDRESS_KEY + " must be defined. See"
               + " https://wiki.apache.org/hadoop/Ozone#Configuration for "
@@ -109,7 +121,8 @@ public final class HddsUtils {
    * Retrieve the socket address that should be used by clients to connect
    * to the SCM for block service. If
    * {@link ScmConfigKeys#OZONE_SCM_BLOCK_CLIENT_ADDRESS_KEY} is not defined
-   * then {@link ScmConfigKeys#OZONE_SCM_CLIENT_ADDRESS_KEY} is used.
+   * then {@link ScmConfigKeys#OZONE_SCM_CLIENT_ADDRESS_KEY} is used. If neither
+   * is defined then {@link ScmConfigKeys#OZONE_SCM_NAMES} is used.
    *
    * @param conf
    * @return Target InetSocketAddress for the SCM block client endpoint.
@@ -123,13 +136,26 @@ public final class HddsUtils {
     if (!host.isPresent()) {
       host = getHostNameFromConfigKeys(conf,
           ScmConfigKeys.OZONE_SCM_CLIENT_ADDRESS_KEY);
-      if (!host.isPresent()) {
+    }
+
+    if (!host.isPresent()) {
+      // Fallback to Ozone SCM names.
+      Collection<InetSocketAddress> scmAddresses = getSCMAddresses(conf);
+      if (scmAddresses.size() > 1) {
         throw new IllegalArgumentException(
-            ScmConfigKeys.OZONE_SCM_BLOCK_CLIENT_ADDRESS_KEY
-                + " must be defined. See"
-                + " https://wiki.apache.org/hadoop/Ozone#Configuration"
-                + " for details on configuring Ozone.");
+            ScmConfigKeys.OZONE_SCM_NAMES +
+                " must contain a single hostname. Multiple SCM hosts are " +
+                "currently unsupported");
       }
+      host = Optional.of(scmAddresses.iterator().next().getHostName());
+    }
+
+    if (!host.isPresent()) {
+      throw new IllegalArgumentException(
+          ScmConfigKeys.OZONE_SCM_BLOCK_CLIENT_ADDRESS_KEY
+              + " must be defined. See"
+              + " https://wiki.apache.org/hadoop/Ozone#Configuration"
+              + " for details on configuring Ozone.");
     }
 
     final Optional<Integer> port = getPortNumberFromConfigKeys(conf,

http://git-wip-us.apache.org/repos/asf/hadoop/blob/496f0ffe/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/hdds/scm/HddsServerUtil.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/hdds/scm/HddsServerUtil.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/hdds/scm/HddsServerUtil.java
index d505be3..395a77d 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/hdds/scm/HddsServerUtil.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/hdds/scm/HddsServerUtil.java
@@ -28,6 +28,7 @@ import org.slf4j.LoggerFactory;
 
 import java.io.File;
 import java.net.InetSocketAddress;
+import java.util.Collection;
 import java.util.HashMap;
 import java.util.Map;
 import java.util.concurrent.TimeUnit;
@@ -81,12 +82,25 @@ public final class HddsServerUtil {
     // target host.
     // - OZONE_SCM_DATANODE_ADDRESS_KEY
     // - OZONE_SCM_CLIENT_ADDRESS_KEY
+    // - OZONE_SCM_NAMES
     //
-    final Optional<String> host = getHostNameFromConfigKeys(conf,
+    Optional<String> host = getHostNameFromConfigKeys(conf,
         ScmConfigKeys.OZONE_SCM_DATANODE_ADDRESS_KEY,
         ScmConfigKeys.OZONE_SCM_CLIENT_ADDRESS_KEY);
 
     if (!host.isPresent()) {
+      // Fallback to Ozone SCM names.
+      Collection<InetSocketAddress> scmAddresses = getSCMAddresses(conf);
+      if (scmAddresses.size() > 1) {
+        throw new IllegalArgumentException(
+            ScmConfigKeys.OZONE_SCM_NAMES +
+                " must contain a single hostname. Multiple SCM hosts are " +
+                "currently unsupported");
+      }
+      host = Optional.of(scmAddresses.iterator().next().getHostName());
+    }
+
+    if (!host.isPresent()) {
       throw new IllegalArgumentException(
           ScmConfigKeys.OZONE_SCM_CLIENT_ADDRESS_KEY +
               " must be defined. See" +

http://git-wip-us.apache.org/repos/asf/hadoop/blob/496f0ffe/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/TestHddsServerUtils.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/TestHddsServerUtils.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/TestHddsServerUtils.java
new file mode 100644
index 0000000..21acda8
--- /dev/null
+++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/TestHddsServerUtils.java
@@ -0,0 +1,153 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hdds.scm;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hdds.conf.OzoneConfiguration;
+import org.junit.Rule;
+import org.junit.Test;
+import org.junit.rules.ExpectedException;
+import org.junit.rules.Timeout;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.net.InetSocketAddress;
+
+import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_CLIENT_ADDRESS_KEY;
+import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_DATANODE_ADDRESS_KEY;
+import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_DATANODE_PORT_DEFAULT;
+import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_NAMES;
+import static org.junit.Assert.assertEquals;
+
+/**
+ * Unit tests for {@link HddsServerUtil}
+ */
+public class TestHddsServerUtils {
+  public static final Logger LOG = LoggerFactory.getLogger(
+      TestHddsServerUtils.class);
+
+  @Rule
+  public Timeout timeout = new Timeout(300_000);
+
+  @Rule
+  public ExpectedException thrown= ExpectedException.none();
+
+  /**
+   * Test getting OZONE_SCM_DATANODE_ADDRESS_KEY with port.
+   */
+  @Test
+  public void testGetDatanodeAddressWithPort() {
+    final String scmHost = "host123:100";
+    final Configuration conf = new OzoneConfiguration();
+    conf.set(OZONE_SCM_DATANODE_ADDRESS_KEY, scmHost);
+    final InetSocketAddress address =
+        HddsServerUtil.getScmAddressForDataNodes(conf);
+    assertEquals(address.getHostName(), scmHost.split(":")[0]);
+    assertEquals(address.getPort(), Integer.parseInt(scmHost.split(":")[1]));
+  }
+
+  /**
+   * Test getting OZONE_SCM_DATANODE_ADDRESS_KEY without port.
+   */
+  @Test
+  public void testGetDatanodeAddressWithoutPort() {
+    final String scmHost = "host123";
+    final Configuration conf = new OzoneConfiguration();
+    conf.set(OZONE_SCM_DATANODE_ADDRESS_KEY, scmHost);
+    final InetSocketAddress address =
+        HddsServerUtil.getScmAddressForDataNodes(conf);
+    assertEquals(address.getHostName(), scmHost);
+    assertEquals(address.getPort(), OZONE_SCM_DATANODE_PORT_DEFAULT);
+  }
+
+  /**
+   * When OZONE_SCM_DATANODE_ADDRESS_KEY is undefined, test fallback to
+   * OZONE_SCM_CLIENT_ADDRESS_KEY.
+   */
+  @Test
+  public void testDatanodeAddressFallbackToClientNoPort() {
+    final String scmHost = "host123";
+    final Configuration conf = new OzoneConfiguration();
+    conf.set(OZONE_SCM_CLIENT_ADDRESS_KEY, scmHost);
+    final InetSocketAddress address =
+        HddsServerUtil.getScmAddressForDataNodes(conf);
+    assertEquals(address.getHostName(), scmHost);
+    assertEquals(address.getPort(), OZONE_SCM_DATANODE_PORT_DEFAULT);
+  }
+
+  /**
+   * When OZONE_SCM_DATANODE_ADDRESS_KEY is undefined, test fallback to
+   * OZONE_SCM_CLIENT_ADDRESS_KEY. Port number defined by
+   * OZONE_SCM_CLIENT_ADDRESS_KEY should be ignored.
+   */
+  @Test
+  public void testDatanodeAddressFallbackToClientWithPort() {
+    final String scmHost = "host123:100";
+    final Configuration conf = new OzoneConfiguration();
+    conf.set(OZONE_SCM_CLIENT_ADDRESS_KEY, scmHost);
+    final InetSocketAddress address =
+        HddsServerUtil.getScmAddressForDataNodes(conf);
+    assertEquals(address.getHostName(), scmHost.split(":")[0]);
+    assertEquals(address.getPort(), OZONE_SCM_DATANODE_PORT_DEFAULT);
+  }
+
+  /**
+   * When OZONE_SCM_DATANODE_ADDRESS_KEY and OZONE_SCM_CLIENT_ADDRESS_KEY
+   * are undefined, test fallback to OZONE_SCM_NAMES.
+   */
+  @Test
+  public void testDatanodeAddressFallbackToScmNamesNoPort() {
+    final String scmHost = "host123";
+    final Configuration conf = new OzoneConfiguration();
+    conf.set(OZONE_SCM_NAMES, scmHost);
+    final InetSocketAddress address =
+        HddsServerUtil.getScmAddressForDataNodes(conf);
+    assertEquals(address.getHostName(), scmHost);
+    assertEquals(address.getPort(), OZONE_SCM_DATANODE_PORT_DEFAULT);
+  }
+
+  /**
+   * When OZONE_SCM_DATANODE_ADDRESS_KEY and OZONE_SCM_CLIENT_ADDRESS_KEY
+   * are undefined, test fallback to OZONE_SCM_NAMES. Port number
+   * defined by OZONE_SCM_NAMES should be ignored.
+   */
+  @Test
+  public void testDatanodeAddressFallbackToScmNamesWithPort() {
+    final String scmHost = "host123:100";
+    final Configuration conf = new OzoneConfiguration();
+    conf.set(OZONE_SCM_NAMES, scmHost);
+    final InetSocketAddress address =
+        HddsServerUtil.getScmAddressForDataNodes(conf);
+    assertEquals(address.getHostName(), scmHost.split(":")[0]);
+    assertEquals(address.getPort(), OZONE_SCM_DATANODE_PORT_DEFAULT);
+  }
+
+  /**
+   * getScmAddressForDataNodes should fail when OZONE_SCM_NAMES has
+   * multiple addresses.
+   */
+  @Test
+  public void testClientFailsWithMultipleScmNames() {
+    final String scmHost = "host123,host456";
+    final Configuration conf = new OzoneConfiguration();
+    conf.set(OZONE_SCM_NAMES, scmHost);
+    thrown.expect(IllegalArgumentException.class);
+    HddsServerUtil.getScmAddressForDataNodes(conf);
+  }
+}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/496f0ffe/hadoop-ozone/client/src/test/java/org/apache/hadoop/ozone/client/TestHddsClientUtils.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/client/src/test/java/org/apache/hadoop/ozone/client/TestHddsClientUtils.java b/hadoop-ozone/client/src/test/java/org/apache/hadoop/ozone/client/TestHddsClientUtils.java
index 3aefe8a..9850778 100644
--- a/hadoop-ozone/client/src/test/java/org/apache/hadoop/ozone/client/TestHddsClientUtils.java
+++ b/hadoop-ozone/client/src/test/java/org/apache/hadoop/ozone/client/TestHddsClientUtils.java
@@ -19,9 +19,10 @@
 package org.apache.hadoop.ozone.client;
 
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hdds.HddsUtils;
 import org.apache.hadoop.hdds.conf.OzoneConfiguration;
+import org.apache.hadoop.ozone.OmUtils;
 import org.apache.hadoop.ozone.om.OMConfigKeys;
-import org.apache.hadoop.hdds.scm.ScmConfigKeys;
 import org.junit.Rule;
 import org.junit.Test;
 import org.junit.rules.ExpectedException;
@@ -29,9 +30,12 @@ import org.junit.rules.Timeout;
 
 import java.net.InetSocketAddress;
 
-import static org.apache.hadoop.hdds.HddsUtils.getScmAddressForClients;
-import static org.apache.hadoop.ozone.OmUtils.getOmAddress;
+import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_BLOCK_CLIENT_PORT_DEFAULT;
+import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_CLIENT_ADDRESS_KEY;
+import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_CLIENT_PORT_DEFAULT;
+import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_NAMES;
 import static org.hamcrest.core.Is.is;
+import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertThat;
 
 /**
@@ -40,7 +44,7 @@ import static org.junit.Assert.assertThat;
  */
 public class TestHddsClientUtils {
   @Rule
-  public Timeout timeout = new Timeout(300000);
+  public Timeout timeout = new Timeout(300_000);
 
   @Rule
   public ExpectedException thrown= ExpectedException.none();
@@ -52,7 +56,7 @@ public class TestHddsClientUtils {
   public void testMissingScmClientAddress() {
     final Configuration conf = new OzoneConfiguration();
     thrown.expect(IllegalArgumentException.class);
-    getScmAddressForClients(conf);
+    HddsUtils.getScmAddressForClients(conf);
   }
 
   /**
@@ -65,15 +69,15 @@ public class TestHddsClientUtils {
 
     // First try a client address with just a host name. Verify it falls
     // back to the default port.
-    conf.set(ScmConfigKeys.OZONE_SCM_CLIENT_ADDRESS_KEY, "1.2.3.4");
-    InetSocketAddress addr = getScmAddressForClients(conf);
+    conf.set(OZONE_SCM_CLIENT_ADDRESS_KEY, "1.2.3.4");
+    InetSocketAddress addr = HddsUtils.getScmAddressForClients(conf);
     assertThat(addr.getHostString(), is("1.2.3.4"));
-    assertThat(addr.getPort(), is(ScmConfigKeys.OZONE_SCM_CLIENT_PORT_DEFAULT));
+    assertThat(addr.getPort(), is(OZONE_SCM_CLIENT_PORT_DEFAULT));
 
     // Next try a client address with a host name and port. Verify both
     // are used correctly.
-    conf.set(ScmConfigKeys.OZONE_SCM_CLIENT_ADDRESS_KEY, "1.2.3.4:100");
-    addr = getScmAddressForClients(conf);
+    conf.set(OZONE_SCM_CLIENT_ADDRESS_KEY, "1.2.3.4:100");
+    addr = HddsUtils.getScmAddressForClients(conf);
     assertThat(addr.getHostString(), is("1.2.3.4"));
     assertThat(addr.getPort(), is(100));
   }
@@ -85,21 +89,128 @@ public class TestHddsClientUtils {
     // First try a client address with just a host name. Verify it falls
     // back to the default port.
     conf.set(OMConfigKeys.OZONE_OM_ADDRESS_KEY, "1.2.3.4");
-    InetSocketAddress addr = getOmAddress(conf);
+    InetSocketAddress addr = OmUtils.getOmAddress(conf);
     assertThat(addr.getHostString(), is("1.2.3.4"));
     assertThat(addr.getPort(), is(OMConfigKeys.OZONE_OM_PORT_DEFAULT));
 
     // Next try a client address with just a host name and port. Verify the port
     // is ignored and the default OM port is used.
     conf.set(OMConfigKeys.OZONE_OM_ADDRESS_KEY, "1.2.3.4:100");
-    addr = getOmAddress(conf);
+    addr = OmUtils.getOmAddress(conf);
     assertThat(addr.getHostString(), is("1.2.3.4"));
     assertThat(addr.getPort(), is(100));
 
     // Assert the we are able to use default configs if no value is specified.
     conf.set(OMConfigKeys.OZONE_OM_ADDRESS_KEY, "");
-    addr = getOmAddress(conf);
+    addr = OmUtils.getOmAddress(conf);
     assertThat(addr.getHostString(), is("0.0.0.0"));
     assertThat(addr.getPort(), is(OMConfigKeys.OZONE_OM_PORT_DEFAULT));
   }
+
+  @Test
+  public void testBlockClientFallbackToClientNoPort() {
+    // When OZONE_SCM_BLOCK_CLIENT_ADDRESS_KEY is undefined it should
+    // fallback to OZONE_SCM_CLIENT_ADDRESS_KEY.
+    final String scmHost = "host123";
+    final Configuration conf = new OzoneConfiguration();
+    conf.set(OZONE_SCM_CLIENT_ADDRESS_KEY, scmHost);
+    final InetSocketAddress address =HddsUtils.getScmAddressForBlockClients(
+        conf);
+    assertEquals(address.getHostName(), scmHost);
+    assertEquals(address.getPort(), OZONE_SCM_BLOCK_CLIENT_PORT_DEFAULT);
+  }
+
+  @Test
+  public void testBlockClientFallbackToClientWithPort() {
+    // When OZONE_SCM_BLOCK_CLIENT_ADDRESS_KEY is undefined it should
+    // fallback to OZONE_SCM_CLIENT_ADDRESS_KEY.
+    //
+    // Verify that the OZONE_SCM_CLIENT_ADDRESS_KEY port number is ignored,
+    // if present. Instead we should use OZONE_SCM_BLOCK_CLIENT_PORT_DEFAULT.
+    final String scmHost = "host123:100";
+    final Configuration conf = new OzoneConfiguration();
+    conf.set(OZONE_SCM_CLIENT_ADDRESS_KEY, scmHost);
+    final InetSocketAddress address =HddsUtils.getScmAddressForBlockClients(
+        conf);
+    assertEquals(address.getHostName(), scmHost.split(":")[0]);
+    assertEquals(address.getPort(), OZONE_SCM_BLOCK_CLIENT_PORT_DEFAULT);
+  }
+
+  @Test
+  public void testBlockClientFallbackToScmNamesNoPort() {
+    // When OZONE_SCM_BLOCK_CLIENT_ADDRESS_KEY and OZONE_SCM_CLIENT_ADDRESS_KEY
+    // are undefined it should fallback to OZONE_SCM_NAMES.
+    final String scmHost = "host456";
+    final Configuration conf = new OzoneConfiguration();
+    conf.set(OZONE_SCM_NAMES, scmHost);
+    final InetSocketAddress address =HddsUtils.getScmAddressForBlockClients(
+        conf);
+    assertEquals(address.getHostName(), scmHost);
+    assertEquals(address.getPort(), OZONE_SCM_BLOCK_CLIENT_PORT_DEFAULT);
+  }
+
+  @Test
+  public void testBlockClientFallbackToScmNamesWithPort() {
+    // When OZONE_SCM_BLOCK_CLIENT_ADDRESS_KEY and OZONE_SCM_CLIENT_ADDRESS_KEY
+    // are undefined it should fallback to OZONE_SCM_NAMES.
+    //
+    // Verify that the OZONE_SCM_NAMES port number is ignored, if present.
+    // Instead we should use OZONE_SCM_BLOCK_CLIENT_PORT_DEFAULT.
+    final String scmHost = "host456:200";
+    final Configuration conf = new OzoneConfiguration();
+    conf.set(OZONE_SCM_NAMES, scmHost);
+    final InetSocketAddress address =HddsUtils.getScmAddressForBlockClients(
+        conf);
+    assertEquals(address.getHostName(), scmHost.split(":")[0]);
+    assertEquals(address.getPort(), OZONE_SCM_BLOCK_CLIENT_PORT_DEFAULT);
+  }
+
+  @Test
+  public void testClientFallbackToScmNamesNoPort() {
+    // When OZONE_SCM_CLIENT_ADDRESS_KEY is undefined, it should fallback
+    // to OZONE_SCM_NAMES.
+    final String scmHost = "host456";
+    final Configuration conf = new OzoneConfiguration();
+    conf.set(OZONE_SCM_NAMES, scmHost);
+    final InetSocketAddress address =HddsUtils.getScmAddressForClients(conf);
+    assertEquals(address.getHostName(), scmHost);
+    assertEquals(address.getPort(), OZONE_SCM_CLIENT_PORT_DEFAULT);
+  }
+
+  @Test
+  public void testClientFallbackToScmNamesWithPort() {
+    // When OZONE_SCM_CLIENT_ADDRESS_KEY is undefined, it should fallback
+    // to OZONE_SCM_NAMES.
+    //
+    // Verify that the OZONE_SCM_NAMES port number is ignored, if present.
+    // Instead we should use OZONE_SCM_BLOCK_CLIENT_PORT_DEFAULT.
+    final String scmHost = "host456:300";
+    final Configuration conf = new OzoneConfiguration();
+    conf.set(OZONE_SCM_NAMES, scmHost);
+    final InetSocketAddress address =HddsUtils.getScmAddressForClients(conf);
+    assertEquals(address.getHostName(), scmHost.split(":")[0]);
+    assertEquals(address.getPort(), OZONE_SCM_CLIENT_PORT_DEFAULT);
+  }
+
+  @Test
+  public void testBlockClientFailsWithMultipleScmNames() {
+    // When OZONE_SCM_BLOCK_CLIENT_ADDRESS_KEY and OZONE_SCM_CLIENT_ADDRESS_KEY
+    // are undefined, fail if OZONE_SCM_NAMES has multiple SCMs.
+    final String scmHost = "host123,host456";
+    final Configuration conf = new OzoneConfiguration();
+    conf.set(OZONE_SCM_NAMES, scmHost);
+    thrown.expect(IllegalArgumentException.class);
+    HddsUtils.getScmAddressForBlockClients(conf);
+  }
+
+  @Test
+  public void testClientFailsWithMultipleScmNames() {
+    // When OZONE_SCM_CLIENT_ADDRESS_KEY is undefined, fail if OZONE_SCM_NAMES
+    // has multiple SCMs.
+    final String scmHost = "host123,host456";
+    final Configuration conf = new OzoneConfiguration();
+    conf.set(OZONE_SCM_NAMES, scmHost);
+    thrown.expect(IllegalArgumentException.class);
+    HddsUtils.getScmAddressForClients(conf);
+  }
 }




[46/50] [abbrv] hadoop git commit: HDDS-712. Use x-amz-storage-class to specify replication type and replication factor. Contributed by Bharat Viswanadham.

Posted by su...@apache.org.
HDDS-712. Use x-amz-storage-class to specify replication type and replication factor. Contributed by Bharat Viswanadham.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/ecac351a
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/ecac351a
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/ecac351a

Branch: refs/heads/HDFS-12943
Commit: ecac351aac1702194c56743ced5a66242643f28c
Parents: 9c438ab
Author: Márton Elek <el...@apache.org>
Authored: Wed Oct 31 11:08:43 2018 +0100
Committer: Márton Elek <el...@apache.org>
Committed: Wed Oct 31 13:28:59 2018 +0100

----------------------------------------------------------------------
 .../dist/src/main/smoketest/s3/awss3.robot      |  4 +-
 .../dist/src/main/smoketest/s3/objectcopy.robot | 18 ++---
 .../src/main/smoketest/s3/objectdelete.robot    |  6 +-
 .../main/smoketest/s3/objectmultidelete.robot   |  6 +-
 .../src/main/smoketest/s3/objectputget.robot    |  2 +-
 .../ozone/s3/endpoint/ObjectEndpoint.java       | 68 ++++++++++++++-----
 .../apache/hadoop/ozone/s3/util/S3Consts.java   | 19 ++++++
 .../hadoop/ozone/s3/util/S3StorageType.java     | 55 ++++++++++++++++
 .../hadoop/ozone/s3/util/package-info.java      | 22 +++++++
 .../hadoop/ozone/s3/endpoint/TestPutObject.java | 69 +++++++++++---------
 10 files changed, 205 insertions(+), 64 deletions(-)
----------------------------------------------------------------------
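
For orientation, a minimal sketch of the header-to-replication mapping introduced in this change (the class name and the sample header value are illustrative): the x-amz-storage-class value, if present, selects the Ozone replication type and factor; an unknown value is rejected, and a missing header falls back to the STANDARD default.

    import org.apache.hadoop.hdds.client.ReplicationFactor;
    import org.apache.hadoop.hdds.client.ReplicationType;
    import org.apache.hadoop.ozone.s3.util.S3StorageType;

    public class StorageClassMappingSketch {
      public static void main(String[] args) {
        String storageClass = "REDUCED_REDUNDANCY"; // sample x-amz-storage-class value
        S3StorageType s3Type = storageClass == null
            ? S3StorageType.getDefault()
            : S3StorageType.valueOf(storageClass);  // IllegalArgumentException for unknown values
        ReplicationType type = s3Type.getType();     // STAND_ALONE for REDUCED_REDUNDANCY
        ReplicationFactor factor = s3Type.getFactor(); // ONE for REDUCED_REDUNDANCY
        System.out.println(type + " / " + factor);
      }
    }

The smoketest changes below pass --storage-class REDUCED_REDUNDANCY to the aws CLI, which is what ends up in this header on the gateway side.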


http://git-wip-us.apache.org/repos/asf/hadoop/blob/ecac351a/hadoop-ozone/dist/src/main/smoketest/s3/awss3.robot
----------------------------------------------------------------------
diff --git a/hadoop-ozone/dist/src/main/smoketest/s3/awss3.robot b/hadoop-ozone/dist/src/main/smoketest/s3/awss3.robot
index 79db688..c1ec9f0 100644
--- a/hadoop-ozone/dist/src/main/smoketest/s3/awss3.robot
+++ b/hadoop-ozone/dist/src/main/smoketest/s3/awss3.robot
@@ -29,9 +29,9 @@ ${BUCKET}             generated
 
 File upload and directory list
                         Execute                   date > /tmp/testfile
-    ${result} =         Execute AWSS3Cli          cp /tmp/testfile s3://${BUCKET}
+    ${result} =         Execute AWSS3Cli          cp --storage-class REDUCED_REDUNDANCY /tmp/testfile s3://${BUCKET}
                         Should contain            ${result}         upload
-    ${result} =         Execute AWSS3Cli          cp /tmp/testfile s3://${BUCKET}/dir1/dir2/file
+    ${result} =         Execute AWSS3Cli          cp --storage-class REDUCED_REDUNDANCY /tmp/testfile s3://${BUCKET}/dir1/dir2/file
                         Should contain            ${result}         upload
     ${result} =         Execute AWSS3Cli          ls s3://${BUCKET}
                         Should contain            ${result}         testfile

http://git-wip-us.apache.org/repos/asf/hadoop/blob/ecac351a/hadoop-ozone/dist/src/main/smoketest/s3/objectcopy.robot
----------------------------------------------------------------------
diff --git a/hadoop-ozone/dist/src/main/smoketest/s3/objectcopy.robot b/hadoop-ozone/dist/src/main/smoketest/s3/objectcopy.robot
index 2daa861..e702d9b 100644
--- a/hadoop-ozone/dist/src/main/smoketest/s3/objectcopy.robot
+++ b/hadoop-ozone/dist/src/main/smoketest/s3/objectcopy.robot
@@ -39,28 +39,28 @@ Create Dest Bucket
 Copy Object Happy Scenario
     Run Keyword if    '${DESTBUCKET}' == 'generated1'    Create Dest Bucket
                         Execute                    date > /tmp/copyfile
-    ${result} =         Execute AWSS3ApiCli        put-object --bucket ${BUCKET} --key copyobject/f1 --body /tmp/copyfile
+    ${result} =         Execute AWSS3ApiCli        put-object --storage-class REDUCED_REDUNDANCY --bucket ${BUCKET} --key copyobject/f1 --body /tmp/copyfile
     ${result} =         Execute AWSS3ApiCli        list-objects --bucket ${BUCKET} --prefix copyobject/
                         Should contain             ${result}         f1
 
-    ${result} =         Execute AWSS3ApiCli        copy-object --bucket ${DESTBUCKET} --key copyobject/f1 --copy-source ${BUCKET}/copyobject/f1
+    ${result} =         Execute AWSS3ApiCli        copy-object --storage-class REDUCED_REDUNDANCY --bucket ${DESTBUCKET} --key copyobject/f1 --copy-source ${BUCKET}/copyobject/f1
     ${result} =         Execute AWSS3ApiCli        list-objects --bucket ${DESTBUCKET} --prefix copyobject/
                         Should contain             ${result}         f1
     #copying again will not throw error
-    ${result} =         Execute AWSS3ApiCli        copy-object --bucket ${DESTBUCKET} --key copyobject/f1 --copy-source ${BUCKET}/copyobject/f1
+    ${result} =         Execute AWSS3ApiCli        copy-object --storage-class REDUCED_REDUNDANCY --bucket ${DESTBUCKET} --key copyobject/f1 --copy-source ${BUCKET}/copyobject/f1
     ${result} =         Execute AWSS3ApiCli        list-objects --bucket ${DESTBUCKET} --prefix copyobject/
                         Should contain             ${result}         f1
 
 Copy Object Where Bucket is not available
-    ${result} =         Execute AWSS3APICli and checkrc        copy-object --bucket dfdfdfdfdfnonexistent --key copyobject/f1 --copy-source ${BUCKET}/copyobject/f1      255
+    ${result} =         Execute AWSS3APICli and checkrc        copy-object --storage-class REDUCED_REDUNDANCY --bucket dfdfdfdfdfnonexistent --key copyobject/f1 --copy-source ${BUCKET}/copyobject/f1      255
                         Should contain             ${result}        NoSuchBucket
-    ${result} =         Execute AWSS3APICli and checkrc        copy-object --bucket ${DESTBUCKET} --key copyobject/f1 --copy-source dfdfdfdfdfnonexistent/copyobject/f1  255
+    ${result} =         Execute AWSS3APICli and checkrc        copy-object --storage-class REDUCED_REDUNDANCY --bucket ${DESTBUCKET} --key copyobject/f1 --copy-source dfdfdfdfdfnonexistent/copyobject/f1  255
                         Should contain             ${result}        NoSuchBucket
 
-Copy Object Where both source and dest are same
-     ${result} =         Execute AWSS3APICli and checkrc        copy-object --bucket ${DESTBUCKET} --key copyobject/f1 --copy-source ${DESTBUCKET}/copyobject/f1      255
-                         Should contain             ${result}        InvalidRequest
+Copy Object Where both source and dest are same with change to storageclass
+     ${result} =         Execute AWSS3APICli        copy-object --storage-class REDUCED_REDUNDANCY --bucket ${DESTBUCKET} --key copyobject/f1 --copy-source ${DESTBUCKET}/copyobject/f1
+                         Should contain             ${result}        ETag
 
 Copy Object Where Key not available
-    ${result} =         Execute AWSS3APICli and checkrc        copy-object --bucket ${DESTBUCKET} --key copyobject/f1 --copy-source ${BUCKET}/nonnonexistentkey       255
+    ${result} =         Execute AWSS3APICli and checkrc        copy-object --storage-class REDUCED_REDUNDANCY --bucket ${DESTBUCKET} --key copyobject/f1 --copy-source ${BUCKET}/nonnonexistentkey       255
                         Should contain             ${result}        NoSuchKey

http://git-wip-us.apache.org/repos/asf/hadoop/blob/ecac351a/hadoop-ozone/dist/src/main/smoketest/s3/objectdelete.robot
----------------------------------------------------------------------
diff --git a/hadoop-ozone/dist/src/main/smoketest/s3/objectdelete.robot b/hadoop-ozone/dist/src/main/smoketest/s3/objectdelete.robot
index 9e57d50..44e1a22 100644
--- a/hadoop-ozone/dist/src/main/smoketest/s3/objectdelete.robot
+++ b/hadoop-ozone/dist/src/main/smoketest/s3/objectdelete.robot
@@ -28,7 +28,7 @@ ${BUCKET}             generated
 *** Test Cases ***
 Delete file with s3api
                         Execute                    date > /tmp/testfile
-    ${result} =         Execute AWSS3ApiCli        put-object --bucket ${BUCKET} --key deletetestapi/f1 --body /tmp/testfile
+    ${result} =         Execute AWSS3ApiCli        put-object --storage-class REDUCED_REDUNDANCY --bucket ${BUCKET} --key deletetestapi/f1 --body /tmp/testfile
     ${result} =         Execute AWSS3ApiCli        list-objects --bucket ${BUCKET} --prefix deletetestapi/
                         Should contain             ${result}         f1
     ${result} =         Execute AWSS3APICli        delete-object --bucket ${BUCKET} --key deletetestapi/f1
@@ -46,7 +46,7 @@ Delete file with s3api, file doesn't exist
 
 Delete dir with s3api
                         Execute                    date > /tmp/testfile
-    ${result} =         Execute AWSS3Cli           cp /tmp/testfile s3://${BUCKET}/deletetestapidir/f1
+    ${result} =         Execute AWSS3Cli           cp --storage-class REDUCED_REDUNDANCY /tmp/testfile s3://${BUCKET}/deletetestapidir/f1
     ${result} =         Execute AWSS3Cli           ls s3://${BUCKET}/deletetestapidir/
                         Should contain             ${result}         f1
     ${result} =         Execute AWSS3APICli        delete-object --bucket ${BUCKET} --key deletetestapidir/
@@ -57,7 +57,7 @@ Delete dir with s3api
 
 Delete file with s3api, file doesn't exist, prefix of a real file
                         Execute                    date > /tmp/testfile
-    ${result} =         Execute AWSS3Cli           cp /tmp/testfile s3://${BUCKET}/deletetestapiprefix/filefile
+    ${result} =         Execute AWSS3Cli           cp --storage-class REDUCED_REDUNDANCY /tmp/testfile s3://${BUCKET}/deletetestapiprefix/filefile
     ${result} =         Execute AWSS3Cli           ls s3://${BUCKET}/deletetestapiprefix/
                         Should contain             ${result}         filefile
     ${result} =         Execute AWSS3APICli        delete-object --bucket ${BUCKET} --key deletetestapiprefix/file

http://git-wip-us.apache.org/repos/asf/hadoop/blob/ecac351a/hadoop-ozone/dist/src/main/smoketest/s3/objectmultidelete.robot
----------------------------------------------------------------------
diff --git a/hadoop-ozone/dist/src/main/smoketest/s3/objectmultidelete.robot b/hadoop-ozone/dist/src/main/smoketest/s3/objectmultidelete.robot
index 83c967b..e8b7bd3 100644
--- a/hadoop-ozone/dist/src/main/smoketest/s3/objectmultidelete.robot
+++ b/hadoop-ozone/dist/src/main/smoketest/s3/objectmultidelete.robot
@@ -29,9 +29,9 @@ ${BUCKET}             generated
 
 Delete file with multi delete
                         Execute                    date > /tmp/testfile
-    ${result} =         Execute AWSS3ApiCli        put-object --bucket ${BUCKET} --key multidelete/f1 --body /tmp/testfile
-    ${result} =         Execute AWSS3ApiCli        put-object --bucket ${BUCKET} --key multidelete/f2 --body /tmp/testfile
-    ${result} =         Execute AWSS3ApiCli        put-object --bucket ${BUCKET} --key multidelete/f3 --body /tmp/testfile
+    ${result} =         Execute AWSS3ApiCli        put-object --storage-class REDUCED_REDUNDANCY --bucket ${BUCKET} --key multidelete/f1 --body /tmp/testfile
+    ${result} =         Execute AWSS3ApiCli        put-object --storage-class REDUCED_REDUNDANCY --bucket ${BUCKET} --key multidelete/f2 --body /tmp/testfile
+    ${result} =         Execute AWSS3ApiCli        put-object --storage-class REDUCED_REDUNDANCY --bucket ${BUCKET} --key multidelete/f3 --body /tmp/testfile
     ${result} =         Execute AWSS3ApiCli        list-objects --bucket ${BUCKET} --prefix multidelete/
                         Should contain             ${result}         multidelete/f1
                         Should contain             ${result}         multidelete/f2

http://git-wip-us.apache.org/repos/asf/hadoop/blob/ecac351a/hadoop-ozone/dist/src/main/smoketest/s3/objectputget.robot
----------------------------------------------------------------------
diff --git a/hadoop-ozone/dist/src/main/smoketest/s3/objectputget.robot b/hadoop-ozone/dist/src/main/smoketest/s3/objectputget.robot
index 858e472..f650de8 100644
--- a/hadoop-ozone/dist/src/main/smoketest/s3/objectputget.robot
+++ b/hadoop-ozone/dist/src/main/smoketest/s3/objectputget.robot
@@ -30,7 +30,7 @@ ${BUCKET}             generated
 
 Put object to s3
                         Execute                    date > /tmp/testfile
-    ${result} =         Execute AWSS3ApiCli        put-object --bucket ${BUCKET} --key putobject/f1 --body /tmp/testfile
+    ${result} =         Execute AWSS3ApiCli        put-object --storage-class REDUCED_REDUNDANCY --bucket ${BUCKET} --key putobject/f1 --body /tmp/testfile
     ${result} =         Execute AWSS3ApiCli        list-objects --bucket ${BUCKET} --prefix putobject/
                         Should contain             ${result}         f1
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/ecac351a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/ObjectEndpoint.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/ObjectEndpoint.java b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/ObjectEndpoint.java
index 9782d75..7000a3a 100644
--- a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/ObjectEndpoint.java
+++ b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/ObjectEndpoint.java
@@ -18,14 +18,12 @@
 package org.apache.hadoop.ozone.s3.endpoint;
 
 import javax.ws.rs.DELETE;
-import javax.ws.rs.DefaultValue;
 import javax.ws.rs.GET;
 import javax.ws.rs.HEAD;
 import javax.ws.rs.HeaderParam;
 import javax.ws.rs.PUT;
 import javax.ws.rs.Path;
 import javax.ws.rs.PathParam;
-import javax.ws.rs.QueryParam;
 import javax.ws.rs.core.Context;
 import javax.ws.rs.core.HttpHeaders;
 import javax.ws.rs.core.Response;
@@ -53,11 +51,16 @@ import org.apache.hadoop.ozone.s3.exception.S3ErrorTable;
 
 import com.google.common.annotations.VisibleForTesting;
 import org.apache.commons.io.IOUtils;
+import org.apache.hadoop.ozone.s3.util.S3StorageType;
 import org.apache.hadoop.ozone.web.utils.OzoneUtils;
+import org.apache.hadoop.util.Time;
 import org.apache.http.HttpStatus;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
+import static org.apache.hadoop.ozone.s3.util.S3Consts.COPY_SOURCE_HEADER;
+import static org.apache.hadoop.ozone.s3.util.S3Consts.STORAGE_CLASS_HEADER;
+
 /**
  * Key level rest endpoints.
  */
@@ -91,22 +94,37 @@ public class ObjectEndpoint extends EndpointBase {
   public Response put(
       @PathParam("bucket") String bucketName,
       @PathParam("path") String keyPath,
-      @DefaultValue("STAND_ALONE") @QueryParam("replicationType")
-          ReplicationType replicationType,
-      @DefaultValue("ONE") @QueryParam("replicationFactor")
-          ReplicationFactor replicationFactor,
       @HeaderParam("Content-Length") long length,
       InputStream body) throws IOException, OS3Exception {
 
     OzoneOutputStream output = null;
     try {
-      String copyHeader = headers.getHeaderString("x-amz-copy-source");
+      String copyHeader = headers.getHeaderString(COPY_SOURCE_HEADER);
+      String storageType = headers.getHeaderString(STORAGE_CLASS_HEADER);
+
+      ReplicationType replicationType;
+      ReplicationFactor replicationFactor;
+      boolean storageTypeDefault;
+      if (storageType == null) {
+        replicationType = S3StorageType.getDefault().getType();
+        replicationFactor = S3StorageType.getDefault().getFactor();
+        storageTypeDefault = true;
+      } else {
+        try {
+          replicationType = S3StorageType.valueOf(storageType).getType();
+          replicationFactor = S3StorageType.valueOf(storageType).getFactor();
+        } catch (IllegalArgumentException ex) {
+          throw S3ErrorTable.newError(S3ErrorTable.INVALID_ARGUMENT,
+              storageType);
+        }
+        storageTypeDefault = false;
+      }
 
       if (copyHeader != null) {
         //Copy object, as copy source available.
         CopyObjectResponse copyObjectResponse = copyObject(
             copyHeader, bucketName, keyPath, replicationType,
-            replicationFactor);
+            replicationFactor, storageTypeDefault);
         return Response.status(Status.OK).entity(copyObjectResponse).header(
             "Connection", "close").build();
       }
@@ -254,7 +272,8 @@ public class ObjectEndpoint extends EndpointBase {
                                         String destBucket,
                                         String destkey,
                                         ReplicationType replicationType,
-                                        ReplicationFactor replicationFactor)
+                                        ReplicationFactor replicationFactor,
+                                        boolean storageTypeDefault)
       throws OS3Exception, IOException {
 
     if (copyHeader.startsWith("/")) {
@@ -276,18 +295,35 @@ public class ObjectEndpoint extends EndpointBase {
     boolean closed = false;
     try {
       // Checking whether we trying to copying to it self.
+
       if (sourceBucket.equals(destBucket)) {
         if (sourceKey.equals(destkey)) {
-          OS3Exception ex = S3ErrorTable.newError(S3ErrorTable
-              .INVALID_REQUEST, copyHeader);
-          ex.setErrorMessage("This copy request is illegal because it is " +
-              "trying to copy an object to it self itself without changing " +
-              "the object's metadata, storage class, website redirect " +
-              "location or encryption attributes.");
-          throw ex;
+          // When copying to same storage type when storage type is provided,
+          // we should not throw exception, as aws cli checks if any of the
+          // options like storage type are provided or not when source and
+          // dest are given same
+          if (storageTypeDefault) {
+            OS3Exception ex = S3ErrorTable.newError(S3ErrorTable
+                .INVALID_REQUEST, copyHeader);
+            ex.setErrorMessage("This copy request is illegal because it is " +
+                "trying to copy an object to it self itself without changing " +
+                "the object's metadata, storage class, website redirect " +
+                "location or encryption attributes.");
+            throw ex;
+          } else {
+            // TODO: Actually here we should change storage type, as ozone
+            // still does not support this just returning dummy response
+            // for now
+            CopyObjectResponse copyObjectResponse = new CopyObjectResponse();
+            copyObjectResponse.setETag(OzoneUtils.getRequestID());
+            copyObjectResponse.setLastModified(Instant.ofEpochMilli(
+                Time.now()));
+            return copyObjectResponse;
+          }
         }
       }
 
+
       OzoneBucket sourceOzoneBucket = getBucket(sourceBucket);
       OzoneBucket destOzoneBucket = getBucket(destBucket);
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/ecac351a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/util/S3Consts.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/util/S3Consts.java b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/util/S3Consts.java
new file mode 100644
index 0000000..2e7b965
--- /dev/null
+++ b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/util/S3Consts.java
@@ -0,0 +1,19 @@
+package org.apache.hadoop.ozone.s3.util;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+
+/**
+ * Set of constants used for S3 implementation.
+ */
+@InterfaceAudience.Private
+public final class S3Consts {
+
+  //Never Constructed
+  private S3Consts() {
+
+  }
+
+  public static final String COPY_SOURCE_HEADER = "x-amz-copy-source";
+  public static final String STORAGE_CLASS_HEADER = "x-amz-storage-class";
+
+}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/ecac351a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/util/S3StorageType.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/util/S3StorageType.java b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/util/S3StorageType.java
new file mode 100644
index 0000000..c9d1b50
--- /dev/null
+++ b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/util/S3StorageType.java
@@ -0,0 +1,55 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.ozone.s3.util;
+
+import org.apache.hadoop.hdds.client.ReplicationFactor;
+import org.apache.hadoop.hdds.client.ReplicationType;
+
+/**
+ * Maps S3 storage class values to Ozone replication values.
+ */
+
+public enum S3StorageType {
+
+  REDUCED_REDUNDANCY(ReplicationType.STAND_ALONE, ReplicationFactor.ONE),
+  STANDARD(ReplicationType.RATIS, ReplicationFactor.THREE);
+
+  private final ReplicationType type;
+  private final ReplicationFactor factor;
+
+  S3StorageType(
+      ReplicationType type,
+      ReplicationFactor factor) {
+    this.type = type;
+    this.factor = factor;
+  }
+
+  public ReplicationFactor getFactor() {
+    return factor;
+  }
+
+  public ReplicationType getType() {
+    return type;
+  }
+
+  public static S3StorageType getDefault() {
+    return STANDARD;
+  }
+
+}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/ecac351a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/util/package-info.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/util/package-info.java b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/util/package-info.java
new file mode 100644
index 0000000..af93f08
--- /dev/null
+++ b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/util/package-info.java
@@ -0,0 +1,22 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/**
+ * This package contains Ozone S3 Util classes.
+ */
+package org.apache.hadoop.ozone.s3.util;
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hadoop/blob/ecac351a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestPutObject.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestPutObject.java b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestPutObject.java
index 4f94e56..eb05861 100644
--- a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestPutObject.java
+++ b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestPutObject.java
@@ -26,19 +26,21 @@ import java.io.ByteArrayInputStream;
 import java.io.IOException;
 import java.nio.charset.Charset;
 
-import org.apache.hadoop.hdds.client.ReplicationFactor;
-import org.apache.hadoop.hdds.client.ReplicationType;
 import org.apache.hadoop.ozone.client.ObjectStore;
 import org.apache.hadoop.ozone.client.OzoneClientStub;
 import org.apache.hadoop.ozone.client.io.OzoneInputStream;
 import org.apache.hadoop.ozone.s3.exception.OS3Exception;
 
 import org.apache.commons.io.IOUtils;
+import org.apache.hadoop.ozone.s3.exception.S3ErrorTable;
 import org.junit.Assert;
 import org.junit.Before;
 import org.junit.Test;
 import org.mockito.Mockito;
 
+import static org.apache.hadoop.ozone.s3.util.S3Consts.COPY_SOURCE_HEADER;
+import static org.apache.hadoop.ozone.s3.util.S3Consts.STORAGE_CLASS_HEADER;
+import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.fail;
 import static org.mockito.Mockito.when;
 
@@ -80,9 +82,9 @@ public class TestPutObject {
     objectEndpoint.setHeaders(headers);
 
     //WHEN
-    Response response = objectEndpoint.put(bucketName, keyName,
-        ReplicationType.STAND_ALONE, ReplicationFactor.ONE, CONTENT.length(),
-        body);
+    Response response = objectEndpoint.put(bucketName, keyName, CONTENT
+        .length(), body);
+
 
     //THEN
     String volumeName = clientStub.getObjectStore()
@@ -113,8 +115,6 @@ public class TestPutObject {
 
     //WHEN
     Response response = objectEndpoint.put(bucketName, keyName,
-        ReplicationType.STAND_ALONE,
-        ReplicationFactor.ONE,
         chunkedContent.length(),
         new ByteArrayInputStream(chunkedContent.getBytes()));
 
@@ -140,8 +140,7 @@ public class TestPutObject {
     keyName = "sourceKey";
 
     Response response = objectEndpoint.put(bucketName, keyName,
-        ReplicationType.STAND_ALONE, ReplicationFactor.ONE, CONTENT.length(),
-        body);
+        CONTENT.length(), body);
 
     String volumeName = clientStub.getObjectStore().getOzoneVolumeName(
         bucketName);
@@ -157,12 +156,10 @@ public class TestPutObject {
 
 
     // Add copy header, and then call put
-    when(headers.getHeaderString("x-amz-copy-source")).thenReturn(
+    when(headers.getHeaderString(COPY_SOURCE_HEADER)).thenReturn(
         bucketName  + "/" + keyName);
 
-    response = objectEndpoint.put(destBucket, destkey,
-        ReplicationType.STAND_ALONE, ReplicationFactor.ONE, CONTENT.length(),
-        body);
+    response = objectEndpoint.put(destBucket, destkey, CONTENT.length(), body);
 
     // Check destination key and response
     volumeName = clientStub.getObjectStore().getOzoneVolumeName(destBucket);
@@ -176,8 +173,7 @@ public class TestPutObject {
 
     // source and dest same
     try {
-      objectEndpoint.put(bucketName, keyName, ReplicationType.STAND_ALONE,
-          ReplicationFactor.ONE, CONTENT.length(), body);
+      objectEndpoint.put(bucketName, keyName, CONTENT.length(), body);
       fail("test copy object failed");
     } catch (OS3Exception ex) {
       Assert.assertTrue(ex.getErrorMessage().contains("This copy request is " +
@@ -186,10 +182,9 @@ public class TestPutObject {
 
     // source bucket not found
     try {
-      when(headers.getHeaderString("x-amz-copy-source")).thenReturn(
+      when(headers.getHeaderString(COPY_SOURCE_HEADER)).thenReturn(
           nonexist + "/"  + keyName);
-      response = objectEndpoint.put(destBucket, destkey,
-          ReplicationType.STAND_ALONE, ReplicationFactor.ONE, CONTENT.length(),
+      objectEndpoint.put(destBucket, destkey, CONTENT.length(),
           body);
       fail("test copy object failed");
     } catch (OS3Exception ex) {
@@ -198,11 +193,9 @@ public class TestPutObject {
 
     // dest bucket not found
     try {
-      when(headers.getHeaderString("x-amz-copy-source")).thenReturn(
+      when(headers.getHeaderString(COPY_SOURCE_HEADER)).thenReturn(
           bucketName + "/" + keyName);
-      response = objectEndpoint.put(nonexist, destkey,
-          ReplicationType.STAND_ALONE, ReplicationFactor.ONE, CONTENT.length(),
-          body);
+      objectEndpoint.put(nonexist, destkey, CONTENT.length(), body);
       fail("test copy object failed");
     } catch (OS3Exception ex) {
       Assert.assertTrue(ex.getCode().contains("NoSuchBucket"));
@@ -210,11 +203,9 @@ public class TestPutObject {
 
     //Both source and dest bucket not found
     try {
-      when(headers.getHeaderString("x-amz-copy-source")).thenReturn(
+      when(headers.getHeaderString(COPY_SOURCE_HEADER)).thenReturn(
           nonexist + "/" + keyName);
-      response = objectEndpoint.put(nonexist, destkey,
-          ReplicationType.STAND_ALONE, ReplicationFactor.ONE, CONTENT.length(),
-          body);
+      objectEndpoint.put(nonexist, destkey, CONTENT.length(), body);
       fail("test copy object failed");
     } catch (OS3Exception ex) {
       Assert.assertTrue(ex.getCode().contains("NoSuchBucket"));
@@ -222,15 +213,33 @@ public class TestPutObject {
 
     // source key not found
     try {
-      when(headers.getHeaderString("x-amz-copy-source")).thenReturn(
+      when(headers.getHeaderString(COPY_SOURCE_HEADER)).thenReturn(
           bucketName + "/" + nonexist);
-      response = objectEndpoint.put("nonexistent", keyName,
-          ReplicationType.STAND_ALONE, ReplicationFactor.ONE, CONTENT.length(),
-          body);
+      objectEndpoint.put("nonexistent", keyName, CONTENT.length(), body);
       fail("test copy object failed");
     } catch (OS3Exception ex) {
       Assert.assertTrue(ex.getCode().contains("NoSuchBucket"));
     }
 
   }
+
+  @Test
+  public void testInvalidStorageType() throws IOException {
+    HttpHeaders headers = Mockito.mock(HttpHeaders.class);
+    ByteArrayInputStream body = new ByteArrayInputStream(CONTENT.getBytes());
+    objectEndpoint.setHeaders(headers);
+    keyName = "sourceKey";
+    when(headers.getHeaderString(STORAGE_CLASS_HEADER)).thenReturn("random");
+
+    try {
+      Response response = objectEndpoint.put(bucketName, keyName,
+          CONTENT.length(), body);
+      fail("testInvalidStorageType");
+    } catch (OS3Exception ex) {
+      assertEquals(S3ErrorTable.INVALID_ARGUMENT.getErrorMessage(),
+          ex.getErrorMessage());
+      assertEquals("random", ex.getResource());
+    }
+
+  }
 }
\ No newline at end of file




[32/50] [abbrv] hadoop git commit: HDDS-749. Restructure BlockId class in Ozone. Contributed by Shashikant Banerjee.

Posted by su...@apache.org.
HDDS-749. Restructure BlockId class in Ozone. Contributed by Shashikant Banerjee.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/7757331d
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/7757331d
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/7757331d

Branch: refs/heads/HDFS-12943
Commit: 7757331dbc043694891a5242ac161adece9e8d6a
Parents: 486b9a4
Author: Shashikant Banerjee <sh...@apache.org>
Authored: Tue Oct 30 14:15:27 2018 +0530
Committer: Shashikant Banerjee <sh...@apache.org>
Committed: Tue Oct 30 14:15:27 2018 +0530

----------------------------------------------------------------------
 .../hdds/scm/storage/ChunkOutputStream.java     | 17 ++--
 .../org/apache/hadoop/hdds/client/BlockID.java  | 85 ++++++++++++++------
 .../hadoop/hdds/client/ContainerBlockID.java    | 79 ++++++++++++++++++
 .../common/helpers/AllocatedBlock.java          | 21 ++---
 ...kLocationProtocolClientSideTranslatorPB.java |  5 +-
 .../scm/storage/ContainerProtocolCalls.java     |  7 +-
 .../apache/hadoop/ozone/common/BlockGroup.java  |  3 +-
 .../container/common/helpers/BlockData.java     |  8 +-
 ...kLocationProtocolServerSideTranslatorPB.java |  2 +-
 .../main/proto/DatanodeContainerProtocol.proto  |  4 +-
 .../main/proto/ScmBlockLocationProtocol.proto   |  2 +-
 hadoop-hdds/common/src/main/proto/hdds.proto    |  7 +-
 .../container/keyvalue/KeyValueHandler.java     |  5 +-
 .../container/keyvalue/helpers/BlockUtils.java  |  2 -
 .../keyvalue/impl/BlockManagerImpl.java         |  6 +-
 .../keyvalue/interfaces/BlockManager.java       |  3 +-
 .../keyvalue/TestBlockManagerImpl.java          |  6 +-
 .../hadoop/hdds/scm/block/BlockManagerImpl.java |  3 +-
 .../ozone/client/io/ChunkGroupInputStream.java  |  3 +-
 .../ozone/client/io/ChunkGroupOutputStream.java | 21 ++---
 .../ozone/om/helpers/OmKeyLocationInfo.java     | 19 +----
 .../src/main/proto/OzoneManagerProtocol.proto   |  1 -
 .../container/TestContainerReplication.java     |  2 +-
 .../common/impl/TestCloseContainerHandler.java  |  2 +-
 .../common/impl/TestContainerPersistence.java   | 14 ++--
 .../TestGetCommittedBlockLengthAndPutKey.java   | 16 ++--
 .../hadoop/ozone/web/client/TestKeys.java       |  2 +-
 .../apache/hadoop/ozone/om/KeyManagerImpl.java  |  7 +-
 .../ozone/om/ScmBlockLocationTestIngClient.java |  3 +-
 29 files changed, 227 insertions(+), 128 deletions(-)
----------------------------------------------------------------------
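
A minimal sketch of the restructured BlockID (example values and the class name are illustrative): the (containerID, localID) pair now lives in ContainerBlockID, and the block commit sequence id (bcsId) is carried alongside it, defaulting to 0 until the block is committed.

    import org.apache.hadoop.hdds.client.BlockID;
    import org.apache.hadoop.hdds.client.ContainerBlockID;

    public class BlockIdSketch {
      public static void main(String[] args) {
        BlockID blockID = new BlockID(1L, 100L);   // containerID=1, localID=100, bcsId=0
        blockID.setBlockCommitSequenceId(7L);      // e.g. taken from a PutBlock response

        ContainerBlockID cb = blockID.getContainerBlockID();
        System.out.println(cb.getContainerID() + "/" + cb.getLocalID()
            + " bcsId=" + blockID.getBlockCommitSequenceId());

        // Protobuf round trip keeps the bcsId together with the block identity.
        BlockID copy = BlockID.getFromProtobuf(blockID.getProtobuf());
        System.out.println(copy);
      }
    }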


http://git-wip-us.apache.org/repos/asf/hadoop/blob/7757331d/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/ChunkOutputStream.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/ChunkOutputStream.java b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/ChunkOutputStream.java
index 4547163..4e881c4 100644
--- a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/ChunkOutputStream.java
+++ b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/ChunkOutputStream.java
@@ -19,6 +19,7 @@
 package org.apache.hadoop.hdds.scm.storage;
 
 
+import com.google.common.base.Preconditions;
 import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos;
 import org.apache.ratis.thirdparty.com.google.protobuf.ByteString;
 import org.apache.commons.codec.digest.DigestUtils;
@@ -57,7 +58,7 @@ import static org.apache.hadoop.hdds.scm.storage.ContainerProtocolCalls
  */
 public class ChunkOutputStream extends OutputStream {
 
-  private final BlockID blockID;
+  private BlockID blockID;
   private final String key;
   private final String traceID;
   private final BlockData.Builder containerBlockData;
@@ -67,7 +68,6 @@ public class ChunkOutputStream extends OutputStream {
   private final String streamId;
   private int chunkIndex;
   private int chunkSize;
-  private long blockCommitSequenceId;
 
   /**
    * Creates a new ChunkOutputStream.
@@ -96,15 +96,14 @@ public class ChunkOutputStream extends OutputStream {
     this.buffer = ByteBuffer.allocate(chunkSize);
     this.streamId = UUID.randomUUID().toString();
     this.chunkIndex = 0;
-    blockCommitSequenceId = 0;
   }
 
   public ByteBuffer getBuffer() {
     return buffer;
   }
 
-  public long getBlockCommitSequenceId() {
-    return blockCommitSequenceId;
+  public BlockID getBlockID() {
+    return blockID;
   }
 
   @Override
@@ -165,8 +164,12 @@ public class ChunkOutputStream extends OutputStream {
       try {
         ContainerProtos.PutBlockResponseProto responseProto =
             putBlock(xceiverClient, containerBlockData.build(), traceID);
-        blockCommitSequenceId =
-            responseProto.getCommittedBlockLength().getBlockCommitSequenceId();
+        BlockID responseBlockID = BlockID.getFromProtobuf(
+            responseProto.getCommittedBlockLength().getBlockID());
+        Preconditions.checkState(blockID.getContainerBlockID()
+            .equals(responseBlockID.getContainerBlockID()));
+        // updates the bcsId of the block
+        blockID = responseBlockID;
       } catch (IOException e) {
         throw new IOException(
             "Unexpected Storage Container Exception: " + e.toString(), e);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/7757331d/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/client/BlockID.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/client/BlockID.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/client/BlockID.java
index 8149740..a863437 100644
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/client/BlockID.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/client/BlockID.java
@@ -23,52 +23,88 @@ import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
 import java.util.Objects;
 
 /**
- * BlockID of ozone (containerID  localID).
+ * BlockID of Ozone (containerID + localID + blockCommitSequenceId).
  */
+
 public class BlockID {
-  private long containerID;
-  private long localID;
+
+  private ContainerBlockID containerBlockID;
+  private long blockCommitSequenceId;
 
   public BlockID(long containerID, long localID) {
-    this.containerID = containerID;
-    this.localID = localID;
+    this(containerID, localID, 0);
+  }
+
+  private BlockID(long containerID, long localID, long bcsID) {
+    containerBlockID = new ContainerBlockID(containerID, localID);
+    blockCommitSequenceId = bcsID;
+  }
+
+  public BlockID(ContainerBlockID containerBlockID) {
+    this(containerBlockID, 0);
+  }
+
+  private BlockID(ContainerBlockID containerBlockID, long bcsId) {
+    this.containerBlockID = containerBlockID;
+    blockCommitSequenceId = bcsId;
   }
 
   public long getContainerID() {
-    return containerID;
+    return containerBlockID.getContainerID();
   }
 
   public long getLocalID() {
-    return localID;
+    return containerBlockID.getLocalID();
   }
 
-  @Override
-  public String toString() {
-    return new ToStringBuilder(this).
-        append("containerID", containerID).
-        append("localID", localID).
-        toString();
+  public long getBlockCommitSequenceId() {
+    return blockCommitSequenceId;
   }
 
-  public HddsProtos.BlockID getProtobuf() {
-    return HddsProtos.BlockID.newBuilder().
-        setContainerID(containerID).setLocalID(localID).build();
+  public void setBlockCommitSequenceId(long blockCommitSequenceId) {
+    this.blockCommitSequenceId = blockCommitSequenceId;
   }
 
-  public static BlockID getFromProtobuf(HddsProtos.BlockID blockID) {
-    return new BlockID(blockID.getContainerID(),
-        blockID.getLocalID());
+  public ContainerBlockID getContainerBlockID() {
+    return containerBlockID;
+  }
+
+  public void setContainerBlockID(ContainerBlockID containerBlockID) {
+    this.containerBlockID = containerBlockID;
+  }
+
+  @Override
+  public String toString() {
+    return new ToStringBuilder(this)
+        .append("containerID", containerBlockID.getContainerID())
+        .append("localID", containerBlockID.getLocalID())
+        .append("blockCommitSequenceId", blockCommitSequenceId)
+        .toString();
   }
 
   public ContainerProtos.DatanodeBlockID getDatanodeBlockIDProtobuf() {
     return ContainerProtos.DatanodeBlockID.newBuilder().
-        setContainerID(containerID).setLocalID(localID).build();
+        setContainerID(containerBlockID.getContainerID())
+        .setLocalID(containerBlockID.getLocalID())
+        .setBlockCommitSequenceId(blockCommitSequenceId).build();
   }
 
   public static BlockID getFromProtobuf(
       ContainerProtos.DatanodeBlockID blockID) {
     return new BlockID(blockID.getContainerID(),
-        blockID.getLocalID());
+        blockID.getLocalID(), blockID.getBlockCommitSequenceId());
+  }
+
+  public HddsProtos.BlockID getProtobuf() {
+    return HddsProtos.BlockID.newBuilder()
+        .setContainerBlockID(containerBlockID.getProtobuf())
+        .setBlockCommitSequenceId(blockCommitSequenceId).build();
+  }
+
+  public static BlockID getFromProtobuf(HddsProtos.BlockID blockID) {
+    return new BlockID(
+        ContainerBlockID.getFromProtobuf(blockID.getContainerBlockID()),
+        blockID.getBlockCommitSequenceId());
   }
 
   @Override
@@ -80,11 +116,14 @@ public class BlockID {
       return false;
     }
     BlockID blockID = (BlockID) o;
-    return containerID == blockID.containerID && localID == blockID.localID;
+    return containerBlockID.equals(blockID.getContainerBlockID())
+        && blockCommitSequenceId == blockID.getBlockCommitSequenceId();
   }
 
   @Override
   public int hashCode() {
-    return Objects.hash(containerID, localID);
+    return Objects
+        .hash(containerBlockID.getContainerID(), containerBlockID.getLocalID(),
+            blockCommitSequenceId);
   }
 }

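BlockID is now a ContainerBlockID plus a mutable blockCommitSequenceId, and the bcsId takes part in equals/hashCode as well as in the datanode protobuf. A short round-trip sketch with arbitrary values:

  BlockID blockID = new BlockID(1L, 100L);            // bcsId defaults to 0
  blockID.setBlockCommitSequenceId(7L);
  ContainerProtos.DatanodeBlockID proto = blockID.getDatanodeBlockIDProtobuf();
  BlockID decoded = BlockID.getFromProtobuf(proto);   // containerID, localID and bcsId restored
  assert decoded.equals(blockID);                     // equality now includes the bcsId
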
http://git-wip-us.apache.org/repos/asf/hadoop/blob/7757331d/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/client/ContainerBlockID.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/client/ContainerBlockID.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/client/ContainerBlockID.java
new file mode 100644
index 0000000..82084f2
--- /dev/null
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/client/ContainerBlockID.java
@@ -0,0 +1,79 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with this
+ * work for additional information regarding copyright ownership.  The ASF
+ * licenses this file to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations under
+ * the License.
+ */
+package org.apache.hadoop.hdds.client;
+
+import org.apache.commons.lang3.builder.ToStringBuilder;
+import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
+
+import java.util.Objects;
+
+/**
+ * BlockID returned by SCM during allocation of block (containerID + localID).
+ */
+public class ContainerBlockID {
+  private long containerID;
+  private long localID;
+
+  public ContainerBlockID(long containerID, long localID) {
+    this.containerID = containerID;
+    this.localID = localID;
+  }
+
+  public long getContainerID() {
+    return containerID;
+  }
+
+  public long getLocalID() {
+    return localID;
+  }
+
+  @Override
+  public String toString() {
+    return new ToStringBuilder(this).
+        append("containerID", containerID).
+        append("localID", localID).
+        toString();
+  }
+
+  public HddsProtos.ContainerBlockID getProtobuf() {
+    return HddsProtos.ContainerBlockID.newBuilder().
+        setContainerID(containerID).setLocalID(localID).build();
+  }
+
+  public static ContainerBlockID getFromProtobuf(
+      HddsProtos.ContainerBlockID containerBlockID) {
+    return new ContainerBlockID(containerBlockID.getContainerID(),
+        containerBlockID.getLocalID());
+  }
+
+  @Override
+  public boolean equals(Object o) {
+    if (this == o) {
+      return true;
+    }
+    if (o == null || getClass() != o.getClass()) {
+      return false;
+    }
+    ContainerBlockID blockID = (ContainerBlockID) o;
+    return containerID == blockID.containerID && localID == blockID.localID;
+  }
+
+  @Override
+  public int hashCode() {
+    return Objects.hash(containerID, localID);
+  }
+}

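ContainerBlockID is the plain (containerID, localID) pair handed out by SCM, without any commit state. A small sketch of its protobuf round trip, values arbitrary:

  ContainerBlockID id = new ContainerBlockID(1L, 100L);
  HddsProtos.ContainerBlockID proto = id.getProtobuf();
  ContainerBlockID decoded = ContainerBlockID.getFromProtobuf(proto);
  assert decoded.equals(id);   // equals/hashCode cover only containerID and localID
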
http://git-wip-us.apache.org/repos/asf/hadoop/blob/7757331d/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/container/common/helpers/AllocatedBlock.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/container/common/helpers/AllocatedBlock.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/container/common/helpers/AllocatedBlock.java
index f657b74..93af56d 100644
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/container/common/helpers/AllocatedBlock.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/container/common/helpers/AllocatedBlock.java
@@ -18,8 +18,8 @@
 
 package org.apache.hadoop.hdds.scm.container.common.helpers;
 
+import org.apache.hadoop.hdds.client.ContainerBlockID;
 import org.apache.hadoop.hdds.scm.pipeline.Pipeline;
-import org.apache.hadoop.hdds.client.BlockID;
 
 /**
  * Allocated block wraps the result returned from SCM#allocateBlock which
@@ -27,7 +27,7 @@ import org.apache.hadoop.hdds.client.BlockID;
  */
 public final class AllocatedBlock {
   private Pipeline pipeline;
-  private BlockID blockID;
+  private ContainerBlockID containerBlockID;
   // Indicates whether the client should create container before writing block.
   private boolean shouldCreateContainer;
 
@@ -36,7 +36,7 @@ public final class AllocatedBlock {
    */
   public static class Builder {
     private Pipeline pipeline;
-    private BlockID blockID;
+    private ContainerBlockID containerBlockID;
     private boolean shouldCreateContainer;
 
     public Builder setPipeline(Pipeline p) {
@@ -44,8 +44,8 @@ public final class AllocatedBlock {
       return this;
     }
 
-    public Builder setBlockID(BlockID blockId) {
-      this.blockID = blockId;
+    public Builder setContainerBlockID(ContainerBlockID blockId) {
+      this.containerBlockID = blockId;
       return this;
     }
 
@@ -55,14 +55,15 @@ public final class AllocatedBlock {
     }
 
     public AllocatedBlock build() {
-      return new AllocatedBlock(pipeline, blockID, shouldCreateContainer);
+      return new AllocatedBlock(pipeline, containerBlockID,
+          shouldCreateContainer);
     }
   }
 
-  private AllocatedBlock(Pipeline pipeline, BlockID blockID,
+  private AllocatedBlock(Pipeline pipeline, ContainerBlockID containerBlockID,
       boolean shouldCreateContainer) {
     this.pipeline = pipeline;
-    this.blockID = blockID;
+    this.containerBlockID = containerBlockID;
     this.shouldCreateContainer = shouldCreateContainer;
   }
 
@@ -70,8 +71,8 @@ public final class AllocatedBlock {
     return pipeline;
   }
 
-  public BlockID getBlockID() {
-    return blockID;
+  public ContainerBlockID getBlockID() {
+    return containerBlockID;
   }
 
   public boolean getCreateContainer() {

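AllocatedBlock now carries only a ContainerBlockID, since no bcsId exists at allocation time; the OM wraps it into a BlockID (bcsId 0) when it builds key location info, as KeyManagerImpl does further down in this patch. A sketch, assuming a Pipeline instance is in scope:

  AllocatedBlock allocated = new AllocatedBlock.Builder()
      .setContainerBlockID(new ContainerBlockID(1L, 100L))
      .setPipeline(pipeline)                  // assumed to be available in the caller
      .setShouldCreateContainer(false)
      .build();
  BlockID omView = new BlockID(allocated.getBlockID());   // bcsId starts at 0
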
http://git-wip-us.apache.org/repos/asf/hadoop/blob/7757331d/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/protocolPB/ScmBlockLocationProtocolClientSideTranslatorPB.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/protocolPB/ScmBlockLocationProtocolClientSideTranslatorPB.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/protocolPB/ScmBlockLocationProtocolClientSideTranslatorPB.java
index e684ae3..f868209 100644
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/protocolPB/ScmBlockLocationProtocolClientSideTranslatorPB.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/protocolPB/ScmBlockLocationProtocolClientSideTranslatorPB.java
@@ -20,7 +20,7 @@ import com.google.common.base.Preconditions;
 import com.google.protobuf.RpcController;
 import com.google.protobuf.ServiceException;
 import org.apache.hadoop.classification.InterfaceAudience;
-import org.apache.hadoop.hdds.client.BlockID;
+import org.apache.hadoop.hdds.client.ContainerBlockID;
 import org.apache.hadoop.hdds.scm.ScmInfo;
 import org.apache.hadoop.hdds.scm.container.common.helpers.AllocatedBlock;
 import org.apache.hadoop.hdds.scm.pipeline.Pipeline;
@@ -102,7 +102,8 @@ public final class ScmBlockLocationProtocolClientSideTranslatorPB
           response.getErrorMessage() : "Allocate block failed.");
     }
     AllocatedBlock.Builder builder = new AllocatedBlock.Builder()
-        .setBlockID(BlockID.getFromProtobuf(response.getBlockID()))
+        .setContainerBlockID(
+            ContainerBlockID.getFromProtobuf(response.getContainerBlockID()))
         .setPipeline(Pipeline.getFromProtobuf(response.getPipeline()))
         .setShouldCreateContainer(response.getCreateContainer());
     return builder.build();

http://git-wip-us.apache.org/repos/asf/hadoop/blob/7757331d/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/storage/ContainerProtocolCalls.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/storage/ContainerProtocolCalls.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/storage/ContainerProtocolCalls.java
index df1467b..150b1d6 100644
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/storage/ContainerProtocolCalls.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/storage/ContainerProtocolCalls.java
@@ -81,17 +81,14 @@ public final class ContainerProtocolCalls  {
    * @param xceiverClient client to perform call
    * @param datanodeBlockID blockID to identify container
    * @param traceID container protocol call args
-   * @param blockCommitSequenceId latest commit Id of the block
    * @return container protocol get block response
    * @throws IOException if there is an I/O error while performing the call
    */
   public static GetBlockResponseProto getBlock(XceiverClientSpi xceiverClient,
-      DatanodeBlockID datanodeBlockID, String traceID,
-      long blockCommitSequenceId) throws IOException {
+      DatanodeBlockID datanodeBlockID, String traceID) throws IOException {
     GetBlockRequestProto.Builder readBlockRequest = GetBlockRequestProto
         .newBuilder()
-        .setBlockID(datanodeBlockID)
-        .setBlockCommitSequenceId(blockCommitSequenceId);
+        .setBlockID(datanodeBlockID);
     String id = xceiverClient.getPipeline().getFirstNode().getUuidString();
 
     ContainerCommandRequestProto request = ContainerCommandRequestProto

http://git-wip-us.apache.org/repos/asf/hadoop/blob/7757331d/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/common/BlockGroup.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/common/BlockGroup.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/common/BlockGroup.java
index 7a5403f..2f6030f 100644
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/common/BlockGroup.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/common/BlockGroup.java
@@ -61,7 +61,8 @@ public final class BlockGroup {
   public static BlockGroup getFromProto(KeyBlocks proto) {
     List<BlockID> blockIDs = new ArrayList<>();
     for (HddsProtos.BlockID block : proto.getBlocksList()) {
-      blockIDs.add(new BlockID(block.getContainerID(), block.getLocalID()));
+      blockIDs.add(new BlockID(block.getContainerBlockID().getContainerID(),
+          block.getContainerBlockID().getLocalID()));
     }
     return BlockGroup.newBuilder().setKeyName(proto.getKey())
         .addAllBlockIDs(blockIDs).build();

http://git-wip-us.apache.org/repos/asf/hadoop/blob/7757331d/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/container/common/helpers/BlockData.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/container/common/helpers/BlockData.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/container/common/helpers/BlockData.java
index 87cf824..3b9e57c 100644
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/container/common/helpers/BlockData.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/container/common/helpers/BlockData.java
@@ -34,7 +34,6 @@ import java.util.ArrayList;
 public class BlockData {
   private final BlockID blockID;
   private final Map<String, String> metadata;
-  private long blockCommitSequenceId;
 
   /**
    * Represent a list of chunks.
@@ -65,15 +64,14 @@ public class BlockData {
     this.blockID = blockID;
     this.metadata = new TreeMap<>();
     this.size = 0;
-    blockCommitSequenceId = 0;
   }
 
   public long getBlockCommitSequenceId() {
-    return blockCommitSequenceId;
+    return blockID.getBlockCommitSequenceId();
   }
 
   public void setBlockCommitSequenceId(long blockCommitSequenceId) {
-    this.blockCommitSequenceId = blockCommitSequenceId;
+    this.blockID.setBlockCommitSequenceId(blockCommitSequenceId);
   }
 
   /**
@@ -95,7 +93,6 @@ public class BlockData {
     if (data.hasSize()) {
       Preconditions.checkArgument(data.getSize() == blockData.getSize());
     }
-    blockData.setBlockCommitSequenceId(data.getBlockCommitSequenceId());
     return blockData;
   }
 
@@ -115,7 +112,6 @@ public class BlockData {
     }
     builder.addAllChunks(getChunks());
     builder.setSize(size);
-    builder.setBlockCommitSequenceId(blockCommitSequenceId);
     return builder.build();
   }
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/7757331d/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/protocolPB/ScmBlockLocationProtocolServerSideTranslatorPB.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/protocolPB/ScmBlockLocationProtocolServerSideTranslatorPB.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/protocolPB/ScmBlockLocationProtocolServerSideTranslatorPB.java
index 37a1309..2ecf1f4 100644
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/protocolPB/ScmBlockLocationProtocolServerSideTranslatorPB.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/protocolPB/ScmBlockLocationProtocolServerSideTranslatorPB.java
@@ -76,7 +76,7 @@ public final class ScmBlockLocationProtocolServerSideTranslatorPB
       if (allocatedBlock != null) {
         return
             AllocateScmBlockResponseProto.newBuilder()
-                .setBlockID(allocatedBlock.getBlockID().getProtobuf())
+                .setContainerBlockID(allocatedBlock.getBlockID().getProtobuf())
                 .setPipeline(allocatedBlock.getPipeline().getProtobufMessage())
                 .setCreateContainer(allocatedBlock.getCreateContainer())
                 .setErrorCode(AllocateScmBlockResponseProto.Error.success)

http://git-wip-us.apache.org/repos/asf/hadoop/blob/7757331d/hadoop-hdds/common/src/main/proto/DatanodeContainerProtocol.proto
----------------------------------------------------------------------
diff --git a/hadoop-hdds/common/src/main/proto/DatanodeContainerProtocol.proto b/hadoop-hdds/common/src/main/proto/DatanodeContainerProtocol.proto
index f9262ba..318ec09 100644
--- a/hadoop-hdds/common/src/main/proto/DatanodeContainerProtocol.proto
+++ b/hadoop-hdds/common/src/main/proto/DatanodeContainerProtocol.proto
@@ -150,6 +150,7 @@ enum Result {
 message DatanodeBlockID {
   required int64 containerID = 1;
   required int64 localID = 2;
+  optional uint64 blockCommitSequenceId = 3 [default = 0];
 }
 
 message KeyValue {
@@ -303,7 +304,6 @@ message BlockData {
   repeated KeyValue metadata = 3;
   repeated ChunkInfo chunks = 4;
   optional int64 size = 5;
-  optional uint64 blockCommitSequenceId = 6;
 }
 
 // Block Messages.
@@ -317,7 +317,6 @@ message  PutBlockResponseProto {
 
 message  GetBlockRequestProto  {
   required DatanodeBlockID blockID = 1;
-  optional uint64 blockCommitSequenceId = 2 [default = 0];
 }
 
 message  GetBlockResponseProto  {
@@ -336,7 +335,6 @@ message  GetCommittedBlockLengthRequestProto {
 message  GetCommittedBlockLengthResponseProto {
   required DatanodeBlockID blockID = 1;
   required int64 blockLength = 2;
-  optional uint64 blockCommitSequenceId = 3 [default = 0];
 }
 
 message   DeleteBlockResponseProto {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/7757331d/hadoop-hdds/common/src/main/proto/ScmBlockLocationProtocol.proto
----------------------------------------------------------------------
diff --git a/hadoop-hdds/common/src/main/proto/ScmBlockLocationProtocol.proto b/hadoop-hdds/common/src/main/proto/ScmBlockLocationProtocol.proto
index 01a0dde..dc68481 100644
--- a/hadoop-hdds/common/src/main/proto/ScmBlockLocationProtocol.proto
+++ b/hadoop-hdds/common/src/main/proto/ScmBlockLocationProtocol.proto
@@ -103,7 +103,7 @@ message AllocateScmBlockResponseProto {
     unknownFailure = 4;
   }
   required Error errorCode = 1;
-  optional BlockID blockID = 2;
+  optional ContainerBlockID containerBlockID = 2;
   optional hadoop.hdds.Pipeline pipeline = 3;
   optional bool createContainer = 4;
   optional string errorMessage = 5;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/7757331d/hadoop-hdds/common/src/main/proto/hdds.proto
----------------------------------------------------------------------
diff --git a/hadoop-hdds/common/src/main/proto/hdds.proto b/hadoop-hdds/common/src/main/proto/hdds.proto
index 9e813af..62b4833 100644
--- a/hadoop-hdds/common/src/main/proto/hdds.proto
+++ b/hadoop-hdds/common/src/main/proto/hdds.proto
@@ -186,7 +186,12 @@ enum ScmOps {
 /**
  * Block ID that uniquely identify a block by SCM.
  */
-message BlockID {
+message ContainerBlockID {
     required int64 containerID = 1;
     required int64 localID = 2;
 }
+
+message BlockID {
+    required ContainerBlockID containerBlockID = 1;
+    optional uint64 blockCommitSequenceId = 2 [default = 0];
+}

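The wire format mirrors the Java split: ContainerBlockID holds the block identity and BlockID wraps it together with the optional bcsId. A sketch of building the nested message through the generated builders:

  HddsProtos.ContainerBlockID containerBlockID = HddsProtos.ContainerBlockID.newBuilder()
      .setContainerID(1L)
      .setLocalID(100L)
      .build();
  HddsProtos.BlockID blockID = HddsProtos.BlockID.newBuilder()
      .setContainerBlockID(containerBlockID)
      .setBlockCommitSequenceId(7L)           // optional, defaults to 0
      .build();
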
http://git-wip-us.apache.org/repos/asf/hadoop/blob/7757331d/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueHandler.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueHandler.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueHandler.java
index de72d25..b0bc08b 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueHandler.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueHandler.java
@@ -483,8 +483,7 @@ public class KeyValueHandler extends Handler {
     try {
       BlockID blockID = BlockID.getFromProtobuf(
           request.getGetBlock().getBlockID());
-      responseData = blockManager.getBlock(kvContainer, blockID,
-          request.getGetBlock().getBlockCommitSequenceId());
+      responseData = blockManager.getBlock(kvContainer, blockID);
       long numBytes = responseData.getProtoBufMessage().toByteArray().length;
       metrics.incContainerBytesStats(Type.GetBlock, numBytes);
 
@@ -759,7 +758,7 @@ public class KeyValueHandler extends Handler {
           .getBlockID());
       // TODO: add bcsId as a part of getSmallFile transaction
       // by default its 0
-      BlockData responseData = blockManager.getBlock(kvContainer, blockID, 0);
+      BlockData responseData = blockManager.getBlock(kvContainer, blockID);
 
       ContainerProtos.ChunkInfo chunkInfo = null;
       ByteString dataBuf = ByteString.EMPTY;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/7757331d/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/helpers/BlockUtils.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/helpers/BlockUtils.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/helpers/BlockUtils.java
index d6cadc8..e085fb0 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/helpers/BlockUtils.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/helpers/BlockUtils.java
@@ -139,8 +139,6 @@ public final class BlockUtils {
         committedBlockLengthResponseBuilder =
         getCommittedBlockLengthResponseBuilder(blockLength,
             blockData.getBlockID());
-    committedBlockLengthResponseBuilder
-        .setBlockCommitSequenceId(blockData.getBlockCommitSequenceId());
     PutBlockResponseProto.Builder putKeyResponse =
         PutBlockResponseProto.newBuilder();
     putKeyResponse

http://git-wip-us.apache.org/repos/asf/hadoop/blob/7757331d/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/impl/BlockManagerImpl.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/impl/BlockManagerImpl.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/impl/BlockManagerImpl.java
index 3176189..e2e5700 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/impl/BlockManagerImpl.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/impl/BlockManagerImpl.java
@@ -129,13 +129,13 @@ public class BlockManagerImpl implements BlockManager {
    *
    * @param container - Container from which block need to be fetched.
    * @param blockID - BlockID of the block.
-   * @param bcsId latest commit Id of the block
    * @return Key Data.
    * @throws IOException
    */
   @Override
-  public BlockData getBlock(Container container, BlockID blockID, long bcsId)
+  public BlockData getBlock(Container container, BlockID blockID)
       throws IOException {
+    long bcsId = blockID.getBlockCommitSequenceId();
     Preconditions.checkNotNull(blockID,
         "BlockID cannot be null in GetBlock request");
     Preconditions.checkNotNull(blockID.getContainerID(),
@@ -162,7 +162,7 @@ public class BlockManagerImpl implements BlockManager {
     }
     ContainerProtos.BlockData blockData =
         ContainerProtos.BlockData.parseFrom(kData);
-    long id = blockData.getBlockCommitSequenceId();
+    long id = blockData.getBlockID().getBlockCommitSequenceId();
     if (id < bcsId) {
       throw new StorageContainerException(
           "bcsId " + bcsId + " mismatches with existing block Id "

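On the datanode side the requested bcsId now travels inside the BlockID instead of being passed as a separate argument; a bcsId of 0 places no constraint, while a bcsId ahead of what the container has committed is rejected (UNKNOWN_BCSID / BCSID_MISMATCH). A minimal sketch of a constrained read; the blockManager and container parameters are assumed to come from the caller:

  static BlockData readAtLeast(BlockManager blockManager, Container container,
      long containerID, long localID, long minBcsId) throws IOException {
    BlockID blockID = new BlockID(containerID, localID);
    blockID.setBlockCommitSequenceId(minBcsId);   // 0 means "no constraint"
    return blockManager.getBlock(container, blockID);
  }
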
http://git-wip-us.apache.org/repos/asf/hadoop/blob/7757331d/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/interfaces/BlockManager.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/interfaces/BlockManager.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/interfaces/BlockManager.java
index 8c86583..6812b0d 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/interfaces/BlockManager.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/interfaces/BlockManager.java
@@ -45,11 +45,10 @@ public interface BlockManager {
    *
    * @param container - Container from which block need to be get.
    * @param blockID - BlockID of the Block.
-   * @param bcsId latest commit id of the block
    * @return Block Data.
    * @throws IOException
    */
-  BlockData getBlock(Container container, BlockID blockID, long bcsId)
+  BlockData getBlock(Container container, BlockID blockID)
       throws IOException;
 
   /**

http://git-wip-us.apache.org/repos/asf/hadoop/blob/7757331d/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestBlockManagerImpl.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestBlockManagerImpl.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestBlockManagerImpl.java
index 65477d8..6fe6d81 100644
--- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestBlockManagerImpl.java
+++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestBlockManagerImpl.java
@@ -113,7 +113,7 @@ public class TestBlockManagerImpl {
     assertEquals(1, keyValueContainer.getContainerData().getKeyCount());
     //Get Block
     BlockData fromGetBlockData = blockManager.getBlock(keyValueContainer,
-        blockData.getBlockID(), 0);
+        blockData.getBlockID());
 
     assertEquals(blockData.getContainerID(), fromGetBlockData.getContainerID());
     assertEquals(blockData.getLocalID(), fromGetBlockData.getLocalID());
@@ -139,7 +139,7 @@ public class TestBlockManagerImpl {
       assertEquals(0,
           keyValueContainer.getContainerData().getKeyCount());
       try {
-        blockManager.getBlock(keyValueContainer, blockID, 0);
+        blockManager.getBlock(keyValueContainer, blockID);
         fail("testDeleteBlock");
       } catch (StorageContainerException ex) {
         GenericTestUtils.assertExceptionContains(
@@ -197,7 +197,7 @@ public class TestBlockManagerImpl {
           keyValueContainer.getContainerData().getKeyCount());
       try {
         //Since the block has been deleted, we should not be able to find it
-        blockManager.getBlock(keyValueContainer, blockID, 0);
+        blockManager.getBlock(keyValueContainer, blockID);
         fail("testGetNoSuchBlock failed");
       } catch (StorageContainerException ex) {
         GenericTestUtils.assertExceptionContains(

http://git-wip-us.apache.org/repos/asf/hadoop/blob/7757331d/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/block/BlockManagerImpl.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/block/BlockManagerImpl.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/block/BlockManagerImpl.java
index 681d021..c878d97 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/block/BlockManagerImpl.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/block/BlockManagerImpl.java
@@ -19,6 +19,7 @@ package org.apache.hadoop.hdds.scm.block;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.conf.StorageUnit;
 import org.apache.hadoop.hdds.HddsUtils;
+import org.apache.hadoop.hdds.client.ContainerBlockID;
 import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ScmOps;
 import org.apache.hadoop.hdds.scm.ScmConfigKeys;
 import org.apache.hadoop.hdds.scm.ScmUtils;
@@ -318,7 +319,7 @@ public class BlockManagerImpl implements EventHandler<Boolean>,
 
     AllocatedBlock.Builder abb =
         new AllocatedBlock.Builder()
-            .setBlockID(new BlockID(containerID, localID))
+            .setContainerBlockID(new ContainerBlockID(containerID, localID))
             .setPipeline(containerWithPipeline.getPipeline())
             .setShouldCreateContainer(createContainer);
     LOG.trace("New block allocated : {} Container ID: {}", localID,

http://git-wip-us.apache.org/repos/asf/hadoop/blob/7757331d/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/ChunkGroupInputStream.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/ChunkGroupInputStream.java b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/ChunkGroupInputStream.java
index 0c09fc8..5632756 100644
--- a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/ChunkGroupInputStream.java
+++ b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/ChunkGroupInputStream.java
@@ -297,8 +297,7 @@ public class ChunkGroupInputStream extends InputStream implements Seekable {
         ContainerProtos.DatanodeBlockID datanodeBlockID = blockID
             .getDatanodeBlockIDProtobuf();
         ContainerProtos.GetBlockResponseProto response = ContainerProtocolCalls
-            .getBlock(xceiverClient, datanodeBlockID, requestId,
-                omKeyLocationInfo.getBlockCommitSequenceId());
+            .getBlock(xceiverClient, datanodeBlockID, requestId);
         List<ContainerProtos.ChunkInfo> chunks =
             response.getBlockData().getChunksList();
         for (ContainerProtos.ChunkInfo chunk : chunks) {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/7757331d/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/ChunkGroupOutputStream.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/ChunkGroupOutputStream.java b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/ChunkGroupOutputStream.java
index 6d13bb2..78d69c1 100644
--- a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/ChunkGroupOutputStream.java
+++ b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/ChunkGroupOutputStream.java
@@ -128,7 +128,6 @@ public class ChunkGroupOutputStream extends OutputStream {
           new OmKeyLocationInfo.Builder().setBlockID(streamEntry.blockID)
               .setShouldCreateContainer(false)
               .setLength(streamEntry.currentPosition).setOffset(0)
-              .setBlockCommitSequenceId(streamEntry.getBlockCommitSequenceId())
               .build();
       locationInfoList.add(info);
     }
@@ -614,7 +613,7 @@ public class ChunkGroupOutputStream extends OutputStream {
 
   private static class ChunkOutputStreamEntry extends OutputStream {
     private OutputStream outputStream;
-    private final BlockID blockID;
+    private BlockID blockID;
     private final String key;
     private final XceiverClientManager xceiverClientManager;
     private final XceiverClientSpi xceiverClient;
@@ -700,6 +699,11 @@ public class ChunkGroupOutputStream extends OutputStream {
     public void close() throws IOException {
       if (this.outputStream != null) {
         this.outputStream.close();
+        // after closing the chunkOutPutStream, blockId would have been
+        // reconstructed with updated bcsId
+        if (this.outputStream instanceof ChunkOutputStream) {
+          this.blockID = ((ChunkOutputStream) outputStream).getBlockID();
+        }
       }
     }
 
@@ -711,19 +715,6 @@ public class ChunkGroupOutputStream extends OutputStream {
       throw new IOException("Invalid Output Stream for Key: " + key);
     }
 
-    long getBlockCommitSequenceId() throws IOException {
-      if (this.outputStream instanceof ChunkOutputStream) {
-        ChunkOutputStream out = (ChunkOutputStream) this.outputStream;
-        return out.getBlockCommitSequenceId();
-      } else if (outputStream == null) {
-        // For a pre allocated block for which no write has been initiated,
-        // the OutputStream will be null here.
-        // In such cases, the default blockCommitSequenceId will be 0
-        return 0;
-      }
-      throw new IOException("Invalid Output Stream for Key: " + key);
-    }
-
     public void cleanup() {
       checkStream();
       if (this.outputStream instanceof ChunkOutputStream) {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/7757331d/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmKeyLocationInfo.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmKeyLocationInfo.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmKeyLocationInfo.java
index 9d54cea..d86153d 100644
--- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmKeyLocationInfo.java
+++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmKeyLocationInfo.java
@@ -31,15 +31,13 @@ public final class OmKeyLocationInfo {
   private final long offset;
   // the version number indicating when this block was added
   private long createVersion;
-  private final long blockCommitSequenceId;
 
   private OmKeyLocationInfo(BlockID blockID, boolean shouldCreateContainer,
-      long length, long offset, long blockCommitSequenceId) {
+      long length, long offset) {
     this.blockID = blockID;
     this.shouldCreateContainer = shouldCreateContainer;
     this.length = length;
     this.offset = offset;
-    this.blockCommitSequenceId = blockCommitSequenceId;
   }
 
   public void setCreateVersion(long version) {
@@ -79,7 +77,7 @@ public final class OmKeyLocationInfo {
   }
 
   public long getBlockCommitSequenceId() {
-    return blockCommitSequenceId;
+    return blockID.getBlockCommitSequenceId();
   }
 
   /**
@@ -90,7 +88,6 @@ public final class OmKeyLocationInfo {
     private boolean shouldCreateContainer;
     private long length;
     private long offset;
-    private long blockCommitSequenceId;
 
     public Builder setBlockID(BlockID blockId) {
       this.blockID = blockId;
@@ -112,14 +109,9 @@ public final class OmKeyLocationInfo {
       return this;
     }
 
-    public Builder setBlockCommitSequenceId(long sequenceId) {
-      this.blockCommitSequenceId = sequenceId;
-      return this;
-    }
-
     public OmKeyLocationInfo build() {
       return new OmKeyLocationInfo(blockID,
-          shouldCreateContainer, length, offset, blockCommitSequenceId);
+          shouldCreateContainer, length, offset);
     }
   }
 
@@ -130,7 +122,6 @@ public final class OmKeyLocationInfo {
         .setLength(length)
         .setOffset(offset)
         .setCreateVersion(createVersion)
-        .setBlockCommitSequenceId(blockCommitSequenceId)
         .build();
   }
 
@@ -139,8 +130,7 @@ public final class OmKeyLocationInfo {
         BlockID.getFromProtobuf(keyLocation.getBlockID()),
         keyLocation.getShouldCreateContainer(),
         keyLocation.getLength(),
-        keyLocation.getOffset(),
-        keyLocation.getBlockCommitSequenceId());
+        keyLocation.getOffset());
     info.setCreateVersion(keyLocation.getCreateVersion());
     return info;
   }
@@ -152,7 +142,6 @@ public final class OmKeyLocationInfo {
         ", shouldCreateContainer=" + shouldCreateContainer +
         ", length=" + length +
         ", offset=" + offset +
-        ", blockCommitSequenceId=" + blockCommitSequenceId +
         ", createVersion=" + createVersion + '}';
   }
 }

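OmKeyLocationInfo no longer keeps a bcsId field of its own; it reads the value from the embedded BlockID, so mutating the BlockID is enough to update what the OM records for the key location. A sketch with arbitrary values:

  OmKeyLocationInfo info = new OmKeyLocationInfo.Builder()
      .setBlockID(new BlockID(1L, 100L))
      .setShouldCreateContainer(false)
      .setLength(4096L)
      .setOffset(0L)
      .build();
  info.getBlockID().setBlockCommitSequenceId(7L);
  assert info.getBlockCommitSequenceId() == 7L;   // delegates to the BlockID
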
http://git-wip-us.apache.org/repos/asf/hadoop/blob/7757331d/hadoop-ozone/common/src/main/proto/OzoneManagerProtocol.proto
----------------------------------------------------------------------
diff --git a/hadoop-ozone/common/src/main/proto/OzoneManagerProtocol.proto b/hadoop-ozone/common/src/main/proto/OzoneManagerProtocol.proto
index d36cace..8c4c409 100644
--- a/hadoop-ozone/common/src/main/proto/OzoneManagerProtocol.proto
+++ b/hadoop-ozone/common/src/main/proto/OzoneManagerProtocol.proto
@@ -252,7 +252,6 @@ message KeyLocation {
     required uint64 length = 4;
     // indicated at which version this block gets created.
     optional uint64 createVersion = 5;
-    optional uint64 blockCommitSequenceId = 6;
 }
 
 message KeyLocationList {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/7757331d/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/TestContainerReplication.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/TestContainerReplication.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/TestContainerReplication.java
index 1789e55..5153b41 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/TestContainerReplication.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/TestContainerReplication.java
@@ -151,7 +151,7 @@ public class TestContainerReplication {
         .getHandler(ContainerType.KeyValueContainer);
 
     BlockData key = handler.getBlockManager()
-        .getBlock(container, BlockID.getFromProtobuf(blockID), 0);
+        .getBlock(container, BlockID.getFromProtobuf(blockID));
 
     Assert.assertNotNull(key);
     Assert.assertEquals(1, key.getChunks().size());

http://git-wip-us.apache.org/repos/asf/hadoop/blob/7757331d/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/impl/TestCloseContainerHandler.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/impl/TestCloseContainerHandler.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/impl/TestCloseContainerHandler.java
index 360b683..0ae63e3 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/impl/TestCloseContainerHandler.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/impl/TestCloseContainerHandler.java
@@ -256,6 +256,6 @@ public class TestCloseContainerHandler {
         openContainerBlockMap.getBlockDataMap(testContainerID));
     // Make sure the key got committed
     Assert.assertNotNull(handler.getBlockManager()
-        .getBlock(container, blockID, 0));
+        .getBlock(container, blockID));
   }
 }
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hadoop/blob/7757331d/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/impl/TestContainerPersistence.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/impl/TestContainerPersistence.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/impl/TestContainerPersistence.java
index 293aac8..f81ee57 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/impl/TestContainerPersistence.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/impl/TestContainerPersistence.java
@@ -558,7 +558,7 @@ public class TestContainerPersistence {
     blockData.setChunks(chunkList);
     blockManager.putBlock(container, blockData);
     BlockData readBlockData = blockManager.
-        getBlock(container, blockData.getBlockID(), 0);
+        getBlock(container, blockData.getBlockID());
     ChunkInfo readChunk =
         ChunkInfo.getFromProtoBuf(readBlockData.getChunks().get(0));
     Assert.assertEquals(info.getChecksum(), readChunk.getChecksum());
@@ -596,25 +596,27 @@ public class TestContainerPersistence {
     blockManager.putBlock(container, blockData);
     BlockData readBlockData;
     try {
+      blockID1.setBlockCommitSequenceId(5);
       // read with bcsId higher than container bcsId
       blockManager.
-          getBlock(container, blockID1, 5);
+          getBlock(container, blockID1);
       Assert.fail("Expected exception not thrown");
     } catch (StorageContainerException sce) {
       Assert.assertTrue(sce.getResult() == UNKNOWN_BCSID);
     }
 
     try {
+      blockID1.setBlockCommitSequenceId(4);
       // read with bcsId lower than container bcsId but greater than committed
       // bcsId.
       blockManager.
-          getBlock(container, blockID1, 4);
+          getBlock(container, blockID1);
       Assert.fail("Expected exception not thrown");
     } catch (StorageContainerException sce) {
       Assert.assertTrue(sce.getResult() == BCSID_MISMATCH);
     }
     readBlockData = blockManager.
-        getBlock(container, blockData.getBlockID(), 4);
+        getBlock(container, blockData.getBlockID());
     ChunkInfo readChunk =
         ChunkInfo.getFromProtoBuf(readBlockData.getChunks().get(0));
     Assert.assertEquals(info.getChecksum(), readChunk.getChecksum());
@@ -666,7 +668,7 @@ public class TestContainerPersistence {
     blockData.setChunks(chunkProtoList);
     blockManager.putBlock(container, blockData);
     BlockData readBlockData = blockManager.
-        getBlock(container, blockData.getBlockID(), 0);
+        getBlock(container, blockData.getBlockID());
     ChunkInfo lastChunk = chunkList.get(chunkList.size() - 1);
     ChunkInfo readChunk =
         ChunkInfo.getFromProtoBuf(readBlockData.getChunks().get(readBlockData
@@ -694,7 +696,7 @@ public class TestContainerPersistence {
     blockManager.deleteBlock(container, blockID);
     exception.expect(StorageContainerException.class);
     exception.expectMessage("Unable to find the block.");
-    blockManager.getBlock(container, blockData.getBlockID(), 0);
+    blockManager.getBlock(container, blockData.getBlockID());
   }
 
   /**

http://git-wip-us.apache.org/repos/asf/hadoop/blob/7757331d/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestGetCommittedBlockLengthAndPutKey.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestGetCommittedBlockLengthAndPutKey.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestGetCommittedBlockLengthAndPutKey.java
index 834dff0..974bb97 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestGetCommittedBlockLengthAndPutKey.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestGetCommittedBlockLengthAndPutKey.java
@@ -239,16 +239,18 @@ public class TestGetCommittedBlockLengthAndPutKey {
         ContainerTestHelper
             .getPutBlockRequest(pipeline, writeChunkRequest.getWriteChunk());
     response = client.sendCommand(putKeyRequest).getPutBlock();
+    Assert.assertEquals(
+        response.getCommittedBlockLength().getBlockLength(), data.length);
+    Assert.assertTrue(response.getCommittedBlockLength().getBlockID()
+        .getBlockCommitSequenceId() > 0);
+    BlockID responseBlockID = BlockID
+        .getFromProtobuf(response.getCommittedBlockLength().getBlockID());
+    blockID
+        .setBlockCommitSequenceId(responseBlockID.getBlockCommitSequenceId());
     // make sure the block ids in the request and response are same.
     // This will also ensure that closing the container committed the block
     // on the Datanodes.
-    Assert.assertEquals(BlockID
-        .getFromProtobuf(response.getCommittedBlockLength().getBlockID()),
-        blockID);
-    Assert.assertEquals(
-        response.getCommittedBlockLength().getBlockLength(), data.length);
-    Assert.assertTrue(
-        response.getCommittedBlockLength().getBlockCommitSequenceId() > 0);
+    Assert.assertEquals(responseBlockID, blockID);
     xceiverClientManager.releaseClient(client);
   }
 }
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hadoop/blob/7757331d/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/web/client/TestKeys.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/web/client/TestKeys.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/web/client/TestKeys.java
index 08905eb..d83d9a3 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/web/client/TestKeys.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/web/client/TestKeys.java
@@ -700,7 +700,7 @@ public class TestKeys {
           KeyValueContainer container = (KeyValueContainer) cm.getContainerSet()
               .getContainer(location.getBlockID().getContainerID());
           BlockData blockInfo = keyValueHandler.getBlockManager()
-              .getBlock(container, location.getBlockID(), 0);
+              .getBlock(container, location.getBlockID());
           KeyValueContainerData containerData =
               (KeyValueContainerData) container.getContainerData();
           File dataDir = new File(containerData.getChunksPath());

http://git-wip-us.apache.org/repos/asf/hadoop/blob/7757331d/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/KeyManagerImpl.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/KeyManagerImpl.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/KeyManagerImpl.java
index 340197f..733ed85 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/KeyManagerImpl.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/KeyManagerImpl.java
@@ -17,6 +17,7 @@
 package org.apache.hadoop.ozone.om;
 
 import com.google.common.base.Preconditions;
+import org.apache.hadoop.hdds.client.BlockID;
 import org.apache.hadoop.hdds.conf.OzoneConfiguration;
 import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor;
 import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationType;
@@ -169,10 +170,9 @@ public class KeyManagerImpl implements KeyManager {
       throw ex;
     }
     OmKeyLocationInfo info = new OmKeyLocationInfo.Builder()
-        .setBlockID(allocatedBlock.getBlockID())
+        .setBlockID(new BlockID(allocatedBlock.getBlockID()))
         .setShouldCreateContainer(allocatedBlock.getCreateContainer())
         .setLength(scmBlockSize)
-        .setBlockCommitSequenceId(0)
         .setOffset(0)
         .build();
     // current version not committed, so new blocks coming now are added to
@@ -234,10 +234,9 @@ public class KeyManagerImpl implements KeyManager {
           throw ex;
         }
         OmKeyLocationInfo subKeyInfo = new OmKeyLocationInfo.Builder()
-            .setBlockID(allocatedBlock.getBlockID())
+            .setBlockID(new BlockID(allocatedBlock.getBlockID()))
             .setShouldCreateContainer(allocatedBlock.getCreateContainer())
             .setLength(allocateSize)
-            .setBlockCommitSequenceId(0)
             .setOffset(0)
             .build();
         locations.add(subKeyInfo);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/7757331d/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/ScmBlockLocationTestIngClient.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/ScmBlockLocationTestIngClient.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/ScmBlockLocationTestIngClient.java
index 259f842..2076ced 100644
--- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/ScmBlockLocationTestIngClient.java
+++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/ScmBlockLocationTestIngClient.java
@@ -21,6 +21,7 @@ package org.apache.hadoop.ozone.om;
 
 import org.apache.commons.lang3.StringUtils;
 import org.apache.hadoop.hdds.client.BlockID;
+import org.apache.hadoop.hdds.client.ContainerBlockID;
 import org.apache.hadoop.hdds.protocol.DatanodeDetails;
 import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
 import org.apache.hadoop.hdds.scm.ScmInfo;
@@ -120,7 +121,7 @@ public class ScmBlockLocationTestIngClient implements ScmBlockLocationProtocol {
     long localID = Time.monotonicNow();
     AllocatedBlock.Builder abb =
         new AllocatedBlock.Builder()
-            .setBlockID(new BlockID(containerID, localID))
+            .setContainerBlockID(new ContainerBlockID(containerID, localID))
             .setPipeline(pipeline)
             .setShouldCreateContainer(false);
     return abb.build();




[07/50] [abbrv] hadoop git commit: HDDS-682. Unified o3 address parsing for ozone sh. Contributed by Elek, Marton.

Posted by su...@apache.org.
HDDS-682. Unified o3 address parsing for ozone sh. Contributed by Elek, Marton.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/38a65e3b
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/38a65e3b
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/38a65e3b

Branch: refs/heads/HDFS-12943
Commit: 38a65e3b7c2fe26cbc9aa4a514ddcfbb4639cd66
Parents: a4b9b7c
Author: Hanisha Koneru <ha...@apache.org>
Authored: Thu Oct 25 17:04:16 2018 -0700
Committer: Hanisha Koneru <ha...@apache.org>
Committed: Thu Oct 25 17:04:16 2018 -0700

----------------------------------------------------------------------
 .../org/apache/hadoop/hdds/cli/GenericCli.java  |   5 +-
 .../hadoop/hdds/cli/GenericParentCommand.java   |   4 +
 .../hadoop/ozone/ozShell/TestOzoneShell.java    |  77 +++---
 .../hadoop/ozone/web/ozShell/Handler.java       | 139 +---------
 .../hadoop/ozone/web/ozShell/OzoneAddress.java  | 251 +++++++++++++++++++
 .../web/ozShell/bucket/BucketCommands.java      |   6 +
 .../web/ozShell/bucket/CreateBucketHandler.java |  27 +-
 .../web/ozShell/bucket/DeleteBucketHandler.java |  20 +-
 .../web/ozShell/bucket/InfoBucketHandler.java   |  28 +--
 .../web/ozShell/bucket/ListBucketHandler.java   |  26 +-
 .../web/ozShell/bucket/S3BucketMapping.java     |  55 +---
 .../web/ozShell/bucket/UpdateBucketHandler.java |  20 +-
 .../web/ozShell/keys/DeleteKeyHandler.java      |  23 +-
 .../ozone/web/ozShell/keys/GetKeyHandler.java   |  18 +-
 .../ozone/web/ozShell/keys/InfoKeyHandler.java  |  28 +--
 .../ozone/web/ozShell/keys/KeyCommands.java     |   6 +
 .../ozone/web/ozShell/keys/ListKeyHandler.java  |  28 +--
 .../ozone/web/ozShell/keys/PutKeyHandler.java   |  21 +-
 .../web/ozShell/volume/CreateVolumeHandler.java |  28 +--
 .../web/ozShell/volume/DeleteVolumeHandler.java |   8 +-
 .../web/ozShell/volume/InfoVolumeHandler.java   |   8 +-
 .../web/ozShell/volume/ListVolumeHandler.java   |  27 +-
 .../web/ozShell/volume/UpdateVolumeHandler.java |   8 +-
 .../web/ozShell/volume/VolumeCommands.java      |   6 +
 .../ozone/web/ozShell/TestOzoneAddress.java     | 100 ++++++++
 .../hadoop/ozone/web/ozShell/package-info.java  |  21 ++
 26 files changed, 574 insertions(+), 414 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/38a65e3b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/cli/GenericCli.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/cli/GenericCli.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/cli/GenericCli.java
index e0c8150..c7ccb66 100644
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/cli/GenericCli.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/cli/GenericCli.java
@@ -62,7 +62,9 @@ public class GenericCli implements Callable<Void>, GenericParentCommand {
   }
 
   private void printError(Throwable error) {
-    if (verbose) {
+    //message could be null in case of NPE. This is unexpected so we can
+    //print out the stack trace.
+    if (verbose || error.getMessage() == null) {
       error.printStackTrace(System.err);
     } else {
       System.err.println(error.getMessage().split("\n")[0]);
@@ -77,6 +79,7 @@ public class GenericCli implements Callable<Void>, GenericParentCommand {
     throw new MissingSubcommandException(cmd.getUsageMessage());
   }
 
+  @Override
   public OzoneConfiguration createOzoneConfiguration() {
     OzoneConfiguration ozoneConf = new OzoneConfiguration();
     if (configurationOverrides != null) {

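The change guards against a NullPointerException inside the error printer itself: a throwable whose message is null (typically a bare NPE) now always gets a full stack trace, even without --verbose. The patched logic, restated as a standalone sketch:

  static void printError(Throwable error, boolean verbose) {
    if (verbose || error.getMessage() == null) {
      error.printStackTrace(System.err);           // nothing to print, show the trace
    } else {
      System.err.println(error.getMessage().split("\n")[0]);
    }
  }
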
http://git-wip-us.apache.org/repos/asf/hadoop/blob/38a65e3b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/cli/GenericParentCommand.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/cli/GenericParentCommand.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/cli/GenericParentCommand.java
index a1d2171..6abad3e 100644
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/cli/GenericParentCommand.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/cli/GenericParentCommand.java
@@ -16,10 +16,14 @@
  */
 package org.apache.hadoop.hdds.cli;
 
+import org.apache.hadoop.hdds.conf.OzoneConfiguration;
+
 /**
  * Interface to access the higher level parameters.
  */
 public interface GenericParentCommand {
 
   boolean isVerbose();
+
+  OzoneConfiguration createOzoneConfiguration();
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/38a65e3b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/ozShell/TestOzoneShell.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/ozShell/TestOzoneShell.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/ozShell/TestOzoneShell.java
index a7eecc0..1900024 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/ozShell/TestOzoneShell.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/ozShell/TestOzoneShell.java
@@ -17,7 +17,6 @@
  */
 package org.apache.hadoop.ozone.ozShell;
 
-import com.google.common.base.Strings;
 import java.io.ByteArrayOutputStream;
 import java.io.File;
 import java.io.FileInputStream;
@@ -33,7 +32,6 @@ import java.util.Random;
 import java.util.UUID;
 import java.util.stream.Collectors;
 
-import org.apache.commons.lang3.RandomStringUtils;
 import org.apache.hadoop.fs.FileUtil;
 import org.apache.hadoop.hdds.cli.MissingSubcommandException;
 import org.apache.hadoop.hdds.client.ReplicationFactor;
@@ -63,9 +61,18 @@ import org.apache.hadoop.ozone.web.response.VolumeInfo;
 import org.apache.hadoop.ozone.web.utils.JsonUtils;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.test.GenericTestUtils;
+
+import com.google.common.base.Strings;
+import org.apache.commons.lang3.RandomStringUtils;
+import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_REPLICATION;
+import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_ADDRESS_KEY;
 import org.junit.After;
 import org.junit.AfterClass;
 import org.junit.Assert;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertNotNull;
+import static org.junit.Assert.assertTrue;
+import static org.junit.Assert.fail;
 import org.junit.Before;
 import org.junit.BeforeClass;
 import org.junit.Rule;
@@ -82,12 +89,6 @@ import picocli.CommandLine.ParameterException;
 import picocli.CommandLine.ParseResult;
 import picocli.CommandLine.RunLast;
 
-import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_REPLICATION;
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertNotNull;
-import static org.junit.Assert.assertTrue;
-import static org.junit.Assert.fail;
-
 /**
  * This test class specified for testing Ozone shell command.
  */
@@ -209,8 +210,7 @@ public class TestOzoneShell {
     testCreateVolume(volumeName, "");
     volumeName = "volume" + RandomStringUtils.randomNumeric(5);
     testCreateVolume("/////" + volumeName, "");
-    testCreateVolume("/////", "Volume name is required " +
-        "to create a volume");
+    testCreateVolume("/////", "Volume name is required");
     testCreateVolume("/////vol/123",
         "Invalid volume name. Delimiters (/) not allowed in volume name");
   }
@@ -1126,36 +1126,49 @@ public class TestOzoneShell {
 
   @Test
   public void testS3BucketMapping() throws  IOException {
+
+    List<ServiceInfo> services =
+        cluster.getOzoneManager().getServiceList();
+
+    String omHostName = services.stream().filter(
+        a -> a.getNodeType().equals(HddsProtos.NodeType.OM))
+        .collect(Collectors.toList()).get(0).getHostname();
+
+    String omPort = cluster.getOzoneManager().getRpcPort();
+    String setOmAddress =
+        "--set=" + OZONE_OM_ADDRESS_KEY + "=" + omHostName + ":" + omPort;
+
     String s3Bucket = "bucket1";
     String commandOutput;
     createS3Bucket("ozone", s3Bucket);
+
+    //WHEN
+    String[] args =
+        new String[] {setOmAddress, "bucket",
+            "path", s3Bucket};
+    execute(shell, args);
+
+    //THEN
+    commandOutput = out.toString();
     String volumeName = client.getOzoneVolumeName(s3Bucket);
-    String[] args = new String[] {"bucket", "path", url + "/" + s3Bucket};
-    if (url.startsWith("o3")) {
-      execute(shell, args);
-      commandOutput = out.toString();
-      assertTrue(commandOutput.contains("Volume name for S3Bucket is : " +
-          volumeName));
-      assertTrue(commandOutput.contains(OzoneConsts.OZONE_URI_SCHEME +"://" +
-          s3Bucket + "." + volumeName));
-      out.reset();
-      //Trying to get map for an unknown bucket
-      args = new String[] {"bucket", "path", url + "/" + "unknownbucket"};
-      executeWithError(shell, args, "S3_BUCKET_NOT_FOUND");
-    } else {
-      executeWithError(shell, args, "Ozone REST protocol does not support " +
-          "this operation");
-    }
+    assertTrue(commandOutput.contains("Volume name for S3Bucket is : " +
+        volumeName));
+    assertTrue(commandOutput.contains(OzoneConsts.OZONE_URI_SCHEME + "://" +
+        s3Bucket + "." + volumeName));
+    out.reset();
+
+    //Trying to get map for an unknown bucket
+    args = new String[] {setOmAddress, "bucket", "path",
+        "unknownbucket"};
+    executeWithError(shell, args, "S3_BUCKET_NOT_FOUND");
 
     // No bucket name
-    args = new String[] {"bucket", "path", url};
-    executeWithError(shell, args, "S3Bucket name is required");
+    args = new String[] {setOmAddress, "bucket", "path"};
+    executeWithError(shell, args, "Missing required parameter");
 
     // Invalid bucket name
-    args = new String[] {"bucket", "path", url + "/" + s3Bucket +
-          "/multipleslash"};
-    executeWithError(shell, args, "Invalid S3Bucket name. Delimiters (/) not" +
-        " allowed");
+    args = new String[] {setOmAddress, "bucket", "path", "/asd/multipleslash"};
+    executeWithError(shell, args, "S3_BUCKET_NOT_FOUND");
   }
 
   private void createS3Bucket(String userName, String s3Bucket) {

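The test above now points the shell at the OM by passing a --set=key=value override instead of embedding the address in the URI. A simplified sketch of how such overrides can be applied to a configuration is shown below; the ConfigOverrides helper and the om-host:9862 address are illustrative, not part of the patch:

import java.util.LinkedHashMap;
import java.util.Map;

import org.apache.hadoop.hdds.conf.OzoneConfiguration;

/** Hypothetical helper mirroring the --set=key=value overrides used above. */
public final class ConfigOverrides {

  private ConfigOverrides() {
  }

  /** Split "key=value" pairs and apply them to a fresh configuration. */
  public static OzoneConfiguration apply(String... overrides) {
    Map<String, String> parsed = new LinkedHashMap<>();
    for (String override : overrides) {
      int eq = override.indexOf('=');
      if (eq <= 0) {
        throw new IllegalArgumentException(
            "Expected key=value, got: " + override);
      }
      parsed.put(override.substring(0, eq), override.substring(eq + 1));
    }
    OzoneConfiguration conf = new OzoneConfiguration();
    parsed.forEach(conf::set);
    return conf;
  }

  public static void main(String[] args) {
    // Mirrors the test above: point the shell at a specific OM address.
    OzoneConfiguration conf = apply("ozone.om.address=om-host:9862");
    System.out.println(conf.get("ozone.om.address"));
  }
}
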
http://git-wip-us.apache.org/repos/asf/hadoop/blob/38a65e3b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/Handler.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/Handler.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/Handler.java
index be579b3..6405eef 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/Handler.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/Handler.java
@@ -18,25 +18,12 @@
 
 package org.apache.hadoop.ozone.web.ozShell;
 
-import java.io.IOException;
-import java.net.URI;
-import java.net.URISyntaxException;
-import java.nio.file.Path;
-import java.nio.file.Paths;
 import java.util.concurrent.Callable;
 
-import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hdds.cli.GenericParentCommand;
 import org.apache.hadoop.hdds.cli.HddsVersionProvider;
 import org.apache.hadoop.hdds.conf.OzoneConfiguration;
-import org.apache.hadoop.ozone.client.OzoneClient;
-import org.apache.hadoop.ozone.client.OzoneClientException;
-import org.apache.hadoop.ozone.client.OzoneClientFactory;
-import org.apache.hadoop.ozone.client.rest.OzoneException;
 
-import static org.apache.hadoop.ozone.OzoneConsts.OZONE_HTTP_SCHEME;
-import static org.apache.hadoop.ozone.OzoneConsts.OZONE_RPC_SCHEME;
-import org.apache.http.client.utils.URIBuilder;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 import picocli.CommandLine.Command;
@@ -51,8 +38,6 @@ public abstract class Handler implements Callable<Void> {
 
   protected static final Logger LOG = LoggerFactory.getLogger(Handler.class);
 
-  protected OzoneClient client;
-
   @ParentCommand
   private GenericParentCommand parent;
 
@@ -61,128 +46,12 @@ public abstract class Handler implements Callable<Void> {
     throw new UnsupportedOperationException();
   }
 
-  /**
-   * verifies user provided URI.
-   *
-   * @param uri - UriString
-   * @return URI
-   * @throws URISyntaxException
-   * @throws OzoneException
-   */
-  protected URI verifyURI(String uri)
-      throws URISyntaxException, OzoneException, IOException {
-    if ((uri == null) || uri.isEmpty()) {
-      throw new OzoneClientException(
-          "Ozone URI is needed to execute this command.");
-    }
-    URIBuilder ozoneURI = new URIBuilder(stringToUri(uri));
-    if (ozoneURI.getPort() == 0) {
-      ozoneURI.setPort(Shell.DEFAULT_OZONE_PORT);
-    }
-
-    Configuration conf = new OzoneConfiguration();
-    String scheme = ozoneURI.getScheme();
-    if (ozoneURI.getScheme() == null || scheme.isEmpty()) {
-      scheme = OZONE_RPC_SCHEME;
-    }
-    if (scheme.equals(OZONE_HTTP_SCHEME)) {
-      if (ozoneURI.getHost() != null) {
-        if (ozoneURI.getPort() == -1) {
-          client = OzoneClientFactory.getRestClient(ozoneURI.getHost());
-        } else {
-          client = OzoneClientFactory
-              .getRestClient(ozoneURI.getHost(), ozoneURI.getPort(), conf);
-        }
-      } else {
-        client = OzoneClientFactory.getRestClient(conf);
-      }
-    } else if (scheme.equals(OZONE_RPC_SCHEME)) {
-      if (ozoneURI.getHost() != null) {
-        if (ozoneURI.getPort() == -1) {
-          client = OzoneClientFactory.getRpcClient(ozoneURI.getHost());
-        } else {
-          client = OzoneClientFactory
-              .getRpcClient(ozoneURI.getHost(), ozoneURI.getPort(), conf);
-        }
-      } else {
-        client = OzoneClientFactory.getRpcClient(conf);
-      }
-    } else {
-      throw new OzoneClientException("Invalid URI: " + ozoneURI);
-    }
-    return ozoneURI.build();
-  }
-
-  /** Construct a URI from a String with unescaped special characters
-   *  that have non-standard semantics. e.g. /, ?, #. A custom parsing
-   *  is needed to prevent misbehavior.
-   *  @param pathString The input path in string form
-   *  @return URI
-   */
-  private static URI stringToUri(String pathString) throws IOException {
-    // parse uri components
-    String scheme = null;
-    String authority = null;
-    int start = 0;
-
-    // parse uri scheme, if any
-    int colon = pathString.indexOf(':');
-    int slash = pathString.indexOf('/');
-    if (colon > 0 && (slash == colon +1)) {
-      // has a non zero-length scheme
-      scheme = pathString.substring(0, colon);
-      start = colon + 1;
-    }
-
-    // parse uri authority, if any
-    if (pathString.startsWith("//", start) &&
-        (pathString.length()-start > 2)) {
-      start += 2;
-      int nextSlash = pathString.indexOf('/', start);
-      int authEnd = nextSlash > 0 ? nextSlash : pathString.length();
-      authority = pathString.substring(start, authEnd);
-      start = authEnd;
-    }
-    // uri path is the rest of the string. ? or # are not interpreted,
-    // but any occurrence of them will be quoted by the URI ctor.
-    String path = pathString.substring(start, pathString.length());
-
-    // Construct the URI
-    try {
-      return new URI(scheme, authority, path, null, null);
-    } catch (URISyntaxException e) {
-      throw new IllegalArgumentException(e);
-    }
-  }
-
-  /**
-   *
-   * @param uri
-   * @return volumeName
-   * @throws Exception
-   * @throws OzoneClientException when uri is null or invalid volume name
-   */
-  protected String parseVolumeName(String uri) throws Exception{
-    URI ozoneURI = verifyURI(uri);
-    Path path = Paths.get(ozoneURI.getPath());
-    int pathNameCount = path.getNameCount();
-    if (pathNameCount != 1) {
-      String errorMessage;
-      if (pathNameCount < 1) {
-        errorMessage = "Volume name is required to perform volume " +
-            "operations like info, update, create and delete. ";
-      } else {
-        errorMessage = "Invalid volume name. Delimiters (/) not allowed in " +
-            "volume name";
-      }
-      throw new OzoneClientException(errorMessage);
-    }
-
-    return ozoneURI.getPath().replaceAll("^/+", "");
-  }
-
   public boolean isVerbose() {
     return parent.isVerbose();
   }
 
+  public OzoneConfiguration createOzoneConfiguration() {
+    return parent.createOzoneConfiguration();
+  }
+
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/38a65e3b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/OzoneAddress.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/OzoneAddress.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/OzoneAddress.java
new file mode 100644
index 0000000..63b7576
--- /dev/null
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/OzoneAddress.java
@@ -0,0 +1,251 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.ozone.web.ozShell;
+
+import java.io.IOException;
+import java.net.URI;
+import java.net.URISyntaxException;
+
+import org.apache.hadoop.hdds.conf.OzoneConfiguration;
+import org.apache.hadoop.ozone.client.OzoneClient;
+import org.apache.hadoop.ozone.client.OzoneClientException;
+import org.apache.hadoop.ozone.client.OzoneClientFactory;
+import org.apache.hadoop.ozone.client.rest.OzoneException;
+
+import static org.apache.hadoop.ozone.OzoneConsts.OZONE_HTTP_SCHEME;
+import static org.apache.hadoop.ozone.OzoneConsts.OZONE_RPC_SCHEME;
+import org.apache.http.client.utils.URIBuilder;
+
+/**
+ * Address of an ozone object for ozone shell.
+ */
+public class OzoneAddress {
+
+  private static final String EMPTY_HOST = "___DEFAULT___";
+
+  private URI ozoneURI;
+
+  private String volumeName = "";
+
+  private String bucketName = "";
+
+  private String keyName = "";
+
+  public OzoneAddress() throws OzoneException {
+    this("o3:///");
+  }
+
+  public OzoneAddress(String address)
+      throws OzoneException {
+    if (address == null || address.equals("")) {
+      address = OZONE_RPC_SCHEME + ":///";
+    }
+    this.ozoneURI = parseURI(address);
+    String path = this.ozoneURI.getPath();
+
+    path = path.replaceAll("^/+", "");
+
+    int sep1 = path.indexOf('/');
+    int sep2 = path.indexOf('/', sep1 + 1);
+
+    if (sep1 == -1) {
+      volumeName = path;
+    } else {
+      //we have vol/bucket
+      volumeName = path.substring(0, sep1);
+      if (sep2 == -1) {
+        bucketName = path.substring(sep1 + 1);
+      } else {
+        //we have vol/bucket/key/.../...
+        bucketName = path.substring(sep1 + 1, sep2);
+        keyName = path.substring(sep2 + 1);
+      }
+    }
+
+  }
+
+  public OzoneClient createClient(OzoneConfiguration conf)
+      throws IOException, OzoneClientException {
+    OzoneClient client;
+    String scheme = ozoneURI.getScheme();
+    if (ozoneURI.getScheme() == null || scheme.isEmpty()) {
+      scheme = OZONE_RPC_SCHEME;
+    }
+    if (scheme.equals(OZONE_HTTP_SCHEME)) {
+      if (ozoneURI.getHost() != null && !ozoneURI.getAuthority()
+          .equals(EMPTY_HOST)) {
+        if (ozoneURI.getPort() == -1) {
+          client = OzoneClientFactory.getRestClient(ozoneURI.getHost());
+        } else {
+          client = OzoneClientFactory
+              .getRestClient(ozoneURI.getHost(), ozoneURI.getPort(), conf);
+        }
+      } else {
+        client = OzoneClientFactory.getRestClient(conf);
+      }
+    } else if (scheme.equals(OZONE_RPC_SCHEME)) {
+      if (ozoneURI.getHost() != null && !ozoneURI.getAuthority()
+          .equals(EMPTY_HOST)) {
+        if (ozoneURI.getPort() == -1) {
+          client = OzoneClientFactory.getRpcClient(ozoneURI.getHost());
+        } else {
+          client = OzoneClientFactory
+              .getRpcClient(ozoneURI.getHost(), ozoneURI.getPort(), conf);
+        }
+      } else {
+        client = OzoneClientFactory.getRpcClient(conf);
+      }
+    } else {
+      throw new OzoneClientException(
+          "Invalid URI, unknown protocol scheme: " + scheme);
+    }
+    return client;
+  }
+
+  /**
+   * Verifies the user-provided URI.
+   *
+   * @param uri - UriString
+   * @return URI
+   * @throws URISyntaxException
+   * @throws OzoneException
+   */
+  protected URI parseURI(String uri)
+      throws OzoneException {
+    if ((uri == null) || uri.isEmpty()) {
+      throw new OzoneClientException(
+          "Ozone URI is needed to execute this command.");
+    }
+    URIBuilder uriBuilder = new URIBuilder(stringToUri(uri));
+    if (uriBuilder.getPort() == 0) {
+      uriBuilder.setPort(Shell.DEFAULT_OZONE_PORT);
+    }
+
+    try {
+      return uriBuilder.build();
+    } catch (URISyntaxException e) {
+      throw new OzoneClientException("Invalid URI: " + ozoneURI, e);
+    }
+  }
+
+  /**
+   * Construct a URI from a String with unescaped special characters
+   * that have non-standard semantics, e.g. /, ?, #. Custom parsing
+   * is needed to prevent misbehavior.
+   *
+   * @param pathString The input path in string form
+   * @return URI
+   */
+  private static URI stringToUri(String pathString) {
+    // parse uri components
+    String scheme = null;
+    String authority = null;
+    int start = 0;
+
+    // parse uri scheme, if any
+    int colon = pathString.indexOf(':');
+    int slash = pathString.indexOf('/');
+    if (colon > 0 && (slash == colon + 1)) {
+      // has a non zero-length scheme
+      scheme = pathString.substring(0, colon);
+      start = colon + 1;
+    }
+
+    // parse uri authority, if any
+    if (pathString.startsWith("//", start) &&
+        (pathString.length() - start > 2)) {
+      start += 2;
+      int nextSlash = pathString.indexOf('/', start);
+      int authEnd = nextSlash > 0 ? nextSlash : pathString.length();
+      authority = pathString.substring(start, authEnd);
+      start = authEnd;
+    }
+    // uri path is the rest of the string. ? or # are not interpreted,
+    // but any occurrence of them will be quoted by the URI ctor.
+    String path = pathString.substring(start, pathString.length());
+
+    if (authority == null || authority.equals("")) {
+      authority = EMPTY_HOST;
+    }
+    // Construct the URI
+    try {
+      return new URI(scheme, authority, path, null, null);
+    } catch (URISyntaxException e) {
+      throw new IllegalArgumentException(e);
+    }
+  }
+
+  public String getVolumeName() {
+    return volumeName;
+  }
+
+  public String getBucketName() {
+    return bucketName;
+  }
+
+  public String getKeyName() {
+    return keyName;
+  }
+
+  public void ensureBucketAddress() throws OzoneClientException {
+    if (keyName.length() > 0) {
+      throw new OzoneClientException(
+          "Invalid bucket name. Delimiters (/) not allowed in bucket name");
+    } else if (volumeName.length() == 0) {
+      throw new OzoneClientException(
+          "Volume name is required.");
+    } else if (bucketName.length() == 0) {
+      throw new OzoneClientException(
+          "Bucket name is required.");
+    }
+  }
+
+  public void ensureKeyAddress() throws OzoneClientException {
+    if (keyName.length() == 0) {
+      throw new OzoneClientException(
+          "Key name is missing.");
+    } else if (volumeName.length() == 0) {
+      throw new OzoneClientException(
+          "Volume name is missing");
+    } else if (bucketName.length() == 0) {
+      throw new OzoneClientException(
+          "Bucket name is missing");
+    }
+  }
+
+  public void ensureVolumeAddress() throws OzoneClientException {
+    if (keyName.length() != 0) {
+      throw new OzoneClientException(
+          "Invalid volume name. Delimiters (/) not allowed in volume name");
+    } else if (volumeName.length() == 0) {
+      throw new OzoneClientException(
+          "Volume name is required");
+    } else if (bucketName.length() != 0) {
+      throw new OzoneClientException(
+          "Invalid volume name. Delimiters (/) not allowed in volume name");
+    }
+  }
+
+  public void ensureRootAddress() throws OzoneClientException {
+    if (keyName.length() != 0 || bucketName.length() != 0
+        || volumeName.length() != 0) {
+      throw new OzoneClientException(
+          "Invalid URI. Volume/bucket/key elements should not been used");
+    }
+  }
+}

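A short usage sketch of the new OzoneAddress class added above, showing how a shell URI decomposes into volume, bucket and key; the OzoneAddressDemo class and the example address are illustrative only:

import org.apache.hadoop.ozone.web.ozShell.OzoneAddress;

/** Illustrative usage of the new address parser (not part of the patch). */
public final class OzoneAddressDemo {

  public static void main(String[] args) throws Exception {
    // Format: o3://<host>/<volume>/<bucket>/<key...>; host and port are optional.
    OzoneAddress address =
        new OzoneAddress("o3://om-host/vol1/bucket1/dir/key1");

    System.out.println("volume = " + address.getVolumeName()); // vol1
    System.out.println("bucket = " + address.getBucketName()); // bucket1
    System.out.println("key    = " + address.getKeyName());    // dir/key1

    // Validation depends on the command; a key command needs all three parts.
    address.ensureKeyAddress();
  }
}
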
http://git-wip-us.apache.org/repos/asf/hadoop/blob/38a65e3b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/bucket/BucketCommands.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/bucket/BucketCommands.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/bucket/BucketCommands.java
index 617643c..9e8bbca 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/bucket/BucketCommands.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/bucket/BucketCommands.java
@@ -23,6 +23,7 @@ import java.util.concurrent.Callable;
 import org.apache.hadoop.hdds.cli.GenericParentCommand;
 import org.apache.hadoop.hdds.cli.HddsVersionProvider;
 import org.apache.hadoop.hdds.cli.MissingSubcommandException;
+import org.apache.hadoop.hdds.conf.OzoneConfiguration;
 import org.apache.hadoop.ozone.web.ozShell.Shell;
 
 import picocli.CommandLine.Command;
@@ -58,4 +59,9 @@ public class BucketCommands implements GenericParentCommand, Callable<Void> {
   public boolean isVerbose() {
     return shell.isVerbose();
   }
+
+  @Override
+  public OzoneConfiguration createOzoneConfiguration() {
+    return shell.createOzoneConfiguration();
+  }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/38a65e3b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/bucket/CreateBucketHandler.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/bucket/CreateBucketHandler.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/bucket/CreateBucketHandler.java
index 49bbc18..88b5176 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/bucket/CreateBucketHandler.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/bucket/CreateBucketHandler.java
@@ -17,15 +17,12 @@
  */
 package org.apache.hadoop.ozone.web.ozShell.bucket;
 
-import java.net.URI;
-import java.nio.file.Path;
-import java.nio.file.Paths;
-
 import org.apache.hadoop.ozone.client.OzoneBucket;
-import org.apache.hadoop.ozone.client.OzoneClientException;
+import org.apache.hadoop.ozone.client.OzoneClient;
 import org.apache.hadoop.ozone.client.OzoneClientUtils;
 import org.apache.hadoop.ozone.client.OzoneVolume;
 import org.apache.hadoop.ozone.web.ozShell.Handler;
+import org.apache.hadoop.ozone.web.ozShell.OzoneAddress;
 import org.apache.hadoop.ozone.web.ozShell.Shell;
 import org.apache.hadoop.ozone.web.utils.JsonUtils;
 
@@ -48,22 +45,12 @@ public class CreateBucketHandler extends Handler {
   @Override
   public Void call() throws Exception {
 
-    URI ozoneURI = verifyURI(uri);
-    Path path = Paths.get(ozoneURI.getPath());
-    int pathNameCount = path.getNameCount();
-    if (pathNameCount != 2) {
-      String errorMessage;
-      if (pathNameCount < 2) {
-        errorMessage = "volume and bucket name required in createBucket";
-      } else {
-        errorMessage = "Invalid bucket name. Delimiters (/) not allowed in " +
-            "bucket name";
-      }
-      throw new OzoneClientException(errorMessage);
-    }
+    OzoneAddress address = new OzoneAddress(uri);
+    address.ensureBucketAddress();
+    OzoneClient client = address.createClient(createOzoneConfiguration());
 
-    String volumeName = path.getName(0).toString();
-    String bucketName = path.getName(1).toString();
+    String volumeName = address.getVolumeName();
+    String bucketName = address.getBucketName();
 
     if (isVerbose()) {
       System.out.printf("Volume Name : %s%n", volumeName);

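The handlers from here on all follow the same three-step pattern visible in the hunk above: parse and validate the address, build a client from the effective configuration, then call the object store API. A condensed, hypothetical sketch of that flow (the CreateBucketFlow helper is not part of the patch):

import org.apache.hadoop.hdds.conf.OzoneConfiguration;
import org.apache.hadoop.ozone.client.OzoneClient;
import org.apache.hadoop.ozone.client.OzoneVolume;
import org.apache.hadoop.ozone.web.ozShell.OzoneAddress;

/** Hypothetical condensed version of the handler flow used above. */
public final class CreateBucketFlow {

  private CreateBucketFlow() {
  }

  public static void createBucket(String uri, OzoneConfiguration conf)
      throws Exception {
    // 1. Parse and validate the address (volume + bucket, no key).
    OzoneAddress address = new OzoneAddress(uri);
    address.ensureBucketAddress();

    // 2. Build a client from the effective configuration.
    OzoneClient client = address.createClient(conf);

    // 3. Perform the actual operation through the object store API.
    OzoneVolume volume =
        client.getObjectStore().getVolume(address.getVolumeName());
    volume.createBucket(address.getBucketName());
  }
}
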
http://git-wip-us.apache.org/repos/asf/hadoop/blob/38a65e3b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/bucket/DeleteBucketHandler.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/bucket/DeleteBucketHandler.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/bucket/DeleteBucketHandler.java
index 79a0c8e..6ed6ddf 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/bucket/DeleteBucketHandler.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/bucket/DeleteBucketHandler.java
@@ -18,13 +18,10 @@
 
 package org.apache.hadoop.ozone.web.ozShell.bucket;
 
-import java.net.URI;
-import java.nio.file.Path;
-import java.nio.file.Paths;
-
-import org.apache.hadoop.ozone.client.OzoneClientException;
+import org.apache.hadoop.ozone.client.OzoneClient;
 import org.apache.hadoop.ozone.client.OzoneVolume;
 import org.apache.hadoop.ozone.web.ozShell.Handler;
+import org.apache.hadoop.ozone.web.ozShell.OzoneAddress;
 import org.apache.hadoop.ozone.web.ozShell.Shell;
 
 import picocli.CommandLine.Command;
@@ -46,15 +43,12 @@ public class DeleteBucketHandler extends Handler {
   @Override
   public Void call() throws Exception {
 
-    URI ozoneURI = verifyURI(uri);
-    Path path = Paths.get(ozoneURI.getPath());
-    if (path.getNameCount() < 2) {
-      throw new OzoneClientException(
-          "volume and bucket name required in delete Bucket");
-    }
+    OzoneAddress address = new OzoneAddress(uri);
+    address.ensureBucketAddress();
+    OzoneClient client = address.createClient(createOzoneConfiguration());
 
-    String volumeName = path.getName(0).toString();
-    String bucketName = path.getName(1).toString();
+    String volumeName = address.getVolumeName();
+    String bucketName = address.getBucketName();
 
     if (isVerbose()) {
       System.out.printf("Volume Name : %s%n", volumeName);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/38a65e3b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/bucket/InfoBucketHandler.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/bucket/InfoBucketHandler.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/bucket/InfoBucketHandler.java
index 18f8a0b..c0f35f2 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/bucket/InfoBucketHandler.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/bucket/InfoBucketHandler.java
@@ -17,15 +17,12 @@
  */
 package org.apache.hadoop.ozone.web.ozShell.bucket;
 
-import java.net.URI;
-import java.nio.file.Path;
-import java.nio.file.Paths;
-
 import org.apache.hadoop.ozone.client.OzoneBucket;
-import org.apache.hadoop.ozone.client.OzoneClientException;
+import org.apache.hadoop.ozone.client.OzoneClient;
 import org.apache.hadoop.ozone.client.OzoneClientUtils;
 import org.apache.hadoop.ozone.client.OzoneVolume;
 import org.apache.hadoop.ozone.web.ozShell.Handler;
+import org.apache.hadoop.ozone.web.ozShell.OzoneAddress;
 import org.apache.hadoop.ozone.web.ozShell.Shell;
 import org.apache.hadoop.ozone.web.utils.JsonUtils;
 
@@ -47,23 +44,12 @@ public class InfoBucketHandler extends Handler {
    */
   @Override
   public Void call() throws Exception {
-    String volumeName, bucketName;
-    URI ozoneURI = verifyURI(uri);
-    Path path = Paths.get(ozoneURI.getPath());
-    int pathNameCount = path.getNameCount();
-    if (pathNameCount != 2) {
-      String errorMessage;
-      if (pathNameCount < 2) {
-        errorMessage = "volume and bucket name required in infoBucket";
-      } else {
-        errorMessage = "Invalid bucket name. Delimiters (/) not allowed in " +
-            "bucket name";
-      }
-      throw new OzoneClientException(errorMessage);
-    }
+    OzoneAddress address = new OzoneAddress(uri);
+    address.ensureBucketAddress();
+    OzoneClient client = address.createClient(createOzoneConfiguration());
 
-    volumeName = path.getName(0).toString();
-    bucketName = path.getName(1).toString();
+    String volumeName = address.getVolumeName();
+    String bucketName = address.getBucketName();
 
     if (isVerbose()) {
       System.out.printf("Volume Name : %s%n", volumeName);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/38a65e3b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/bucket/ListBucketHandler.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/bucket/ListBucketHandler.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/bucket/ListBucketHandler.java
index 7c3029a..8bc0029 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/bucket/ListBucketHandler.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/bucket/ListBucketHandler.java
@@ -18,19 +18,17 @@
 
 package org.apache.hadoop.ozone.web.ozShell.bucket;
 
-import java.net.URI;
-import java.nio.file.Path;
-import java.nio.file.Paths;
 import java.util.ArrayList;
 import java.util.Iterator;
 import java.util.List;
 
 import org.apache.hadoop.ozone.client.OzoneBucket;
-import org.apache.hadoop.ozone.client.OzoneClientException;
+import org.apache.hadoop.ozone.client.OzoneClient;
 import org.apache.hadoop.ozone.client.OzoneClientUtils;
 import org.apache.hadoop.ozone.client.OzoneVolume;
 import org.apache.hadoop.ozone.client.rest.response.BucketInfo;
 import org.apache.hadoop.ozone.web.ozShell.Handler;
+import org.apache.hadoop.ozone.web.ozShell.OzoneAddress;
 import org.apache.hadoop.ozone.web.ozShell.Shell;
 import org.apache.hadoop.ozone.web.utils.JsonUtils;
 
@@ -69,27 +67,16 @@ public class ListBucketHandler extends Handler {
   @Override
   public Void call() throws Exception {
 
-    URI ozoneURI = verifyURI(uri);
-    Path path = Paths.get(ozoneURI.getPath());
-    int pathNameCount = path.getNameCount();
-    if (pathNameCount != 1) {
-      String errorMessage;
-      if (pathNameCount < 1) {
-        errorMessage = "volume is required in listBucket";
-      } else {
-        errorMessage = "Invalid volume name. Delimiters (/) not allowed in " +
-            "volume name";
-      }
-      throw new OzoneClientException(errorMessage);
-    }
+    OzoneAddress address = new OzoneAddress(uri);
+    address.ensureVolumeAddress();
+    OzoneClient client = address.createClient(createOzoneConfiguration());
 
+    String volumeName = address.getVolumeName();
     if (maxBuckets < 1) {
       throw new IllegalArgumentException(
           "the length should be a positive number");
     }
 
-    String volumeName = path.getName(0).toString();
-
     if (isVerbose()) {
       System.out.printf("Volume Name : %s%n", volumeName);
     }
@@ -114,5 +101,6 @@ public class ListBucketHandler extends Handler {
         JsonUtils.toJsonString(bucketList)));
     return null;
   }
+
 }
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/38a65e3b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/bucket/S3BucketMapping.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/bucket/S3BucketMapping.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/bucket/S3BucketMapping.java
index 21ae746..d147f9b 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/bucket/S3BucketMapping.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/bucket/S3BucketMapping.java
@@ -17,15 +17,10 @@
  */
 package org.apache.hadoop.ozone.web.ozShell.bucket;
 
-import java.net.URI;
-import java.nio.file.Path;
-import java.nio.file.Paths;
-
-import org.apache.commons.lang3.StringUtils;
 import org.apache.hadoop.ozone.OzoneConsts;
-import org.apache.hadoop.ozone.client.OzoneClientException;
+import org.apache.hadoop.ozone.client.OzoneClient;
 import org.apache.hadoop.ozone.web.ozShell.Handler;
-import org.apache.hadoop.ozone.web.ozShell.Shell;
+import org.apache.hadoop.ozone.web.ozShell.OzoneAddress;
 
 import picocli.CommandLine.Command;
 import picocli.CommandLine.Parameters;
@@ -38,9 +33,8 @@ import picocli.CommandLine.Parameters;
     description = "Returns the ozone path for S3Bucket")
 public class S3BucketMapping extends Handler {
 
-  @Parameters(arity = "1..1", description = Shell
-      .OZONE_S3BUCKET_URI_DESCRIPTION)
-  private String uri;
+  @Parameters(arity = "1..1", description = "Name of the s3 bucket.")
+  private String s3BucketName;
 
   /**
    * Executes create bucket.
@@ -48,40 +42,13 @@ public class S3BucketMapping extends Handler {
   @Override
   public Void call() throws Exception {
 
-    URI ozoneURI = verifyURI(uri);
-    Path path = Paths.get(ozoneURI.getPath());
-    int pathNameCount = path.getNameCount();
-    String errorMessage;
-
-    // When just uri is given as http://om:9874, we are getting pathCount
-    // still as 1, as getPath() is returning empty string.
-    // So for safer side check, whether it is an empty string
-    if (pathNameCount == 1) {
-      String s3Bucket = path.getName(0).toString();
-      if (StringUtils.isBlank(s3Bucket)) {
-        errorMessage = "S3Bucket name is required to get volume name and " +
-            "Ozone fs Uri";
-        throw new OzoneClientException(errorMessage);
-      }
-    }
-    if (pathNameCount != 1) {
-      if (pathNameCount < 1) {
-        errorMessage = "S3Bucket name is required to get volume name and " +
-            "Ozone fs Uri";
-      } else {
-        errorMessage = "Invalid S3Bucket name. Delimiters (/) not allowed in " +
-            "S3Bucket name";
-      }
-      throw new OzoneClientException(errorMessage);
-    }
-
-    String s3Bucket = path.getName(0).toString();
-    if (isVerbose()) {
-      System.out.printf("S3Bucket Name : %s%n", s3Bucket);
-    }
+    OzoneAddress ozoneAddress = new OzoneAddress();
+    OzoneClient client = ozoneAddress.createClient(createOzoneConfiguration());
 
-    String mapping = client.getObjectStore().getOzoneBucketMapping(s3Bucket);
-    String volumeName = client.getObjectStore().getOzoneVolumeName(s3Bucket);
+    String mapping =
+        client.getObjectStore().getOzoneBucketMapping(s3BucketName);
+    String volumeName =
+        client.getObjectStore().getOzoneVolumeName(s3BucketName);
 
     if (isVerbose()) {
       System.out.printf("Mapping created for S3Bucket is : %s%n", mapping);
@@ -90,7 +57,7 @@ public class S3BucketMapping extends Handler {
     System.out.printf("Volume name for S3Bucket is : %s%n", volumeName);
 
     String ozoneFsUri = String.format("%s://%s.%s", OzoneConsts
-        .OZONE_URI_SCHEME, s3Bucket, volumeName);
+        .OZONE_URI_SCHEME, s3BucketName, volumeName);
 
     System.out.printf("Ozone FileSystem Uri is : %s%n", ozoneFsUri);
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/38a65e3b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/bucket/UpdateBucketHandler.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/bucket/UpdateBucketHandler.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/bucket/UpdateBucketHandler.java
index 3562dc0..28ee5fc 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/bucket/UpdateBucketHandler.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/bucket/UpdateBucketHandler.java
@@ -17,19 +17,17 @@
  */
 package org.apache.hadoop.ozone.web.ozShell.bucket;
 
-import java.net.URI;
-import java.nio.file.Path;
-import java.nio.file.Paths;
 import java.util.Arrays;
 import java.util.List;
 import java.util.stream.Collectors;
 
 import org.apache.hadoop.ozone.OzoneAcl;
 import org.apache.hadoop.ozone.client.OzoneBucket;
-import org.apache.hadoop.ozone.client.OzoneClientException;
+import org.apache.hadoop.ozone.client.OzoneClient;
 import org.apache.hadoop.ozone.client.OzoneClientUtils;
 import org.apache.hadoop.ozone.client.OzoneVolume;
 import org.apache.hadoop.ozone.web.ozShell.Handler;
+import org.apache.hadoop.ozone.web.ozShell.OzoneAddress;
 import org.apache.hadoop.ozone.web.ozShell.Shell;
 import org.apache.hadoop.ozone.web.utils.JsonUtils;
 
@@ -60,16 +58,12 @@ public class UpdateBucketHandler extends Handler {
   @Override
   public Void call() throws Exception {
 
-    URI ozoneURI = verifyURI(uri);
-    Path path = Paths.get(ozoneURI.getPath());
+    OzoneAddress address = new OzoneAddress(uri);
+    address.ensureBucketAddress();
+    OzoneClient client = address.createClient(createOzoneConfiguration());
 
-    if (path.getNameCount() < 2) {
-      throw new OzoneClientException(
-          "volume and bucket name required in update bucket");
-    }
-
-    String volumeName = path.getName(0).toString();
-    String bucketName = path.getName(1).toString();
+    String volumeName = address.getVolumeName();
+    String bucketName = address.getBucketName();
 
     if (isVerbose()) {
       System.out.printf("Volume Name : %s%n", volumeName);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/38a65e3b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/keys/DeleteKeyHandler.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/keys/DeleteKeyHandler.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/keys/DeleteKeyHandler.java
index dff6e67..6f8bdff 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/keys/DeleteKeyHandler.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/keys/DeleteKeyHandler.java
@@ -18,14 +18,11 @@
 
 package org.apache.hadoop.ozone.web.ozShell.keys;
 
-import java.net.URI;
-import java.nio.file.Path;
-import java.nio.file.Paths;
-
 import org.apache.hadoop.ozone.client.OzoneBucket;
-import org.apache.hadoop.ozone.client.OzoneClientException;
+import org.apache.hadoop.ozone.client.OzoneClient;
 import org.apache.hadoop.ozone.client.OzoneVolume;
 import org.apache.hadoop.ozone.web.ozShell.Handler;
+import org.apache.hadoop.ozone.web.ozShell.OzoneAddress;
 import org.apache.hadoop.ozone.web.ozShell.Shell;
 
 import picocli.CommandLine.Command;
@@ -47,16 +44,14 @@ public class DeleteKeyHandler extends Handler {
   @Override
   public Void call() throws Exception {
 
-    URI ozoneURI = verifyURI(uri);
-    Path path = Paths.get(ozoneURI.getPath());
-    if (path.getNameCount() < 3) {
-      throw new OzoneClientException(
-          "volume/bucket/key name required in deleteKey");
-    }
+    OzoneAddress address = new OzoneAddress(uri);
+    address.ensureKeyAddress();
+    OzoneClient client = address.createClient(createOzoneConfiguration());
+
+    String volumeName = address.getVolumeName();
+    String bucketName = address.getBucketName();
+    String keyName = address.getKeyName();
 
-    String volumeName = path.getName(0).toString();
-    String bucketName = path.getName(1).toString();
-    String keyName = path.getName(2).toString();
 
     if (isVerbose()) {
       System.out.printf("Volume Name : %s%n", volumeName);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/38a65e3b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/keys/GetKeyHandler.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/keys/GetKeyHandler.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/keys/GetKeyHandler.java
index 583d22b..be9342d 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/keys/GetKeyHandler.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/keys/GetKeyHandler.java
@@ -21,17 +21,18 @@ package org.apache.hadoop.ozone.web.ozShell.keys;
 import java.io.File;
 import java.io.FileInputStream;
 import java.io.FileOutputStream;
-import java.net.URI;
 import java.nio.file.Path;
 import java.nio.file.Paths;
 
 import org.apache.hadoop.hdds.conf.OzoneConfiguration;
 import org.apache.hadoop.io.IOUtils;
 import org.apache.hadoop.ozone.client.OzoneBucket;
+import org.apache.hadoop.ozone.client.OzoneClient;
 import org.apache.hadoop.ozone.client.OzoneClientException;
 import org.apache.hadoop.ozone.client.OzoneVolume;
 import org.apache.hadoop.ozone.client.io.OzoneInputStream;
 import org.apache.hadoop.ozone.web.ozShell.Handler;
+import org.apache.hadoop.ozone.web.ozShell.OzoneAddress;
 import org.apache.hadoop.ozone.web.ozShell.Shell;
 
 import org.apache.commons.codec.digest.DigestUtils;
@@ -61,16 +62,13 @@ public class GetKeyHandler extends Handler {
   @Override
   public Void call() throws Exception {
 
-    URI ozoneURI = verifyURI(uri);
-    Path path = Paths.get(ozoneURI.getPath());
-    if (path.getNameCount() < 3) {
-      throw new OzoneClientException(
-          "volume/bucket/key name required in putKey");
-    }
+    OzoneAddress address = new OzoneAddress(uri);
+    address.ensureKeyAddress();
+    OzoneClient client = address.createClient(createOzoneConfiguration());
 
-    String volumeName = path.getName(0).toString();
-    String bucketName = path.getName(1).toString();
-    String keyName = path.getName(2).toString();
+    String volumeName = address.getVolumeName();
+    String bucketName = address.getBucketName();
+    String keyName = address.getKeyName();
 
     if (isVerbose()) {
       System.out.printf("Volume Name : %s%n", volumeName);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/38a65e3b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/keys/InfoKeyHandler.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/keys/InfoKeyHandler.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/keys/InfoKeyHandler.java
index 6ae9b6f..fa345e3 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/keys/InfoKeyHandler.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/keys/InfoKeyHandler.java
@@ -18,17 +18,13 @@
 
 package org.apache.hadoop.ozone.web.ozShell.keys;
 
-import java.net.URI;
-import java.nio.file.Path;
-import java.nio.file.Paths;
-
-import org.apache.hadoop.ozone.OzoneConsts;
 import org.apache.hadoop.ozone.client.OzoneBucket;
-import org.apache.hadoop.ozone.client.OzoneClientException;
+import org.apache.hadoop.ozone.client.OzoneClient;
 import org.apache.hadoop.ozone.client.OzoneClientUtils;
 import org.apache.hadoop.ozone.client.OzoneKeyDetails;
 import org.apache.hadoop.ozone.client.OzoneVolume;
 import org.apache.hadoop.ozone.web.ozShell.Handler;
+import org.apache.hadoop.ozone.web.ozShell.OzoneAddress;
 import org.apache.hadoop.ozone.web.ozShell.Shell;
 import org.apache.hadoop.ozone.web.utils.JsonUtils;
 
@@ -49,22 +45,14 @@ public class InfoKeyHandler extends Handler {
    */
   @Override
   public Void call() throws Exception {
-    URI ozoneURI = verifyURI(uri);
-    Path path = Paths.get(ozoneURI.getPath());
-    if (path.getNameCount() < 3) {
-      throw new OzoneClientException(
-          "volume/bucket/key name required in infoKey");
-    }
-
-    String volumeName = path.getName(0).toString();
-    String bucketName = path.getName(1).toString();
 
-    String searchString = volumeName + OzoneConsts.OZONE_URI_DELIMITER +
-        bucketName + OzoneConsts.OZONE_URI_DELIMITER;
+    OzoneAddress address = new OzoneAddress(uri);
+    address.ensureKeyAddress();
+    OzoneClient client = address.createClient(createOzoneConfiguration());
 
-    String keyName =
-        uri.substring(uri.indexOf(searchString) +
-                searchString.length());
+    String volumeName = address.getVolumeName();
+    String bucketName = address.getBucketName();
+    String keyName = address.getKeyName();
 
     if (isVerbose()) {
       System.out.printf("Volume Name : %s%n", volumeName);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/38a65e3b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/keys/KeyCommands.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/keys/KeyCommands.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/keys/KeyCommands.java
index d034686..aada556 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/keys/KeyCommands.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/keys/KeyCommands.java
@@ -23,6 +23,7 @@ import java.util.concurrent.Callable;
 import org.apache.hadoop.hdds.cli.GenericParentCommand;
 import org.apache.hadoop.hdds.cli.HddsVersionProvider;
 import org.apache.hadoop.hdds.cli.MissingSubcommandException;
+import org.apache.hadoop.hdds.conf.OzoneConfiguration;
 import org.apache.hadoop.ozone.web.ozShell.Shell;
 
 import picocli.CommandLine.Command;
@@ -57,4 +58,9 @@ public class KeyCommands implements GenericParentCommand, Callable<Void> {
   public boolean isVerbose() {
     return shell.isVerbose();
   }
+
+  @Override
+  public OzoneConfiguration createOzoneConfiguration() {
+    return shell.createOzoneConfiguration();
+  }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/38a65e3b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/keys/ListKeyHandler.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/keys/ListKeyHandler.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/keys/ListKeyHandler.java
index c539d60..5642bc7 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/keys/ListKeyHandler.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/keys/ListKeyHandler.java
@@ -18,20 +18,18 @@
 
 package org.apache.hadoop.ozone.web.ozShell.keys;
 
-import java.net.URI;
-import java.nio.file.Path;
-import java.nio.file.Paths;
 import java.util.ArrayList;
 import java.util.Iterator;
 import java.util.List;
 
 import org.apache.hadoop.ozone.client.OzoneBucket;
-import org.apache.hadoop.ozone.client.OzoneClientException;
+import org.apache.hadoop.ozone.client.OzoneClient;
 import org.apache.hadoop.ozone.client.OzoneClientUtils;
 import org.apache.hadoop.ozone.client.OzoneKey;
 import org.apache.hadoop.ozone.client.OzoneVolume;
 import org.apache.hadoop.ozone.client.rest.response.KeyInfo;
 import org.apache.hadoop.ozone.web.ozShell.Handler;
+import org.apache.hadoop.ozone.web.ozShell.OzoneAddress;
 import org.apache.hadoop.ozone.web.ozShell.Shell;
 import org.apache.hadoop.ozone.web.utils.JsonUtils;
 
@@ -69,28 +67,18 @@ public class ListKeyHandler extends Handler {
   @Override
   public Void call() throws Exception {
 
-    URI ozoneURI = verifyURI(uri);
-    Path path = Paths.get(ozoneURI.getPath());
-    int pathNameCount = path.getNameCount();
-    if (pathNameCount != 2) {
-      String errorMessage;
-      if (pathNameCount < 2) {
-        errorMessage = "volume/bucket is required in listKey";
-      } else {
-        errorMessage = "Invalid bucket name. Delimiters (/) not allowed in " +
-            "bucket name";
-      }
-      throw new OzoneClientException(errorMessage);
-    }
+    OzoneAddress address = new OzoneAddress(uri);
+    address.ensureBucketAddress();
+    OzoneClient client = address.createClient(createOzoneConfiguration());
+
+    String volumeName = address.getVolumeName();
+    String bucketName = address.getBucketName();
 
     if (maxKeys < 1) {
       throw new IllegalArgumentException(
           "the length should be a positive number");
     }
 
-    String volumeName = path.getName(0).toString();
-    String bucketName = path.getName(1).toString();
-
     if (isVerbose()) {
       System.out.printf("Volume Name : %s%n", volumeName);
       System.out.printf("bucket Name : %s%n", bucketName);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/38a65e3b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/keys/PutKeyHandler.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/keys/PutKeyHandler.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/keys/PutKeyHandler.java
index bbd3235..b2ab68f 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/keys/PutKeyHandler.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/keys/PutKeyHandler.java
@@ -20,9 +20,6 @@ package org.apache.hadoop.ozone.web.ozShell.keys;
 
 import java.io.File;
 import java.io.FileInputStream;
-import java.net.URI;
-import java.nio.file.Path;
-import java.nio.file.Paths;
 
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hdds.client.ReplicationFactor;
@@ -30,10 +27,11 @@ import org.apache.hadoop.hdds.client.ReplicationType;
 import org.apache.hadoop.hdds.conf.OzoneConfiguration;
 import org.apache.hadoop.io.IOUtils;
 import org.apache.hadoop.ozone.client.OzoneBucket;
-import org.apache.hadoop.ozone.client.OzoneClientException;
+import org.apache.hadoop.ozone.client.OzoneClient;
 import org.apache.hadoop.ozone.client.OzoneVolume;
 import org.apache.hadoop.ozone.client.io.OzoneOutputStream;
 import org.apache.hadoop.ozone.web.ozShell.Handler;
+import org.apache.hadoop.ozone.web.ozShell.OzoneAddress;
 import org.apache.hadoop.ozone.web.ozShell.Shell;
 
 import org.apache.commons.codec.digest.DigestUtils;
@@ -71,16 +69,13 @@ public class PutKeyHandler extends Handler {
   @Override
   public Void call() throws Exception {
 
-    URI ozoneURI = verifyURI(uri);
-    Path path = Paths.get(ozoneURI.getPath());
-    if (path.getNameCount() < 3) {
-      throw new OzoneClientException(
-          "volume/bucket/key name required in putKey");
-    }
+    OzoneAddress address = new OzoneAddress(uri);
+    address.ensureKeyAddress();
+    OzoneClient client = address.createClient(createOzoneConfiguration());
 
-    String volumeName = path.getName(0).toString();
-    String bucketName = path.getName(1).toString();
-    String keyName = path.getName(2).toString();
+    String volumeName = address.getVolumeName();
+    String bucketName = address.getBucketName();
+    String keyName = address.getKeyName();
 
     if (isVerbose()) {
       System.out.printf("Volume Name : %s%n", volumeName);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/38a65e3b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/volume/CreateVolumeHandler.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/volume/CreateVolumeHandler.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/volume/CreateVolumeHandler.java
index 84922a7..bee6c49 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/volume/CreateVolumeHandler.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/volume/CreateVolumeHandler.java
@@ -18,19 +18,16 @@
 
 package org.apache.hadoop.ozone.web.ozShell.volume;
 
-import java.net.URI;
-import java.nio.file.Path;
-import java.nio.file.Paths;
-
-import org.apache.hadoop.ozone.client.OzoneClientException;
+import org.apache.hadoop.ozone.client.OzoneClient;
 import org.apache.hadoop.ozone.client.OzoneClientUtils;
 import org.apache.hadoop.ozone.client.OzoneVolume;
 import org.apache.hadoop.ozone.client.VolumeArgs;
 import org.apache.hadoop.ozone.web.ozShell.Handler;
+import org.apache.hadoop.ozone.web.ozShell.OzoneAddress;
 import org.apache.hadoop.ozone.web.ozShell.Shell;
 import org.apache.hadoop.ozone.web.utils.JsonUtils;
-
 import org.apache.hadoop.security.UserGroupInformation;
+
 import picocli.CommandLine.Command;
 import picocli.CommandLine.Option;
 import picocli.CommandLine.Parameters;
@@ -68,21 +65,12 @@ public class CreateVolumeHandler extends Handler {
       userName = UserGroupInformation.getCurrentUser().getUserName();
     }
 
-    URI ozoneURI = verifyURI(uri);
-    Path path = Paths.get(ozoneURI.getPath());
-    int pathNameCount = path.getNameCount();
-    if (pathNameCount != 1) {
-      String errorMessage;
-      if (pathNameCount < 1) {
-        errorMessage = "Volume name is required to create a volume";
-      } else {
-        errorMessage = "Invalid volume name. Delimiters (/) not allowed in " +
-            "volume name";
-      }
-      throw new OzoneClientException(errorMessage);
-    }
+    OzoneAddress address = new OzoneAddress(uri);
+    address.ensureVolumeAddress();
+    OzoneClient client = address.createClient(createOzoneConfiguration());
+
+    String volumeName = address.getVolumeName();
 
-    String volumeName = ozoneURI.getPath().replaceAll("^/+", "");
     if (isVerbose()) {
       System.out.printf("Volume name : %s%n", volumeName);
     }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/38a65e3b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/volume/DeleteVolumeHandler.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/volume/DeleteVolumeHandler.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/volume/DeleteVolumeHandler.java
index d757e20..87286d2 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/volume/DeleteVolumeHandler.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/volume/DeleteVolumeHandler.java
@@ -18,7 +18,9 @@
 
 package org.apache.hadoop.ozone.web.ozShell.volume;
 
+import org.apache.hadoop.ozone.client.OzoneClient;
 import org.apache.hadoop.ozone.web.ozShell.Handler;
+import org.apache.hadoop.ozone.web.ozShell.OzoneAddress;
 import org.apache.hadoop.ozone.web.ozShell.Shell;
 
 import picocli.CommandLine.Command;
@@ -40,7 +42,11 @@ public class DeleteVolumeHandler extends Handler {
   @Override
   public Void call() throws Exception {
 
-    String volumeName = parseVolumeName(uri);
+    OzoneAddress address = new OzoneAddress(uri);
+    address.ensureVolumeAddress();
+    OzoneClient client = address.createClient(createOzoneConfiguration());
+
+    String volumeName = address.getVolumeName();
 
     if (isVerbose()) {
       System.out.printf("Volume name : %s%n", volumeName);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/38a65e3b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/volume/InfoVolumeHandler.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/volume/InfoVolumeHandler.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/volume/InfoVolumeHandler.java
index 48ed9f6..9c5f872 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/volume/InfoVolumeHandler.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/volume/InfoVolumeHandler.java
@@ -18,9 +18,11 @@
 
 package org.apache.hadoop.ozone.web.ozShell.volume;
 
+import org.apache.hadoop.ozone.client.OzoneClient;
 import org.apache.hadoop.ozone.client.OzoneClientUtils;
 import org.apache.hadoop.ozone.client.OzoneVolume;
 import org.apache.hadoop.ozone.web.ozShell.Handler;
+import org.apache.hadoop.ozone.web.ozShell.OzoneAddress;
 import org.apache.hadoop.ozone.web.ozShell.Shell;
 import org.apache.hadoop.ozone.web.utils.JsonUtils;
 
@@ -43,7 +45,11 @@ public class InfoVolumeHandler extends Handler{
   @Override
   public Void call() throws Exception {
 
-    String volumeName = parseVolumeName(uri);
+    OzoneAddress address = new OzoneAddress(uri);
+    address.ensureVolumeAddress();
+    OzoneClient client = address.createClient(createOzoneConfiguration());
+
+    String volumeName = address.getVolumeName();
 
     OzoneVolume vol = client.getObjectStore().getVolume(volumeName);
     System.out.printf("%s%n", JsonUtils.toJsonStringWithDefaultPrettyPrinter(

http://git-wip-us.apache.org/repos/asf/hadoop/blob/38a65e3b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/volume/ListVolumeHandler.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/volume/ListVolumeHandler.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/volume/ListVolumeHandler.java
index 948a45c..1b7f2a3 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/volume/ListVolumeHandler.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/volume/ListVolumeHandler.java
@@ -18,23 +18,22 @@
 
 package org.apache.hadoop.ozone.web.ozShell.volume;
 
-import com.google.common.base.Strings;
-import picocli.CommandLine.Command;
-import picocli.CommandLine.Option;
-import picocli.CommandLine.Parameters;
+import java.util.ArrayList;
+import java.util.Iterator;
+import java.util.List;
 
+import org.apache.hadoop.ozone.client.OzoneClient;
 import org.apache.hadoop.ozone.client.OzoneClientUtils;
 import org.apache.hadoop.ozone.client.OzoneVolume;
 import org.apache.hadoop.ozone.client.rest.response.VolumeInfo;
-import org.apache.hadoop.ozone.client.OzoneClientException;
 import org.apache.hadoop.ozone.web.ozShell.Handler;
+import org.apache.hadoop.ozone.web.ozShell.OzoneAddress;
 import org.apache.hadoop.ozone.web.ozShell.Shell;
 import org.apache.hadoop.ozone.web.utils.JsonUtils;
 
-import java.net.URI;
-import java.util.ArrayList;
-import java.util.Iterator;
-import java.util.List;
+import picocli.CommandLine.Command;
+import picocli.CommandLine.Option;
+import picocli.CommandLine.Parameters;
 
 /**
  * Executes List Volume call.
@@ -72,13 +71,9 @@ public class ListVolumeHandler extends Handler {
   @Override
   public Void call() throws Exception {
 
-    URI ozoneURI = verifyURI(uri);
-    if (!Strings.isNullOrEmpty(ozoneURI.getPath()) && !ozoneURI.getPath()
-        .equals("/")) {
-      throw new OzoneClientException(
-          "Invalid URI: " + ozoneURI + " . Specified path not used." + ozoneURI
-              .getPath());
-    }
+    OzoneAddress address = new OzoneAddress(uri);
+    address.ensureRootAddress();
+    OzoneClient client = address.createClient(createOzoneConfiguration());
 
     if (userName == null) {
       userName = System.getProperty("user.name");
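
The list handler is the one place that needs ensureRootAddress(): listing volumes takes no volume, bucket or key path, only an optional scheme and host. A hedged sketch of just that step, pulled out into a hypothetical helper (only the OzoneAddress and OzoneClient calls mirror the hunk above):

import org.apache.hadoop.hdds.conf.OzoneConfiguration;
import org.apache.hadoop.ozone.client.OzoneClient;
import org.apache.hadoop.ozone.web.ozShell.OzoneAddress;

final class RootAddressSketch {

  private RootAddressSketch() {
  }

  /** Hypothetical helper: accepts e.g. "http://localhost:9878/", "o3fs:///" or "/". */
  static OzoneClient clientForRoot(String uri, OzoneConfiguration conf) throws Exception {
    OzoneAddress address = new OzoneAddress(uri);
    address.ensureRootAddress();           // any volume/bucket/key path is rejected here
    return address.createClient(conf);     // the handler then iterates volumes via getObjectStore()
  }
}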

http://git-wip-us.apache.org/repos/asf/hadoop/blob/38a65e3b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/volume/UpdateVolumeHandler.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/volume/UpdateVolumeHandler.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/volume/UpdateVolumeHandler.java
index 46803d8..06f591b 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/volume/UpdateVolumeHandler.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/volume/UpdateVolumeHandler.java
@@ -23,9 +23,11 @@ import picocli.CommandLine.Option;
 import picocli.CommandLine.Parameters;
 
 import org.apache.hadoop.hdds.client.OzoneQuota;
+import org.apache.hadoop.ozone.client.OzoneClient;
 import org.apache.hadoop.ozone.client.OzoneVolume;
 import org.apache.hadoop.ozone.client.OzoneClientUtils;
 import org.apache.hadoop.ozone.web.ozShell.Handler;
+import org.apache.hadoop.ozone.web.ozShell.OzoneAddress;
 import org.apache.hadoop.ozone.web.ozShell.Shell;
 import org.apache.hadoop.ozone.web.utils.JsonUtils;
 
@@ -54,7 +56,11 @@ public class UpdateVolumeHandler extends Handler {
   @Override
   public Void call() throws Exception {
 
-    String volumeName = parseVolumeName(uri);
+    OzoneAddress address = new OzoneAddress(uri);
+    address.ensureVolumeAddress();
+    OzoneClient client = address.createClient(createOzoneConfiguration());
+
+    String volumeName = address.getVolumeName();
 
     OzoneVolume volume = client.getObjectStore().getVolume(volumeName);
     if (quota != null && !quota.isEmpty()) {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/38a65e3b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/volume/VolumeCommands.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/volume/VolumeCommands.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/volume/VolumeCommands.java
index e3f5d2d..1c3afe9 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/volume/VolumeCommands.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/volume/VolumeCommands.java
@@ -23,6 +23,7 @@ import java.util.concurrent.Callable;
 import org.apache.hadoop.hdds.cli.GenericParentCommand;
 import org.apache.hadoop.hdds.cli.HddsVersionProvider;
 import org.apache.hadoop.hdds.cli.MissingSubcommandException;
+import org.apache.hadoop.hdds.conf.OzoneConfiguration;
 import org.apache.hadoop.ozone.web.ozShell.Shell;
 
 import picocli.CommandLine.Command;
@@ -58,4 +59,9 @@ public class VolumeCommands implements GenericParentCommand, Callable<Void> {
   public boolean isVerbose() {
     return shell.isVerbose();
   }
+
+  @Override
+  public OzoneConfiguration createOzoneConfiguration() {
+    return shell.createOzoneConfiguration();
+  }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/38a65e3b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/web/ozShell/TestOzoneAddress.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/web/ozShell/TestOzoneAddress.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/web/ozShell/TestOzoneAddress.java
new file mode 100644
index 0000000..319c4ce
--- /dev/null
+++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/web/ozShell/TestOzoneAddress.java
@@ -0,0 +1,100 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ *  with the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ *  Unless required by applicable law or agreed to in writing, software
+ *  distributed under the License is distributed on an "AS IS" BASIS,
+ *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ *  See the License for the specific language governing permissions and
+ *  limitations under the License.
+ */
+
+package org.apache.hadoop.ozone.web.ozShell;
+
+import java.io.IOException;
+import java.util.Arrays;
+import java.util.Collection;
+
+import org.apache.hadoop.ozone.client.rest.OzoneException;
+
+import org.junit.Assert;
+import org.junit.Test;
+import org.junit.runner.RunWith;
+import org.junit.runners.Parameterized;
+import org.junit.runners.Parameterized.Parameters;
+
+/**
+ * Test ozone URL parsing.
+ */
+@RunWith(Parameterized.class)
+public class TestOzoneAddress {
+
+  @Parameters
+  public static Collection<Object[]> data() {
+    return Arrays.asList(new Object[][] {
+        {"o3fs://localhost:9878/"},
+        {"o3fs://localhost/"},
+        {"o3fs:///"},
+        {"http://localhost:9878/"},
+        {"http://localhost/"},
+        {"http:///"},
+        {"/"}
+    });
+  }
+
+  private String prefix;
+
+  public TestOzoneAddress(String prefix) {
+    this.prefix = prefix;
+  }
+
+  @Test
+  public void checkUrlTypes() throws OzoneException, IOException {
+    OzoneAddress address;
+
+    address = new OzoneAddress("");
+    address.ensureRootAddress();
+
+    address = new OzoneAddress(prefix + "");
+    address.ensureRootAddress();
+
+    address = new OzoneAddress(prefix + "vol1");
+    address.ensureVolumeAddress();
+    Assert.assertEquals("vol1", address.getVolumeName());
+
+    address = new OzoneAddress(prefix + "vol1/bucket");
+    address.ensureBucketAddress();
+    Assert.assertEquals("vol1", address.getVolumeName());
+    Assert.assertEquals("bucket", address.getBucketName());
+
+    address = new OzoneAddress(prefix + "vol1/bucket/");
+    address.ensureBucketAddress();
+    Assert.assertEquals("vol1", address.getVolumeName());
+    Assert.assertEquals("bucket", address.getBucketName());
+
+    address = new OzoneAddress(prefix + "vol1/bucket/key");
+    address.ensureKeyAddress();
+    Assert.assertEquals("vol1", address.getVolumeName());
+    Assert.assertEquals("bucket", address.getBucketName());
+    Assert.assertEquals("key", address.getKeyName());
+
+    address = new OzoneAddress(prefix + "vol1/bucket/key/");
+    address.ensureKeyAddress();
+    Assert.assertEquals("vol1", address.getVolumeName());
+    Assert.assertEquals("bucket", address.getBucketName());
+    Assert.assertEquals("key/", address.getKeyName());
+
+    address = new OzoneAddress(prefix + "vol1/bucket/key1/key3/key");
+    address.ensureKeyAddress();
+    Assert.assertEquals("vol1", address.getVolumeName());
+    Assert.assertEquals("bucket", address.getBucketName());
+    Assert.assertEquals("key1/key3/key", address.getKeyName());
+  }
+}
\ No newline at end of file
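
Reading the assertions above together: the scheme and host prefix is optional, a trailing slash after a bucket name is dropped, a trailing slash after a key is kept as part of the key name, and nested path segments all belong to the key. In the same style as the test, one more illustrative case (not part of the commit) would read:

    address = new OzoneAddress(prefix + "vol1/bucket/dir1/dir2/leafkey");
    address.ensureKeyAddress();
    Assert.assertEquals("vol1", address.getVolumeName());
    Assert.assertEquals("bucket", address.getBucketName());
    Assert.assertEquals("dir1/dir2/leafkey", address.getKeyName());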

http://git-wip-us.apache.org/repos/asf/hadoop/blob/38a65e3b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/web/ozShell/package-info.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/web/ozShell/package-info.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/web/ozShell/package-info.java
new file mode 100644
index 0000000..80c1985
--- /dev/null
+++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/web/ozShell/package-info.java
@@ -0,0 +1,21 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.ozone.web.ozShell;
+/**
+ * Tests for ozone shell.
+ */




[34/50] [abbrv] hadoop git commit: YARN-8854. Upgrade jquery datatable version references to v1.10.19. Contributed by Akhil PB.

Posted by su...@apache.org.
http://git-wip-us.apache.org/repos/asf/hadoop/blob/d36012b6/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/webapps/static/dt-1.10.7/js/jquery.dataTables.min.js
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/webapps/static/dt-1.10.7/js/jquery.dataTables.min.js b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/webapps/static/dt-1.10.7/js/jquery.dataTables.min.js
deleted file mode 100644
index 85dd817..0000000
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/webapps/static/dt-1.10.7/js/jquery.dataTables.min.js
+++ /dev/null
@@ -1,160 +0,0 @@
-/*! DataTables 1.10.7
- * ©2008-2015 SpryMedia Ltd - datatables.net/license
- */
-(function(Ea,Q,k){var P=function(h){function W(a){var b,c,e={};h.each(a,function(d){if((b=d.match(/^([^A-Z]+?)([A-Z])/))&&-1!=="a aa ai ao as b fn i m o s ".indexOf(b[1]+" "))c=d.replace(b[0],b[2].toLowerCase()),e[c]=d,"o"===b[1]&&W(a[d])});a._hungarianMap=e}function H(a,b,c){a._hungarianMap||W(a);var e;h.each(b,function(d){e=a._hungarianMap[d];if(e!==k&&(c||b[e]===k))"o"===e.charAt(0)?(b[e]||(b[e]={}),h.extend(!0,b[e],b[d]),H(a[e],b[e],c)):b[e]=b[d]})}function P(a){var b=m.defaults.oLanguage,c=a.sZeroRecords;
-!a.sEmptyTable&&(c&&"No data available in table"===b.sEmptyTable)&&E(a,a,"sZeroRecords","sEmptyTable");!a.sLoadingRecords&&(c&&"Loading..."===b.sLoadingRecords)&&E(a,a,"sZeroRecords","sLoadingRecords");a.sInfoThousands&&(a.sThousands=a.sInfoThousands);(a=a.sDecimal)&&db(a)}function eb(a){A(a,"ordering","bSort");A(a,"orderMulti","bSortMulti");A(a,"orderClasses","bSortClasses");A(a,"orderCellsTop","bSortCellsTop");A(a,"order","aaSorting");A(a,"orderFixed","aaSortingFixed");A(a,"paging","bPaginate");
-A(a,"pagingType","sPaginationType");A(a,"pageLength","iDisplayLength");A(a,"searching","bFilter");if(a=a.aoSearchCols)for(var b=0,c=a.length;b<c;b++)a[b]&&H(m.models.oSearch,a[b])}function fb(a){A(a,"orderable","bSortable");A(a,"orderData","aDataSort");A(a,"orderSequence","asSorting");A(a,"orderDataType","sortDataType");var b=a.aDataSort;b&&!h.isArray(b)&&(a.aDataSort=[b])}function gb(a){var a=a.oBrowser,b=h("<div/>").css({position:"absolute",top:0,left:0,height:1,width:1,overflow:"hidden"}).append(h("<div/>").css({position:"absolute",
-top:1,left:1,width:100,overflow:"scroll"}).append(h('<div class="test"/>').css({width:"100%",height:10}))).appendTo("body"),c=b.find(".test");a.bScrollOversize=100===c[0].offsetWidth;a.bScrollbarLeft=1!==Math.round(c.offset().left);b.remove()}function hb(a,b,c,e,d,f){var g,j=!1;c!==k&&(g=c,j=!0);for(;e!==d;)a.hasOwnProperty(e)&&(g=j?b(g,a[e],e,a):a[e],j=!0,e+=f);return g}function Fa(a,b){var c=m.defaults.column,e=a.aoColumns.length,c=h.extend({},m.models.oColumn,c,{nTh:b?b:Q.createElement("th"),sTitle:c.sTitle?
-c.sTitle:b?b.innerHTML:"",aDataSort:c.aDataSort?c.aDataSort:[e],mData:c.mData?c.mData:e,idx:e});a.aoColumns.push(c);c=a.aoPreSearchCols;c[e]=h.extend({},m.models.oSearch,c[e]);ka(a,e,h(b).data())}function ka(a,b,c){var b=a.aoColumns[b],e=a.oClasses,d=h(b.nTh);if(!b.sWidthOrig){b.sWidthOrig=d.attr("width")||null;var f=(d.attr("style")||"").match(/width:\s*(\d+[pxem%]+)/);f&&(b.sWidthOrig=f[1])}c!==k&&null!==c&&(fb(c),H(m.defaults.column,c),c.mDataProp!==k&&!c.mData&&(c.mData=c.mDataProp),c.sType&&
-(b._sManualType=c.sType),c.className&&!c.sClass&&(c.sClass=c.className),h.extend(b,c),E(b,c,"sWidth","sWidthOrig"),c.iDataSort!==k&&(b.aDataSort=[c.iDataSort]),E(b,c,"aDataSort"));var g=b.mData,j=R(g),i=b.mRender?R(b.mRender):null,c=function(a){return"string"===typeof a&&-1!==a.indexOf("@")};b._bAttrSrc=h.isPlainObject(g)&&(c(g.sort)||c(g.type)||c(g.filter));b.fnGetData=function(a,b,c){var e=j(a,b,k,c);return i&&b?i(e,b,a,c):e};b.fnSetData=function(a,b,c){return S(g)(a,b,c)};"number"!==typeof g&&
-(a._rowReadObject=!0);a.oFeatures.bSort||(b.bSortable=!1,d.addClass(e.sSortableNone));a=-1!==h.inArray("asc",b.asSorting);c=-1!==h.inArray("desc",b.asSorting);!b.bSortable||!a&&!c?(b.sSortingClass=e.sSortableNone,b.sSortingClassJUI=""):a&&!c?(b.sSortingClass=e.sSortableAsc,b.sSortingClassJUI=e.sSortJUIAscAllowed):!a&&c?(b.sSortingClass=e.sSortableDesc,b.sSortingClassJUI=e.sSortJUIDescAllowed):(b.sSortingClass=e.sSortable,b.sSortingClassJUI=e.sSortJUI)}function X(a){if(!1!==a.oFeatures.bAutoWidth){var b=
-a.aoColumns;Ga(a);for(var c=0,e=b.length;c<e;c++)b[c].nTh.style.width=b[c].sWidth}b=a.oScroll;(""!==b.sY||""!==b.sX)&&Y(a);w(a,null,"column-sizing",[a])}function la(a,b){var c=Z(a,"bVisible");return"number"===typeof c[b]?c[b]:null}function $(a,b){var c=Z(a,"bVisible"),c=h.inArray(b,c);return-1!==c?c:null}function aa(a){return Z(a,"bVisible").length}function Z(a,b){var c=[];h.map(a.aoColumns,function(a,d){a[b]&&c.push(d)});return c}function Ha(a){var b=a.aoColumns,c=a.aoData,e=m.ext.type.detect,d,
-f,g,j,i,h,l,q,n;d=0;for(f=b.length;d<f;d++)if(l=b[d],n=[],!l.sType&&l._sManualType)l.sType=l._sManualType;else if(!l.sType){g=0;for(j=e.length;g<j;g++){i=0;for(h=c.length;i<h;i++){n[i]===k&&(n[i]=x(a,i,d,"type"));q=e[g](n[i],a);if(!q&&g!==e.length-1)break;if("html"===q)break}if(q){l.sType=q;break}}l.sType||(l.sType="string")}}function ib(a,b,c,e){var d,f,g,j,i,o,l=a.aoColumns;if(b)for(d=b.length-1;0<=d;d--){o=b[d];var q=o.targets!==k?o.targets:o.aTargets;h.isArray(q)||(q=[q]);f=0;for(g=q.length;f<
-g;f++)if("number"===typeof q[f]&&0<=q[f]){for(;l.length<=q[f];)Fa(a);e(q[f],o)}else if("number"===typeof q[f]&&0>q[f])e(l.length+q[f],o);else if("string"===typeof q[f]){j=0;for(i=l.length;j<i;j++)("_all"==q[f]||h(l[j].nTh).hasClass(q[f]))&&e(j,o)}}if(c){d=0;for(a=c.length;d<a;d++)e(d,c[d])}}function K(a,b,c,e){var d=a.aoData.length,f=h.extend(!0,{},m.models.oRow,{src:c?"dom":"data"});f._aData=b;a.aoData.push(f);for(var b=a.aoColumns,f=0,g=b.length;f<g;f++)c&&Ia(a,d,f,x(a,d,f)),b[f].sType=null;a.aiDisplayMaster.push(d);
-(c||!a.oFeatures.bDeferRender)&&Ja(a,d,c,e);return d}function ma(a,b){var c;b instanceof h||(b=h(b));return b.map(function(b,d){c=na(a,d);return K(a,c.data,d,c.cells)})}function x(a,b,c,e){var d=a.iDraw,f=a.aoColumns[c],g=a.aoData[b]._aData,j=f.sDefaultContent,c=f.fnGetData(g,e,{settings:a,row:b,col:c});if(c===k)return a.iDrawError!=d&&null===j&&(I(a,0,"Requested unknown parameter "+("function"==typeof f.mData?"{function}":"'"+f.mData+"'")+" for row "+b,4),a.iDrawError=d),j;if((c===g||null===c)&&
-null!==j)c=j;else if("function"===typeof c)return c.call(g);return null===c&&"display"==e?"":c}function Ia(a,b,c,e){a.aoColumns[c].fnSetData(a.aoData[b]._aData,e,{settings:a,row:b,col:c})}function Ka(a){return h.map(a.match(/(\\.|[^\.])+/g),function(a){return a.replace(/\\./g,".")})}function R(a){if(h.isPlainObject(a)){var b={};h.each(a,function(a,c){c&&(b[a]=R(c))});return function(a,c,f,g){var j=b[c]||b._;return j!==k?j(a,c,f,g):a}}if(null===a)return function(a){return a};if("function"===typeof a)return function(b,
-c,f,g){return a(b,c,f,g)};if("string"===typeof a&&(-1!==a.indexOf(".")||-1!==a.indexOf("[")||-1!==a.indexOf("("))){var c=function(a,b,f){var g,j;if(""!==f){j=Ka(f);for(var i=0,h=j.length;i<h;i++){f=j[i].match(ba);g=j[i].match(T);if(f){j[i]=j[i].replace(ba,"");""!==j[i]&&(a=a[j[i]]);g=[];j.splice(0,i+1);j=j.join(".");i=0;for(h=a.length;i<h;i++)g.push(c(a[i],b,j));a=f[0].substring(1,f[0].length-1);a=""===a?g:g.join(a);break}else if(g){j[i]=j[i].replace(T,"");a=a[j[i]]();continue}if(null===a||a[j[i]]===
-k)return k;a=a[j[i]]}}return a};return function(b,d){return c(b,d,a)}}return function(b){return b[a]}}function S(a){if(h.isPlainObject(a))return S(a._);if(null===a)return function(){};if("function"===typeof a)return function(b,e,d){a(b,"set",e,d)};if("string"===typeof a&&(-1!==a.indexOf(".")||-1!==a.indexOf("[")||-1!==a.indexOf("("))){var b=function(a,e,d){var d=Ka(d),f;f=d[d.length-1];for(var g,j,i=0,h=d.length-1;i<h;i++){g=d[i].match(ba);j=d[i].match(T);if(g){d[i]=d[i].replace(ba,"");a[d[i]]=[];
-f=d.slice();f.splice(0,i+1);g=f.join(".");j=0;for(h=e.length;j<h;j++)f={},b(f,e[j],g),a[d[i]].push(f);return}j&&(d[i]=d[i].replace(T,""),a=a[d[i]](e));if(null===a[d[i]]||a[d[i]]===k)a[d[i]]={};a=a[d[i]]}if(f.match(T))a[f.replace(T,"")](e);else a[f.replace(ba,"")]=e};return function(c,e){return b(c,e,a)}}return function(b,e){b[a]=e}}function La(a){return D(a.aoData,"_aData")}function oa(a){a.aoData.length=0;a.aiDisplayMaster.length=0;a.aiDisplay.length=0}function pa(a,b,c){for(var e=-1,d=0,f=a.length;d<
-f;d++)a[d]==b?e=d:a[d]>b&&a[d]--; -1!=e&&c===k&&a.splice(e,1)}function ca(a,b,c,e){var d=a.aoData[b],f,g=function(c,f){for(;c.childNodes.length;)c.removeChild(c.firstChild);c.innerHTML=x(a,b,f,"display")};if("dom"===c||(!c||"auto"===c)&&"dom"===d.src)d._aData=na(a,d,e,e===k?k:d._aData).data;else{var j=d.anCells;if(j)if(e!==k)g(j[e],e);else{c=0;for(f=j.length;c<f;c++)g(j[c],c)}}d._aSortData=null;d._aFilterData=null;g=a.aoColumns;if(e!==k)g[e].sType=null;else{c=0;for(f=g.length;c<f;c++)g[c].sType=null;
-Ma(d)}}function na(a,b,c,e){var d=[],f=b.firstChild,g,j=0,i,o=a.aoColumns,l=a._rowReadObject,e=e||l?{}:[],q=function(a,b){if("string"===typeof a){var c=a.indexOf("@");-1!==c&&(c=a.substring(c+1),S(a)(e,b.getAttribute(c)))}},a=function(a){if(c===k||c===j)g=o[j],i=h.trim(a.innerHTML),g&&g._bAttrSrc?(S(g.mData._)(e,i),q(g.mData.sort,a),q(g.mData.type,a),q(g.mData.filter,a)):l?(g._setter||(g._setter=S(g.mData)),g._setter(e,i)):e[j]=i;j++};if(f)for(;f;){b=f.nodeName.toUpperCase();if("TD"==b||"TH"==b)a(f),
-d.push(f);f=f.nextSibling}else{d=b.anCells;f=0;for(b=d.length;f<b;f++)a(d[f])}return{data:e,cells:d}}function Ja(a,b,c,e){var d=a.aoData[b],f=d._aData,g=[],j,i,h,l,q;if(null===d.nTr){j=c||Q.createElement("tr");d.nTr=j;d.anCells=g;j._DT_RowIndex=b;Ma(d);l=0;for(q=a.aoColumns.length;l<q;l++){h=a.aoColumns[l];i=c?e[l]:Q.createElement(h.sCellType);g.push(i);if(!c||h.mRender||h.mData!==l)i.innerHTML=x(a,b,l,"display");h.sClass&&(i.className+=" "+h.sClass);h.bVisible&&!c?j.appendChild(i):!h.bVisible&&c&&
-i.parentNode.removeChild(i);h.fnCreatedCell&&h.fnCreatedCell.call(a.oInstance,i,x(a,b,l),f,b,l)}w(a,"aoRowCreatedCallback",null,[j,f,b])}d.nTr.setAttribute("role","row")}function Ma(a){var b=a.nTr,c=a._aData;if(b){c.DT_RowId&&(b.id=c.DT_RowId);if(c.DT_RowClass){var e=c.DT_RowClass.split(" ");a.__rowc=a.__rowc?Na(a.__rowc.concat(e)):e;h(b).removeClass(a.__rowc.join(" ")).addClass(c.DT_RowClass)}c.DT_RowAttr&&h(b).attr(c.DT_RowAttr);c.DT_RowData&&h(b).data(c.DT_RowData)}}function jb(a){var b,c,e,d,
-f,g=a.nTHead,j=a.nTFoot,i=0===h("th, td",g).length,o=a.oClasses,l=a.aoColumns;i&&(d=h("<tr/>").appendTo(g));b=0;for(c=l.length;b<c;b++)f=l[b],e=h(f.nTh).addClass(f.sClass),i&&e.appendTo(d),a.oFeatures.bSort&&(e.addClass(f.sSortingClass),!1!==f.bSortable&&(e.attr("tabindex",a.iTabIndex).attr("aria-controls",a.sTableId),Oa(a,f.nTh,b))),f.sTitle!=e.html()&&e.html(f.sTitle),Pa(a,"header")(a,e,f,o);i&&da(a.aoHeader,g);h(g).find(">tr").attr("role","row");h(g).find(">tr>th, >tr>td").addClass(o.sHeaderTH);
-h(j).find(">tr>th, >tr>td").addClass(o.sFooterTH);if(null!==j){a=a.aoFooter[0];b=0;for(c=a.length;b<c;b++)f=l[b],f.nTf=a[b].cell,f.sClass&&h(f.nTf).addClass(f.sClass)}}function ea(a,b,c){var e,d,f,g=[],j=[],i=a.aoColumns.length,o;if(b){c===k&&(c=!1);e=0;for(d=b.length;e<d;e++){g[e]=b[e].slice();g[e].nTr=b[e].nTr;for(f=i-1;0<=f;f--)!a.aoColumns[f].bVisible&&!c&&g[e].splice(f,1);j.push([])}e=0;for(d=g.length;e<d;e++){if(a=g[e].nTr)for(;f=a.firstChild;)a.removeChild(f);f=0;for(b=g[e].length;f<b;f++)if(o=
-i=1,j[e][f]===k){a.appendChild(g[e][f].cell);for(j[e][f]=1;g[e+i]!==k&&g[e][f].cell==g[e+i][f].cell;)j[e+i][f]=1,i++;for(;g[e][f+o]!==k&&g[e][f].cell==g[e][f+o].cell;){for(c=0;c<i;c++)j[e+c][f+o]=1;o++}h(g[e][f].cell).attr("rowspan",i).attr("colspan",o)}}}}function M(a){var b=w(a,"aoPreDrawCallback","preDraw",[a]);if(-1!==h.inArray(!1,b))C(a,!1);else{var b=[],c=0,e=a.asStripeClasses,d=e.length,f=a.oLanguage,g=a.iInitDisplayStart,j="ssp"==B(a),i=a.aiDisplay;a.bDrawing=!0;g!==k&&-1!==g&&(a._iDisplayStart=
-j?g:g>=a.fnRecordsDisplay()?0:g,a.iInitDisplayStart=-1);var g=a._iDisplayStart,o=a.fnDisplayEnd();if(a.bDeferLoading)a.bDeferLoading=!1,a.iDraw++,C(a,!1);else if(j){if(!a.bDestroying&&!kb(a))return}else a.iDraw++;if(0!==i.length){f=j?a.aoData.length:o;for(j=j?0:g;j<f;j++){var l=i[j],q=a.aoData[l];null===q.nTr&&Ja(a,l);l=q.nTr;if(0!==d){var n=e[c%d];q._sRowStripe!=n&&(h(l).removeClass(q._sRowStripe).addClass(n),q._sRowStripe=n)}w(a,"aoRowCallback",null,[l,q._aData,c,j]);b.push(l);c++}}else c=f.sZeroRecords,
-1==a.iDraw&&"ajax"==B(a)?c=f.sLoadingRecords:f.sEmptyTable&&0===a.fnRecordsTotal()&&(c=f.sEmptyTable),b[0]=h("<tr/>",{"class":d?e[0]:""}).append(h("<td />",{valign:"top",colSpan:aa(a),"class":a.oClasses.sRowEmpty}).html(c))[0];w(a,"aoHeaderCallback","header",[h(a.nTHead).children("tr")[0],La(a),g,o,i]);w(a,"aoFooterCallback","footer",[h(a.nTFoot).children("tr")[0],La(a),g,o,i]);e=h(a.nTBody);e.children().detach();e.append(h(b));w(a,"aoDrawCallback","draw",[a]);a.bSorted=!1;a.bFiltered=!1;a.bDrawing=
-!1}}function N(a,b){var c=a.oFeatures,e=c.bFilter;c.bSort&&lb(a);e?fa(a,a.oPreviousSearch):a.aiDisplay=a.aiDisplayMaster.slice();!0!==b&&(a._iDisplayStart=0);a._drawHold=b;M(a);a._drawHold=!1}function mb(a){var b=a.oClasses,c=h(a.nTable),c=h("<div/>").insertBefore(c),e=a.oFeatures,d=h("<div/>",{id:a.sTableId+"_wrapper","class":b.sWrapper+(a.nTFoot?"":" "+b.sNoFooter)});a.nHolding=c[0];a.nTableWrapper=d[0];a.nTableReinsertBefore=a.nTable.nextSibling;for(var f=a.sDom.split(""),g,j,i,o,l,q,n=0;n<f.length;n++){g=
-null;j=f[n];if("<"==j){i=h("<div/>")[0];o=f[n+1];if("'"==o||'"'==o){l="";for(q=2;f[n+q]!=o;)l+=f[n+q],q++;"H"==l?l=b.sJUIHeader:"F"==l&&(l=b.sJUIFooter);-1!=l.indexOf(".")?(o=l.split("."),i.id=o[0].substr(1,o[0].length-1),i.className=o[1]):"#"==l.charAt(0)?i.id=l.substr(1,l.length-1):i.className=l;n+=q}d.append(i);d=h(i)}else if(">"==j)d=d.parent();else if("l"==j&&e.bPaginate&&e.bLengthChange)g=nb(a);else if("f"==j&&e.bFilter)g=ob(a);else if("r"==j&&e.bProcessing)g=pb(a);else if("t"==j)g=qb(a);else if("i"==
-j&&e.bInfo)g=rb(a);else if("p"==j&&e.bPaginate)g=sb(a);else if(0!==m.ext.feature.length){i=m.ext.feature;q=0;for(o=i.length;q<o;q++)if(j==i[q].cFeature){g=i[q].fnInit(a);break}}g&&(i=a.aanFeatures,i[j]||(i[j]=[]),i[j].push(g),d.append(g))}c.replaceWith(d)}function da(a,b){var c=h(b).children("tr"),e,d,f,g,j,i,o,l,q,n;a.splice(0,a.length);f=0;for(i=c.length;f<i;f++)a.push([]);f=0;for(i=c.length;f<i;f++){e=c[f];for(d=e.firstChild;d;){if("TD"==d.nodeName.toUpperCase()||"TH"==d.nodeName.toUpperCase()){l=
-1*d.getAttribute("colspan");q=1*d.getAttribute("rowspan");l=!l||0===l||1===l?1:l;q=!q||0===q||1===q?1:q;g=0;for(j=a[f];j[g];)g++;o=g;n=1===l?!0:!1;for(j=0;j<l;j++)for(g=0;g<q;g++)a[f+g][o+j]={cell:d,unique:n},a[f+g].nTr=e}d=d.nextSibling}}}function qa(a,b,c){var e=[];c||(c=a.aoHeader,b&&(c=[],da(c,b)));for(var b=0,d=c.length;b<d;b++)for(var f=0,g=c[b].length;f<g;f++)if(c[b][f].unique&&(!e[f]||!a.bSortCellsTop))e[f]=c[b][f].cell;return e}function ra(a,b,c){w(a,"aoServerParams","serverParams",[b]);
-if(b&&h.isArray(b)){var e={},d=/(.*?)\[\]$/;h.each(b,function(a,b){var c=b.name.match(d);c?(c=c[0],e[c]||(e[c]=[]),e[c].push(b.value)):e[b.name]=b.value});b=e}var f,g=a.ajax,j=a.oInstance,i=function(b){w(a,null,"xhr",[a,b,a.jqXHR]);c(b)};if(h.isPlainObject(g)&&g.data){f=g.data;var o=h.isFunction(f)?f(b,a):f,b=h.isFunction(f)&&o?o:h.extend(!0,b,o);delete g.data}o={data:b,success:function(b){var c=b.error||b.sError;c&&I(a,0,c);a.json=b;i(b)},dataType:"json",cache:!1,type:a.sServerMethod,error:function(b,
-c){var f=w(a,null,"xhr",[a,null,a.jqXHR]);-1===h.inArray(!0,f)&&("parsererror"==c?I(a,0,"Invalid JSON response",1):4===b.readyState&&I(a,0,"Ajax error",7));C(a,!1)}};a.oAjaxData=b;w(a,null,"preXhr",[a,b]);a.fnServerData?a.fnServerData.call(j,a.sAjaxSource,h.map(b,function(a,b){return{name:b,value:a}}),i,a):a.sAjaxSource||"string"===typeof g?a.jqXHR=h.ajax(h.extend(o,{url:g||a.sAjaxSource})):h.isFunction(g)?a.jqXHR=g.call(j,b,i,a):(a.jqXHR=h.ajax(h.extend(o,g)),g.data=f)}function kb(a){return a.bAjaxDataGet?
-(a.iDraw++,C(a,!0),ra(a,tb(a),function(b){ub(a,b)}),!1):!0}function tb(a){var b=a.aoColumns,c=b.length,e=a.oFeatures,d=a.oPreviousSearch,f=a.aoPreSearchCols,g,j=[],i,o,l,q=U(a);g=a._iDisplayStart;i=!1!==e.bPaginate?a._iDisplayLength:-1;var n=function(a,b){j.push({name:a,value:b})};n("sEcho",a.iDraw);n("iColumns",c);n("sColumns",D(b,"sName").join(","));n("iDisplayStart",g);n("iDisplayLength",i);var k={draw:a.iDraw,columns:[],order:[],start:g,length:i,search:{value:d.sSearch,regex:d.bRegex}};for(g=
-0;g<c;g++)o=b[g],l=f[g],i="function"==typeof o.mData?"function":o.mData,k.columns.push({data:i,name:o.sName,searchable:o.bSearchable,orderable:o.bSortable,search:{value:l.sSearch,regex:l.bRegex}}),n("mDataProp_"+g,i),e.bFilter&&(n("sSearch_"+g,l.sSearch),n("bRegex_"+g,l.bRegex),n("bSearchable_"+g,o.bSearchable)),e.bSort&&n("bSortable_"+g,o.bSortable);e.bFilter&&(n("sSearch",d.sSearch),n("bRegex",d.bRegex));e.bSort&&(h.each(q,function(a,b){k.order.push({column:b.col,dir:b.dir});n("iSortCol_"+a,b.col);
-n("sSortDir_"+a,b.dir)}),n("iSortingCols",q.length));b=m.ext.legacy.ajax;return null===b?a.sAjaxSource?j:k:b?j:k}function ub(a,b){var c=sa(a,b),e=b.sEcho!==k?b.sEcho:b.draw,d=b.iTotalRecords!==k?b.iTotalRecords:b.recordsTotal,f=b.iTotalDisplayRecords!==k?b.iTotalDisplayRecords:b.recordsFiltered;if(e){if(1*e<a.iDraw)return;a.iDraw=1*e}oa(a);a._iRecordsTotal=parseInt(d,10);a._iRecordsDisplay=parseInt(f,10);e=0;for(d=c.length;e<d;e++)K(a,c[e]);a.aiDisplay=a.aiDisplayMaster.slice();a.bAjaxDataGet=!1;
-M(a);a._bInitComplete||ta(a,b);a.bAjaxDataGet=!0;C(a,!1)}function sa(a,b){var c=h.isPlainObject(a.ajax)&&a.ajax.dataSrc!==k?a.ajax.dataSrc:a.sAjaxDataProp;return"data"===c?b.aaData||b[c]:""!==c?R(c)(b):b}function ob(a){var b=a.oClasses,c=a.sTableId,e=a.oLanguage,d=a.oPreviousSearch,f=a.aanFeatures,g='<input type="search" class="'+b.sFilterInput+'"/>',j=e.sSearch,j=j.match(/_INPUT_/)?j.replace("_INPUT_",g):j+g,b=h("<div/>",{id:!f.f?c+"_filter":null,"class":b.sFilter}).append(h("<label/>").append(j)),
-f=function(){var b=!this.value?"":this.value;b!=d.sSearch&&(fa(a,{sSearch:b,bRegex:d.bRegex,bSmart:d.bSmart,bCaseInsensitive:d.bCaseInsensitive}),a._iDisplayStart=0,M(a))},g=null!==a.searchDelay?a.searchDelay:"ssp"===B(a)?400:0,i=h("input",b).val(d.sSearch).attr("placeholder",e.sSearchPlaceholder).bind("keyup.DT search.DT input.DT paste.DT cut.DT",g?ua(f,g):f).bind("keypress.DT",function(a){if(13==a.keyCode)return!1}).attr("aria-controls",c);h(a.nTable).on("search.dt.DT",function(b,c){if(a===c)try{i[0]!==
-Q.activeElement&&i.val(d.sSearch)}catch(f){}});return b[0]}function fa(a,b,c){var e=a.oPreviousSearch,d=a.aoPreSearchCols,f=function(a){e.sSearch=a.sSearch;e.bRegex=a.bRegex;e.bSmart=a.bSmart;e.bCaseInsensitive=a.bCaseInsensitive};Ha(a);if("ssp"!=B(a)){vb(a,b.sSearch,c,b.bEscapeRegex!==k?!b.bEscapeRegex:b.bRegex,b.bSmart,b.bCaseInsensitive);f(b);for(b=0;b<d.length;b++)wb(a,d[b].sSearch,b,d[b].bEscapeRegex!==k?!d[b].bEscapeRegex:d[b].bRegex,d[b].bSmart,d[b].bCaseInsensitive);xb(a)}else f(b);a.bFiltered=
-!0;w(a,null,"search",[a])}function xb(a){for(var b=m.ext.search,c=a.aiDisplay,e,d,f=0,g=b.length;f<g;f++){for(var j=[],i=0,h=c.length;i<h;i++)d=c[i],e=a.aoData[d],b[f](a,e._aFilterData,d,e._aData,i)&&j.push(d);c.length=0;c.push.apply(c,j)}}function wb(a,b,c,e,d,f){if(""!==b)for(var g=a.aiDisplay,e=Qa(b,e,d,f),d=g.length-1;0<=d;d--)b=a.aoData[g[d]]._aFilterData[c],e.test(b)||g.splice(d,1)}function vb(a,b,c,e,d,f){var e=Qa(b,e,d,f),d=a.oPreviousSearch.sSearch,f=a.aiDisplayMaster,g;0!==m.ext.search.length&&
-(c=!0);g=yb(a);if(0>=b.length)a.aiDisplay=f.slice();else{if(g||c||d.length>b.length||0!==b.indexOf(d)||a.bSorted)a.aiDisplay=f.slice();b=a.aiDisplay;for(c=b.length-1;0<=c;c--)e.test(a.aoData[b[c]]._sFilterRow)||b.splice(c,1)}}function Qa(a,b,c,e){a=b?a:va(a);c&&(a="^(?=.*?"+h.map(a.match(/"[^"]+"|[^ ]+/g)||[""],function(a){if('"'===a.charAt(0))var b=a.match(/^"(.*)"$/),a=b?b[1]:a;return a.replace('"',"")}).join(")(?=.*?")+").*$");return RegExp(a,e?"i":"")}function va(a){return a.replace(Yb,"\\$1")}
-function yb(a){var b=a.aoColumns,c,e,d,f,g,j,i,h,l=m.ext.type.search;c=!1;e=0;for(f=a.aoData.length;e<f;e++)if(h=a.aoData[e],!h._aFilterData){j=[];d=0;for(g=b.length;d<g;d++)c=b[d],c.bSearchable?(i=x(a,e,d,"filter"),l[c.sType]&&(i=l[c.sType](i)),null===i&&(i=""),"string"!==typeof i&&i.toString&&(i=i.toString())):i="",i.indexOf&&-1!==i.indexOf("&")&&(wa.innerHTML=i,i=Zb?wa.textContent:wa.innerText),i.replace&&(i=i.replace(/[\r\n]/g,"")),j.push(i);h._aFilterData=j;h._sFilterRow=j.join("  ");c=!0}return c}
-function zb(a){return{search:a.sSearch,smart:a.bSmart,regex:a.bRegex,caseInsensitive:a.bCaseInsensitive}}function Ab(a){return{sSearch:a.search,bSmart:a.smart,bRegex:a.regex,bCaseInsensitive:a.caseInsensitive}}function rb(a){var b=a.sTableId,c=a.aanFeatures.i,e=h("<div/>",{"class":a.oClasses.sInfo,id:!c?b+"_info":null});c||(a.aoDrawCallback.push({fn:Bb,sName:"information"}),e.attr("role","status").attr("aria-live","polite"),h(a.nTable).attr("aria-describedby",b+"_info"));return e[0]}function Bb(a){var b=
-a.aanFeatures.i;if(0!==b.length){var c=a.oLanguage,e=a._iDisplayStart+1,d=a.fnDisplayEnd(),f=a.fnRecordsTotal(),g=a.fnRecordsDisplay(),j=g?c.sInfo:c.sInfoEmpty;g!==f&&(j+=" "+c.sInfoFiltered);j+=c.sInfoPostFix;j=Cb(a,j);c=c.fnInfoCallback;null!==c&&(j=c.call(a.oInstance,a,e,d,f,g,j));h(b).html(j)}}function Cb(a,b){var c=a.fnFormatNumber,e=a._iDisplayStart+1,d=a._iDisplayLength,f=a.fnRecordsDisplay(),g=-1===d;return b.replace(/_START_/g,c.call(a,e)).replace(/_END_/g,c.call(a,a.fnDisplayEnd())).replace(/_MAX_/g,
-c.call(a,a.fnRecordsTotal())).replace(/_TOTAL_/g,c.call(a,f)).replace(/_PAGE_/g,c.call(a,g?1:Math.ceil(e/d))).replace(/_PAGES_/g,c.call(a,g?1:Math.ceil(f/d)))}function ga(a){var b,c,e=a.iInitDisplayStart,d=a.aoColumns,f;c=a.oFeatures;if(a.bInitialised){mb(a);jb(a);ea(a,a.aoHeader);ea(a,a.aoFooter);C(a,!0);c.bAutoWidth&&Ga(a);b=0;for(c=d.length;b<c;b++)f=d[b],f.sWidth&&(f.nTh.style.width=s(f.sWidth));N(a);d=B(a);"ssp"!=d&&("ajax"==d?ra(a,[],function(c){var f=sa(a,c);for(b=0;b<f.length;b++)K(a,f[b]);
-a.iInitDisplayStart=e;N(a);C(a,!1);ta(a,c)},a):(C(a,!1),ta(a)))}else setTimeout(function(){ga(a)},200)}function ta(a,b){a._bInitComplete=!0;b&&X(a);w(a,"aoInitComplete","init",[a,b])}function Ra(a,b){var c=parseInt(b,10);a._iDisplayLength=c;Sa(a);w(a,null,"length",[a,c])}function nb(a){for(var b=a.oClasses,c=a.sTableId,e=a.aLengthMenu,d=h.isArray(e[0]),f=d?e[0]:e,e=d?e[1]:e,d=h("<select/>",{name:c+"_length","aria-controls":c,"class":b.sLengthSelect}),g=0,j=f.length;g<j;g++)d[0][g]=new Option(e[g],
-f[g]);var i=h("<div><label/></div>").addClass(b.sLength);a.aanFeatures.l||(i[0].id=c+"_length");i.children().append(a.oLanguage.sLengthMenu.replace("_MENU_",d[0].outerHTML));h("select",i).val(a._iDisplayLength).bind("change.DT",function(){Ra(a,h(this).val());M(a)});h(a.nTable).bind("length.dt.DT",function(b,c,f){a===c&&h("select",i).val(f)});return i[0]}function sb(a){var b=a.sPaginationType,c=m.ext.pager[b],e="function"===typeof c,d=function(a){M(a)},b=h("<div/>").addClass(a.oClasses.sPaging+b)[0],
-f=a.aanFeatures;e||c.fnInit(a,b,d);f.p||(b.id=a.sTableId+"_paginate",a.aoDrawCallback.push({fn:function(a){if(e){var b=a._iDisplayStart,i=a._iDisplayLength,h=a.fnRecordsDisplay(),l=-1===i,b=l?0:Math.ceil(b/i),i=l?1:Math.ceil(h/i),h=c(b,i),q,l=0;for(q=f.p.length;l<q;l++)Pa(a,"pageButton")(a,f.p[l],l,h,b,i)}else c.fnUpdate(a,d)},sName:"pagination"}));return b}function Ta(a,b,c){var e=a._iDisplayStart,d=a._iDisplayLength,f=a.fnRecordsDisplay();0===f||-1===d?e=0:"number"===typeof b?(e=b*d,e>f&&(e=0)):
-"first"==b?e=0:"previous"==b?(e=0<=d?e-d:0,0>e&&(e=0)):"next"==b?e+d<f&&(e+=d):"last"==b?e=Math.floor((f-1)/d)*d:I(a,0,"Unknown paging action: "+b,5);b=a._iDisplayStart!==e;a._iDisplayStart=e;b&&(w(a,null,"page",[a]),c&&M(a));return b}function pb(a){return h("<div/>",{id:!a.aanFeatures.r?a.sTableId+"_processing":null,"class":a.oClasses.sProcessing}).html(a.oLanguage.sProcessing).insertBefore(a.nTable)[0]}function C(a,b){a.oFeatures.bProcessing&&h(a.aanFeatures.r).css("display",b?"block":"none");w(a,
-null,"processing",[a,b])}function qb(a){var b=h(a.nTable);b.attr("role","grid");var c=a.oScroll;if(""===c.sX&&""===c.sY)return a.nTable;var e=c.sX,d=c.sY,f=a.oClasses,g=b.children("caption"),j=g.length?g[0]._captionSide:null,i=h(b[0].cloneNode(!1)),o=h(b[0].cloneNode(!1)),l=b.children("tfoot");c.sX&&"100%"===b.attr("width")&&b.removeAttr("width");l.length||(l=null);c=h("<div/>",{"class":f.sScrollWrapper}).append(h("<div/>",{"class":f.sScrollHead}).css({overflow:"hidden",position:"relative",border:0,
-width:e?!e?null:s(e):"100%"}).append(h("<div/>",{"class":f.sScrollHeadInner}).css({"box-sizing":"content-box",width:c.sXInner||"100%"}).append(i.removeAttr("id").css("margin-left",0).append("top"===j?g:null).append(b.children("thead"))))).append(h("<div/>",{"class":f.sScrollBody}).css({overflow:"auto",height:!d?null:s(d),width:!e?null:s(e)}).append(b));l&&c.append(h("<div/>",{"class":f.sScrollFoot}).css({overflow:"hidden",border:0,width:e?!e?null:s(e):"100%"}).append(h("<div/>",{"class":f.sScrollFootInner}).append(o.removeAttr("id").css("margin-left",
-0).append("bottom"===j?g:null).append(b.children("tfoot")))));var b=c.children(),q=b[0],f=b[1],n=l?b[2]:null;if(e)h(f).on("scroll.DT",function(){var a=this.scrollLeft;q.scrollLeft=a;l&&(n.scrollLeft=a)});a.nScrollHead=q;a.nScrollBody=f;a.nScrollFoot=n;a.aoDrawCallback.push({fn:Y,sName:"scrolling"});return c[0]}function Y(a){var b=a.oScroll,c=b.sX,e=b.sXInner,d=b.sY,f=b.iBarWidth,g=h(a.nScrollHead),j=g[0].style,i=g.children("div"),o=i[0].style,l=i.children("table"),i=a.nScrollBody,q=h(i),n=i.style,
-k=h(a.nScrollFoot).children("div"),p=k.children("table"),m=h(a.nTHead),r=h(a.nTable),t=r[0],O=t.style,L=a.nTFoot?h(a.nTFoot):null,ha=a.oBrowser,w=ha.bScrollOversize,v,u,y,x,z,A=[],B=[],C=[],D,E=function(a){a=a.style;a.paddingTop="0";a.paddingBottom="0";a.borderTopWidth="0";a.borderBottomWidth="0";a.height=0};r.children("thead, tfoot").remove();z=m.clone().prependTo(r);v=m.find("tr");y=z.find("tr");z.find("th, td").removeAttr("tabindex");L&&(x=L.clone().prependTo(r),u=L.find("tr"),x=x.find("tr"));
-c||(n.width="100%",g[0].style.width="100%");h.each(qa(a,z),function(b,c){D=la(a,b);c.style.width=a.aoColumns[D].sWidth});L&&G(function(a){a.style.width=""},x);b.bCollapse&&""!==d&&(n.height=q[0].offsetHeight+m[0].offsetHeight+"px");g=r.outerWidth();if(""===c){if(O.width="100%",w&&(r.find("tbody").height()>i.offsetHeight||"scroll"==q.css("overflow-y")))O.width=s(r.outerWidth()-f)}else""!==e?O.width=s(e):g==q.width()&&q.height()<r.height()?(O.width=s(g-f),r.outerWidth()>g-f&&(O.width=s(g))):O.width=
-s(g);g=r.outerWidth();G(E,y);G(function(a){C.push(a.innerHTML);A.push(s(h(a).css("width")))},y);G(function(a,b){a.style.width=A[b]},v);h(y).height(0);L&&(G(E,x),G(function(a){B.push(s(h(a).css("width")))},x),G(function(a,b){a.style.width=B[b]},u),h(x).height(0));G(function(a,b){a.innerHTML='<div class="dataTables_sizing" style="height:0;overflow:hidden;">'+C[b]+"</div>";a.style.width=A[b]},y);L&&G(function(a,b){a.innerHTML="";a.style.width=B[b]},x);if(r.outerWidth()<g){u=i.scrollHeight>i.offsetHeight||
-"scroll"==q.css("overflow-y")?g+f:g;if(w&&(i.scrollHeight>i.offsetHeight||"scroll"==q.css("overflow-y")))O.width=s(u-f);(""===c||""!==e)&&I(a,1,"Possible column misalignment",6)}else u="100%";n.width=s(u);j.width=s(u);L&&(a.nScrollFoot.style.width=s(u));!d&&w&&(n.height=s(t.offsetHeight+f));d&&b.bCollapse&&(n.height=s(d),b=c&&t.offsetWidth>i.offsetWidth?f:0,t.offsetHeight<i.offsetHeight&&(n.height=s(t.offsetHeight+b)));b=r.outerWidth();l[0].style.width=s(b);o.width=s(b);l=r.height()>i.clientHeight||
-"scroll"==q.css("overflow-y");ha="padding"+(ha.bScrollbarLeft?"Left":"Right");o[ha]=l?f+"px":"0px";L&&(p[0].style.width=s(b),k[0].style.width=s(b),k[0].style[ha]=l?f+"px":"0px");q.scroll();if((a.bSorted||a.bFiltered)&&!a._drawHold)i.scrollTop=0}function G(a,b,c){for(var e=0,d=0,f=b.length,g,j;d<f;){g=b[d].firstChild;for(j=c?c[d].firstChild:null;g;)1===g.nodeType&&(c?a(g,j,e):a(g,e),e++),g=g.nextSibling,j=c?j.nextSibling:null;d++}}function Ga(a){var b=a.nTable,c=a.aoColumns,e=a.oScroll,d=e.sY,f=e.sX,
-g=e.sXInner,j=c.length,e=Z(a,"bVisible"),i=h("th",a.nTHead),o=b.getAttribute("width"),l=b.parentNode,k=!1,n,m;(n=b.style.width)&&-1!==n.indexOf("%")&&(o=n);for(n=0;n<e.length;n++)m=c[e[n]],null!==m.sWidth&&(m.sWidth=Db(m.sWidthOrig,l),k=!0);if(!k&&!f&&!d&&j==aa(a)&&j==i.length)for(n=0;n<j;n++)c[n].sWidth=s(i.eq(n).width());else{j=h(b).clone().css("visibility","hidden").removeAttr("id");j.find("tbody tr").remove();var p=h("<tr/>").appendTo(j.find("tbody"));j.find("tfoot th, tfoot td").css("width",
-"");i=qa(a,j.find("thead")[0]);for(n=0;n<e.length;n++)m=c[e[n]],i[n].style.width=null!==m.sWidthOrig&&""!==m.sWidthOrig?s(m.sWidthOrig):"";if(a.aoData.length)for(n=0;n<e.length;n++)k=e[n],m=c[k],h(Eb(a,k)).clone(!1).append(m.sContentPadding).appendTo(p);j.appendTo(l);f&&g?j.width(g):f?(j.css("width","auto"),j.width()<l.offsetWidth&&j.width(l.offsetWidth)):d?j.width(l.offsetWidth):o&&j.width(o);Fb(a,j[0]);if(f){for(n=g=0;n<e.length;n++)m=c[e[n]],d=h(i[n]).outerWidth(),g+=null===m.sWidthOrig?d:parseInt(m.sWidth,
-10)+d-h(i[n]).width();j.width(s(g));b.style.width=s(g)}for(n=0;n<e.length;n++)if(m=c[e[n]],d=h(i[n]).width())m.sWidth=s(d);b.style.width=s(j.css("width"));j.remove()}o&&(b.style.width=s(o));if((o||f)&&!a._reszEvt)b=function(){h(Ea).bind("resize.DT-"+a.sInstance,ua(function(){X(a)}))},a.oBrowser.bScrollOversize?setTimeout(b,1E3):b(),a._reszEvt=!0}function ua(a,b){var c=b!==k?b:200,e,d;return function(){var b=this,g=+new Date,j=arguments;e&&g<e+c?(clearTimeout(d),d=setTimeout(function(){e=k;a.apply(b,
-j)},c)):(e=g,a.apply(b,j))}}function Db(a,b){if(!a)return 0;var c=h("<div/>").css("width",s(a)).appendTo(b||Q.body),e=c[0].offsetWidth;c.remove();return e}function Fb(a,b){var c=a.oScroll;if(c.sX||c.sY)c=!c.sX?c.iBarWidth:0,b.style.width=s(h(b).outerWidth()-c)}function Eb(a,b){var c=Gb(a,b);if(0>c)return null;var e=a.aoData[c];return!e.nTr?h("<td/>").html(x(a,c,b,"display"))[0]:e.anCells[b]}function Gb(a,b){for(var c,e=-1,d=-1,f=0,g=a.aoData.length;f<g;f++)c=x(a,f,b,"display")+"",c=c.replace($b,""),
-c.length>e&&(e=c.length,d=f);return d}function s(a){return null===a?"0px":"number"==typeof a?0>a?"0px":a+"px":a.match(/\d$/)?a+"px":a}function Hb(){var a=m.__scrollbarWidth;if(a===k){var b=h("<p/>").css({position:"absolute",top:0,left:0,width:"100%",height:150,padding:0,overflow:"scroll",visibility:"hidden"}).appendTo("body"),a=b[0].offsetWidth-b[0].clientWidth;m.__scrollbarWidth=a;b.remove()}return a}function U(a){var b,c,e=[],d=a.aoColumns,f,g,j,i;b=a.aaSortingFixed;c=h.isPlainObject(b);var o=[];
-f=function(a){a.length&&!h.isArray(a[0])?o.push(a):o.push.apply(o,a)};h.isArray(b)&&f(b);c&&b.pre&&f(b.pre);f(a.aaSorting);c&&b.post&&f(b.post);for(a=0;a<o.length;a++){i=o[a][0];f=d[i].aDataSort;b=0;for(c=f.length;b<c;b++)g=f[b],j=d[g].sType||"string",o[a]._idx===k&&(o[a]._idx=h.inArray(o[a][1],d[g].asSorting)),e.push({src:i,col:g,dir:o[a][1],index:o[a]._idx,type:j,formatter:m.ext.type.order[j+"-pre"]})}return e}function lb(a){var b,c,e=[],d=m.ext.type.order,f=a.aoData,g=0,j,i=a.aiDisplayMaster,h;
-Ha(a);h=U(a);b=0;for(c=h.length;b<c;b++)j=h[b],j.formatter&&g++,Ib(a,j.col);if("ssp"!=B(a)&&0!==h.length){b=0;for(c=i.length;b<c;b++)e[i[b]]=b;g===h.length?i.sort(function(a,b){var c,d,g,j,i=h.length,k=f[a]._aSortData,m=f[b]._aSortData;for(g=0;g<i;g++)if(j=h[g],c=k[j.col],d=m[j.col],c=c<d?-1:c>d?1:0,0!==c)return"asc"===j.dir?c:-c;c=e[a];d=e[b];return c<d?-1:c>d?1:0}):i.sort(function(a,b){var c,g,j,i,k=h.length,m=f[a]._aSortData,r=f[b]._aSortData;for(j=0;j<k;j++)if(i=h[j],c=m[i.col],g=r[i.col],i=d[i.type+
-"-"+i.dir]||d["string-"+i.dir],c=i(c,g),0!==c)return c;c=e[a];g=e[b];return c<g?-1:c>g?1:0})}a.bSorted=!0}function Jb(a){for(var b,c,e=a.aoColumns,d=U(a),a=a.oLanguage.oAria,f=0,g=e.length;f<g;f++){c=e[f];var j=c.asSorting;b=c.sTitle.replace(/<.*?>/g,"");var i=c.nTh;i.removeAttribute("aria-sort");c.bSortable&&(0<d.length&&d[0].col==f?(i.setAttribute("aria-sort","asc"==d[0].dir?"ascending":"descending"),c=j[d[0].index+1]||j[0]):c=j[0],b+="asc"===c?a.sSortAscending:a.sSortDescending);i.setAttribute("aria-label",
-b)}}function Ua(a,b,c,e){var d=a.aaSorting,f=a.aoColumns[b].asSorting,g=function(a,b){var c=a._idx;c===k&&(c=h.inArray(a[1],f));return c+1<f.length?c+1:b?null:0};"number"===typeof d[0]&&(d=a.aaSorting=[d]);c&&a.oFeatures.bSortMulti?(c=h.inArray(b,D(d,"0")),-1!==c?(b=g(d[c],!0),null===b&&1===d.length&&(b=0),null===b?d.splice(c,1):(d[c][1]=f[b],d[c]._idx=b)):(d.push([b,f[0],0]),d[d.length-1]._idx=0)):d.length&&d[0][0]==b?(b=g(d[0]),d.length=1,d[0][1]=f[b],d[0]._idx=b):(d.length=0,d.push([b,f[0]]),d[0]._idx=
-0);N(a);"function"==typeof e&&e(a)}function Oa(a,b,c,e){var d=a.aoColumns[c];Va(b,{},function(b){!1!==d.bSortable&&(a.oFeatures.bProcessing?(C(a,!0),setTimeout(function(){Ua(a,c,b.shiftKey,e);"ssp"!==B(a)&&C(a,!1)},0)):Ua(a,c,b.shiftKey,e))})}function xa(a){var b=a.aLastSort,c=a.oClasses.sSortColumn,e=U(a),d=a.oFeatures,f,g;if(d.bSort&&d.bSortClasses){d=0;for(f=b.length;d<f;d++)g=b[d].src,h(D(a.aoData,"anCells",g)).removeClass(c+(2>d?d+1:3));d=0;for(f=e.length;d<f;d++)g=e[d].src,h(D(a.aoData,"anCells",
-g)).addClass(c+(2>d?d+1:3))}a.aLastSort=e}function Ib(a,b){var c=a.aoColumns[b],e=m.ext.order[c.sSortDataType],d;e&&(d=e.call(a.oInstance,a,b,$(a,b)));for(var f,g=m.ext.type.order[c.sType+"-pre"],j=0,i=a.aoData.length;j<i;j++)if(c=a.aoData[j],c._aSortData||(c._aSortData=[]),!c._aSortData[b]||e)f=e?d[j]:x(a,j,b,"sort"),c._aSortData[b]=g?g(f):f}function ya(a){if(a.oFeatures.bStateSave&&!a.bDestroying){var b={time:+new Date,start:a._iDisplayStart,length:a._iDisplayLength,order:h.extend(!0,[],a.aaSorting),
-search:zb(a.oPreviousSearch),columns:h.map(a.aoColumns,function(b,e){return{visible:b.bVisible,search:zb(a.aoPreSearchCols[e])}})};w(a,"aoStateSaveParams","stateSaveParams",[a,b]);a.oSavedState=b;a.fnStateSaveCallback.call(a.oInstance,a,b)}}function Kb(a){var b,c,e=a.aoColumns;if(a.oFeatures.bStateSave){var d=a.fnStateLoadCallback.call(a.oInstance,a);if(d&&d.time&&(b=w(a,"aoStateLoadParams","stateLoadParams",[a,d]),-1===h.inArray(!1,b)&&(b=a.iStateDuration,!(0<b&&d.time<+new Date-1E3*b)&&e.length===
-d.columns.length))){a.oLoadedState=h.extend(!0,{},d);d.start!==k&&(a._iDisplayStart=d.start,a.iInitDisplayStart=d.start);d.length!==k&&(a._iDisplayLength=d.length);d.order!==k&&(a.aaSorting=[],h.each(d.order,function(b,c){a.aaSorting.push(c[0]>=e.length?[0,c[1]]:c)}));d.search!==k&&h.extend(a.oPreviousSearch,Ab(d.search));b=0;for(c=d.columns.length;b<c;b++){var f=d.columns[b];f.visible!==k&&(e[b].bVisible=f.visible);f.search!==k&&h.extend(a.aoPreSearchCols[b],Ab(f.search))}w(a,"aoStateLoaded","stateLoaded",
-[a,d])}}}function za(a){var b=m.settings,a=h.inArray(a,D(b,"nTable"));return-1!==a?b[a]:null}function I(a,b,c,e){c="DataTables warning: "+(null!==a?"table id="+a.sTableId+" - ":"")+c;e&&(c+=". For more information about this error, please see http://datatables.net/tn/"+e);if(b)Ea.console&&console.log&&console.log(c);else if(b=m.ext,b=b.sErrMode||b.errMode,w(a,null,"error",[a,e,c]),"alert"==b)alert(c);else{if("throw"==b)throw Error(c);"function"==typeof b&&b(a,e,c)}}function E(a,b,c,e){h.isArray(c)?
-h.each(c,function(c,f){h.isArray(f)?E(a,b,f[0],f[1]):E(a,b,f)}):(e===k&&(e=c),b[c]!==k&&(a[e]=b[c]))}function Lb(a,b,c){var e,d;for(d in b)b.hasOwnProperty(d)&&(e=b[d],h.isPlainObject(e)?(h.isPlainObject(a[d])||(a[d]={}),h.extend(!0,a[d],e)):a[d]=c&&"data"!==d&&"aaData"!==d&&h.isArray(e)?e.slice():e);return a}function Va(a,b,c){h(a).bind("click.DT",b,function(b){a.blur();c(b)}).bind("keypress.DT",b,function(a){13===a.which&&(a.preventDefault(),c(a))}).bind("selectstart.DT",function(){return!1})}function z(a,
-b,c,e){c&&a[b].push({fn:c,sName:e})}function w(a,b,c,e){var d=[];b&&(d=h.map(a[b].slice().reverse(),function(b){return b.fn.apply(a.oInstance,e)}));null!==c&&(b=h.Event(c+".dt"),h(a.nTable).trigger(b,e),d.push(b.result));return d}function Sa(a){var b=a._iDisplayStart,c=a.fnDisplayEnd(),e=a._iDisplayLength;b>=c&&(b=c-e);b-=b%e;if(-1===e||0>b)b=0;a._iDisplayStart=b}function Pa(a,b){var c=a.renderer,e=m.ext.renderer[b];return h.isPlainObject(c)&&c[b]?e[c[b]]||e._:"string"===typeof c?e[c]||e._:e._}function B(a){return a.oFeatures.bServerSide?
-"ssp":a.ajax||a.sAjaxSource?"ajax":"dom"}function Wa(a,b){var c=[],c=Mb.numbers_length,e=Math.floor(c/2);b<=c?c=V(0,b):a<=e?(c=V(0,c-2),c.push("ellipsis"),c.push(b-1)):(a>=b-1-e?c=V(b-(c-2),b):(c=V(a-e+2,a+e-1),c.push("ellipsis"),c.push(b-1)),c.splice(0,0,"ellipsis"),c.splice(0,0,0));c.DT_el="span";return c}function db(a){h.each({num:function(b){return Aa(b,a)},"num-fmt":function(b){return Aa(b,a,Xa)},"html-num":function(b){return Aa(b,a,Ba)},"html-num-fmt":function(b){return Aa(b,a,Ba,Xa)}},function(b,
-c){u.type.order[b+a+"-pre"]=c;b.match(/^html\-/)&&(u.type.search[b+a]=u.type.search.html)})}function Nb(a){return function(){var b=[za(this[m.ext.iApiIndex])].concat(Array.prototype.slice.call(arguments));return m.ext.internal[a].apply(this,b)}}var m,u,t,r,v,Ya={},Ob=/[\r\n]/g,Ba=/<.*?>/g,ac=/^[\w\+\-]/,bc=/[\w\+\-]$/,Yb=RegExp("(\\/|\\.|\\*|\\+|\\?|\\||\\(|\\)|\\[|\\]|\\{|\\}|\\\\|\\$|\\^|\\-)","g"),Xa=/[',$\u00a3\u20ac\u00a5%\u2009\u202F\u20BD\u20a9\u20BArfk]/gi,J=function(a){return!a||!0===a||
-"-"===a?!0:!1},Pb=function(a){var b=parseInt(a,10);return!isNaN(b)&&isFinite(a)?b:null},Qb=function(a,b){Ya[b]||(Ya[b]=RegExp(va(b),"g"));return"string"===typeof a&&"."!==b?a.replace(/\./g,"").replace(Ya[b],"."):a},Za=function(a,b,c){var e="string"===typeof a;if(J(a))return!0;b&&e&&(a=Qb(a,b));c&&e&&(a=a.replace(Xa,""));return!isNaN(parseFloat(a))&&isFinite(a)},Rb=function(a,b,c){return J(a)?!0:!(J(a)||"string"===typeof a)?null:Za(a.replace(Ba,""),b,c)?!0:null},D=function(a,b,c){var e=[],d=0,f=a.length;
-if(c!==k)for(;d<f;d++)a[d]&&a[d][b]&&e.push(a[d][b][c]);else for(;d<f;d++)a[d]&&e.push(a[d][b]);return e},ia=function(a,b,c,e){var d=[],f=0,g=b.length;if(e!==k)for(;f<g;f++)a[b[f]][c]&&d.push(a[b[f]][c][e]);else for(;f<g;f++)d.push(a[b[f]][c]);return d},V=function(a,b){var c=[],e;b===k?(b=0,e=a):(e=b,b=a);for(var d=b;d<e;d++)c.push(d);return c},Sb=function(a){for(var b=[],c=0,e=a.length;c<e;c++)a[c]&&b.push(a[c]);return b},Na=function(a){var b=[],c,e,d=a.length,f,g=0;e=0;a:for(;e<d;e++){c=a[e];for(f=
-0;f<g;f++)if(b[f]===c)continue a;b.push(c);g++}return b},A=function(a,b,c){a[b]!==k&&(a[c]=a[b])},ba=/\[.*?\]$/,T=/\(\)$/,wa=h("<div>")[0],Zb=wa.textContent!==k,$b=/<.*?>/g;m=function(a){this.$=function(a,b){return this.api(!0).$(a,b)};this._=function(a,b){return this.api(!0).rows(a,b).data()};this.api=function(a){return a?new t(za(this[u.iApiIndex])):new t(this)};this.fnAddData=function(a,b){var c=this.api(!0),e=h.isArray(a)&&(h.isArray(a[0])||h.isPlainObject(a[0]))?c.rows.add(a):c.row.add(a);(b===
-k||b)&&c.draw();return e.flatten().toArray()};this.fnAdjustColumnSizing=function(a){var b=this.api(!0).columns.adjust(),c=b.settings()[0],e=c.oScroll;a===k||a?b.draw(!1):(""!==e.sX||""!==e.sY)&&Y(c)};this.fnClearTable=function(a){var b=this.api(!0).clear();(a===k||a)&&b.draw()};this.fnClose=function(a){this.api(!0).row(a).child.hide()};this.fnDeleteRow=function(a,b,c){var e=this.api(!0),a=e.rows(a),d=a.settings()[0],h=d.aoData[a[0][0]];a.remove();b&&b.call(this,d,h);(c===k||c)&&e.draw();return h};
-this.fnDestroy=function(a){this.api(!0).destroy(a)};this.fnDraw=function(a){this.api(!0).draw(a)};this.fnFilter=function(a,b,c,e,d,h){d=this.api(!0);null===b||b===k?d.search(a,c,e,h):d.column(b).search(a,c,e,h);d.draw()};this.fnGetData=function(a,b){var c=this.api(!0);if(a!==k){var e=a.nodeName?a.nodeName.toLowerCase():"";return b!==k||"td"==e||"th"==e?c.cell(a,b).data():c.row(a).data()||null}return c.data().toArray()};this.fnGetNodes=function(a){var b=this.api(!0);return a!==k?b.row(a).node():b.rows().nodes().flatten().toArray()};
-this.fnGetPosition=function(a){var b=this.api(!0),c=a.nodeName.toUpperCase();return"TR"==c?b.row(a).index():"TD"==c||"TH"==c?(a=b.cell(a).index(),[a.row,a.columnVisible,a.column]):null};this.fnIsOpen=function(a){return this.api(!0).row(a).child.isShown()};this.fnOpen=function(a,b,c){return this.api(!0).row(a).child(b,c).show().child()[0]};this.fnPageChange=function(a,b){var c=this.api(!0).page(a);(b===k||b)&&c.draw(!1)};this.fnSetColumnVis=function(a,b,c){a=this.api(!0).column(a).visible(b);(c===
-k||c)&&a.columns.adjust().draw()};this.fnSettings=function(){return za(this[u.iApiIndex])};this.fnSort=function(a){this.api(!0).order(a).draw()};this.fnSortListener=function(a,b,c){this.api(!0).order.listener(a,b,c)};this.fnUpdate=function(a,b,c,e,d){var h=this.api(!0);c===k||null===c?h.row(b).data(a):h.cell(b,c).data(a);(d===k||d)&&h.columns.adjust();(e===k||e)&&h.draw();return 0};this.fnVersionCheck=u.fnVersionCheck;var b=this,c=a===k,e=this.length;c&&(a={});this.oApi=this.internal=u.internal;for(var d in m.ext.internal)d&&
-(this[d]=Nb(d));this.each(function(){var d={},d=1<e?Lb(d,a,!0):a,g=0,j,i=this.getAttribute("id"),o=!1,l=m.defaults,q=h(this);if("table"!=this.nodeName.toLowerCase())I(null,0,"Non-table node initialisation ("+this.nodeName+")",2);else{eb(l);fb(l.column);H(l,l,!0);H(l.column,l.column,!0);H(l,h.extend(d,q.data()));var n=m.settings,g=0;for(j=n.length;g<j;g++){var r=n[g];if(r.nTable==this||r.nTHead.parentNode==this||r.nTFoot&&r.nTFoot.parentNode==this){g=d.bRetrieve!==k?d.bRetrieve:l.bRetrieve;if(c||g)return r.oInstance;
-if(d.bDestroy!==k?d.bDestroy:l.bDestroy){r.oInstance.fnDestroy();break}else{I(r,0,"Cannot reinitialise DataTable",3);return}}if(r.sTableId==this.id){n.splice(g,1);break}}if(null===i||""===i)this.id=i="DataTables_Table_"+m.ext._unique++;var p=h.extend(!0,{},m.models.oSettings,{sDestroyWidth:q[0].style.width,sInstance:i,sTableId:i});p.nTable=this;p.oApi=b.internal;p.oInit=d;n.push(p);p.oInstance=1===b.length?b:q.dataTable();eb(d);d.oLanguage&&P(d.oLanguage);d.aLengthMenu&&!d.iDisplayLength&&(d.iDisplayLength=
-h.isArray(d.aLengthMenu[0])?d.aLengthMenu[0][0]:d.aLengthMenu[0]);d=Lb(h.extend(!0,{},l),d);E(p.oFeatures,d,"bPaginate bLengthChange bFilter bSort bSortMulti bInfo bProcessing bAutoWidth bSortClasses bServerSide bDeferRender".split(" "));E(p,d,["asStripeClasses","ajax","fnServerData","fnFormatNumber","sServerMethod","aaSorting","aaSortingFixed","aLengthMenu","sPaginationType","sAjaxSource","sAjaxDataProp","iStateDuration","sDom","bSortCellsTop","iTabIndex","fnStateLoadCallback","fnStateSaveCallback",
-"renderer","searchDelay",["iCookieDuration","iStateDuration"],["oSearch","oPreviousSearch"],["aoSearchCols","aoPreSearchCols"],["iDisplayLength","_iDisplayLength"],["bJQueryUI","bJUI"]]);E(p.oScroll,d,[["sScrollX","sX"],["sScrollXInner","sXInner"],["sScrollY","sY"],["bScrollCollapse","bCollapse"]]);E(p.oLanguage,d,"fnInfoCallback");z(p,"aoDrawCallback",d.fnDrawCallback,"user");z(p,"aoServerParams",d.fnServerParams,"user");z(p,"aoStateSaveParams",d.fnStateSaveParams,"user");z(p,"aoStateLoadParams",
-d.fnStateLoadParams,"user");z(p,"aoStateLoaded",d.fnStateLoaded,"user");z(p,"aoRowCallback",d.fnRowCallback,"user");z(p,"aoRowCreatedCallback",d.fnCreatedRow,"user");z(p,"aoHeaderCallback",d.fnHeaderCallback,"user");z(p,"aoFooterCallback",d.fnFooterCallback,"user");z(p,"aoInitComplete",d.fnInitComplete,"user");z(p,"aoPreDrawCallback",d.fnPreDrawCallback,"user");i=p.oClasses;d.bJQueryUI?(h.extend(i,m.ext.oJUIClasses,d.oClasses),d.sDom===l.sDom&&"lfrtip"===l.sDom&&(p.sDom='<"H"lfr>t<"F"ip>'),p.renderer)?
-h.isPlainObject(p.renderer)&&!p.renderer.header&&(p.renderer.header="jqueryui"):p.renderer="jqueryui":h.extend(i,m.ext.classes,d.oClasses);q.addClass(i.sTable);if(""!==p.oScroll.sX||""!==p.oScroll.sY)p.oScroll.iBarWidth=Hb();!0===p.oScroll.sX&&(p.oScroll.sX="100%");p.iInitDisplayStart===k&&(p.iInitDisplayStart=d.iDisplayStart,p._iDisplayStart=d.iDisplayStart);null!==d.iDeferLoading&&(p.bDeferLoading=!0,g=h.isArray(d.iDeferLoading),p._iRecordsDisplay=g?d.iDeferLoading[0]:d.iDeferLoading,p._iRecordsTotal=
-g?d.iDeferLoading[1]:d.iDeferLoading);var t=p.oLanguage;h.extend(!0,t,d.oLanguage);""!==t.sUrl&&(h.ajax({dataType:"json",url:t.sUrl,success:function(a){P(a);H(l.oLanguage,a);h.extend(true,t,a);ga(p)},error:function(){ga(p)}}),o=!0);null===d.asStripeClasses&&(p.asStripeClasses=[i.sStripeOdd,i.sStripeEven]);var g=p.asStripeClasses,s=q.children("tbody").find("tr").eq(0);-1!==h.inArray(!0,h.map(g,function(a){return s.hasClass(a)}))&&(h("tbody tr",this).removeClass(g.join(" ")),p.asDestroyStripes=g.slice());
-n=[];g=this.getElementsByTagName("thead");0!==g.length&&(da(p.aoHeader,g[0]),n=qa(p));if(null===d.aoColumns){r=[];g=0;for(j=n.length;g<j;g++)r.push(null)}else r=d.aoColumns;g=0;for(j=r.length;g<j;g++)Fa(p,n?n[g]:null);ib(p,d.aoColumnDefs,r,function(a,b){ka(p,a,b)});if(s.length){var u=function(a,b){return a.getAttribute("data-"+b)!==null?b:null};h.each(na(p,s[0]).cells,function(a,b){var c=p.aoColumns[a];if(c.mData===a){var d=u(b,"sort")||u(b,"order"),e=u(b,"filter")||u(b,"search");if(d!==null||e!==
-null){c.mData={_:a+".display",sort:d!==null?a+".@data-"+d:k,type:d!==null?a+".@data-"+d:k,filter:e!==null?a+".@data-"+e:k};ka(p,a)}}})}var v=p.oFeatures;d.bStateSave&&(v.bStateSave=!0,Kb(p,d),z(p,"aoDrawCallback",ya,"state_save"));if(d.aaSorting===k){n=p.aaSorting;g=0;for(j=n.length;g<j;g++)n[g][1]=p.aoColumns[g].asSorting[0]}xa(p);v.bSort&&z(p,"aoDrawCallback",function(){if(p.bSorted){var a=U(p),b={};h.each(a,function(a,c){b[c.src]=c.dir});w(p,null,"order",[p,a,b]);Jb(p)}});z(p,"aoDrawCallback",
-function(){(p.bSorted||B(p)==="ssp"||v.bDeferRender)&&xa(p)},"sc");gb(p);g=q.children("caption").each(function(){this._captionSide=q.css("caption-side")});j=q.children("thead");0===j.length&&(j=h("<thead/>").appendTo(this));p.nTHead=j[0];j=q.children("tbody");0===j.length&&(j=h("<tbody/>").appendTo(this));p.nTBody=j[0];j=q.children("tfoot");if(0===j.length&&0<g.length&&(""!==p.oScroll.sX||""!==p.oScroll.sY))j=h("<tfoot/>").appendTo(this);0===j.length||0===j.children().length?q.addClass(i.sNoFooter):
-0<j.length&&(p.nTFoot=j[0],da(p.aoFooter,p.nTFoot));if(d.aaData)for(g=0;g<d.aaData.length;g++)K(p,d.aaData[g]);else(p.bDeferLoading||"dom"==B(p))&&ma(p,h(p.nTBody).children("tr"));p.aiDisplay=p.aiDisplayMaster.slice();p.bInitialised=!0;!1===o&&ga(p)}});b=null;return this};var Tb=[],y=Array.prototype,cc=function(a){var b,c,e=m.settings,d=h.map(e,function(a){return a.nTable});if(a){if(a.nTable&&a.oApi)return[a];if(a.nodeName&&"table"===a.nodeName.toLowerCase())return b=h.inArray(a,d),-1!==b?[e[b]]:
-null;if(a&&"function"===typeof a.settings)return a.settings().toArray();"string"===typeof a?c=h(a):a instanceof h&&(c=a)}else return[];if(c)return c.map(function(){b=h.inArray(this,d);return-1!==b?e[b]:null}).toArray()};t=function(a,b){if(!(this instanceof t))return new t(a,b);var c=[],e=function(a){(a=cc(a))&&c.push.apply(c,a)};if(h.isArray(a))for(var d=0,f=a.length;d<f;d++)e(a[d]);else e(a);this.context=Na(c);b&&this.push.apply(this,b.toArray?b.toArray():b);this.selector={rows:null,cols:null,opts:null};
-t.extend(this,this,Tb)};m.Api=t;t.prototype={any:function(){return 0!==this.flatten().length},concat:y.concat,context:[],each:function(a){for(var b=0,c=this.length;b<c;b++)a.call(this,this[b],b,this);return this},eq:function(a){var b=this.context;return b.length>a?new t(b[a],this[a]):null},filter:function(a){var b=[];if(y.filter)b=y.filter.call(this,a,this);else for(var c=0,e=this.length;c<e;c++)a.call(this,this[c],c,this)&&b.push(this[c]);return new t(this.context,b)},flatten:function(){var a=[];
-return new t(this.context,a.concat.apply(a,this.toArray()))},join:y.join,indexOf:y.indexOf||function(a,b){for(var c=b||0,e=this.length;c<e;c++)if(this[c]===a)return c;return-1},iterator:function(a,b,c,e){var d=[],f,g,h,i,o,l=this.context,q,n,m=this.selector;"string"===typeof a&&(e=c,c=b,b=a,a=!1);g=0;for(h=l.length;g<h;g++){var p=new t(l[g]);if("table"===b)f=c.call(p,l[g],g),f!==k&&d.push(f);else if("columns"===b||"rows"===b)f=c.call(p,l[g],this[g],g),f!==k&&d.push(f);else if("column"===b||"column-rows"===
-b||"row"===b||"cell"===b){n=this[g];"column-rows"===b&&(q=Ca(l[g],m.opts));i=0;for(o=n.length;i<o;i++)f=n[i],f="cell"===b?c.call(p,l[g],f.row,f.column,g,i):c.call(p,l[g],f,g,i,q),f!==k&&d.push(f)}}return d.length||e?(a=new t(l,a?d.concat.apply([],d):d),b=a.selector,b.rows=m.rows,b.cols=m.cols,b.opts=m.opts,a):this},lastIndexOf:y.lastIndexOf||function(a,b){return this.indexOf.apply(this.toArray.reverse(),arguments)},length:0,map:function(a){var b=[];if(y.map)b=y.map.call(this,a,this);else for(var c=
-0,e=this.length;c<e;c++)b.push(a.call(this,this[c],c));return new t(this.context,b)},pluck:function(a){return this.map(function(b){return b[a]})},pop:y.pop,push:y.push,reduce:y.reduce||function(a,b){return hb(this,a,b,0,this.length,1)},reduceRight:y.reduceRight||function(a,b){return hb(this,a,b,this.length-1,-1,-1)},reverse:y.reverse,selector:null,shift:y.shift,sort:y.sort,splice:y.splice,toArray:function(){return y.slice.call(this)},to$:function(){return h(this)},toJQuery:function(){return h(this)},
-unique:function(){return new t(this.context,Na(this))},unshift:y.unshift};t.extend=function(a,b,c){if(c.length&&b&&(b instanceof t||b.__dt_wrapper)){var e,d,f,g=function(a,b,c){return function(){var d=b.apply(a,arguments);t.extend(d,d,c.methodExt);return d}};e=0;for(d=c.length;e<d;e++)f=c[e],b[f.name]="function"===typeof f.val?g(a,f.val,f):h.isPlainObject(f.val)?{}:f.val,b[f.name].__dt_wrapper=!0,t.extend(a,b[f.name],f.propExt)}};t.register=r=function(a,b){if(h.isArray(a))for(var c=0,e=a.length;c<
-e;c++)t.register(a[c],b);else for(var d=a.split("."),f=Tb,g,j,c=0,e=d.length;c<e;c++){g=(j=-1!==d[c].indexOf("()"))?d[c].replace("()",""):d[c];var i;a:{i=0;for(var o=f.length;i<o;i++)if(f[i].name===g){i=f[i];break a}i=null}i||(i={name:g,val:{},methodExt:[],propExt:[]},f.push(i));c===e-1?i.val=b:f=j?i.methodExt:i.propExt}};t.registerPlural=v=function(a,b,c){t.register(a,c);t.register(b,function(){var a=c.apply(this,arguments);return a===this?this:a instanceof t?a.length?h.isArray(a[0])?new t(a.context,
-a[0]):a[0]:k:a})};r("tables()",function(a){var b;if(a){b=t;var c=this.context;if("number"===typeof a)a=[c[a]];else var e=h.map(c,function(a){return a.nTable}),a=h(e).filter(a).map(function(){var a=h.inArray(this,e);return c[a]}).toArray();b=new b(a)}else b=this;return b});r("table()",function(a){var a=this.tables(a),b=a.context;return b.length?new t(b[0]):a});v("tables().nodes()","table().node()",function(){return this.iterator("table",function(a){return a.nTable},1)});v("tables().body()","table().body()",
-function(){return this.iterator("table",function(a){return a.nTBody},1)});v("tables().header()","table().header()",function(){return this.iterator("table",function(a){return a.nTHead},1)});v("tables().footer()","table().footer()",function(){return this.iterator("table",function(a){return a.nTFoot},1)});v("tables().containers()","table().container()",function(){return this.iterator("table",function(a){return a.nTableWrapper},1)});r("draw()",function(a){return this.iterator("table",function(b){N(b,
-!1===a)})});r("page()",function(a){return a===k?this.page.info().page:this.iterator("table",function(b){Ta(b,a)})});r("page.info()",function(){if(0===this.context.length)return k;var a=this.context[0],b=a._iDisplayStart,c=a._iDisplayLength,e=a.fnRecordsDisplay(),d=-1===c;return{page:d?0:Math.floor(b/c),pages:d?1:Math.ceil(e/c),start:b,end:a.fnDisplayEnd(),length:c,recordsTotal:a.fnRecordsTotal(),recordsDisplay:e}});r("page.len()",function(a){return a===k?0!==this.context.length?this.context[0]._iDisplayLength:
-k:this.iterator("table",function(b){Ra(b,a)})});var Ub=function(a,b,c){if(c){var e=new t(a);e.one("draw",function(){c(e.ajax.json())})}"ssp"==B(a)?N(a,b):(C(a,!0),ra(a,[],function(c){oa(a);for(var c=sa(a,c),e=0,g=c.length;e<g;e++)K(a,c[e]);N(a,b);C(a,!1)}))};r("ajax.json()",function(){var a=this.context;if(0<a.length)return a[0].json});r("ajax.params()",function(){var a=this.context;if(0<a.length)return a[0].oAjaxData});r("ajax.reload()",function(a,b){return this.iterator("table",function(c){Ub(c,
-!1===b,a)})});r("ajax.url()",function(a){var b=this.context;if(a===k){if(0===b.length)return k;b=b[0];return b.ajax?h.isPlainObject(b.ajax)?b.ajax.url:b.ajax:b.sAjaxSource}return this.iterator("table",function(b){h.isPlainObject(b.ajax)?b.ajax.url=a:b.ajax=a})});r("ajax.url().load()",function(a,b){return this.iterator("table",function(c){Ub(c,!1===b,a)})});var $a=function(a,b,c,e,d){var f=[],g,j,i,o,l,q;i=typeof b;if(!b||"string"===i||"function"===i||b.length===k)b=[b];i=0;for(o=b.length;i<o;i++){j=
-b[i]&&b[i].split?b[i].split(","):[b[i]];l=0;for(q=j.length;l<q;l++)(g=c("string"===typeof j[l]?h.trim(j[l]):j[l]))&&g.length&&f.push.apply(f,g)}a=u.selector[a];if(a.length){i=0;for(o=a.length;i<o;i++)f=a[i](e,d,f)}return f},ab=function(a){a||(a={});a.filter&&a.search===k&&(a.search=a.filter);return h.extend({search:"none",order:"current",page:"all"},a)},bb=function(a){for(var b=0,c=a.length;b<c;b++)if(0<a[b].length)return a[0]=a[b],a[0].length=1,a.length=1,a.context=[a.context[b]],a;a.length=0;return a},
-Ca=function(a,b){var c,e,d,f=[],g=a.aiDisplay;c=a.aiDisplayMaster;var j=b.search;e=b.order;d=b.page;if("ssp"==B(a))return"removed"===j?[]:V(0,c.length);if("current"==d){c=a._iDisplayStart;for(e=a.fnDisplayEnd();c<e;c++)f.push(g[c])}else if("current"==e||"applied"==e)f="none"==j?c.slice():"applied"==j?g.slice():h.map(c,function(a){return-1===h.inArray(a,g)?a:null});else if("index"==e||"original"==e){c=0;for(e=a.aoData.length;c<e;c++)"none"==j?f.push(c):(d=h.inArray(c,g),(-1===d&&"removed"==j||0<=d&&
-"applied"==j)&&f.push(c))}return f};r("rows()",function(a,b){a===k?a="":h.isPlainObject(a)&&(b=a,a="");var b=ab(b),c=this.iterator("table",function(c){var d=b;return $a("row",a,function(a){var b=Pb(a);if(b!==null&&!d)return[b];var j=Ca(c,d);if(b!==null&&h.inArray(b,j)!==-1)return[b];if(!a)return j;if(typeof a==="function")return h.map(j,function(b){var d=c.aoData[b];return a(b,d._aData,d.nTr)?b:null});b=Sb(ia(c.aoData,j,"nTr"));return a.nodeName&&h.inArray(a,b)!==-1?[a._DT_RowIndex]:h(b).filter(a).map(function(){return this._DT_RowIndex}).toArray()},
-c,d)},1);c.selector.rows=a;c.selector.opts=b;return c});r("rows().nodes()",function(){return this.iterator("row",function(a,b){return a.aoData[b].nTr||k},1)});r("rows().data()",function(){return this.iterator(!0,"rows",function(a,b){return ia(a.aoData,b,"_aData")},1)});v("rows().cache()","row().cache()",function(a){return this.iterator("row",function(b,c){var e=b.aoData[c];return"search"===a?e._aFilterData:e._aSortData},1)});v("rows().invalidate()","row().invalidate()",function(a){return this.iterator("row",
-function(b,c){ca(b,c,a)})});v("rows().indexes()","row().index()",function(){return this.iterator("row",function(a,b){return b},1)});v("rows().remove()","row().remove()",function(){var a=this;return this.iterator("row",function(b,c,e){var d=b.aoData;d.splice(c,1);for(var f=0,g=d.length;f<g;f++)null!==d[f].nTr&&(d[f].nTr._DT_RowIndex=f);h.inArray(c,b.aiDisplay);pa(b.aiDisplayMaster,c);pa(b.aiDisplay,c);pa(a[e],c,!1);Sa(b)})});r("rows.add()",function(a){var b=this.iterator("table",function(b){var c,
-f,g,h=[];f=0;for(g=a.length;f<g;f++)c=a[f],c.nodeName&&"TR"===c.nodeName.toUpperCase()?h.push(ma(b,c)[0]):h.push(K(b,c));return h},1),c=this.rows(-1);c.pop();c.push.apply(c,b.toArray());return c});r("row()",function(a,b){return bb(this.rows(a,b))});r("row().data()",function(a){var b=this.context;if(a===k)return b.length&&this.length?b[0].aoData[this[0]]._aData:k;b[0].aoData[this[0]]._aData=a;ca(b[0],this[0],"data");return this});r("row().node()",function(){var a=this.context;return a.length&&this.length?
-a[0].aoData[this[0]].nTr||null:null});r("row.add()",function(a){a instanceof h&&a.length&&(a=a[0]);var b=this.iterator("table",function(b){return a.nodeName&&"TR"===a.nodeName.toUpperCase()?ma(b,a)[0]:K(b,a)});return this.row(b[0])});var cb=function(a,b){var c=a.context;c.length&&(c=c[0].aoData[b!==k?b:a[0]],c._details&&(c._details.remove(),c._detailsShow=k,c._details=k))},Vb=function(a,b){var c=a.context;if(c.length&&a.length){var e=c[0].aoData[a[0]];if(e._details){(e._detailsShow=b)?e._details.insertAfter(e.nTr):
-e._details.detach();var d=c[0],f=new t(d),g=d.aoData;f.off("draw.dt.DT_details column-visibility.dt.DT_details destroy.dt.DT_details");0<D(g,"_details").length&&(f.on("draw.dt.DT_details",function(a,b){d===b&&f.rows({page:"current"}).eq(0).each(function(a){a=g[a];a._detailsShow&&a._details.insertAfter(a.nTr)})}),f.on("column-visibility.dt.DT_details",function(a,b){if(d===b)for(var c,e=aa(b),f=0,h=g.length;f<h;f++)c=g[f],c._details&&c._details.children("td[colspan]").attr("colspan",e)}),f.on("destroy.dt.DT_details",
-function(a,b){if(d===b)for(var c=0,e=g.length;c<e;c++)g[c]._details&&cb(f,c)}))}}};r("row().child()",function(a,b){var c=this.context;if(a===k)return c.length&&this.length?c[0].aoData[this[0]]._details:k;if(!0===a)this.child.show();else if(!1===a)cb(this);else if(c.length&&this.length){var e=c[0],c=c[0].aoData[this[0]],d=[],f=function(a,b){if(h.isArray(a)||a instanceof h)for(var c=0,k=a.length;c<k;c++)f(a[c],b);else a.nodeName&&"tr"===a.nodeName.toLowerCase()?d.push(a):(c=h("<tr><td/></tr>").addClass(b),
-h("td",c).addClass(b).html(a)[0].colSpan=aa(e),d.push(c[0]))};f(a,b);c._details&&c._details.remove();c._details=h(d);c._detailsShow&&c._details.insertAfter(c.nTr)}return this});r(["row().child.show()","row().child().show()"],function(){Vb(this,!0);return this});r(["row().child.hide()","row().child().hide()"],function(){Vb(this,!1);return this});r(["row().child.remove()","row().child().remove()"],function(){cb(this);return this});r("row().child.isShown()",function(){var a=this.context;return a.length&&
-this.length?a[0].aoData[this[0]]._detailsShow||!1:!1});var dc=/^(.+):(name|visIdx|visible)$/,Wb=function(a,b,c,e,d){for(var c=[],e=0,f=d.length;e<f;e++)c.push(x(a,d[e],b));return c};r("columns()",function(a,b){a===k?a="":h.isPlainObject(a)&&(b=a,a="");var b=ab(b),c=this.iterator("table",function(c){var d=a,f=b,g=c.aoColumns,j=D(g,"sName"),i=D(g,"nTh");return $a("column",d,function(a){var b=Pb(a);if(a==="")return V(g.length);if(b!==null)return[b>=0?b:g.length+b];if(typeof a==="function"){var d=Ca(c,
-f);return h.map(g,function(b,f){return a(f,Wb(c,f,0,0,d),i[f])?f:null})}var k=typeof a==="string"?a.match(dc):"";if(k)switch(k[2]){case "visIdx":case "visible":b=parseInt(k[1],10);if(b<0){var m=h.map(g,function(a,b){return a.bVisible?b:null});return[m[m.length+b]]}return[la(c,b)];case "name":return h.map(j,function(a,b){return a===k[1]?b:null})}else return h(i).filter(a).map(function(){return h.inArray(this,i)}).toArray()},c,f)},1);c.selector.cols=a;c.selector.opts=b;return c});v("columns().header()",
-"column().header()",function(){return this.iterator("column",function(a,b){return a.aoColumns[b].nTh},1)});v("columns().footer()","column().footer()",function(){return this.iterator("column",function(a,b){return a.aoColumns[b].nTf},1)});v("columns().data()","column().data()",function(){return this.iterator("column-rows",Wb,1)});v("columns().dataSrc()","column().dataSrc()",function(){return this.iterator("column",function(a,b){return a.aoColumns[b].mData},1)});v("columns().cache()","column().cache()",
-function(a){return this.iterator("column-rows",function(b,c,e,d,f){return ia(b.aoData,f,"search"===a?"_aFilterData":"_aSortData",c)},1)});v("columns().nodes()","column().nodes()",function(){return this.iterator("column-rows",function(a,b,c,e,d){return ia(a.aoData,d,"anCells",b)},1)});v("columns().visible()","column().visible()",function(a,b){return this.iterator("column",function(c,e){if(a===k)return c.aoColumns[e].bVisible;var d=c.aoColumns,f=d[e],g=c.aoData,j,i,m;if(a!==k&&f.bVisible!==a){if(a){var l=
-h.inArray(!0,D(d,"bVisible"),e+1);j=0;for(i=g.length;j<i;j++)m=g[j].nTr,d=g[j].anCells,m&&m.insertBefore(d[e],d[l]||null)}else h(D(c.aoData,"anCells",e)).detach();f.bVisible=a;ea(c,c.aoHeader);ea(c,c.aoFooter);if(b===k||b)X(c),(c.oScroll.sX||c.oScroll.sY)&&Y(c);w(c,null,"column-visibility",[c,e,a]);ya(c)}})});v("columns().indexes()","column().index()",function(a){return this.iterator("column",function(b,c){return"visible"===a?$(b,c):c},1)});r("columns.adjust()",function(){return this.iterator("table",
-function(a){X(a)},1)});r("column.index()",function(a,b){if(0!==this.context.length){var c=this.context[0];if("fromVisible"===a||"toData"===a)return la(c,b);if("fromData"===a||"toVisible"===a)return $(c,b)}});r("column()",function(a,b){return bb(this.columns(a,b))});r("cells()",function(a,b,c){h.isPlainObject(a)&&(a.row===k?(c=a,a=null):(c=b,b=null));h.isPlainObject(b)&&(c=b,b=null);if(null===b||b===k)return this.iterator("table",function(b){var d=a,e=ab(c),f=b.aoData,g=Ca(b,e),i=Sb(ia(f,g,"anCells")),
-j=h([].concat.apply([],i)),l,m=b.aoColumns.length,o,r,t,s,u,v;return $a("cell",d,function(a){var c=typeof a==="function";if(a===null||a===k||c){o=[];r=0;for(t=g.length;r<t;r++){l=g[r];for(s=0;s<m;s++){u={row:l,column:s};if(c){v=b.aoData[l];a(u,x(b,l,s),v.anCells?v.anCells[s]:null)&&o.push(u)}else o.push(u)}}return o}return h.isPlainObject(a)?[a]:j.filter(a).map(function(a,b){l=b.parentNode._DT_RowIndex;return{row:l,column:h.inArray(b,f[l].anCells)}}).toArray()},b,e)});var e=this.columns(b,c),d=this.rows(a,
-c),f,g,j,i,m,l=this.iterator("table",function(a,b){f=[];g=0;for(j=d[b].length;g<j;g++){i=0;for(m=e[b].length;i<m;i++)f.push({row:d[b][g],column:e[b][i]})}return f},1);h.extend(l.selector,{cols:b,rows:a,opts:c});return l});v("cells().nodes()","cell().node()",function(){return this.iterator("cell",function(a,b,c){return(a=a.aoData[b].anCells)?a[c]:k},1)});r("cells().data()",function(){return this.iterator("cell",function(a,b,c){return x(a,b,c)},1)});v("cells().cache()","cell().cache()",function(a){a=
-"search"===a?"_aFilterData":"_aSortData";return this.iterator("cell",function(b,c,e){return b.aoData[c][a][e]},1)});v("cells().render()","cell().render()",function(a){return this.iterator("cell",function(b,c,e){return x(b,c,e,a)},1)});v("cells().indexes()","cell().index()",function(){return this.iterator("cell",function(a,b,c){return{row:b,column:c,columnVisible:$(a,c)}},1)});v("cells().invalidate()","cell().invalidate()",function(a){return this.iterator("cell",function(b,c,e){ca(b,c,a,e)})});r("cell()",
-function(a,b,c){return bb(this.cells(a,b,c))});r("cell().data()",function(a){var b=this.context,c=this[0];if(a===k)return b.length&&c.length?x(b[0],c[0].row,c[0].column):k;Ia(b[0],c[0].row,c[0].column,a);ca(b[0],c[0].row,"data",c[0].column);return this});r("order()",function(a,b){var c=this.context;if(a===k)return 0!==c.length?c[0].aaSorting:k;"number"===typeof a?a=[[a,b]]:h.isArray(a[0])||(a=Array.prototype.slice.call(arguments));return this.iterator("table",function(b){b.aaSorting=a.slice()})});
-r("order.listener()",function(a,b,c){return this.iterator("table",function(e){Oa(e,a,b,c)})});r(["columns().order()","column().order()"],function(a){var b=this;return this.iterator("table",function(c,e){var d=[];h.each(b[e],function(b,c){d.push([c,a])});c.aaSorting=d})});r("search()",function(a,b,c,e){var d=this.context;return a===k?0!==d.length?d[0].oPreviousSearch.sSearch:k:this.iterator("table",function(d){d.oFeatures.bFilter&&fa(d,h.extend({},d.oPreviousSearch,{sSearch:a+"",bRegex:null===b?!1:
-b,bSmart:null===c?!0:c,bCaseInsensitive:null===e?!0:e}),1)})});v("columns().search()","column().search()",function(a,b,c,e){return this.iterator("column",function(d,f){var g=d.aoPreSearchCols;if(a===k)return g[f].sSearch;d.oFeatures.bFilter&&(h.extend(g[f],{sSearch:a+"",bRegex:null===b?!1:b,bSmart:null===c?!0:c,bCaseInsensitive:null===e?!0:e}),fa(d,d.oPreviousSearch,1))})});r("state()",function(){return this.context.length?this.context[0].oSavedState:null});r("state.clear()",function(){return this.iterator("table",
-function(a){a.fnStateSaveCallback.call(a.oInstance,a,{})})});r("state.loaded()",function(){return this.context.length?this.context[0].oLoadedState:null});r("state.save()",function(){return this.iterator("table",function(a){ya(a)})});m.versionCheck=m.fnVersionCheck=function(a){for(var b=m.version.split("."),a=a.split("."),c,e,d=0,f=a.length;d<f;d++)if(c=parseInt(b[d],10)||0,e=parseInt(a[d],10)||0,c!==e)return c>e;return!0};m.isDataTable=m.fnIsDataTable=function(a){var b=h(a).get(0),c=!1;h.each(m.settings,
-function(a,d){var f=d.nScrollHead?h("table",d.nScrollHead)[0]:null,g=d.nScrollFoot?h("table",d.nScrollFoot)[0]:null;if(d.nTable===b||f===b||g===b)c=!0});return c};m.tables=m.fnTables=function(a){return h.map(m.settings,function(b){if(!a||a&&h(b.nTable).is(":visible"))return b.nTable})};m.util={throttle:ua,escapeRegex:va};m.camelToHungarian=H;r("$()",function(a,b){var c=this.rows(b).nodes(),c=h(c);return h([].concat(c.filter(a).toArray(),c.find(a).toArray()))});h.each(["on","one","off"],function(a,
-b){r(b+"()",function(){var a=Array.prototype.slice.call(arguments);a[0].match(/\.dt\b/)||(a[0]+=".dt");var e=h(this.tables().nodes());e[b].apply(e,a);return this})});r("clear()",function(){return this.iterator("table",function(a){oa(a)})});r("settings()",function(){return new t(this.context,this.context)});r("init()",function(){var a=this.context;return a.length?a[0].oInit:null});r("data()",function(){return this.iterator("table",function(a){return D(a.aoData,"_aData")}).flatten()});r("destroy()",
-function(a){a=a||!1;return this.iterator("table",function(b){var c=b.nTableWrapper.parentNode,e=b.oClasses,d=b.nTable,f=b.nTBody,g=b.nTHead,j=b.nTFoot,i=h(d),f=h(f),k=h(b.nTableWrapper),l=h.map(b.aoData,function(a){return a.nTr}),q;b.bDestroying=!0;w(b,"aoDestroyCallback","destroy",[b]);a||(new t(b)).columns().visible(!0);k.unbind(".DT").find(":not(tbody *)").unbind(".DT");h(Ea).unbind(".DT-"+b.sInstance);d!=g.parentNode&&(i.children("thead").detach(),i.append(g));j&&d!=j.parentNode&&(i.children("tfoot").detach(),
-i.append(j));i.detach();k.detach();b.aaSorting=[];b.aaSortingFixed=[];xa(b);h(l).removeClass(b.asStripeClasses.join(" "));h("th, td",g).removeClass(e.sSortable+" "+e.sSortableAsc+" "+e.sSortableDesc+" "+e.sSortableNone);b.bJUI&&(h("th span."+e.sSortIcon+", td span."+e.sSortIcon,g).detach(),h("th, td",g).each(function(){var a=h("div."+e.sSortJUIWrapper,this);h(this).append(a.contents());a.detach()}));!a&&c&&c.insertBefore(d,b.nTableReinsertBefore);f.children().detach();f.append(l);i.css("width",b.sDestroyWidth).removeClass(e.sTable);
-(q=b.asDestroyStripes.length)&&f.children().each(function(a){h(this).addClass(b.asDestroyStripes[a%q])});c=h.inArray(b,m.settings);-1!==c&&m.settings.splice(c,1)})});h.each(["column","row","cell"],function(a,b){r(b+"s().every()",function(a){return this.iterator(b,function(e,d,f){a.call((new t(e))[b](d,f))})})});r("i18n()",function(a,b,c){var e=this.context[0],a=R(a)(e.oLanguage);a===k&&(a=b);c!==k&&h.isPlainObject(a)&&(a=a[c]!==k?a[c]:a._);return a.replace("%d",c)});m.version="1.10.7";m.settings=
-[];m.models={};m.models.oSearch={bCaseInsensitive:!0,sSearch:"",bRegex:!1,bSmart:!0};m.models.oRow={nTr:null,anCells:null,_aData:[],_aSortData:null,_aFilterData:null,_sFilterRow:null,_sRowStripe:"",src:null};m.models.oColumn={idx:null,aDataSort:null,asSorting:null,bSearchable:null,bSortable:null,bVisible:null,_sManualType:null,_bAttrSrc:!1,fnCreatedCell:null,fnGetData:null,fnSetData:null,mData:null,mRender:null,nTh:null,nTf:null,sClass:null,sContentPadding:null,sDefaultContent:null,sName:null,sSortDataType:"std",
-sSortingClass:null,sSortingClassJUI:null,sTitle:null,sType:null,sWidth:null,sWidthOrig:null};m.defaults={aaData:null,aaSorting:[[0,"asc"]],aaSortingFixed:[],ajax:null,aLengthMenu:[10,25,50,100],aoColumns:null,aoColumnDefs:null,aoSearchCols:[],asStripeClasses:null,bAutoWidth:!0,bDeferRender:!1,bDestroy:!1,bFilter:!0,bInfo:!0,bJQueryUI:!1,bLengthChange:!0,bPaginate:!0,bProcessing:!1,bRetrieve:!1,bScrollCollapse:!1,bServerSide:!1,bSort:!0,bSortMulti:!0,bSortCellsTop:!1,bSortClasses:!0,bStateSave:!1,
-fnCreatedRow:null,fnDrawCallback:null,fnFooterCallback:null,fnFormatNumber:function(a){return a.toString().replace(/\B(?=(\d{3})+(?!\d))/g,this.oLanguage.sThousands)},fnHeaderCallback:null,fnInfoCallback:null,fnInitComplete:null,fnPreDrawCallback:null,fnRowCallback:null,fnServerData:null,fnServerParams:null,fnStateLoadCallback:function(a){try{return JSON.parse((-1===a.iStateDuration?sessionStorage:localStorage).getItem("DataTables_"+a.sInstance+"_"+location.pathname))}catch(b){}},fnStateLoadParams:null,
-fnStateLoaded:null,fnStateSaveCallback:function(a,b){try{(-1===a.iStateDuration?sessionStorage:localStorage).setItem("DataTables_"+a.sInstance+"_"+location.pathname,JSON.stringify(b))}catch(c){}},fnStateSaveParams:null,iStateDuration:7200,iDeferLoading:null,iDisplayLength:10,iDisplayStart:0,iTabIndex:0,oClasses:{},oLanguage:{oAria:{sSortAscending:": activate to sort column ascending",sSortDescending:": activate to sort column descending"},oPaginate:{sFirst:"First",sLast:"Last",sNext:"Next",sPrevious:"Previous"},
-sEmptyTable:"No data available in table",sInfo:"Showing _START_ to _END_ of _TOTAL_ entries",sInfoEmpty:"Showing 0 to 0 of 0 entries",sInfoFiltered:"(filtered from _MAX_ total entries)",sInfoPostFix:"",sDecimal:"",sThousands:",",sLengthMenu:"Show _MENU_ entries",sLoadingRecords:"Loading...",sProcessing:"Processing...",sSearch:"Search:",sSearchPlaceholder:"",sUrl:"",sZeroRecords:"No matching records found"},oSearch:h.extend({},m.models.oSearch),sAjaxDataProp:"data",sAjaxSource:null,sDom:"lfrtip",searchDelay:null,
-sPaginationType:"simple_numbers",sScrollX:"",sScrollXInner:"",sScrollY:"",sServerMethod:"GET",renderer:null};W(m.defaults);m.defaults.column={aDataSort:null,iDataSort:-1,asSorting:["asc","desc"],bSearchable:!0,bSortable:!0,bVisible:!0,fnCreatedCell:null,mData:null,mRender:null,sCellType:"td",sClass:"",sContentPadding:"",sDefaultContent:null,sName:"",sSortDataType:"std",sTitle:null,sType:null,sWidth:null};W(m.defaults.column);m.models.oSettings={oFeatures:{bAutoWidth:null,bDeferRender:null,bFilter:null,
-bInfo:null,bLengthChange:null,bPaginate:null,bProcessing:null,bServerSide:null,bSort:null,bSortMulti:null,bSortClasses:null,bStateSave:null},oScroll:{bCollapse:null,iBarWidth:0,sX:null,sXInner:null,sY:null},oLanguage:{fnInfoCallback:null},oBrowser:{bScrollOversize:!1,bScrollbarLeft:!1},ajax:null,aanFeatures:[],aoData:[],aiDisplay:[],aiDisplayMaster:[],aoColumns:[],aoHeader:[],aoFooter:[],oPreviousSearch:{},aoPreSearchCols:[],aaSorting:null,aaSortingFixed:[],asStripeClasses:null,asDestroyStripes:[],
-sDestroyWidth:0,aoRowCallback:[],aoHeaderCallback:[],aoFooterCallback:[],aoDrawCallback:[],aoRowCreatedCallback:[],aoPreDrawCallback:[],aoInitComplete:[],aoStateSaveParams:[],aoStateLoadParams:[],aoStateLoaded:[],sTableId:"",nTable:null,nTHead:null,nTFoot:null,nTBody:null,nTableWrapper:null,bDeferLoading:!1,bInitialised:!1,aoOpenRows:[],sDom:null,searchDelay:null,sPaginationType:"two_button",iStateDuration:0,aoStateSave:[],aoStateLoad:[],oSavedState:null,oLoadedState:null,sAjaxSource:null,sAjaxDataProp:null,
-bAjaxDataGet:!0,jqXHR:null,json:k,oAjaxData:k,fnServerData:null,aoServerParams:[],sServerMethod:null,fnFormatNumber:null,aLengthMenu:null,iDraw:0,bDrawing:!1,iDrawError:-1,_iDisplayLength:10,_iDisplayStart:0,_iRecordsTotal:0,_iRecordsDisplay:0,bJUI:null,oClasses:{},bFiltered:!1,bSorted:!1,bSortCellsTop:null,oInit:null,aoDestroyCallback:[],fnRecordsTotal:function(){return"ssp"==B(this)?1*this._iRecordsTotal:this.aiDisplayMaster.length},fnRecordsDisplay:function(){return"ssp"==B(this)?1*this._iRecordsDisplay:
-this.aiDisplay.length},fnDisplayEnd:function(){var a=this._iDisplayLength,b=this._iDisplayStart,c=b+a,e=this.aiDisplay.length,d=this.oFeatures,f=d.bPaginate;return d.bServerSide?!1===f||-1===a?b+e:Math.min(b+a,this._iRecordsDisplay):!f||c>e||-1===a?e:c},oInstance:null,sInstance:null,iTabIndex:0,nScrollHead:null,nScrollFoot:null,aLastSort:[],oPlugins:{}};m.ext=u={buttons:{},classes:{},errMode:"alert",feature:[],search:[],selector:{cell:[],column:[],row:[]},internal:{},legacy:{ajax:null},pager:{},renderer:{pageButton:{},
-header:{}},order:{},type:{detect:[],search:{},order:{}},_unique:0,fnVersionCheck:m.fnVersionCheck,iApiIndex:0,oJUIClasses:{},sVersion:m.version};h.extend(u,{afnFiltering:u.search,aTypes:u.type.detect,ofnSearch:u.type.search,oSort:u.type.order,afnSortData:u.order,aoFeatures:u.feature,oApi:u.internal,oStdClasses:u.classes,oPagination:u.pager});h.extend(m.ext.classes,{sTable:"dataTable",sNoFooter:"no-footer",sPageButton:"paginate_button",sPageButtonActive:"current",sPageButtonDisabled:"disabled",sStripeOdd:"odd",
-sStripeEven:"even",sRowEmpty:"dataTables_empty",sWrapper:"dataTables_wrapper",sFilter:"dataTables_filter",sInfo:"dataTables_info",sPaging:"dataTables_paginate paging_",sLength:"dataTables_length",sProcessing:"dataTables_processing",sSortAsc:"sorting_asc",sSortDesc:"sorting_desc",sSortable:"sorting",sSortableAsc:"sorting_asc_disabled",sSortableDesc:"sorting_desc_disabled",sSortableNone:"sorting_disabled",sSortColumn:"sorting_",sFilterInput:"",sLengthSelect:"",sScrollWrapper:"dataTables_scroll",sScrollHead:"dataTables_scrollHead",
-sScrollHeadInner:"dataTables_scrollHeadInner",sScrollBody:"dataTables_scrollBody",sScrollFoot:"dataTables_scrollFoot",sScrollFootInner:"dataTables_scrollFootInner",sHeaderTH:"",sFooterTH:"",sSortJUIAsc:"",sSortJUIDesc:"",sSortJUI:"",sSortJUIAscAllowed:"",sSortJUIDescAllowed:"",sSortJUIWrapper:"",sSortIcon:"",sJUIHeader:"",sJUIFooter:""});var Da="",Da="",F=Da+"ui-state-default",ja=Da+"css_right ui-icon ui-icon-",Xb=Da+"fg-toolbar ui-toolbar ui-widget-header ui-helper-clearfix";h.extend(m.ext.oJUIClasses,
-m.ext.classes,{sPageButton:"fg-button ui-button "+F,sPageButtonActive:"ui-state-disabled",sPageButtonDisabled:"ui-state-disabled",sPaging:"dataTables_paginate fg-buttonset ui-buttonset fg-buttonset-multi ui-buttonset-multi paging_",sSortAsc:F+" sorting_asc",sSortDesc:F+" sorting_desc",sSortable:F+" sorting",sSortableAsc:F+" sorting_asc_disabled",sSortableDesc:F+" sorting_desc_disabled",sSortableNone:F+" sorting_disabled",sSortJUIAsc:ja+"triangle-1-n",sSortJUIDesc:ja+"triangle-1-s",sSortJUI:ja+"carat-2-n-s",
-sSortJUIAscAllowed:ja+"carat-1-n",sSortJUIDescAllowed:ja+"carat-1-s",sSortJUIWrapper:"DataTables_sort_wrapper",sSortIcon:"DataTables_sort_icon",sScrollHead:"dataTables_scrollHead "+F,sScrollFoot:"dataTables_scrollFoot "+F,sHeaderTH:F,sFooterTH:F,sJUIHeader:Xb+" ui-corner-tl ui-corner-tr",sJUIFooter:Xb+" ui-corner-bl ui-corner-br"});var Mb=m.ext.pager;h.extend(Mb,{simple:function(){return["previous","next"]},full:function(){return["first","previous","next","last"]},simple_numbers:function(a,b){return["previous",
-Wa(a,b),"next"]},full_numbers:function(a,b){return["first","previous",Wa(a,b),"next","last"]},_numbers:Wa,numbers_length:7});h.extend(!0,m.ext.renderer,{pageButton:{_:function(a,b,c,e,d,f){var g=a.oClasses,j=a.oLanguage.oPaginate,i,k,l=0,m=function(b,e){var n,r,t,s,u=function(b){Ta(a,b.data.action,true)};n=0;for(r=e.length;n<r;n++){s=e[n];if(h.isArray(s)){t=h("<"+(s.DT_el||"div")+"/>").appendTo(b);m(t,s)}else{k=i="";switch(s){case "ellipsis":b.append('<span class="ellipsis">&#x2026;</span>');break;
-case "first":i=j.sFirst;k=s+(d>0?"":" "+g.sPageButtonDisabled);break;case "previous":i=j.sPrevious;k=s+(d>0?"":" "+g.sPageButtonDisabled);break;case "next":i=j.sNext;k=s+(d<f-1?"":" "+g.sPageButtonDisabled);break;case "last":i=j.sLast;k=s+(d<f-1?"":" "+g.sPageButtonDisabled);break;default:i=s+1;k=d===s?g.sPageButtonActive:""}if(i){t=h("<a>",{"class":g.sPageButton+" "+k,"aria-controls":a.sTableId,"data-dt-idx":l,tabindex:a.iTabIndex,id:c===0&&typeof s==="string"?a.sTableId+"_"+s:null}).html(i).appendTo(b);
-Va(t,{action:s},u);l++}}}},n;try{n=h(Q.activeElement).data("dt-idx")}catch(r){}m(h(b).empty(),e);n&&h(b).find("[data-dt-idx="+n+"]").focus()}}});h.extend(m.ext.type.detect,[function(a,b){var c=b.oLanguage.sDecimal;return Za(a,c)?"num"+c:null},function(a){if(a&&!(a instanceof Date)&&(!ac.test(a)||!bc.test(a)))return null;var b=Date.parse(a);return null!==b&&!isNaN(b)||J(a)?"date":null},function(a,b){var c=b.oLanguage.sDecimal;return Za(a,c,!0)?"num-fmt"+c:null},function(a,b){var c=b.oLanguage.sDecimal;
-return Rb(a,c)?"html-num"+c:null},function(a,b){var c=b.oLanguage.sDecimal;return Rb(a,c,!0)?"html-num-fmt"+c:null},function(a){return J(a)||"string"===typeof a&&-1!==a.indexOf("<")?"html":null}]);h.extend(m.ext.type.search,{html:function(a){return J(a)?a:"string"===typeof a?a.replace(Ob," ").replace(Ba,""):""},string:function(a){return J(a)?a:"string"===typeof a?a.replace(Ob," "):a}});var Aa=function(a,b,c,e){if(0!==a&&(!a||"-"===a))return-Infinity;b&&(a=Qb(a,b));a.replace&&(c&&(a=a.replace(c,"")),
-e&&(a=a.replace(e,"")));return 1*a};h.extend(u.type.order,{"date-pre":function(a){return Date.parse(a)||0},"html-pre":function(a){return J(a)?"":a.replace?a.replace(/<.*?>/g,"").toLowerCase():a+""},"string-pre":function(a){return J(a)?"":"string"===typeof a?a.toLowerCase():!a.toString?"":a.toString()},"string-asc":function(a,b){return a<b?-1:a>b?1:0},"string-desc":function(a,b){return a<b?1:a>b?-1:0}});db("");h.extend(!0,m.ext.renderer,{header:{_:function(a,b,c,e){h(a.nTable).on("order.dt.DT",function(d,
-f,g,h){if(a===f){d=c.idx;b.removeClass(c.sSortingClass+" "+e.sSortAsc+" "+e.sSortDesc).addClass(h[d]=="asc"?e.sSortAsc:h[d]=="desc"?e.sSortDesc:c.sSortingClass)}})},jqueryui:function(a,b,c,e){h("<div/>").addClass(e.sSortJUIWrapper).append(b.contents()).append(h("<span/>").addClass(e.sSortIcon+" "+c.sSortingClassJUI)).appendTo(b);h(a.nTable).on("order.dt.DT",function(d,f,g,h){if(a===f){d=c.idx;b.removeClass(e.sSortAsc+" "+e.sSortDesc).addClass(h[d]=="asc"?e.sSortAsc:h[d]=="desc"?e.sSortDesc:c.sSortingClass);
-b.find("span."+e.sSortIcon).removeClass(e.sSortJUIAsc+" "+e.sSortJUIDesc+" "+e.sSortJUI+" "+e.sSortJUIAscAllowed+" "+e.sSortJUIDescAllowed).addClass(h[d]=="asc"?e.sSortJUIAsc:h[d]=="desc"?e.sSortJUIDesc:c.sSortingClassJUI)}})}}});m.render={number:function(a,b,c,e){return{display:function(d){if("number"!==typeof d&&"string"!==typeof d)return d;var f=0>d?"-":"",d=Math.abs(parseFloat(d)),g=parseInt(d,10),d=c?b+(d-g).toFixed(c).substring(2):"";return f+(e||"")+g.toString().replace(/\B(?=(\d{3})+(?!\d))/g,
-a)+d}}}};h.extend(m.ext.internal,{_fnExternApiFunc:Nb,_fnBuildAjax:ra,_fnAjaxUpdate:kb,_fnAjaxParameters:tb,_fnAjaxUpdateDraw:ub,_fnAjaxDataSrc:sa,_fnAddColumn:Fa,_fnColumnOptions:ka,_fnAdjustColumnSizing:X,_fnVisibleToColumnIndex:la,_fnColumnIndexToVisible:$,_fnVisbleColumns:aa,_fnGetColumns:Z,_fnColumnTypes:Ha,_fnApplyColumnDefs:ib,_fnHungarianMap:W,_fnCamelToHungarian:H,_fnLanguageCompat:P,_fnBrowserDetect:gb,_fnAddData:K,_fnAddTr:ma,_fnNodeToDataIndex:function(a,b){return b._DT_RowIndex!==k?b._DT_RowIndex:
-null},_fnNodeToColumnIndex:function(a,b,c){return h.inArray(c,a.aoData[b].anCells)},_fnGetCellData:x,_fnSetCellData:Ia,_fnSplitObjNotation:Ka,_fnGetObjectDataFn:R,_fnSetObjectDataFn:S,_fnGetDataMaster:La,_fnClearTable:oa,_fnDeleteIndex:pa,_fnInvalidate:ca,_fnGetRowElements:na,_fnCreateTr:Ja,_fnBuildHead:jb,_fnDrawHead:ea,_fnDraw:M,_fnReDraw:N,_fnAddOptionsHtml:mb,_fnDetectHeader:da,_fnGetUniqueThs:qa,_fnFeatureHtmlFilter:ob,_fnFilterComplete:fa,_fnFilterCustom:xb,_fnFilterColumn:wb,_fnFilter:vb,_fnFilterCreateSearch:Qa,
-_fnEscapeRegex:va,_fnFilterData:yb,_fnFeatureHtmlInfo:rb,_fnUpdateInfo:Bb,_fnInfoMacros:Cb,_fnInitialise:ga,_fnInitComplete:ta,_fnLengthChange:Ra,_fnFeatureHtmlLength:nb,_fnFeatureHtmlPaginate:sb,_fnPageChange:Ta,_fnFeatureHtmlProcessing:pb,_fnProcessingDisplay:C,_fnFeatureHtmlTable:qb,_fnScrollDraw:Y,_fnApplyToChildren:G,_fnCalculateColumnWidths:Ga,_fnThrottle:ua,_fnConvertToWidth:Db,_fnScrollingWidthAdjust:Fb,_fnGetWidestNode:Eb,_fnGetMaxLenString:Gb,_fnStringToCss:s,_fnScrollBarWidth:Hb,_fnSortFlatten:U,
-_fnSort:lb,_fnSortAria:Jb,_fnSortListener:Ua,_fnSortAttachListener:Oa,_fnSortingClasses:xa,_fnSortData:Ib,_fnSaveState:ya,_fnLoadState:Kb,_fnSettingsFromNode:za,_fnLog:I,_fnMap:E,_fnBindAction:Va,_fnCallbackReg:z,_fnCallbackFire:w,_fnLengthOverflow:Sa,_fnRenderer:Pa,_fnDataSource:B,_fnRowAttributes:Ma,_fnCalculateEnd:function(){}});h.fn.dataTable=m;h.fn.dataTableSettings=m.settings;h.fn.dataTableExt=m.ext;h.fn.DataTable=function(a){return h(this).dataTable(a).api()};h.each(m,function(a,b){h.fn.DataTable[a]=
-b});return h.fn.dataTable};"function"===typeof define&&define.amd?define("datatables",["jquery"],P):"object"===typeof exports?module.exports=P(require("jquery")):jQuery&&!jQuery.fn.dataTable&&P(jQuery)})(window,document);




[49/50] [abbrv] hadoop git commit: Merge branch 'trunk' into HDFS-12943

Posted by su...@apache.org.
Merge branch 'trunk' into HDFS-12943
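
For context on this entry: "Merge branch 'trunk' into HDFS-12943" is the default message git generates when the local trunk branch is merged into the HDFS-12943 feature branch. A minimal sketch of that workflow follows, assuming the committer's local checkout; the branch names come from the commit metadata below, while the remote name "origin" is an assumption.

  # On the HDFS-12943 feature branch, merge the local trunk branch.
  # git writes the default message "Merge branch 'trunk' into HDFS-12943"
  # and records both parents (ddca0cf and 478b2cb, listed below).
  git checkout HDFS-12943
  git merge trunk
  git push origin HDFS-12943    # remote name "origin" is assumed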


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/32551b49
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/32551b49
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/32551b49

Branch: refs/heads/HDFS-12943
Commit: 32551b49744dfa03563e00926821edf7031501ad
Parents: ddca0cf 478b2cb
Author: Chao Sun <su...@apache.org>
Authored: Wed Oct 31 11:20:57 2018 -0700
Committer: Chao Sun <su...@apache.org>
Committed: Wed Oct 31 11:20:57 2018 -0700

----------------------------------------------------------------------
 LICENSE.txt                                     |   2 +-
 .../fs/CommonConfigurationKeysPublic.java       |  12 +
 .../hadoop/security/UserGroupInformation.java   | 192 ++++++-
 .../src/main/resources/core-default.xml         |   8 +
 .../src/site/markdown/CredentialProviderAPI.md  | 130 +++--
 .../hadoop/crypto/key/TestKeyProvider.java      |  32 +-
 .../hadoop/security/TestUGILoginFromKeytab.java |  56 ++
 .../security/TestUserGroupInformation.java      |   2 +-
 .../dev-support/findbugs-exclude.xml            |  33 ++
 hadoop-common-project/hadoop-registry/pom.xml   |  10 +
 .../hadoop/hdds/scm/XceiverClientGrpc.java      |  15 +-
 .../hadoop/hdds/scm/XceiverClientManager.java   |   6 +-
 .../hadoop/hdds/scm/XceiverClientRatis.java     |  31 +-
 .../scm/client/ContainerOperationClient.java    |  46 +-
 .../hdds/scm/storage/ChunkOutputStream.java     |  17 +-
 .../common/src/main/conf/log4j.properties       | 157 ------
 .../java/org/apache/hadoop/hdds/HddsUtils.java  |  51 +-
 .../org/apache/hadoop/hdds/cli/GenericCli.java  |   5 +-
 .../hadoop/hdds/cli/GenericParentCommand.java   |   4 +
 .../org/apache/hadoop/hdds/client/BlockID.java  |  85 ++-
 .../hadoop/hdds/client/ContainerBlockID.java    |  79 +++
 .../hadoop/hdds/scm/XceiverClientSpi.java       |   2 +-
 .../hadoop/hdds/scm/client/ScmClient.java       |   8 +-
 .../hdds/scm/container/ContainerInfo.java       |  10 +-
 .../common/helpers/AllocatedBlock.java          |  22 +-
 .../common/helpers/ContainerWithPipeline.java   |   3 +-
 .../scm/container/common/helpers/Pipeline.java  | 319 -----------
 .../container/common/helpers/PipelineID.java    |  97 ----
 .../hadoop/hdds/scm/pipeline/Pipeline.java      |  41 +-
 .../scm/pipeline/PipelineNotFoundException.java |  46 ++
 .../StorageContainerLocationProtocol.java       |   2 +-
 ...kLocationProtocolClientSideTranslatorPB.java |   9 +-
 ...rLocationProtocolClientSideTranslatorPB.java |   6 +-
 .../scm/storage/ContainerProtocolCalls.java     |  29 +-
 .../apache/hadoop/ozone/common/BlockGroup.java  |   3 +-
 .../container/common/helpers/BlockData.java     |   8 +-
 ...kLocationProtocolServerSideTranslatorPB.java |   2 +-
 .../main/java/org/apache/ratis/RatisHelper.java |  15 +-
 .../main/proto/DatanodeContainerProtocol.proto  |  31 +-
 .../main/proto/ScmBlockLocationProtocol.proto   |   2 +-
 .../StorageContainerLocationProtocol.proto      |   4 +-
 hadoop-hdds/common/src/main/proto/hdds.proto    |  11 +-
 .../apache/hadoop/hdds/scm/HddsServerUtil.java  |  16 +-
 .../container/common/impl/ContainerData.java    |  22 +-
 .../common/impl/ContainerDataYaml.java          |   6 +-
 .../container/common/impl/HddsDispatcher.java   |  13 +-
 .../container/common/interfaces/Container.java  |   9 +-
 .../statemachine/DatanodeStateMachine.java      |   3 +
 .../transport/server/XceiverServerGrpc.java     |   2 +-
 .../transport/server/ratis/CSMMetrics.java      |   5 +-
 .../server/ratis/ContainerStateMachine.java     |  93 ++--
 .../server/ratis/XceiverServerRatis.java        |  36 +-
 .../container/common/volume/VolumeInfo.java     |  19 +-
 .../container/common/volume/VolumeSet.java      |  11 +-
 .../container/keyvalue/KeyValueContainer.java   |  31 +-
 .../keyvalue/KeyValueContainerData.java         |   9 +-
 .../container/keyvalue/KeyValueHandler.java     |  21 +-
 .../container/keyvalue/helpers/BlockUtils.java  |   2 -
 .../keyvalue/impl/BlockManagerImpl.java         |   6 +-
 .../keyvalue/interfaces/BlockManager.java       |   3 +-
 .../commands/CloseContainerCommand.java         |   2 +-
 .../StorageContainerDatanodeProtocol.proto      |  57 +-
 .../ozone/container/common/ScmTestMock.java     |  14 +-
 .../common/TestKeyValueContainerData.java       |   6 +-
 .../common/impl/TestContainerDataYaml.java      |   8 +-
 .../container/common/impl/TestContainerSet.java |  16 +-
 .../container/common/volume/TestHddsVolume.java |   9 +-
 .../container/common/volume/TestVolumeSet.java  |   4 +-
 .../keyvalue/TestBlockManagerImpl.java          |   6 +-
 .../keyvalue/TestKeyValueContainer.java         |  15 +-
 .../container/keyvalue/TestKeyValueHandler.java |   2 +-
 hadoop-hdds/pom.xml                             | 161 +++++-
 .../hadoop/hdds/scm/block/BlockManagerImpl.java |   9 +-
 .../block/DatanodeDeletedBlockTransactions.java |   4 +-
 .../hdds/scm/block/DeletedBlockLogImpl.java     |   5 +-
 .../container/CloseContainerEventHandler.java   |   4 +-
 .../hdds/scm/container/ContainerManager.java    |   6 +-
 .../scm/container/ContainerReportHandler.java   |   2 +-
 .../scm/container/ContainerStateManager.java    |  25 +-
 .../hdds/scm/container/SCMContainerManager.java |  88 ++-
 .../hadoop/hdds/scm/events/SCMEvents.java       |   9 -
 .../hadoop/hdds/scm/node/NodeManager.java       |   4 +-
 .../hadoop/hdds/scm/node/NodeStateManager.java  |   4 +-
 .../hadoop/hdds/scm/node/SCMNodeManager.java    |   4 +-
 .../hadoop/hdds/scm/node/StaleNodeHandler.java  |  30 +-
 .../hdds/scm/node/states/Node2PipelineMap.java  |   8 +-
 .../hdds/scm/pipeline/PipelineFactory.java      |   6 +-
 .../hdds/scm/pipeline/PipelineManager.java      |  10 +-
 .../hdds/scm/pipeline/PipelineProvider.java     |   2 +-
 .../scm/pipeline/PipelineReportHandler.java     |  16 +-
 .../hdds/scm/pipeline/PipelineStateManager.java |  27 +-
 .../hdds/scm/pipeline/PipelineStateMap.java     |  91 +++-
 .../scm/pipeline/RatisPipelineProvider.java     |  15 +-
 .../hdds/scm/pipeline/SCMPipelineManager.java   |  37 +-
 .../scm/pipeline/SimplePipelineProvider.java    |  13 +-
 .../pipelines/PipelineActionEventHandler.java   |  62 ---
 .../scm/pipelines/PipelineCloseHandler.java     |  52 --
 .../hdds/scm/pipelines/PipelineManager.java     | 171 ------
 .../scm/pipelines/PipelineReportHandler.java    |  59 --
 .../hdds/scm/pipelines/PipelineSelector.java    | 481 ----------------
 .../scm/pipelines/PipelineStateManager.java     | 136 -----
 .../hadoop/hdds/scm/pipelines/package-info.java |  38 --
 .../scm/pipelines/ratis/RatisManagerImpl.java   | 129 -----
 .../hdds/scm/pipelines/ratis/package-info.java  |  18 -
 .../standalone/StandaloneManagerImpl.java       | 122 -----
 .../scm/pipelines/standalone/package-info.java  |  18 -
 .../scm/server/SCMClientProtocolServer.java     |   2 +-
 .../scm/server/StorageContainerManager.java     |  39 +-
 .../apache/hadoop/hdds/scm/HddsTestUtils.java   |   2 +-
 .../hadoop/hdds/scm/HddsWhiteboxTestUtils.java  | 103 ++++
 .../hadoop/hdds/scm/TestHddsServerUtils.java    | 153 ++++++
 .../org/apache/hadoop/hdds/scm/TestUtils.java   |  20 +-
 .../hadoop/hdds/scm/block/TestBlockManager.java |   9 +-
 .../hdds/scm/block/TestDeletedBlockLog.java     |  32 +-
 .../hdds/scm/container/MockNodeManager.java     |   4 +-
 .../TestCloseContainerEventHandler.java         |  20 +-
 .../container/TestContainerReportHandler.java   |  13 +-
 .../container/TestContainerStateManager.java    |  29 +-
 .../scm/container/TestSCMContainerManager.java  |  46 +-
 .../replication/TestReplicationManager.java     |  29 +-
 .../hdds/scm/node/TestContainerPlacement.java   |   8 +-
 .../hdds/scm/node/TestDeadNodeHandler.java      |   7 +-
 .../hadoop/hdds/scm/node/TestNodeManager.java   |   2 +-
 .../ozone/container/common/TestEndPoint.java    |   2 +-
 .../testutils/ReplicationNodeManagerMock.java   |   4 +-
 .../hdds/scm/cli/container/InfoSubcommand.java  |  13 +-
 .../hadoop/hdfs/DFSStripedOutputStream.java     |  12 +
 .../src/CMakeLists.txt                          |  22 +-
 .../src/main/native/libhdfspp/CMakeLists.txt    |   4 +-
 .../java/org/apache/hadoop/hdfs/DFSUtil.java    |  12 +-
 .../hadoop/hdfs/protocol/BlockListAsLongs.java  |   2 +-
 .../QJournalProtocolServerSideTranslatorPB.java |   2 +-
 .../block/BlockPoolTokenSecretManager.java      |  20 +
 .../token/block/BlockTokenSecretManager.java    |  22 +-
 .../hadoop/hdfs/server/balancer/Balancer.java   |  15 +-
 .../server/blockmanagement/BlockManager.java    |  26 +-
 .../blockmanagement/BlockPlacementPolicy.java   |   1 -
 .../CombinedHostFileManager.java                |   6 +-
 .../blockmanagement/CorruptReplicasMap.java     |   2 +-
 .../blockmanagement/DatanodeAdminManager.java   |   8 +-
 .../server/blockmanagement/HostFileManager.java |   7 +-
 .../hdfs/server/blockmanagement/HostSet.java    |   8 +-
 .../server/blockmanagement/SlowPeerTracker.java |   5 +-
 .../server/datanode/BlockPoolSliceStorage.java  |  60 +-
 .../server/datanode/BlockRecoveryWorker.java    |  15 +-
 .../hdfs/server/datanode/BlockScanner.java      |   6 +-
 .../hadoop/hdfs/server/datanode/DataNode.java   |  10 +-
 .../hdfs/server/datanode/DataStorage.java       |   4 +-
 .../hdfs/server/datanode/DirectoryScanner.java  |   1 -
 .../hdfs/server/datanode/FileIoProvider.java    |   3 -
 .../hdfs/server/datanode/VolumeScanner.java     |   4 +-
 .../server/datanode/checker/AbstractFuture.java |  13 +-
 .../server/datanode/fsdataset/FsDatasetSpi.java |  12 +-
 .../server/datanode/fsdataset/FsVolumeSpi.java  |  13 +-
 .../datanode/metrics/OutlierDetector.java       |   3 +-
 .../diskbalancer/DiskBalancerException.java     |   1 -
 .../datamodel/DiskBalancerCluster.java          |  11 +-
 .../datamodel/DiskBalancerDataNode.java         |  10 +-
 .../diskbalancer/planner/GreedyPlanner.java     |   2 +-
 .../hadoop/hdfs/server/namenode/AclStorage.java |  18 +-
 .../server/namenode/EncryptionZoneManager.java  |  42 +-
 .../hdfs/server/namenode/FSDirectory.java       |   8 +-
 .../hdfs/server/namenode/FSNamesystem.java      |  24 +-
 .../hadoop/hdfs/server/namenode/INode.java      |   4 +-
 .../hdfs/server/namenode/INodeReference.java    |   6 +-
 .../hdfs/server/namenode/INodesInPath.java      |   4 +-
 .../hdfs/server/namenode/JournalManager.java    |   2 +-
 .../hdfs/server/namenode/LeaseManager.java      |   2 +-
 .../server/namenode/MetaRecoveryContext.java    |   2 +-
 .../hadoop/hdfs/server/namenode/NameNode.java   |   6 +-
 .../hdfs/server/namenode/NamenodeFsck.java      |   9 +-
 .../hadoop/hdfs/server/namenode/Quota.java      |   5 +-
 .../server/namenode/ReencryptionHandler.java    |   2 +-
 .../server/namenode/XAttrPermissionFilter.java  |   4 +-
 .../hdfs/server/namenode/XAttrStorage.java      |   8 +-
 .../snapshot/AbstractINodeDiffList.java         |   8 +-
 .../namenode/snapshot/DiffListBySkipList.java   |   9 +-
 .../sps/BlockStorageMovementNeeded.java         |   5 +-
 .../namenode/sps/DatanodeCacheManager.java      |   2 +-
 .../sps/StoragePolicySatisfyManager.java        |  14 +-
 .../startupprogress/StartupProgressView.java    |   4 +-
 .../server/namenode/top/metrics/TopMetrics.java |  17 +-
 .../namenode/top/window/RollingWindow.java      |  18 +-
 .../top/window/RollingWindowManager.java        |   2 +-
 .../protocol/BlockStorageMovementCommand.java   |  11 +-
 .../hdfs/server/protocol/DatanodeProtocol.java  |   2 +-
 .../hdfs/server/protocol/NamenodeProtocol.java  |   5 +-
 .../sps/ExternalSPSBlockMoveTaskHandler.java    |   2 +
 .../org/apache/hadoop/hdfs/tools/DFSck.java     |  13 +-
 .../offlineEditsViewer/OfflineEditsViewer.java  |   4 +-
 .../offlineEditsViewer/OfflineEditsVisitor.java |   2 +-
 .../StatisticsEditsVisitor.java                 |   4 +-
 .../NameDistributionVisitor.java                |   4 +-
 .../offlineImageViewer/PBImageTextWriter.java   |   4 +-
 .../java/org/apache/hadoop/hdfs/util/Diff.java  |  16 +-
 .../org/apache/hadoop/hdfs/util/XMLUtils.java   |   4 +-
 .../hadoop/hdfs/TestDFSStripedOutputStream.java |  36 +-
 .../security/token/block/TestBlockToken.java    |   8 +-
 .../TestPendingReconstruction.java              |   2 +
 ...constructStripedBlocksWithRackAwareness.java |  11 +-
 .../fsdataset/impl/TestLazyPersistFiles.java    |  15 +-
 .../TestUpgradeDomainBlockPlacementPolicy.java  |  22 +-
 .../TestOfflineImageViewer.java                 |  19 +
 .../v2/app/rm/RMContainerAllocator.java         |  14 +-
 .../v2/app/rm/TestRMContainerAllocator.java     |  14 +
 hadoop-maven-plugins/pom.xml                    |   4 +
 .../ozone/client/io/ChunkGroupInputStream.java  |  10 +-
 .../ozone/client/io/ChunkGroupOutputStream.java |  21 +-
 .../ozone/client/TestHddsClientUtils.java       | 137 ++++-
 .../src/main/conf/om-audit-log4j2.properties    |  90 ---
 .../common/src/main/conf/ozone-site.xml         |  24 -
 .../ozone/om/helpers/OmKeyLocationInfo.java     |  19 +-
 .../src/main/proto/OzoneManagerProtocol.proto   |   1 -
 .../dist/dev-support/bin/dist-layout-stitching  |   4 +-
 .../dist/src/main/conf/log4j.properties         | 157 ++++++
 .../src/main/conf/om-audit-log4j2.properties    |  90 +++
 hadoop-ozone/dist/src/main/conf/ozone-site.xml  |  24 +
 .../dist/src/main/smoketest/s3/README.md        |   2 +-
 .../dist/src/main/smoketest/s3/awss3.robot      |   4 +-
 .../dist/src/main/smoketest/s3/objectcopy.robot |  66 +++
 .../src/main/smoketest/s3/objectdelete.robot    |   6 +-
 .../main/smoketest/s3/objectmultidelete.robot   |   6 +-
 .../src/main/smoketest/s3/objectputget.robot    |   2 +-
 .../ozonedoc/layouts/partials/navbar.html       |   5 +-
 .../ozonedoc/layouts/partials/sidebar.html      |   8 +-
 .../hdds/scm/pipeline/TestNode2PipelineMap.java |  29 +-
 .../hdds/scm/pipeline/TestNodeFailure.java      |  27 +-
 .../hdds/scm/pipeline/TestPipelineClose.java    |  41 +-
 .../scm/pipeline/TestPipelineStateManager.java  | 171 +++---
 .../scm/pipeline/TestRatisPipelineProvider.java |  13 +-
 .../scm/pipeline/TestSCMPipelineManager.java    |  45 +-
 .../hdds/scm/pipeline/TestSCMRestart.java       |  23 +-
 .../pipeline/TestSimplePipelineProvider.java    |  13 +-
 .../apache/hadoop/ozone/MiniOzoneCluster.java   |  16 +-
 .../hadoop/ozone/MiniOzoneClusterImpl.java      |  36 +-
 .../apache/hadoop/ozone/RatisTestHelper.java    |   2 +-
 .../TestContainerStateMachineIdempotency.java   |   2 +-
 .../hadoop/ozone/TestMiniOzoneCluster.java      |  23 +-
 .../ozone/TestStorageContainerManager.java      |   4 +-
 .../TestStorageContainerManagerHelper.java      |   5 +-
 .../ozone/client/rest/TestOzoneRestClient.java  |   4 +-
 .../hadoop/ozone/client/rpc/TestBCSID.java      |   2 +-
 .../rpc/TestCloseContainerHandlingByClient.java |  12 +-
 .../rpc/TestContainerStateMachineFailures.java  |   2 +-
 .../ozone/client/rpc/TestOzoneRpcClient.java    |   6 +-
 .../ozone/container/ContainerTestHelper.java    |  76 +--
 .../container/TestContainerReplication.java     |   8 +-
 .../common/impl/TestCloseContainerHandler.java  |  12 +-
 .../common/impl/TestContainerPersistence.java   |  18 +-
 .../commandhandler/TestBlockDeletion.java       |  14 +-
 .../TestCloseContainerByPipeline.java           |   8 +-
 .../TestCloseContainerHandler.java              |   2 +-
 .../transport/server/ratis/TestCSMMetrics.java  |  14 +-
 .../container/metrics/TestContainerMetrics.java |   4 +-
 .../container/ozoneimpl/TestOzoneContainer.java |   9 +-
 .../ozoneimpl/TestOzoneContainerRatis.java      |   6 +-
 .../container/server/TestContainerServer.java   |  21 +-
 .../apache/hadoop/ozone/om/TestOmMetrics.java   |  23 +-
 .../hadoop/ozone/ozShell/TestOzoneShell.java    |  77 +--
 .../hadoop/ozone/scm/TestAllocateContainer.java |   2 +-
 .../TestGetCommittedBlockLengthAndPutKey.java   |  18 +-
 .../hadoop/ozone/web/client/TestKeys.java       |   4 +-
 .../apache/hadoop/ozone/om/KeyManagerImpl.java  |   7 +-
 .../hadoop/ozone/web/ozShell/Handler.java       | 139 +----
 .../hadoop/ozone/web/ozShell/OzoneAddress.java  | 251 +++++++++
 .../web/ozShell/bucket/BucketCommands.java      |   6 +
 .../web/ozShell/bucket/CreateBucketHandler.java |  27 +-
 .../web/ozShell/bucket/DeleteBucketHandler.java |  20 +-
 .../web/ozShell/bucket/InfoBucketHandler.java   |  28 +-
 .../web/ozShell/bucket/ListBucketHandler.java   |  26 +-
 .../web/ozShell/bucket/S3BucketMapping.java     |  55 +-
 .../web/ozShell/bucket/UpdateBucketHandler.java |  20 +-
 .../web/ozShell/keys/DeleteKeyHandler.java      |  23 +-
 .../ozone/web/ozShell/keys/GetKeyHandler.java   |  18 +-
 .../ozone/web/ozShell/keys/InfoKeyHandler.java  |  28 +-
 .../ozone/web/ozShell/keys/KeyCommands.java     |   6 +
 .../ozone/web/ozShell/keys/ListKeyHandler.java  |  28 +-
 .../ozone/web/ozShell/keys/PutKeyHandler.java   |  21 +-
 .../web/ozShell/volume/CreateVolumeHandler.java |  28 +-
 .../web/ozShell/volume/DeleteVolumeHandler.java |   8 +-
 .../web/ozShell/volume/InfoVolumeHandler.java   |   8 +-
 .../web/ozShell/volume/ListVolumeHandler.java   |  27 +-
 .../web/ozShell/volume/UpdateVolumeHandler.java |   8 +-
 .../web/ozShell/volume/VolumeCommands.java      |   6 +
 .../ozone/om/ScmBlockLocationTestIngClient.java |  22 +-
 .../ozone/web/ozShell/TestOzoneAddress.java     | 100 ++++
 .../hadoop/ozone/web/ozShell/package-info.java  |  21 +
 .../ITestOzoneContractGetFileStatus.java        |   6 +-
 hadoop-ozone/pom.xml                            | 139 ++++-
 hadoop-ozone/s3gateway/pom.xml                  |   6 +
 .../ozone/s3/SignedChunksInputStream.java       |  99 ++++
 .../hadoop/ozone/s3/VirtualHostStyleFilter.java |  22 +
 .../ozone/s3/endpoint/BucketEndpoint.java       | 130 +++--
 .../ozone/s3/endpoint/CopyObjectResponse.java   |  63 +++
 .../hadoop/ozone/s3/endpoint/EndpointBase.java  |  46 +-
 .../ozone/s3/endpoint/ListObjectResponse.java   |  22 +
 .../ozone/s3/endpoint/MultiDeleteRequest.java   |   2 +-
 .../MultiDeleteRequestUnmarshaller.java         |  84 +++
 .../ozone/s3/endpoint/ObjectEndpoint.java       | 176 +++++-
 .../hadoop/ozone/s3/endpoint/RootEndpoint.java  |   6 +-
 .../ozone/s3/endpoint/XmlNamespaceFilter.java   |  54 ++
 .../hadoop/ozone/s3/exception/S3ErrorTable.java |   6 +
 .../s3/header/AuthenticationHeaderParser.java   |  61 +++
 .../apache/hadoop/ozone/s3/util/S3Consts.java   |  20 +
 .../hadoop/ozone/s3/util/S3StorageType.java     |  55 ++
 .../apache/hadoop/ozone/s3/util/S3utils.java    |  73 +++
 .../hadoop/ozone/s3/util/package-info.java      |  22 +
 .../hadoop/ozone/client/OzoneBucketStub.java    |   7 +-
 .../ozone/s3/TestSignedChunksInputStream.java   |  84 +++
 .../ozone/s3/TestVirtualHostStyleFilter.java    |  20 +-
 .../hadoop/ozone/s3/endpoint/TestBucketGet.java | 227 +++++++-
 .../TestMultiDeleteRequestUnmarshaller.java     |  76 +++
 .../hadoop/ozone/s3/endpoint/TestObjectGet.java |   4 +-
 .../s3/endpoint/TestObjectMultiDelete.java      |  61 ++-
 .../hadoop/ozone/s3/endpoint/TestPutObject.java | 164 +++++-
 .../hadoop/ozone/s3/endpoint/TestRootList.java  |  22 +-
 .../genesis/BenchMarkContainerStateMap.java     |  27 +-
 .../genesis/BenchMarkDatanodeDispatcher.java    |   6 +-
 .../org/apache/hadoop/ozone/scm/cli/SQLCLI.java |   2 +-
 .../freon/TestFreonWithDatanodeFastRestart.java | 130 +++++
 .../freon/TestFreonWithDatanodeRestart.java     |  53 +-
 .../hadoop/ozone/scm/TestContainerSQLCli.java   |   7 +-
 hadoop-project/pom.xml                          | 224 ++------
 .../site/markdown/tools/hadoop-aliyun/index.md  |  36 ++
 .../azurebfs/oauth2/AzureADAuthenticator.java   |   7 +-
 .../dev-support/findbugs-exclude.xml            |  16 -
 .../hadoop/yarn/api/ApplicationConstants.java   |   9 +-
 .../yarn/api/records/ResourceUtilization.java   |   8 +-
 .../hadoop/yarn/service/ServiceMaster.java      |   7 +
 .../hadoop/yarn/service/ServiceScheduler.java   |  67 +++
 .../yarn/service/client/ServiceClient.java      | 141 ++++-
 .../hadoop/yarn/service/utils/HttpUtil.java     | 123 +++++
 .../yarn/service/utils/ServiceApiUtil.java      |   2 -
 .../hadoop/yarn/service/TestServiceAM.java      |  32 +-
 .../hadoop-yarn/hadoop-yarn-common/pom.xml      |  10 +-
 .../hadoop/yarn/webapp/view/JQueryUI.java       |   6 +-
 .../static/dt-1.10.18/css/custom_datatable.css  |  68 +++
 .../webapps/static/dt-1.10.18/css/demo_page.css | 108 ++++
 .../static/dt-1.10.18/css/demo_table.css        | 544 +++++++++++++++++++
 .../static/dt-1.10.18/css/jquery.dataTables.css | 466 ++++++++++++++++
 .../webapps/static/dt-1.10.18/css/jui-dt.css    | 352 ++++++++++++
 .../static/dt-1.10.18/images/Sorting icons.psd  | Bin 0 -> 27490 bytes
 .../static/dt-1.10.18/images/back_disabled.jpg  | Bin 0 -> 612 bytes
 .../static/dt-1.10.18/images/back_enabled.jpg   | Bin 0 -> 807 bytes
 .../static/dt-1.10.18/images/favicon.ico        | Bin 0 -> 894 bytes
 .../dt-1.10.18/images/forward_disabled.jpg      | Bin 0 -> 635 bytes
 .../dt-1.10.18/images/forward_enabled.jpg       | Bin 0 -> 852 bytes
 .../static/dt-1.10.18/images/sort_asc.png       | Bin 0 -> 263 bytes
 .../dt-1.10.18/images/sort_asc_disabled.png     | Bin 0 -> 252 bytes
 .../static/dt-1.10.18/images/sort_both.png      | Bin 0 -> 282 bytes
 .../static/dt-1.10.18/images/sort_desc.png      | Bin 0 -> 260 bytes
 .../dt-1.10.18/images/sort_desc_disabled.png    | Bin 0 -> 251 bytes
 .../dt-1.10.18/js/jquery.dataTables.min.js      | 184 +++++++
 .../webapps/static/dt-1.10.7/css/demo_page.css  | 110 ----
 .../webapps/static/dt-1.10.7/css/demo_table.css | 538 ------------------
 .../webapps/static/dt-1.10.7/css/jui-dt.css     | 322 -----------
 .../static/dt-1.10.7/images/Sorting icons.psd   | Bin 27490 -> 0 bytes
 .../static/dt-1.10.7/images/back_disabled.jpg   | Bin 612 -> 0 bytes
 .../static/dt-1.10.7/images/back_enabled.jpg    | Bin 807 -> 0 bytes
 .../webapps/static/dt-1.10.7/images/favicon.ico | Bin 894 -> 0 bytes
 .../dt-1.10.7/images/forward_disabled.jpg       | Bin 635 -> 0 bytes
 .../static/dt-1.10.7/images/forward_enabled.jpg | Bin 852 -> 0 bytes
 .../static/dt-1.10.7/images/sort_asc.png        | Bin 263 -> 0 bytes
 .../dt-1.10.7/images/sort_asc_disabled.png      | Bin 252 -> 0 bytes
 .../static/dt-1.10.7/images/sort_both.png       | Bin 282 -> 0 bytes
 .../static/dt-1.10.7/images/sort_desc.png       | Bin 260 -> 0 bytes
 .../dt-1.10.7/images/sort_desc_disabled.png     | Bin 251 -> 0 bytes
 .../dt-1.10.7/js/jquery.dataTables.min.js       | 160 ------
 .../dev-support/findbugs-exclude.xml            |   4 +-
 .../hadoop-yarn/hadoop-yarn-csi/pom.xml         |   7 +
 .../hadoop/yarn/csi/client/CsiClient.java       |   2 +-
 .../hadoop/yarn/csi/client/CsiGrpcClient.java   |   1 -
 .../server/nodemanager/ContainerExecutor.java   |  12 +
 .../nodemanager/DefaultContainerExecutor.java   |   7 +
 .../nodemanager/LinuxContainerExecutor.java     |  43 ++
 .../linux/privileged/PrivilegedOperation.java   |   6 +-
 .../linux/resources/CGroupsHandler.java         |   2 +-
 .../linux/resources/CGroupsHandlerImpl.java     |   4 +-
 .../CGroupsMemoryResourceHandlerImpl.java       |  25 -
 .../linux/resources/DefaultOOMHandler.java      |  45 +-
 .../linux/resources/MemoryResourceHandler.java  |  10 -
 .../runtime/DockerLinuxContainerRuntime.java    |  18 +
 .../linux/runtime/docker/DockerRunCommand.java  |   6 +
 .../monitor/ContainersMonitor.java              |   6 +-
 .../monitor/ContainersMonitorImpl.java          | 119 ++--
 ...locationBasedResourceUtilizationTracker.java |  27 +-
 .../scheduler/ContainerScheduler.java           |   5 +-
 .../nodemanager/webapp/NMWebServices.java       |  27 +
 .../impl/container-executor.c                   | 127 ++++-
 .../impl/container-executor.h                   |  22 +-
 .../main/native/container-executor/impl/main.c  |  30 +-
 .../impl/utils/string-utils.c                   |   9 +
 .../impl/utils/string-utils.h                   |   6 +
 .../test/test-container-executor.c              |  61 +++
 .../nodemanager/TestLinuxContainerExecutor.java |  11 +
 .../TestContainerManagerRecovery.java           |   2 +-
 .../linux/resources/TestCGroupsHandlerImpl.java |   2 +-
 .../TestCGroupsMemoryResourceHandlerImpl.java   |  44 --
 .../linux/resources/TestDefaultOOMHandler.java  | 434 ++++++++++++---
 .../TestContainersMonitorResourceChange.java    |   4 +
 ...locationBasedResourceUtilizationTracker.java |  18 -
 .../TestContainerSchedulerRecovery.java         |  79 +--
 .../capacity/TestContainerAllocation.java       |   4 +
 .../pom.xml                                     |   8 +
 .../pom.xml                                     |   8 +
 .../src/site/markdown/CapacityScheduler.md      |   2 +-
 .../src/site/markdown/DockerContainers.md       |  16 +
 .../site/markdown/NodeManagerCGroupsMemory.md   |  12 +-
 .../src/site/markdown/NodeManagerCgroups.md     |   4 +-
 .../src/site/markdown/TimelineServiceV2.md      |  17 +
 .../markdown/yarn-service/ServiceUpgrade.md     |  38 +-
 pom.xml                                         |   2 -
 412 files changed, 8779 insertions(+), 5753 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/32551b49/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/protocolPB/QJournalProtocolServerSideTranslatorPB.java
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/hadoop/blob/32551b49/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/hadoop/blob/32551b49/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java
----------------------------------------------------------------------


---------------------------------------------------------------------
To unsubscribe, e-mail: common-commits-unsubscribe@hadoop.apache.org
For additional commands, e-mail: common-commits-help@hadoop.apache.org


[04/50] [abbrv] hadoop git commit: HADOOP-15882. Upgrade maven-shade-plugin from 2.4.3 to 3.2.0. Contributed by Takanobu Asanuma.

Posted by su...@apache.org.
HADOOP-15882. Upgrade maven-shade-plugin from 2.4.3 to 3.2.0. Contributed by Takanobu Asanuma.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/34b2521f
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/34b2521f
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/34b2521f

Branch: refs/heads/HDFS-12943
Commit: 34b2521f5e7388e6a4d76af537ef147206fd9b72
Parents: f76e3c3
Author: Bharat Viswanadham <bh...@apache.org>
Authored: Thu Oct 25 13:29:59 2018 -0700
Committer: Bharat Viswanadham <bh...@apache.org>
Committed: Thu Oct 25 13:29:59 2018 -0700

----------------------------------------------------------------------
 hadoop-maven-plugins/pom.xml | 4 ++++
 hadoop-project/pom.xml       | 2 +-
 2 files changed, 5 insertions(+), 1 deletion(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/34b2521f/hadoop-maven-plugins/pom.xml
----------------------------------------------------------------------
diff --git a/hadoop-maven-plugins/pom.xml b/hadoop-maven-plugins/pom.xml
index 16bad69..b0f6ef4 100644
--- a/hadoop-maven-plugins/pom.xml
+++ b/hadoop-maven-plugins/pom.xml
@@ -94,6 +94,10 @@
           <groupId>org.vafer</groupId>
           <artifactId>jdependency</artifactId>
         </exclusion>
+        <exclusion>
+          <groupId>org.sonatype.sisu</groupId>
+          <artifactId>sisu-inject-plexus</artifactId>
+        </exclusion>
       </exclusions>
     </dependency>
   </dependencies>

http://git-wip-us.apache.org/repos/asf/hadoop/blob/34b2521f/hadoop-project/pom.xml
----------------------------------------------------------------------
diff --git a/hadoop-project/pom.xml b/hadoop-project/pom.xml
index d6e7006..e9c5c0f 100644
--- a/hadoop-project/pom.xml
+++ b/hadoop-project/pom.xml
@@ -137,7 +137,7 @@
     <maven-compiler-plugin.version>3.1</maven-compiler-plugin.version>
     <maven-install-plugin.version>2.5.1</maven-install-plugin.version>
     <maven-resources-plugin.version>2.6</maven-resources-plugin.version>
-    <maven-shade-plugin.version>2.4.3</maven-shade-plugin.version>
+    <maven-shade-plugin.version>3.2.0</maven-shade-plugin.version>
     <maven-jar-plugin.version>2.5</maven-jar-plugin.version>
     <maven-war-plugin.version>3.1.0</maven-war-plugin.version>
     <maven-source-plugin.version>2.3</maven-source-plugin.version>


---------------------------------------------------------------------
To unsubscribe, e-mail: common-commits-unsubscribe@hadoop.apache.org
For additional commands, e-mail: common-commits-help@hadoop.apache.org


[19/50] [abbrv] hadoop git commit: HDDS-739. Support MultiDeleteRequest without XML namespace. Contributed by Elek Marton.

Posted by su...@apache.org.
HDDS-739. Support MultiDeleteRequest without XML namespace. Contributed by Elek Marton.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/199703f9
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/199703f9
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/199703f9

Branch: refs/heads/HDFS-12943
Commit: 199703f9853b8dfd9df114030208ed45fc830c2b
Parents: bfb9adc
Author: Bharat Viswanadham <bh...@apache.org>
Authored: Sat Oct 27 09:32:28 2018 -0700
Committer: Bharat Viswanadham <bh...@apache.org>
Committed: Sat Oct 27 09:32:28 2018 -0700

----------------------------------------------------------------------
 .../ozone/s3/endpoint/MultiDeleteRequest.java   |  2 +-
 .../MultiDeleteRequestUnmarshaller.java         | 84 ++++++++++++++++++++
 .../ozone/s3/endpoint/XmlNamespaceFilter.java   | 54 +++++++++++++
 .../TestMultiDeleteRequestUnmarshaller.java     | 76 ++++++++++++++++++
 4 files changed, 215 insertions(+), 1 deletion(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/199703f9/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/MultiDeleteRequest.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/MultiDeleteRequest.java b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/MultiDeleteRequest.java
index d9dd043..45b8322 100644
--- a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/MultiDeleteRequest.java
+++ b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/MultiDeleteRequest.java
@@ -34,7 +34,7 @@ import java.util.List;
 public class MultiDeleteRequest {
 
   @XmlElement(name = "Quiet")
-  private boolean quiet;
+  private Boolean quiet = Boolean.FALSE;
 
   @XmlElement(name = "Object")
   private List<DeleteObject> objects = new ArrayList<>();

http://git-wip-us.apache.org/repos/asf/hadoop/blob/199703f9/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/MultiDeleteRequestUnmarshaller.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/MultiDeleteRequestUnmarshaller.java b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/MultiDeleteRequestUnmarshaller.java
new file mode 100644
index 0000000..e8ed515
--- /dev/null
+++ b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/MultiDeleteRequestUnmarshaller.java
@@ -0,0 +1,84 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.ozone.s3.endpoint;
+
+import javax.ws.rs.Produces;
+import javax.ws.rs.WebApplicationException;
+import javax.ws.rs.core.MediaType;
+import javax.ws.rs.core.MultivaluedMap;
+import javax.ws.rs.ext.MessageBodyReader;
+import javax.ws.rs.ext.Provider;
+import javax.xml.bind.JAXBContext;
+import javax.xml.bind.UnmarshallerHandler;
+import javax.xml.parsers.SAXParserFactory;
+import java.io.IOException;
+import java.io.InputStream;
+import java.lang.annotation.Annotation;
+import java.lang.reflect.Type;
+
+import org.xml.sax.InputSource;
+import org.xml.sax.XMLReader;
+
+/**
+ * Custom unmarshaller to read MultiDeleteRequest w/wo namespace.
+ */
+@Provider
+@Produces(MediaType.APPLICATION_XML)
+public class MultiDeleteRequestUnmarshaller
+    implements MessageBodyReader<MultiDeleteRequest> {
+
+  private final JAXBContext context;
+  private final XMLReader xmlReader;
+
+  public MultiDeleteRequestUnmarshaller() {
+    try {
+      context = JAXBContext.newInstance(MultiDeleteRequest.class);
+      SAXParserFactory saxParserFactory = SAXParserFactory.newInstance();
+      xmlReader = saxParserFactory.newSAXParser().getXMLReader();
+    } catch (Exception ex) {
+      throw new AssertionError("Can't instantiate MultiDeleteRequest parser",
+          ex);
+    }
+  }
+
+  @Override
+  public boolean isReadable(Class<?> type, Type genericType,
+      Annotation[] annotations, MediaType mediaType) {
+    return type.equals(MultiDeleteRequest.class);
+  }
+
+  @Override
+  public MultiDeleteRequest readFrom(Class<MultiDeleteRequest> type,
+      Type genericType, Annotation[] annotations, MediaType mediaType,
+      MultivaluedMap<String, String> httpHeaders, InputStream entityStream)
+      throws IOException, WebApplicationException {
+    try {
+      UnmarshallerHandler unmarshallerHandler =
+          context.createUnmarshaller().getUnmarshallerHandler();
+
+      XmlNamespaceFilter filter =
+          new XmlNamespaceFilter("http://s3.amazonaws.com/doc/2006-03-01/");
+      filter.setContentHandler(unmarshallerHandler);
+      filter.setParent(xmlReader);
+      filter.parse(new InputSource(entityStream));
+      return (MultiDeleteRequest) unmarshallerHandler.getResult();
+    } catch (Exception e) {
+      throw new WebApplicationException("Can't parse request body to XML.", e);
+    }
+  }
+}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/199703f9/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/XmlNamespaceFilter.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/XmlNamespaceFilter.java b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/XmlNamespaceFilter.java
new file mode 100644
index 0000000..a49ecf6
--- /dev/null
+++ b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/XmlNamespaceFilter.java
@@ -0,0 +1,54 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.ozone.s3.endpoint;
+
+import org.xml.sax.Attributes;
+import org.xml.sax.SAXException;
+import org.xml.sax.helpers.XMLFilterImpl;
+
+/**
+ * SAX filter to force namespace usage.
+ * <p>
+ * This filter will read the XML content as namespace qualified content
+ * independent from the current namespace usage.
+ */
+public class XmlNamespaceFilter extends XMLFilterImpl {
+
+  private String namespace;
+
+  /**
+   * Create the filter.
+   *
+   * @param namespace namespace to add to every element.
+   */
+  public XmlNamespaceFilter(String namespace) {
+    this.namespace = namespace;
+  }
+
+  @Override
+  public void startElement(String uri, String localName, String qName,
+      Attributes atts) throws SAXException {
+    super.startElement(namespace, localName, qName, atts);
+  }
+
+  @Override
+  public void endElement(String uri, String localName, String qName)
+      throws SAXException {
+    super.endElement(namespace, localName, qName);
+  }
+}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/199703f9/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestMultiDeleteRequestUnmarshaller.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestMultiDeleteRequestUnmarshaller.java b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestMultiDeleteRequestUnmarshaller.java
new file mode 100644
index 0000000..b3b1be0
--- /dev/null
+++ b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestMultiDeleteRequestUnmarshaller.java
@@ -0,0 +1,76 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ *
+ */
+package org.apache.hadoop.ozone.s3.endpoint;
+
+import java.io.ByteArrayInputStream;
+import java.io.IOException;
+
+import org.junit.Assert;
+import static org.junit.Assert.*;
+import org.junit.Test;
+
+/**
+ * Test custom marshalling of MultiDeleteRequest.
+ */
+public class TestMultiDeleteRequestUnmarshaller {
+
+  @Test
+  public void fromStreamWithNamespace() throws IOException {
+    //GIVEN
+    ByteArrayInputStream inputBody =
+        new ByteArrayInputStream(
+            ("<Delete xmlns=\"http://s3.amazonaws"
+                + ".com/doc/2006-03-01/\"><Object>key1</Object><Object>key2"
+                + "</Object><Object>key3"
+                + "</Object></Delete>")
+                .getBytes());
+
+    //WHEN
+    MultiDeleteRequest multiDeleteRequest =
+        unmarshall(inputBody);
+
+    //THEN
+    Assert.assertEquals(3, multiDeleteRequest.getObjects().size());
+  }
+
+  @Test
+  public void fromStreamWithoutNamespace() throws IOException {
+    //GIVEN
+    ByteArrayInputStream inputBody =
+        new ByteArrayInputStream(
+            ("<Delete><Object>key1</Object><Object>key2"
+                + "</Object><Object>key3"
+                + "</Object></Delete>")
+                .getBytes());
+
+    //WHEN
+    MultiDeleteRequest multiDeleteRequest =
+        unmarshall(inputBody);
+
+    //THEN
+    Assert.assertEquals(3, multiDeleteRequest.getObjects().size());
+  }
+
+  private MultiDeleteRequest unmarshall(ByteArrayInputStream inputBody)
+      throws IOException {
+    return new MultiDeleteRequestUnmarshaller()
+        .readFrom(null, null, null, null, null, inputBody);
+  }
+}
\ No newline at end of file
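
The two new classes work together: MultiDeleteRequestUnmarshaller wires a plain SAX XMLReader through XmlNamespaceFilter, so JAXB always receives namespace-qualified events whether or not the client sent the http://s3.amazonaws.com/doc/2006-03-01/ namespace, and the tests above cover both cases through readFrom(). Below is a minimal standalone sketch of the same idea; it is not part of the patch. The demo class, its reduced Delete binding and the sample payload are invented for illustration, it assumes XmlNamespaceFilter from this patch is on the classpath, and it turns on setNamespaceAware(true) so that localName is populated for JAXB (the committed unmarshaller relies on the parser's default configuration).

import javax.xml.bind.JAXBContext;
import javax.xml.bind.UnmarshallerHandler;
import javax.xml.bind.annotation.XmlElement;
import javax.xml.bind.annotation.XmlRootElement;
import javax.xml.parsers.SAXParserFactory;
import java.io.ByteArrayInputStream;
import java.nio.charset.StandardCharsets;
import java.util.ArrayList;
import java.util.List;

import org.apache.hadoop.ozone.s3.endpoint.XmlNamespaceFilter;
import org.xml.sax.InputSource;
import org.xml.sax.XMLReader;

public class XmlNamespaceFilterDemo {

  // Illustrative stand-in for MultiDeleteRequest, reduced to one field.
  @XmlRootElement(name = "Delete",
      namespace = "http://s3.amazonaws.com/doc/2006-03-01/")
  public static class Delete {
    @XmlElement(name = "Object",
        namespace = "http://s3.amazonaws.com/doc/2006-03-01/")
    public List<String> objects = new ArrayList<>();
  }

  public static void main(String[] args) throws Exception {
    // Namespace-less payload, as some S3 clients send it.
    byte[] body = "<Delete><Object>key1</Object><Object>key2</Object></Delete>"
        .getBytes(StandardCharsets.UTF_8);

    JAXBContext context = JAXBContext.newInstance(Delete.class);
    UnmarshallerHandler handler =
        context.createUnmarshaller().getUnmarshallerHandler();

    SAXParserFactory factory = SAXParserFactory.newInstance();
    factory.setNamespaceAware(true); // so localName is filled in for JAXB
    XMLReader reader = factory.newSAXParser().getXMLReader();

    // The filter from the patch rewrites every element into the S3 namespace
    // before the events reach the JAXB unmarshaller.
    XmlNamespaceFilter filter =
        new XmlNamespaceFilter("http://s3.amazonaws.com/doc/2006-03-01/");
    filter.setParent(reader);
    filter.setContentHandler(handler);
    filter.parse(new InputSource(new ByteArrayInputStream(body)));

    Delete delete = (Delete) handler.getResult();
    System.out.println(delete.objects.size()); // prints 2
  }
}

In the gateway itself this plumbing is not called by hand: MultiDeleteRequestUnmarshaller is annotated with @Provider, so the JAX-RS runtime is expected to pick it up and run readFrom() on incoming multi-delete bodies, which is exactly what TestMultiDeleteRequestUnmarshaller drives directly.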


---------------------------------------------------------------------
To unsubscribe, e-mail: common-commits-unsubscribe@hadoop.apache.org
For additional commands, e-mail: common-commits-help@hadoop.apache.org


[36/50] [abbrv] hadoop git commit: YARN-8854. Upgrade jquery datatable version references to v1.10.19. Contributed by Akhil PB.

Posted by su...@apache.org.
http://git-wip-us.apache.org/repos/asf/hadoop/blob/d36012b6/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/webapps/static/dt-1.10.18/js/jquery.dataTables.min.js
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/webapps/static/dt-1.10.18/js/jquery.dataTables.min.js b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/webapps/static/dt-1.10.18/js/jquery.dataTables.min.js
new file mode 100644
index 0000000..3a79ccc
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/webapps/static/dt-1.10.18/js/jquery.dataTables.min.js
@@ -0,0 +1,184 @@
+/**
+* Licensed to the Apache Software Foundation (ASF) under one
+* or more contributor license agreements.  See the NOTICE file
+* distributed with this work for additional information
+* regarding copyright ownership.  The ASF licenses this file
+* to you under the Apache License, Version 2.0 (the
+* "License"); you may not use this file except in compliance
+* with the License.  You may obtain a copy of the License at
+*
+*     http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+*/
+
+/*!
+ DataTables 1.10.18
+ ©2008-2018 SpryMedia Ltd - datatables.net/license
+*/
+(function(h){"function"===typeof define&&define.amd?define(["jquery"],function(E){return h(E,window,document)}):"object"===typeof exports?module.exports=function(E,H){E||(E=window);H||(H="undefined"!==typeof window?require("jquery"):require("jquery")(E));return h(H,E,E.document)}:h(jQuery,window,document)})(function(h,E,H,k){function Z(a){var b,c,d={};h.each(a,function(e){if((b=e.match(/^([^A-Z]+?)([A-Z])/))&&-1!=="a aa ai ao as b fn i m o s ".indexOf(b[1]+" "))c=e.replace(b[0],b[2].toLowerCase()),
+d[c]=e,"o"===b[1]&&Z(a[e])});a._hungarianMap=d}function J(a,b,c){a._hungarianMap||Z(a);var d;h.each(b,function(e){d=a._hungarianMap[e];if(d!==k&&(c||b[d]===k))"o"===d.charAt(0)?(b[d]||(b[d]={}),h.extend(!0,b[d],b[e]),J(a[d],b[d],c)):b[d]=b[e]})}function Ca(a){var b=n.defaults.oLanguage,c=b.sDecimal;c&&Da(c);if(a){var d=a.sZeroRecords;!a.sEmptyTable&&(d&&"No data available in table"===b.sEmptyTable)&&F(a,a,"sZeroRecords","sEmptyTable");!a.sLoadingRecords&&(d&&"Loading..."===b.sLoadingRecords)&&F(a,
+a,"sZeroRecords","sLoadingRecords");a.sInfoThousands&&(a.sThousands=a.sInfoThousands);(a=a.sDecimal)&&c!==a&&Da(a)}}function eb(a){A(a,"ordering","bSort");A(a,"orderMulti","bSortMulti");A(a,"orderClasses","bSortClasses");A(a,"orderCellsTop","bSortCellsTop");A(a,"order","aaSorting");A(a,"orderFixed","aaSortingFixed");A(a,"paging","bPaginate");A(a,"pagingType","sPaginationType");A(a,"pageLength","iDisplayLength");A(a,"searching","bFilter");"boolean"===typeof a.sScrollX&&(a.sScrollX=a.sScrollX?"100%":
+"");"boolean"===typeof a.scrollX&&(a.scrollX=a.scrollX?"100%":"");if(a=a.aoSearchCols)for(var b=0,c=a.length;b<c;b++)a[b]&&J(n.models.oSearch,a[b])}function fb(a){A(a,"orderable","bSortable");A(a,"orderData","aDataSort");A(a,"orderSequence","asSorting");A(a,"orderDataType","sortDataType");var b=a.aDataSort;"number"===typeof b&&!h.isArray(b)&&(a.aDataSort=[b])}function gb(a){if(!n.__browser){var b={};n.__browser=b;var c=h("<div/>").css({position:"fixed",top:0,left:-1*h(E).scrollLeft(),height:1,width:1,
+overflow:"hidden"}).append(h("<div/>").css({position:"absolute",top:1,left:1,width:100,overflow:"scroll"}).append(h("<div/>").css({width:"100%",height:10}))).appendTo("body"),d=c.children(),e=d.children();b.barWidth=d[0].offsetWidth-d[0].clientWidth;b.bScrollOversize=100===e[0].offsetWidth&&100!==d[0].clientWidth;b.bScrollbarLeft=1!==Math.round(e.offset().left);b.bBounding=c[0].getBoundingClientRect().width?!0:!1;c.remove()}h.extend(a.oBrowser,n.__browser);a.oScroll.iBarWidth=n.__browser.barWidth}
+function hb(a,b,c,d,e,f){var g,j=!1;c!==k&&(g=c,j=!0);for(;d!==e;)a.hasOwnProperty(d)&&(g=j?b(g,a[d],d,a):a[d],j=!0,d+=f);return g}function Ea(a,b){var c=n.defaults.column,d=a.aoColumns.length,c=h.extend({},n.models.oColumn,c,{nTh:b?b:H.createElement("th"),sTitle:c.sTitle?c.sTitle:b?b.innerHTML:"",aDataSort:c.aDataSort?c.aDataSort:[d],mData:c.mData?c.mData:d,idx:d});a.aoColumns.push(c);c=a.aoPreSearchCols;c[d]=h.extend({},n.models.oSearch,c[d]);ka(a,d,h(b).data())}function ka(a,b,c){var b=a.aoColumns[b],
+d=a.oClasses,e=h(b.nTh);if(!b.sWidthOrig){b.sWidthOrig=e.attr("width")||null;var f=(e.attr("style")||"").match(/width:\s*(\d+[pxem%]+)/);f&&(b.sWidthOrig=f[1])}c!==k&&null!==c&&(fb(c),J(n.defaults.column,c),c.mDataProp!==k&&!c.mData&&(c.mData=c.mDataProp),c.sType&&(b._sManualType=c.sType),c.className&&!c.sClass&&(c.sClass=c.className),c.sClass&&e.addClass(c.sClass),h.extend(b,c),F(b,c,"sWidth","sWidthOrig"),c.iDataSort!==k&&(b.aDataSort=[c.iDataSort]),F(b,c,"aDataSort"));var g=b.mData,j=S(g),i=b.mRender?
+S(b.mRender):null,c=function(a){return"string"===typeof a&&-1!==a.indexOf("@")};b._bAttrSrc=h.isPlainObject(g)&&(c(g.sort)||c(g.type)||c(g.filter));b._setter=null;b.fnGetData=function(a,b,c){var d=j(a,b,k,c);return i&&b?i(d,b,a,c):d};b.fnSetData=function(a,b,c){return N(g)(a,b,c)};"number"!==typeof g&&(a._rowReadObject=!0);a.oFeatures.bSort||(b.bSortable=!1,e.addClass(d.sSortableNone));a=-1!==h.inArray("asc",b.asSorting);c=-1!==h.inArray("desc",b.asSorting);!b.bSortable||!a&&!c?(b.sSortingClass=d.sSortableNone,
+b.sSortingClassJUI=""):a&&!c?(b.sSortingClass=d.sSortableAsc,b.sSortingClassJUI=d.sSortJUIAscAllowed):!a&&c?(b.sSortingClass=d.sSortableDesc,b.sSortingClassJUI=d.sSortJUIDescAllowed):(b.sSortingClass=d.sSortable,b.sSortingClassJUI=d.sSortJUI)}function $(a){if(!1!==a.oFeatures.bAutoWidth){var b=a.aoColumns;Fa(a);for(var c=0,d=b.length;c<d;c++)b[c].nTh.style.width=b[c].sWidth}b=a.oScroll;(""!==b.sY||""!==b.sX)&&la(a);r(a,null,"column-sizing",[a])}function aa(a,b){var c=ma(a,"bVisible");return"number"===
+typeof c[b]?c[b]:null}function ba(a,b){var c=ma(a,"bVisible"),c=h.inArray(b,c);return-1!==c?c:null}function V(a){var b=0;h.each(a.aoColumns,function(a,d){d.bVisible&&"none"!==h(d.nTh).css("display")&&b++});return b}function ma(a,b){var c=[];h.map(a.aoColumns,function(a,e){a[b]&&c.push(e)});return c}function Ga(a){var b=a.aoColumns,c=a.aoData,d=n.ext.type.detect,e,f,g,j,i,h,l,q,t;e=0;for(f=b.length;e<f;e++)if(l=b[e],t=[],!l.sType&&l._sManualType)l.sType=l._sManualType;else if(!l.sType){g=0;for(j=d.length;g<
+j;g++){i=0;for(h=c.length;i<h;i++){t[i]===k&&(t[i]=B(a,i,e,"type"));q=d[g](t[i],a);if(!q&&g!==d.length-1)break;if("html"===q)break}if(q){l.sType=q;break}}l.sType||(l.sType="string")}}function ib(a,b,c,d){var e,f,g,j,i,m,l=a.aoColumns;if(b)for(e=b.length-1;0<=e;e--){m=b[e];var q=m.targets!==k?m.targets:m.aTargets;h.isArray(q)||(q=[q]);f=0;for(g=q.length;f<g;f++)if("number"===typeof q[f]&&0<=q[f]){for(;l.length<=q[f];)Ea(a);d(q[f],m)}else if("number"===typeof q[f]&&0>q[f])d(l.length+q[f],m);else if("string"===
+typeof q[f]){j=0;for(i=l.length;j<i;j++)("_all"==q[f]||h(l[j].nTh).hasClass(q[f]))&&d(j,m)}}if(c){e=0;for(a=c.length;e<a;e++)d(e,c[e])}}function O(a,b,c,d){var e=a.aoData.length,f=h.extend(!0,{},n.models.oRow,{src:c?"dom":"data",idx:e});f._aData=b;a.aoData.push(f);for(var g=a.aoColumns,j=0,i=g.length;j<i;j++)g[j].sType=null;a.aiDisplayMaster.push(e);b=a.rowIdFn(b);b!==k&&(a.aIds[b]=f);(c||!a.oFeatures.bDeferRender)&&Ha(a,e,c,d);return e}function na(a,b){var c;b instanceof h||(b=h(b));return b.map(function(b,
+e){c=Ia(a,e);return O(a,c.data,e,c.cells)})}function B(a,b,c,d){var e=a.iDraw,f=a.aoColumns[c],g=a.aoData[b]._aData,j=f.sDefaultContent,i=f.fnGetData(g,d,{settings:a,row:b,col:c});if(i===k)return a.iDrawError!=e&&null===j&&(K(a,0,"Requested unknown parameter "+("function"==typeof f.mData?"{function}":"'"+f.mData+"'")+" for row "+b+", column "+c,4),a.iDrawError=e),j;if((i===g||null===i)&&null!==j&&d!==k)i=j;else if("function"===typeof i)return i.call(g);return null===i&&"display"==d?"":i}function jb(a,
+b,c,d){a.aoColumns[c].fnSetData(a.aoData[b]._aData,d,{settings:a,row:b,col:c})}function Ja(a){return h.map(a.match(/(\\.|[^\.])+/g)||[""],function(a){return a.replace(/\\\./g,".")})}function S(a){if(h.isPlainObject(a)){var b={};h.each(a,function(a,c){c&&(b[a]=S(c))});return function(a,c,f,g){var j=b[c]||b._;return j!==k?j(a,c,f,g):a}}if(null===a)return function(a){return a};if("function"===typeof a)return function(b,c,f,g){return a(b,c,f,g)};if("string"===typeof a&&(-1!==a.indexOf(".")||-1!==a.indexOf("[")||
+-1!==a.indexOf("("))){var c=function(a,b,f){var g,j;if(""!==f){j=Ja(f);for(var i=0,m=j.length;i<m;i++){f=j[i].match(ca);g=j[i].match(W);if(f){j[i]=j[i].replace(ca,"");""!==j[i]&&(a=a[j[i]]);g=[];j.splice(0,i+1);j=j.join(".");if(h.isArray(a)){i=0;for(m=a.length;i<m;i++)g.push(c(a[i],b,j))}a=f[0].substring(1,f[0].length-1);a=""===a?g:g.join(a);break}else if(g){j[i]=j[i].replace(W,"");a=a[j[i]]();continue}if(null===a||a[j[i]]===k)return k;a=a[j[i]]}}return a};return function(b,e){return c(b,e,a)}}return function(b){return b[a]}}
+function N(a){if(h.isPlainObject(a))return N(a._);if(null===a)return function(){};if("function"===typeof a)return function(b,d,e){a(b,"set",d,e)};if("string"===typeof a&&(-1!==a.indexOf(".")||-1!==a.indexOf("[")||-1!==a.indexOf("("))){var b=function(a,d,e){var e=Ja(e),f;f=e[e.length-1];for(var g,j,i=0,m=e.length-1;i<m;i++){g=e[i].match(ca);j=e[i].match(W);if(g){e[i]=e[i].replace(ca,"");a[e[i]]=[];f=e.slice();f.splice(0,i+1);g=f.join(".");if(h.isArray(d)){j=0;for(m=d.length;j<m;j++)f={},b(f,d[j],g),
+a[e[i]].push(f)}else a[e[i]]=d;return}j&&(e[i]=e[i].replace(W,""),a=a[e[i]](d));if(null===a[e[i]]||a[e[i]]===k)a[e[i]]={};a=a[e[i]]}if(f.match(W))a[f.replace(W,"")](d);else a[f.replace(ca,"")]=d};return function(c,d){return b(c,d,a)}}return function(b,d){b[a]=d}}function Ka(a){return D(a.aoData,"_aData")}function oa(a){a.aoData.length=0;a.aiDisplayMaster.length=0;a.aiDisplay.length=0;a.aIds={}}function pa(a,b,c){for(var d=-1,e=0,f=a.length;e<f;e++)a[e]==b?d=e:a[e]>b&&a[e]--; -1!=d&&c===k&&a.splice(d,
+1)}function da(a,b,c,d){var e=a.aoData[b],f,g=function(c,d){for(;c.childNodes.length;)c.removeChild(c.firstChild);c.innerHTML=B(a,b,d,"display")};if("dom"===c||(!c||"auto"===c)&&"dom"===e.src)e._aData=Ia(a,e,d,d===k?k:e._aData).data;else{var j=e.anCells;if(j)if(d!==k)g(j[d],d);else{c=0;for(f=j.length;c<f;c++)g(j[c],c)}}e._aSortData=null;e._aFilterData=null;g=a.aoColumns;if(d!==k)g[d].sType=null;else{c=0;for(f=g.length;c<f;c++)g[c].sType=null;La(a,e)}}function Ia(a,b,c,d){var e=[],f=b.firstChild,g,
+j,i=0,m,l=a.aoColumns,q=a._rowReadObject,d=d!==k?d:q?{}:[],t=function(a,b){if("string"===typeof a){var c=a.indexOf("@");-1!==c&&(c=a.substring(c+1),N(a)(d,b.getAttribute(c)))}},G=function(a){if(c===k||c===i)j=l[i],m=h.trim(a.innerHTML),j&&j._bAttrSrc?(N(j.mData._)(d,m),t(j.mData.sort,a),t(j.mData.type,a),t(j.mData.filter,a)):q?(j._setter||(j._setter=N(j.mData)),j._setter(d,m)):d[i]=m;i++};if(f)for(;f;){g=f.nodeName.toUpperCase();if("TD"==g||"TH"==g)G(f),e.push(f);f=f.nextSibling}else{e=b.anCells;
+f=0;for(g=e.length;f<g;f++)G(e[f])}if(b=b.firstChild?b:b.nTr)(b=b.getAttribute("id"))&&N(a.rowId)(d,b);return{data:d,cells:e}}function Ha(a,b,c,d){var e=a.aoData[b],f=e._aData,g=[],j,i,m,l,q;if(null===e.nTr){j=c||H.createElement("tr");e.nTr=j;e.anCells=g;j._DT_RowIndex=b;La(a,e);l=0;for(q=a.aoColumns.length;l<q;l++){m=a.aoColumns[l];i=c?d[l]:H.createElement(m.sCellType);i._DT_CellIndex={row:b,column:l};g.push(i);if((!c||m.mRender||m.mData!==l)&&(!h.isPlainObject(m.mData)||m.mData._!==l+".display"))i.innerHTML=
+B(a,b,l,"display");m.sClass&&(i.className+=" "+m.sClass);m.bVisible&&!c?j.appendChild(i):!m.bVisible&&c&&i.parentNode.removeChild(i);m.fnCreatedCell&&m.fnCreatedCell.call(a.oInstance,i,B(a,b,l),f,b,l)}r(a,"aoRowCreatedCallback",null,[j,f,b,g])}e.nTr.setAttribute("role","row")}function La(a,b){var c=b.nTr,d=b._aData;if(c){var e=a.rowIdFn(d);e&&(c.id=e);d.DT_RowClass&&(e=d.DT_RowClass.split(" "),b.__rowc=b.__rowc?qa(b.__rowc.concat(e)):e,h(c).removeClass(b.__rowc.join(" ")).addClass(d.DT_RowClass));
+d.DT_RowAttr&&h(c).attr(d.DT_RowAttr);d.DT_RowData&&h(c).data(d.DT_RowData)}}function kb(a){var b,c,d,e,f,g=a.nTHead,j=a.nTFoot,i=0===h("th, td",g).length,m=a.oClasses,l=a.aoColumns;i&&(e=h("<tr/>").appendTo(g));b=0;for(c=l.length;b<c;b++)f=l[b],d=h(f.nTh).addClass(f.sClass),i&&d.appendTo(e),a.oFeatures.bSort&&(d.addClass(f.sSortingClass),!1!==f.bSortable&&(d.attr("tabindex",a.iTabIndex).attr("aria-controls",a.sTableId),Ma(a,f.nTh,b))),f.sTitle!=d[0].innerHTML&&d.html(f.sTitle),Na(a,"header")(a,d,
+f,m);i&&ea(a.aoHeader,g);h(g).find(">tr").attr("role","row");h(g).find(">tr>th, >tr>td").addClass(m.sHeaderTH);h(j).find(">tr>th, >tr>td").addClass(m.sFooterTH);if(null!==j){a=a.aoFooter[0];b=0;for(c=a.length;b<c;b++)f=l[b],f.nTf=a[b].cell,f.sClass&&h(f.nTf).addClass(f.sClass)}}function fa(a,b,c){var d,e,f,g=[],j=[],i=a.aoColumns.length,m;if(b){c===k&&(c=!1);d=0;for(e=b.length;d<e;d++){g[d]=b[d].slice();g[d].nTr=b[d].nTr;for(f=i-1;0<=f;f--)!a.aoColumns[f].bVisible&&!c&&g[d].splice(f,1);j.push([])}d=
+0;for(e=g.length;d<e;d++){if(a=g[d].nTr)for(;f=a.firstChild;)a.removeChild(f);f=0;for(b=g[d].length;f<b;f++)if(m=i=1,j[d][f]===k){a.appendChild(g[d][f].cell);for(j[d][f]=1;g[d+i]!==k&&g[d][f].cell==g[d+i][f].cell;)j[d+i][f]=1,i++;for(;g[d][f+m]!==k&&g[d][f].cell==g[d][f+m].cell;){for(c=0;c<i;c++)j[d+c][f+m]=1;m++}h(g[d][f].cell).attr("rowspan",i).attr("colspan",m)}}}}function P(a){var b=r(a,"aoPreDrawCallback","preDraw",[a]);if(-1!==h.inArray(!1,b))C(a,!1);else{var b=[],c=0,d=a.asStripeClasses,e=
+d.length,f=a.oLanguage,g=a.iInitDisplayStart,j="ssp"==y(a),i=a.aiDisplay;a.bDrawing=!0;g!==k&&-1!==g&&(a._iDisplayStart=j?g:g>=a.fnRecordsDisplay()?0:g,a.iInitDisplayStart=-1);var g=a._iDisplayStart,m=a.fnDisplayEnd();if(a.bDeferLoading)a.bDeferLoading=!1,a.iDraw++,C(a,!1);else if(j){if(!a.bDestroying&&!lb(a))return}else a.iDraw++;if(0!==i.length){f=j?a.aoData.length:m;for(j=j?0:g;j<f;j++){var l=i[j],q=a.aoData[l];null===q.nTr&&Ha(a,l);var t=q.nTr;if(0!==e){var G=d[c%e];q._sRowStripe!=G&&(h(t).removeClass(q._sRowStripe).addClass(G),
+q._sRowStripe=G)}r(a,"aoRowCallback",null,[t,q._aData,c,j,l]);b.push(t);c++}}else c=f.sZeroRecords,1==a.iDraw&&"ajax"==y(a)?c=f.sLoadingRecords:f.sEmptyTable&&0===a.fnRecordsTotal()&&(c=f.sEmptyTable),b[0]=h("<tr/>",{"class":e?d[0]:""}).append(h("<td />",{valign:"top",colSpan:V(a),"class":a.oClasses.sRowEmpty}).html(c))[0];r(a,"aoHeaderCallback","header",[h(a.nTHead).children("tr")[0],Ka(a),g,m,i]);r(a,"aoFooterCallback","footer",[h(a.nTFoot).children("tr")[0],Ka(a),g,m,i]);d=h(a.nTBody);d.children().detach();
+d.append(h(b));r(a,"aoDrawCallback","draw",[a]);a.bSorted=!1;a.bFiltered=!1;a.bDrawing=!1}}function T(a,b){var c=a.oFeatures,d=c.bFilter;c.bSort&&mb(a);d?ga(a,a.oPreviousSearch):a.aiDisplay=a.aiDisplayMaster.slice();!0!==b&&(a._iDisplayStart=0);a._drawHold=b;P(a);a._drawHold=!1}function nb(a){var b=a.oClasses,c=h(a.nTable),c=h("<div/>").insertBefore(c),d=a.oFeatures,e=h("<div/>",{id:a.sTableId+"_wrapper","class":b.sWrapper+(a.nTFoot?"":" "+b.sNoFooter)});a.nHolding=c[0];a.nTableWrapper=e[0];a.nTableReinsertBefore=
+a.nTable.nextSibling;for(var f=a.sDom.split(""),g,j,i,m,l,q,k=0;k<f.length;k++){g=null;j=f[k];if("<"==j){i=h("<div/>")[0];m=f[k+1];if("'"==m||'"'==m){l="";for(q=2;f[k+q]!=m;)l+=f[k+q],q++;"H"==l?l=b.sJUIHeader:"F"==l&&(l=b.sJUIFooter);-1!=l.indexOf(".")?(m=l.split("."),i.id=m[0].substr(1,m[0].length-1),i.className=m[1]):"#"==l.charAt(0)?i.id=l.substr(1,l.length-1):i.className=l;k+=q}e.append(i);e=h(i)}else if(">"==j)e=e.parent();else if("l"==j&&d.bPaginate&&d.bLengthChange)g=ob(a);else if("f"==j&&
+d.bFilter)g=pb(a);else if("r"==j&&d.bProcessing)g=qb(a);else if("t"==j)g=rb(a);else if("i"==j&&d.bInfo)g=sb(a);else if("p"==j&&d.bPaginate)g=tb(a);else if(0!==n.ext.feature.length){i=n.ext.feature;q=0;for(m=i.length;q<m;q++)if(j==i[q].cFeature){g=i[q].fnInit(a);break}}g&&(i=a.aanFeatures,i[j]||(i[j]=[]),i[j].push(g),e.append(g))}c.replaceWith(e);a.nHolding=null}function ea(a,b){var c=h(b).children("tr"),d,e,f,g,j,i,m,l,q,k;a.splice(0,a.length);f=0;for(i=c.length;f<i;f++)a.push([]);f=0;for(i=c.length;f<
+i;f++){d=c[f];for(e=d.firstChild;e;){if("TD"==e.nodeName.toUpperCase()||"TH"==e.nodeName.toUpperCase()){l=1*e.getAttribute("colspan");q=1*e.getAttribute("rowspan");l=!l||0===l||1===l?1:l;q=!q||0===q||1===q?1:q;g=0;for(j=a[f];j[g];)g++;m=g;k=1===l?!0:!1;for(j=0;j<l;j++)for(g=0;g<q;g++)a[f+g][m+j]={cell:e,unique:k},a[f+g].nTr=d}e=e.nextSibling}}}function ra(a,b,c){var d=[];c||(c=a.aoHeader,b&&(c=[],ea(c,b)));for(var b=0,e=c.length;b<e;b++)for(var f=0,g=c[b].length;f<g;f++)if(c[b][f].unique&&(!d[f]||
+!a.bSortCellsTop))d[f]=c[b][f].cell;return d}function sa(a,b,c){r(a,"aoServerParams","serverParams",[b]);if(b&&h.isArray(b)){var d={},e=/(.*?)\[\]$/;h.each(b,function(a,b){var c=b.name.match(e);c?(c=c[0],d[c]||(d[c]=[]),d[c].push(b.value)):d[b.name]=b.value});b=d}var f,g=a.ajax,j=a.oInstance,i=function(b){r(a,null,"xhr",[a,b,a.jqXHR]);c(b)};if(h.isPlainObject(g)&&g.data){f=g.data;var m="function"===typeof f?f(b,a):f,b="function"===typeof f&&m?m:h.extend(!0,b,m);delete g.data}m={data:b,success:function(b){var c=
+b.error||b.sError;c&&K(a,0,c);a.json=b;i(b)},dataType:"json",cache:!1,type:a.sServerMethod,error:function(b,c){var d=r(a,null,"xhr",[a,null,a.jqXHR]);-1===h.inArray(!0,d)&&("parsererror"==c?K(a,0,"Invalid JSON response",1):4===b.readyState&&K(a,0,"Ajax error",7));C(a,!1)}};a.oAjaxData=b;r(a,null,"preXhr",[a,b]);a.fnServerData?a.fnServerData.call(j,a.sAjaxSource,h.map(b,function(a,b){return{name:b,value:a}}),i,a):a.sAjaxSource||"string"===typeof g?a.jqXHR=h.ajax(h.extend(m,{url:g||a.sAjaxSource})):
+"function"===typeof g?a.jqXHR=g.call(j,b,i,a):(a.jqXHR=h.ajax(h.extend(m,g)),g.data=f)}function lb(a){return a.bAjaxDataGet?(a.iDraw++,C(a,!0),sa(a,ub(a),function(b){vb(a,b)}),!1):!0}function ub(a){var b=a.aoColumns,c=b.length,d=a.oFeatures,e=a.oPreviousSearch,f=a.aoPreSearchCols,g,j=[],i,m,l,k=X(a);g=a._iDisplayStart;i=!1!==d.bPaginate?a._iDisplayLength:-1;var t=function(a,b){j.push({name:a,value:b})};t("sEcho",a.iDraw);t("iColumns",c);t("sColumns",D(b,"sName").join(","));t("iDisplayStart",g);t("iDisplayLength",
+i);var G={draw:a.iDraw,columns:[],order:[],start:g,length:i,search:{value:e.sSearch,regex:e.bRegex}};for(g=0;g<c;g++)m=b[g],l=f[g],i="function"==typeof m.mData?"function":m.mData,G.columns.push({data:i,name:m.sName,searchable:m.bSearchable,orderable:m.bSortable,search:{value:l.sSearch,regex:l.bRegex}}),t("mDataProp_"+g,i),d.bFilter&&(t("sSearch_"+g,l.sSearch),t("bRegex_"+g,l.bRegex),t("bSearchable_"+g,m.bSearchable)),d.bSort&&t("bSortable_"+g,m.bSortable);d.bFilter&&(t("sSearch",e.sSearch),t("bRegex",
+e.bRegex));d.bSort&&(h.each(k,function(a,b){G.order.push({column:b.col,dir:b.dir});t("iSortCol_"+a,b.col);t("sSortDir_"+a,b.dir)}),t("iSortingCols",k.length));b=n.ext.legacy.ajax;return null===b?a.sAjaxSource?j:G:b?j:G}function vb(a,b){var c=ta(a,b),d=b.sEcho!==k?b.sEcho:b.draw,e=b.iTotalRecords!==k?b.iTotalRecords:b.recordsTotal,f=b.iTotalDisplayRecords!==k?b.iTotalDisplayRecords:b.recordsFiltered;if(d){if(1*d<a.iDraw)return;a.iDraw=1*d}oa(a);a._iRecordsTotal=parseInt(e,10);a._iRecordsDisplay=parseInt(f,
+10);d=0;for(e=c.length;d<e;d++)O(a,c[d]);a.aiDisplay=a.aiDisplayMaster.slice();a.bAjaxDataGet=!1;P(a);a._bInitComplete||ua(a,b);a.bAjaxDataGet=!0;C(a,!1)}function ta(a,b){var c=h.isPlainObject(a.ajax)&&a.ajax.dataSrc!==k?a.ajax.dataSrc:a.sAjaxDataProp;return"data"===c?b.aaData||b[c]:""!==c?S(c)(b):b}function pb(a){var b=a.oClasses,c=a.sTableId,d=a.oLanguage,e=a.oPreviousSearch,f=a.aanFeatures,g='<input type="search" class="'+b.sFilterInput+'"/>',j=d.sSearch,j=j.match(/_INPUT_/)?j.replace("_INPUT_",
+g):j+g,b=h("<div/>",{id:!f.f?c+"_filter":null,"class":b.sFilter}).append(h("<label/>").append(j)),f=function(){var b=!this.value?"":this.value;b!=e.sSearch&&(ga(a,{sSearch:b,bRegex:e.bRegex,bSmart:e.bSmart,bCaseInsensitive:e.bCaseInsensitive}),a._iDisplayStart=0,P(a))},g=null!==a.searchDelay?a.searchDelay:"ssp"===y(a)?400:0,i=h("input",b).val(e.sSearch).attr("placeholder",d.sSearchPlaceholder).on("keyup.DT search.DT input.DT paste.DT cut.DT",g?Oa(f,g):f).on("keypress.DT",function(a){if(13==a.keyCode)return!1}).attr("aria-controls",
+c);h(a.nTable).on("search.dt.DT",function(b,c){if(a===c)try{i[0]!==H.activeElement&&i.val(e.sSearch)}catch(d){}});return b[0]}function ga(a,b,c){var d=a.oPreviousSearch,e=a.aoPreSearchCols,f=function(a){d.sSearch=a.sSearch;d.bRegex=a.bRegex;d.bSmart=a.bSmart;d.bCaseInsensitive=a.bCaseInsensitive};Ga(a);if("ssp"!=y(a)){wb(a,b.sSearch,c,b.bEscapeRegex!==k?!b.bEscapeRegex:b.bRegex,b.bSmart,b.bCaseInsensitive);f(b);for(b=0;b<e.length;b++)xb(a,e[b].sSearch,b,e[b].bEscapeRegex!==k?!e[b].bEscapeRegex:e[b].bRegex,
+e[b].bSmart,e[b].bCaseInsensitive);yb(a)}else f(b);a.bFiltered=!0;r(a,null,"search",[a])}function yb(a){for(var b=n.ext.search,c=a.aiDisplay,d,e,f=0,g=b.length;f<g;f++){for(var j=[],i=0,m=c.length;i<m;i++)e=c[i],d=a.aoData[e],b[f](a,d._aFilterData,e,d._aData,i)&&j.push(e);c.length=0;h.merge(c,j)}}function xb(a,b,c,d,e,f){if(""!==b){for(var g=[],j=a.aiDisplay,d=Pa(b,d,e,f),e=0;e<j.length;e++)b=a.aoData[j[e]]._aFilterData[c],d.test(b)&&g.push(j[e]);a.aiDisplay=g}}function wb(a,b,c,d,e,f){var d=Pa(b,
+d,e,f),f=a.oPreviousSearch.sSearch,g=a.aiDisplayMaster,j,e=[];0!==n.ext.search.length&&(c=!0);j=zb(a);if(0>=b.length)a.aiDisplay=g.slice();else{if(j||c||f.length>b.length||0!==b.indexOf(f)||a.bSorted)a.aiDisplay=g.slice();b=a.aiDisplay;for(c=0;c<b.length;c++)d.test(a.aoData[b[c]]._sFilterRow)&&e.push(b[c]);a.aiDisplay=e}}function Pa(a,b,c,d){a=b?a:Qa(a);c&&(a="^(?=.*?"+h.map(a.match(/"[^"]+"|[^ ]+/g)||[""],function(a){if('"'===a.charAt(0))var b=a.match(/^"(.*)"$/),a=b?b[1]:a;return a.replace('"',
+"")}).join(")(?=.*?")+").*$");return RegExp(a,d?"i":"")}function zb(a){var b=a.aoColumns,c,d,e,f,g,j,i,h,l=n.ext.type.search;c=!1;d=0;for(f=a.aoData.length;d<f;d++)if(h=a.aoData[d],!h._aFilterData){j=[];e=0;for(g=b.length;e<g;e++)c=b[e],c.bSearchable?(i=B(a,d,e,"filter"),l[c.sType]&&(i=l[c.sType](i)),null===i&&(i=""),"string"!==typeof i&&i.toString&&(i=i.toString())):i="",i.indexOf&&-1!==i.indexOf("&")&&(va.innerHTML=i,i=Wb?va.textContent:va.innerText),i.replace&&(i=i.replace(/[\r\n]/g,"")),j.push(i);
+h._aFilterData=j;h._sFilterRow=j.join("  ");c=!0}return c}function Ab(a){return{search:a.sSearch,smart:a.bSmart,regex:a.bRegex,caseInsensitive:a.bCaseInsensitive}}function Bb(a){return{sSearch:a.search,bSmart:a.smart,bRegex:a.regex,bCaseInsensitive:a.caseInsensitive}}function sb(a){var b=a.sTableId,c=a.aanFeatures.i,d=h("<div/>",{"class":a.oClasses.sInfo,id:!c?b+"_info":null});c||(a.aoDrawCallback.push({fn:Cb,sName:"information"}),d.attr("role","status").attr("aria-live","polite"),h(a.nTable).attr("aria-describedby",
+b+"_info"));return d[0]}function Cb(a){var b=a.aanFeatures.i;if(0!==b.length){var c=a.oLanguage,d=a._iDisplayStart+1,e=a.fnDisplayEnd(),f=a.fnRecordsTotal(),g=a.fnRecordsDisplay(),j=g?c.sInfo:c.sInfoEmpty;g!==f&&(j+=" "+c.sInfoFiltered);j+=c.sInfoPostFix;j=Db(a,j);c=c.fnInfoCallback;null!==c&&(j=c.call(a.oInstance,a,d,e,f,g,j));h(b).html(j)}}function Db(a,b){var c=a.fnFormatNumber,d=a._iDisplayStart+1,e=a._iDisplayLength,f=a.fnRecordsDisplay(),g=-1===e;return b.replace(/_START_/g,c.call(a,d)).replace(/_END_/g,
+c.call(a,a.fnDisplayEnd())).replace(/_MAX_/g,c.call(a,a.fnRecordsTotal())).replace(/_TOTAL_/g,c.call(a,f)).replace(/_PAGE_/g,c.call(a,g?1:Math.ceil(d/e))).replace(/_PAGES_/g,c.call(a,g?1:Math.ceil(f/e)))}function ha(a){var b,c,d=a.iInitDisplayStart,e=a.aoColumns,f;c=a.oFeatures;var g=a.bDeferLoading;if(a.bInitialised){nb(a);kb(a);fa(a,a.aoHeader);fa(a,a.aoFooter);C(a,!0);c.bAutoWidth&&Fa(a);b=0;for(c=e.length;b<c;b++)f=e[b],f.sWidth&&(f.nTh.style.width=v(f.sWidth));r(a,null,"preInit",[a]);T(a);e=
+y(a);if("ssp"!=e||g)"ajax"==e?sa(a,[],function(c){var f=ta(a,c);for(b=0;b<f.length;b++)O(a,f[b]);a.iInitDisplayStart=d;T(a);C(a,!1);ua(a,c)},a):(C(a,!1),ua(a))}else setTimeout(function(){ha(a)},200)}function ua(a,b){a._bInitComplete=!0;(b||a.oInit.aaData)&&$(a);r(a,null,"plugin-init",[a,b]);r(a,"aoInitComplete","init",[a,b])}function Ra(a,b){var c=parseInt(b,10);a._iDisplayLength=c;Sa(a);r(a,null,"length",[a,c])}function ob(a){for(var b=a.oClasses,c=a.sTableId,d=a.aLengthMenu,e=h.isArray(d[0]),f=
+e?d[0]:d,d=e?d[1]:d,e=h("<select/>",{name:c+"_length","aria-controls":c,"class":b.sLengthSelect}),g=0,j=f.length;g<j;g++)e[0][g]=new Option("number"===typeof d[g]?a.fnFormatNumber(d[g]):d[g],f[g]);var i=h("<div><label/></div>").addClass(b.sLength);a.aanFeatures.l||(i[0].id=c+"_length");i.children().append(a.oLanguage.sLengthMenu.replace("_MENU_",e[0].outerHTML));h("select",i).val(a._iDisplayLength).on("change.DT",function(){Ra(a,h(this).val());P(a)});h(a.nTable).on("length.dt.DT",function(b,c,d){a===
+c&&h("select",i).val(d)});return i[0]}function tb(a){var b=a.sPaginationType,c=n.ext.pager[b],d="function"===typeof c,e=function(a){P(a)},b=h("<div/>").addClass(a.oClasses.sPaging+b)[0],f=a.aanFeatures;d||c.fnInit(a,b,e);f.p||(b.id=a.sTableId+"_paginate",a.aoDrawCallback.push({fn:function(a){if(d){var b=a._iDisplayStart,i=a._iDisplayLength,h=a.fnRecordsDisplay(),l=-1===i,b=l?0:Math.ceil(b/i),i=l?1:Math.ceil(h/i),h=c(b,i),k,l=0;for(k=f.p.length;l<k;l++)Na(a,"pageButton")(a,f.p[l],l,h,b,i)}else c.fnUpdate(a,
+e)},sName:"pagination"}));return b}function Ta(a,b,c){var d=a._iDisplayStart,e=a._iDisplayLength,f=a.fnRecordsDisplay();0===f||-1===e?d=0:"number"===typeof b?(d=b*e,d>f&&(d=0)):"first"==b?d=0:"previous"==b?(d=0<=e?d-e:0,0>d&&(d=0)):"next"==b?d+e<f&&(d+=e):"last"==b?d=Math.floor((f-1)/e)*e:K(a,0,"Unknown paging action: "+b,5);b=a._iDisplayStart!==d;a._iDisplayStart=d;b&&(r(a,null,"page",[a]),c&&P(a));return b}function qb(a){return h("<div/>",{id:!a.aanFeatures.r?a.sTableId+"_processing":null,"class":a.oClasses.sProcessing}).html(a.oLanguage.sProcessing).insertBefore(a.nTable)[0]}
+function C(a,b){a.oFeatures.bProcessing&&h(a.aanFeatures.r).css("display",b?"block":"none");r(a,null,"processing",[a,b])}function rb(a){var b=h(a.nTable);b.attr("role","grid");var c=a.oScroll;if(""===c.sX&&""===c.sY)return a.nTable;var d=c.sX,e=c.sY,f=a.oClasses,g=b.children("caption"),j=g.length?g[0]._captionSide:null,i=h(b[0].cloneNode(!1)),m=h(b[0].cloneNode(!1)),l=b.children("tfoot");l.length||(l=null);i=h("<div/>",{"class":f.sScrollWrapper}).append(h("<div/>",{"class":f.sScrollHead}).css({overflow:"hidden",
+position:"relative",border:0,width:d?!d?null:v(d):"100%"}).append(h("<div/>",{"class":f.sScrollHeadInner}).css({"box-sizing":"content-box",width:c.sXInner||"100%"}).append(i.removeAttr("id").css("margin-left",0).append("top"===j?g:null).append(b.children("thead"))))).append(h("<div/>",{"class":f.sScrollBody}).css({position:"relative",overflow:"auto",width:!d?null:v(d)}).append(b));l&&i.append(h("<div/>",{"class":f.sScrollFoot}).css({overflow:"hidden",border:0,width:d?!d?null:v(d):"100%"}).append(h("<div/>",
+{"class":f.sScrollFootInner}).append(m.removeAttr("id").css("margin-left",0).append("bottom"===j?g:null).append(b.children("tfoot")))));var b=i.children(),k=b[0],f=b[1],t=l?b[2]:null;if(d)h(f).on("scroll.DT",function(){var a=this.scrollLeft;k.scrollLeft=a;l&&(t.scrollLeft=a)});h(f).css(e&&c.bCollapse?"max-height":"height",e);a.nScrollHead=k;a.nScrollBody=f;a.nScrollFoot=t;a.aoDrawCallback.push({fn:la,sName:"scrolling"});return i[0]}function la(a){var b=a.oScroll,c=b.sX,d=b.sXInner,e=b.sY,b=b.iBarWidth,
+f=h(a.nScrollHead),g=f[0].style,j=f.children("div"),i=j[0].style,m=j.children("table"),j=a.nScrollBody,l=h(j),q=j.style,t=h(a.nScrollFoot).children("div"),n=t.children("table"),o=h(a.nTHead),p=h(a.nTable),s=p[0],r=s.style,u=a.nTFoot?h(a.nTFoot):null,x=a.oBrowser,U=x.bScrollOversize,Xb=D(a.aoColumns,"nTh"),Q,L,R,w,Ua=[],y=[],z=[],A=[],B,C=function(a){a=a.style;a.paddingTop="0";a.paddingBottom="0";a.borderTopWidth="0";a.borderBottomWidth="0";a.height=0};L=j.scrollHeight>j.clientHeight;if(a.scrollBarVis!==
+L&&a.scrollBarVis!==k)a.scrollBarVis=L,$(a);else{a.scrollBarVis=L;p.children("thead, tfoot").remove();u&&(R=u.clone().prependTo(p),Q=u.find("tr"),R=R.find("tr"));w=o.clone().prependTo(p);o=o.find("tr");L=w.find("tr");w.find("th, td").removeAttr("tabindex");c||(q.width="100%",f[0].style.width="100%");h.each(ra(a,w),function(b,c){B=aa(a,b);c.style.width=a.aoColumns[B].sWidth});u&&I(function(a){a.style.width=""},R);f=p.outerWidth();if(""===c){r.width="100%";if(U&&(p.find("tbody").height()>j.offsetHeight||
+"scroll"==l.css("overflow-y")))r.width=v(p.outerWidth()-b);f=p.outerWidth()}else""!==d&&(r.width=v(d),f=p.outerWidth());I(C,L);I(function(a){z.push(a.innerHTML);Ua.push(v(h(a).css("width")))},L);I(function(a,b){if(h.inArray(a,Xb)!==-1)a.style.width=Ua[b]},o);h(L).height(0);u&&(I(C,R),I(function(a){A.push(a.innerHTML);y.push(v(h(a).css("width")))},R),I(function(a,b){a.style.width=y[b]},Q),h(R).height(0));I(function(a,b){a.innerHTML='<div class="dataTables_sizing">'+z[b]+"</div>";a.childNodes[0].style.height=
+"0";a.childNodes[0].style.overflow="hidden";a.style.width=Ua[b]},L);u&&I(function(a,b){a.innerHTML='<div class="dataTables_sizing">'+A[b]+"</div>";a.childNodes[0].style.height="0";a.childNodes[0].style.overflow="hidden";a.style.width=y[b]},R);if(p.outerWidth()<f){Q=j.scrollHeight>j.offsetHeight||"scroll"==l.css("overflow-y")?f+b:f;if(U&&(j.scrollHeight>j.offsetHeight||"scroll"==l.css("overflow-y")))r.width=v(Q-b);(""===c||""!==d)&&K(a,1,"Possible column misalignment",6)}else Q="100%";q.width=v(Q);
+g.width=v(Q);u&&(a.nScrollFoot.style.width=v(Q));!e&&U&&(q.height=v(s.offsetHeight+b));c=p.outerWidth();m[0].style.width=v(c);i.width=v(c);d=p.height()>j.clientHeight||"scroll"==l.css("overflow-y");e="padding"+(x.bScrollbarLeft?"Left":"Right");i[e]=d?b+"px":"0px";u&&(n[0].style.width=v(c),t[0].style.width=v(c),t[0].style[e]=d?b+"px":"0px");p.children("colgroup").insertBefore(p.children("thead"));l.scroll();if((a.bSorted||a.bFiltered)&&!a._drawHold)j.scrollTop=0}}function I(a,b,c){for(var d=0,e=0,
+f=b.length,g,j;e<f;){g=b[e].firstChild;for(j=c?c[e].firstChild:null;g;)1===g.nodeType&&(c?a(g,j,d):a(g,d),d++),g=g.nextSibling,j=c?j.nextSibling:null;e++}}function Fa(a){var b=a.nTable,c=a.aoColumns,d=a.oScroll,e=d.sY,f=d.sX,g=d.sXInner,j=c.length,i=ma(a,"bVisible"),m=h("th",a.nTHead),l=b.getAttribute("width"),k=b.parentNode,t=!1,n,o,p=a.oBrowser,d=p.bScrollOversize;(n=b.style.width)&&-1!==n.indexOf("%")&&(l=n);for(n=0;n<i.length;n++)o=c[i[n]],null!==o.sWidth&&(o.sWidth=Eb(o.sWidthOrig,k),t=!0);if(d||
+!t&&!f&&!e&&j==V(a)&&j==m.length)for(n=0;n<j;n++)i=aa(a,n),null!==i&&(c[i].sWidth=v(m.eq(n).width()));else{j=h(b).clone().css("visibility","hidden").removeAttr("id");j.find("tbody tr").remove();var s=h("<tr/>").appendTo(j.find("tbody"));j.find("thead, tfoot").remove();j.append(h(a.nTHead).clone()).append(h(a.nTFoot).clone());j.find("tfoot th, tfoot td").css("width","");m=ra(a,j.find("thead")[0]);for(n=0;n<i.length;n++)o=c[i[n]],m[n].style.width=null!==o.sWidthOrig&&""!==o.sWidthOrig?v(o.sWidthOrig):
+"",o.sWidthOrig&&f&&h(m[n]).append(h("<div/>").css({width:o.sWidthOrig,margin:0,padding:0,border:0,height:1}));if(a.aoData.length)for(n=0;n<i.length;n++)t=i[n],o=c[t],h(Fb(a,t)).clone(!1).append(o.sContentPadding).appendTo(s);h("[name]",j).removeAttr("name");o=h("<div/>").css(f||e?{position:"absolute",top:0,left:0,height:1,right:0,overflow:"hidden"}:{}).append(j).appendTo(k);f&&g?j.width(g):f?(j.css("width","auto"),j.removeAttr("width"),j.width()<k.clientWidth&&l&&j.width(k.clientWidth)):e?j.width(k.clientWidth):
+l&&j.width(l);for(n=e=0;n<i.length;n++)k=h(m[n]),g=k.outerWidth()-k.width(),k=p.bBounding?Math.ceil(m[n].getBoundingClientRect().width):k.outerWidth(),e+=k,c[i[n]].sWidth=v(k-g);b.style.width=v(e);o.remove()}l&&(b.style.width=v(l));if((l||f)&&!a._reszEvt)b=function(){h(E).on("resize.DT-"+a.sInstance,Oa(function(){$(a)}))},d?setTimeout(b,1E3):b(),a._reszEvt=!0}function Eb(a,b){if(!a)return 0;var c=h("<div/>").css("width",v(a)).appendTo(b||H.body),d=c[0].offsetWidth;c.remove();return d}function Fb(a,
+b){var c=Gb(a,b);if(0>c)return null;var d=a.aoData[c];return!d.nTr?h("<td/>").html(B(a,c,b,"display"))[0]:d.anCells[b]}function Gb(a,b){for(var c,d=-1,e=-1,f=0,g=a.aoData.length;f<g;f++)c=B(a,f,b,"display")+"",c=c.replace(Yb,""),c=c.replace(/&nbsp;/g," "),c.length>d&&(d=c.length,e=f);return e}function v(a){return null===a?"0px":"number"==typeof a?0>a?"0px":a+"px":a.match(/\d$/)?a+"px":a}function X(a){var b,c,d=[],e=a.aoColumns,f,g,j,i;b=a.aaSortingFixed;c=h.isPlainObject(b);var m=[];f=function(a){a.length&&
+!h.isArray(a[0])?m.push(a):h.merge(m,a)};h.isArray(b)&&f(b);c&&b.pre&&f(b.pre);f(a.aaSorting);c&&b.post&&f(b.post);for(a=0;a<m.length;a++){i=m[a][0];f=e[i].aDataSort;b=0;for(c=f.length;b<c;b++)g=f[b],j=e[g].sType||"string",m[a]._idx===k&&(m[a]._idx=h.inArray(m[a][1],e[g].asSorting)),d.push({src:i,col:g,dir:m[a][1],index:m[a]._idx,type:j,formatter:n.ext.type.order[j+"-pre"]})}return d}function mb(a){var b,c,d=[],e=n.ext.type.order,f=a.aoData,g=0,j,i=a.aiDisplayMaster,h;Ga(a);h=X(a);b=0;for(c=h.length;b<
+c;b++)j=h[b],j.formatter&&g++,Hb(a,j.col);if("ssp"!=y(a)&&0!==h.length){b=0;for(c=i.length;b<c;b++)d[i[b]]=b;g===h.length?i.sort(function(a,b){var c,e,g,j,i=h.length,k=f[a]._aSortData,n=f[b]._aSortData;for(g=0;g<i;g++)if(j=h[g],c=k[j.col],e=n[j.col],c=c<e?-1:c>e?1:0,0!==c)return"asc"===j.dir?c:-c;c=d[a];e=d[b];return c<e?-1:c>e?1:0}):i.sort(function(a,b){var c,g,j,i,k=h.length,n=f[a]._aSortData,o=f[b]._aSortData;for(j=0;j<k;j++)if(i=h[j],c=n[i.col],g=o[i.col],i=e[i.type+"-"+i.dir]||e["string-"+i.dir],
+c=i(c,g),0!==c)return c;c=d[a];g=d[b];return c<g?-1:c>g?1:0})}a.bSorted=!0}function Ib(a){for(var b,c,d=a.aoColumns,e=X(a),a=a.oLanguage.oAria,f=0,g=d.length;f<g;f++){c=d[f];var j=c.asSorting;b=c.sTitle.replace(/<.*?>/g,"");var i=c.nTh;i.removeAttribute("aria-sort");c.bSortable&&(0<e.length&&e[0].col==f?(i.setAttribute("aria-sort","asc"==e[0].dir?"ascending":"descending"),c=j[e[0].index+1]||j[0]):c=j[0],b+="asc"===c?a.sSortAscending:a.sSortDescending);i.setAttribute("aria-label",b)}}function Va(a,
+b,c,d){var e=a.aaSorting,f=a.aoColumns[b].asSorting,g=function(a,b){var c=a._idx;c===k&&(c=h.inArray(a[1],f));return c+1<f.length?c+1:b?null:0};"number"===typeof e[0]&&(e=a.aaSorting=[e]);c&&a.oFeatures.bSortMulti?(c=h.inArray(b,D(e,"0")),-1!==c?(b=g(e[c],!0),null===b&&1===e.length&&(b=0),null===b?e.splice(c,1):(e[c][1]=f[b],e[c]._idx=b)):(e.push([b,f[0],0]),e[e.length-1]._idx=0)):e.length&&e[0][0]==b?(b=g(e[0]),e.length=1,e[0][1]=f[b],e[0]._idx=b):(e.length=0,e.push([b,f[0]]),e[0]._idx=0);T(a);"function"==
+typeof d&&d(a)}function Ma(a,b,c,d){var e=a.aoColumns[c];Wa(b,{},function(b){!1!==e.bSortable&&(a.oFeatures.bProcessing?(C(a,!0),setTimeout(function(){Va(a,c,b.shiftKey,d);"ssp"!==y(a)&&C(a,!1)},0)):Va(a,c,b.shiftKey,d))})}function wa(a){var b=a.aLastSort,c=a.oClasses.sSortColumn,d=X(a),e=a.oFeatures,f,g;if(e.bSort&&e.bSortClasses){e=0;for(f=b.length;e<f;e++)g=b[e].src,h(D(a.aoData,"anCells",g)).removeClass(c+(2>e?e+1:3));e=0;for(f=d.length;e<f;e++)g=d[e].src,h(D(a.aoData,"anCells",g)).addClass(c+
+(2>e?e+1:3))}a.aLastSort=d}function Hb(a,b){var c=a.aoColumns[b],d=n.ext.order[c.sSortDataType],e;d&&(e=d.call(a.oInstance,a,b,ba(a,b)));for(var f,g=n.ext.type.order[c.sType+"-pre"],j=0,i=a.aoData.length;j<i;j++)if(c=a.aoData[j],c._aSortData||(c._aSortData=[]),!c._aSortData[b]||d)f=d?e[j]:B(a,j,b,"sort"),c._aSortData[b]=g?g(f):f}function xa(a){if(a.oFeatures.bStateSave&&!a.bDestroying){var b={time:+new Date,start:a._iDisplayStart,length:a._iDisplayLength,order:h.extend(!0,[],a.aaSorting),search:Ab(a.oPreviousSearch),
+columns:h.map(a.aoColumns,function(b,d){return{visible:b.bVisible,search:Ab(a.aoPreSearchCols[d])}})};r(a,"aoStateSaveParams","stateSaveParams",[a,b]);a.oSavedState=b;a.fnStateSaveCallback.call(a.oInstance,a,b)}}function Jb(a,b,c){var d,e,f=a.aoColumns,b=function(b){if(b&&b.time){var g=r(a,"aoStateLoadParams","stateLoadParams",[a,b]);if(-1===h.inArray(!1,g)&&(g=a.iStateDuration,!(0<g&&b.time<+new Date-1E3*g)&&!(b.columns&&f.length!==b.columns.length))){a.oLoadedState=h.extend(!0,{},b);b.start!==k&&
+(a._iDisplayStart=b.start,a.iInitDisplayStart=b.start);b.length!==k&&(a._iDisplayLength=b.length);b.order!==k&&(a.aaSorting=[],h.each(b.order,function(b,c){a.aaSorting.push(c[0]>=f.length?[0,c[1]]:c)}));b.search!==k&&h.extend(a.oPreviousSearch,Bb(b.search));if(b.columns){d=0;for(e=b.columns.length;d<e;d++)g=b.columns[d],g.visible!==k&&(f[d].bVisible=g.visible),g.search!==k&&h.extend(a.aoPreSearchCols[d],Bb(g.search))}r(a,"aoStateLoaded","stateLoaded",[a,b])}}c()};if(a.oFeatures.bStateSave){var g=
+a.fnStateLoadCallback.call(a.oInstance,a,b);g!==k&&b(g)}else c()}function ya(a){var b=n.settings,a=h.inArray(a,D(b,"nTable"));return-1!==a?b[a]:null}function K(a,b,c,d){c="DataTables warning: "+(a?"table id="+a.sTableId+" - ":"")+c;d&&(c+=". For more information about this error, please see http://datatables.net/tn/"+d);if(b)E.console&&console.log&&console.log(c);else if(b=n.ext,b=b.sErrMode||b.errMode,a&&r(a,null,"error",[a,d,c]),"alert"==b)alert(c);else{if("throw"==b)throw Error(c);"function"==
+typeof b&&b(a,d,c)}}function F(a,b,c,d){h.isArray(c)?h.each(c,function(c,d){h.isArray(d)?F(a,b,d[0],d[1]):F(a,b,d)}):(d===k&&(d=c),b[c]!==k&&(a[d]=b[c]))}function Xa(a,b,c){var d,e;for(e in b)b.hasOwnProperty(e)&&(d=b[e],h.isPlainObject(d)?(h.isPlainObject(a[e])||(a[e]={}),h.extend(!0,a[e],d)):a[e]=c&&"data"!==e&&"aaData"!==e&&h.isArray(d)?d.slice():d);return a}function Wa(a,b,c){h(a).on("click.DT",b,function(b){h(a).blur();c(b)}).on("keypress.DT",b,function(a){13===a.which&&(a.preventDefault(),c(a))}).on("selectstart.DT",
+function(){return!1})}function z(a,b,c,d){c&&a[b].push({fn:c,sName:d})}function r(a,b,c,d){var e=[];b&&(e=h.map(a[b].slice().reverse(),function(b){return b.fn.apply(a.oInstance,d)}));null!==c&&(b=h.Event(c+".dt"),h(a.nTable).trigger(b,d),e.push(b.result));return e}function Sa(a){var b=a._iDisplayStart,c=a.fnDisplayEnd(),d=a._iDisplayLength;b>=c&&(b=c-d);b-=b%d;if(-1===d||0>b)b=0;a._iDisplayStart=b}function Na(a,b){var c=a.renderer,d=n.ext.renderer[b];return h.isPlainObject(c)&&c[b]?d[c[b]]||d._:"string"===
+typeof c?d[c]||d._:d._}function y(a){return a.oFeatures.bServerSide?"ssp":a.ajax||a.sAjaxSource?"ajax":"dom"}function ia(a,b){var c=[],c=Kb.numbers_length,d=Math.floor(c/2);b<=c?c=Y(0,b):a<=d?(c=Y(0,c-2),c.push("ellipsis"),c.push(b-1)):(a>=b-1-d?c=Y(b-(c-2),b):(c=Y(a-d+2,a+d-1),c.push("ellipsis"),c.push(b-1)),c.splice(0,0,"ellipsis"),c.splice(0,0,0));c.DT_el="span";return c}function Da(a){h.each({num:function(b){return za(b,a)},"num-fmt":function(b){return za(b,a,Ya)},"html-num":function(b){return za(b,
+a,Aa)},"html-num-fmt":function(b){return za(b,a,Aa,Ya)}},function(b,c){x.type.order[b+a+"-pre"]=c;b.match(/^html\-/)&&(x.type.search[b+a]=x.type.search.html)})}function Lb(a){return function(){var b=[ya(this[n.ext.iApiIndex])].concat(Array.prototype.slice.call(arguments));return n.ext.internal[a].apply(this,b)}}var n=function(a){this.$=function(a,b){return this.api(!0).$(a,b)};this._=function(a,b){return this.api(!0).rows(a,b).data()};this.api=function(a){return a?new s(ya(this[x.iApiIndex])):new s(this)};
+this.fnAddData=function(a,b){var c=this.api(!0),d=h.isArray(a)&&(h.isArray(a[0])||h.isPlainObject(a[0]))?c.rows.add(a):c.row.add(a);(b===k||b)&&c.draw();return d.flatten().toArray()};this.fnAdjustColumnSizing=function(a){var b=this.api(!0).columns.adjust(),c=b.settings()[0],d=c.oScroll;a===k||a?b.draw(!1):(""!==d.sX||""!==d.sY)&&la(c)};this.fnClearTable=function(a){var b=this.api(!0).clear();(a===k||a)&&b.draw()};this.fnClose=function(a){this.api(!0).row(a).child.hide()};this.fnDeleteRow=function(a,
+b,c){var d=this.api(!0),a=d.rows(a),e=a.settings()[0],h=e.aoData[a[0][0]];a.remove();b&&b.call(this,e,h);(c===k||c)&&d.draw();return h};this.fnDestroy=function(a){this.api(!0).destroy(a)};this.fnDraw=function(a){this.api(!0).draw(a)};this.fnFilter=function(a,b,c,d,e,h){e=this.api(!0);null===b||b===k?e.search(a,c,d,h):e.column(b).search(a,c,d,h);e.draw()};this.fnGetData=function(a,b){var c=this.api(!0);if(a!==k){var d=a.nodeName?a.nodeName.toLowerCase():"";return b!==k||"td"==d||"th"==d?c.cell(a,b).data():
+c.row(a).data()||null}return c.data().toArray()};this.fnGetNodes=function(a){var b=this.api(!0);return a!==k?b.row(a).node():b.rows().nodes().flatten().toArray()};this.fnGetPosition=function(a){var b=this.api(!0),c=a.nodeName.toUpperCase();return"TR"==c?b.row(a).index():"TD"==c||"TH"==c?(a=b.cell(a).index(),[a.row,a.columnVisible,a.column]):null};this.fnIsOpen=function(a){return this.api(!0).row(a).child.isShown()};this.fnOpen=function(a,b,c){return this.api(!0).row(a).child(b,c).show().child()[0]};
+this.fnPageChange=function(a,b){var c=this.api(!0).page(a);(b===k||b)&&c.draw(!1)};this.fnSetColumnVis=function(a,b,c){a=this.api(!0).column(a).visible(b);(c===k||c)&&a.columns.adjust().draw()};this.fnSettings=function(){return ya(this[x.iApiIndex])};this.fnSort=function(a){this.api(!0).order(a).draw()};this.fnSortListener=function(a,b,c){this.api(!0).order.listener(a,b,c)};this.fnUpdate=function(a,b,c,d,e){var h=this.api(!0);c===k||null===c?h.row(b).data(a):h.cell(b,c).data(a);(e===k||e)&&h.columns.adjust();
+(d===k||d)&&h.draw();return 0};this.fnVersionCheck=x.fnVersionCheck;var b=this,c=a===k,d=this.length;c&&(a={});this.oApi=this.internal=x.internal;for(var e in n.ext.internal)e&&(this[e]=Lb(e));this.each(function(){var e={},g=1<d?Xa(e,a,!0):a,j=0,i,e=this.getAttribute("id"),m=!1,l=n.defaults,q=h(this);if("table"!=this.nodeName.toLowerCase())K(null,0,"Non-table node initialisation ("+this.nodeName+")",2);else{eb(l);fb(l.column);J(l,l,!0);J(l.column,l.column,!0);J(l,h.extend(g,q.data()));var t=n.settings,
+j=0;for(i=t.length;j<i;j++){var o=t[j];if(o.nTable==this||o.nTHead&&o.nTHead.parentNode==this||o.nTFoot&&o.nTFoot.parentNode==this){var s=g.bRetrieve!==k?g.bRetrieve:l.bRetrieve;if(c||s)return o.oInstance;if(g.bDestroy!==k?g.bDestroy:l.bDestroy){o.oInstance.fnDestroy();break}else{K(o,0,"Cannot reinitialise DataTable",3);return}}if(o.sTableId==this.id){t.splice(j,1);break}}if(null===e||""===e)this.id=e="DataTables_Table_"+n.ext._unique++;var p=h.extend(!0,{},n.models.oSettings,{sDestroyWidth:q[0].style.width,
+sInstance:e,sTableId:e});p.nTable=this;p.oApi=b.internal;p.oInit=g;t.push(p);p.oInstance=1===b.length?b:q.dataTable();eb(g);Ca(g.oLanguage);g.aLengthMenu&&!g.iDisplayLength&&(g.iDisplayLength=h.isArray(g.aLengthMenu[0])?g.aLengthMenu[0][0]:g.aLengthMenu[0]);g=Xa(h.extend(!0,{},l),g);F(p.oFeatures,g,"bPaginate bLengthChange bFilter bSort bSortMulti bInfo bProcessing bAutoWidth bSortClasses bServerSide bDeferRender".split(" "));F(p,g,["asStripeClasses","ajax","fnServerData","fnFormatNumber","sServerMethod",
+"aaSorting","aaSortingFixed","aLengthMenu","sPaginationType","sAjaxSource","sAjaxDataProp","iStateDuration","sDom","bSortCellsTop","iTabIndex","fnStateLoadCallback","fnStateSaveCallback","renderer","searchDelay","rowId",["iCookieDuration","iStateDuration"],["oSearch","oPreviousSearch"],["aoSearchCols","aoPreSearchCols"],["iDisplayLength","_iDisplayLength"]]);F(p.oScroll,g,[["sScrollX","sX"],["sScrollXInner","sXInner"],["sScrollY","sY"],["bScrollCollapse","bCollapse"]]);F(p.oLanguage,g,"fnInfoCallback");
+z(p,"aoDrawCallback",g.fnDrawCallback,"user");z(p,"aoServerParams",g.fnServerParams,"user");z(p,"aoStateSaveParams",g.fnStateSaveParams,"user");z(p,"aoStateLoadParams",g.fnStateLoadParams,"user");z(p,"aoStateLoaded",g.fnStateLoaded,"user");z(p,"aoRowCallback",g.fnRowCallback,"user");z(p,"aoRowCreatedCallback",g.fnCreatedRow,"user");z(p,"aoHeaderCallback",g.fnHeaderCallback,"user");z(p,"aoFooterCallback",g.fnFooterCallback,"user");z(p,"aoInitComplete",g.fnInitComplete,"user");z(p,"aoPreDrawCallback",
+g.fnPreDrawCallback,"user");p.rowIdFn=S(g.rowId);gb(p);var u=p.oClasses;h.extend(u,n.ext.classes,g.oClasses);q.addClass(u.sTable);p.iInitDisplayStart===k&&(p.iInitDisplayStart=g.iDisplayStart,p._iDisplayStart=g.iDisplayStart);null!==g.iDeferLoading&&(p.bDeferLoading=!0,e=h.isArray(g.iDeferLoading),p._iRecordsDisplay=e?g.iDeferLoading[0]:g.iDeferLoading,p._iRecordsTotal=e?g.iDeferLoading[1]:g.iDeferLoading);var v=p.oLanguage;h.extend(!0,v,g.oLanguage);v.sUrl&&(h.ajax({dataType:"json",url:v.sUrl,success:function(a){Ca(a);
+J(l.oLanguage,a);h.extend(true,v,a);ha(p)},error:function(){ha(p)}}),m=!0);null===g.asStripeClasses&&(p.asStripeClasses=[u.sStripeOdd,u.sStripeEven]);var e=p.asStripeClasses,x=q.children("tbody").find("tr").eq(0);-1!==h.inArray(!0,h.map(e,function(a){return x.hasClass(a)}))&&(h("tbody tr",this).removeClass(e.join(" ")),p.asDestroyStripes=e.slice());e=[];t=this.getElementsByTagName("thead");0!==t.length&&(ea(p.aoHeader,t[0]),e=ra(p));if(null===g.aoColumns){t=[];j=0;for(i=e.length;j<i;j++)t.push(null)}else t=
+g.aoColumns;j=0;for(i=t.length;j<i;j++)Ea(p,e?e[j]:null);ib(p,g.aoColumnDefs,t,function(a,b){ka(p,a,b)});if(x.length){var w=function(a,b){return a.getAttribute("data-"+b)!==null?b:null};h(x[0]).children("th, td").each(function(a,b){var c=p.aoColumns[a];if(c.mData===a){var d=w(b,"sort")||w(b,"order"),e=w(b,"filter")||w(b,"search");if(d!==null||e!==null){c.mData={_:a+".display",sort:d!==null?a+".@data-"+d:k,type:d!==null?a+".@data-"+d:k,filter:e!==null?a+".@data-"+e:k};ka(p,a)}}})}var U=p.oFeatures,
+e=function(){if(g.aaSorting===k){var a=p.aaSorting;j=0;for(i=a.length;j<i;j++)a[j][1]=p.aoColumns[j].asSorting[0]}wa(p);U.bSort&&z(p,"aoDrawCallback",function(){if(p.bSorted){var a=X(p),b={};h.each(a,function(a,c){b[c.src]=c.dir});r(p,null,"order",[p,a,b]);Ib(p)}});z(p,"aoDrawCallback",function(){(p.bSorted||y(p)==="ssp"||U.bDeferRender)&&wa(p)},"sc");var a=q.children("caption").each(function(){this._captionSide=h(this).css("caption-side")}),b=q.children("thead");b.length===0&&(b=h("<thead/>").appendTo(q));
+p.nTHead=b[0];b=q.children("tbody");b.length===0&&(b=h("<tbody/>").appendTo(q));p.nTBody=b[0];b=q.children("tfoot");if(b.length===0&&a.length>0&&(p.oScroll.sX!==""||p.oScroll.sY!==""))b=h("<tfoot/>").appendTo(q);if(b.length===0||b.children().length===0)q.addClass(u.sNoFooter);else if(b.length>0){p.nTFoot=b[0];ea(p.aoFooter,p.nTFoot)}if(g.aaData)for(j=0;j<g.aaData.length;j++)O(p,g.aaData[j]);else(p.bDeferLoading||y(p)=="dom")&&na(p,h(p.nTBody).children("tr"));p.aiDisplay=p.aiDisplayMaster.slice();
+p.bInitialised=true;m===false&&ha(p)};g.bStateSave?(U.bStateSave=!0,z(p,"aoDrawCallback",xa,"state_save"),Jb(p,g,e)):e()}});b=null;return this},x,s,o,u,Za={},Mb=/[\r\n]/g,Aa=/<.*?>/g,Zb=/^\d{2,4}[\.\/\-]\d{1,2}[\.\/\-]\d{1,2}([T ]{1}\d{1,2}[:\.]\d{2}([\.:]\d{2})?)?$/,$b=RegExp("(\\/|\\.|\\*|\\+|\\?|\\||\\(|\\)|\\[|\\]|\\{|\\}|\\\\|\\$|\\^|\\-)","g"),Ya=/[',$£€¥%\u2009\u202F\u20BD\u20a9\u20BArfkɃΞ]/gi,M=function(a){return!a||!0===a||"-"===a?!0:!1},Nb=function(a){var b=parseInt(a,10);return!isNaN(b)&&
+isFinite(a)?b:null},Ob=function(a,b){Za[b]||(Za[b]=RegExp(Qa(b),"g"));return"string"===typeof a&&"."!==b?a.replace(/\./g,"").replace(Za[b],"."):a},$a=function(a,b,c){var d="string"===typeof a;if(M(a))return!0;b&&d&&(a=Ob(a,b));c&&d&&(a=a.replace(Ya,""));return!isNaN(parseFloat(a))&&isFinite(a)},Pb=function(a,b,c){return M(a)?!0:!(M(a)||"string"===typeof a)?null:$a(a.replace(Aa,""),b,c)?!0:null},D=function(a,b,c){var d=[],e=0,f=a.length;if(c!==k)for(;e<f;e++)a[e]&&a[e][b]&&d.push(a[e][b][c]);else for(;e<
+f;e++)a[e]&&d.push(a[e][b]);return d},ja=function(a,b,c,d){var e=[],f=0,g=b.length;if(d!==k)for(;f<g;f++)a[b[f]][c]&&e.push(a[b[f]][c][d]);else for(;f<g;f++)e.push(a[b[f]][c]);return e},Y=function(a,b){var c=[],d;b===k?(b=0,d=a):(d=b,b=a);for(var e=b;e<d;e++)c.push(e);return c},Qb=function(a){for(var b=[],c=0,d=a.length;c<d;c++)a[c]&&b.push(a[c]);return b},qa=function(a){var b;a:{if(!(2>a.length)){b=a.slice().sort();for(var c=b[0],d=1,e=b.length;d<e;d++){if(b[d]===c){b=!1;break a}c=b[d]}}b=!0}if(b)return a.slice();
+b=[];var e=a.length,f,g=0,d=0;a:for(;d<e;d++){c=a[d];for(f=0;f<g;f++)if(b[f]===c)continue a;b.push(c);g++}return b};n.util={throttle:function(a,b){var c=b!==k?b:200,d,e;return function(){var b=this,g=+new Date,j=arguments;d&&g<d+c?(clearTimeout(e),e=setTimeout(function(){d=k;a.apply(b,j)},c)):(d=g,a.apply(b,j))}},escapeRegex:function(a){return a.replace($b,"\\$1")}};var A=function(a,b,c){a[b]!==k&&(a[c]=a[b])},ca=/\[.*?\]$/,W=/\(\)$/,Qa=n.util.escapeRegex,va=h("<div>")[0],Wb=va.textContent!==k,Yb=
+/<.*?>/g,Oa=n.util.throttle,Rb=[],w=Array.prototype,ac=function(a){var b,c,d=n.settings,e=h.map(d,function(a){return a.nTable});if(a){if(a.nTable&&a.oApi)return[a];if(a.nodeName&&"table"===a.nodeName.toLowerCase())return b=h.inArray(a,e),-1!==b?[d[b]]:null;if(a&&"function"===typeof a.settings)return a.settings().toArray();"string"===typeof a?c=h(a):a instanceof h&&(c=a)}else return[];if(c)return c.map(function(){b=h.inArray(this,e);return-1!==b?d[b]:null}).toArray()};s=function(a,b){if(!(this instanceof
+s))return new s(a,b);var c=[],d=function(a){(a=ac(a))&&(c=c.concat(a))};if(h.isArray(a))for(var e=0,f=a.length;e<f;e++)d(a[e]);else d(a);this.context=qa(c);b&&h.merge(this,b);this.selector={rows:null,cols:null,opts:null};s.extend(this,this,Rb)};n.Api=s;h.extend(s.prototype,{any:function(){return 0!==this.count()},concat:w.concat,context:[],count:function(){return this.flatten().length},each:function(a){for(var b=0,c=this.length;b<c;b++)a.call(this,this[b],b,this);return this},eq:function(a){var b=
+this.context;return b.length>a?new s(b[a],this[a]):null},filter:function(a){var b=[];if(w.filter)b=w.filter.call(this,a,this);else for(var c=0,d=this.length;c<d;c++)a.call(this,this[c],c,this)&&b.push(this[c]);return new s(this.context,b)},flatten:function(){var a=[];return new s(this.context,a.concat.apply(a,this.toArray()))},join:w.join,indexOf:w.indexOf||function(a,b){for(var c=b||0,d=this.length;c<d;c++)if(this[c]===a)return c;return-1},iterator:function(a,b,c,d){var e=[],f,g,j,h,m,l=this.context,
+n,o,u=this.selector;"string"===typeof a&&(d=c,c=b,b=a,a=!1);g=0;for(j=l.length;g<j;g++){var r=new s(l[g]);if("table"===b)f=c.call(r,l[g],g),f!==k&&e.push(f);else if("columns"===b||"rows"===b)f=c.call(r,l[g],this[g],g),f!==k&&e.push(f);else if("column"===b||"column-rows"===b||"row"===b||"cell"===b){o=this[g];"column-rows"===b&&(n=Ba(l[g],u.opts));h=0;for(m=o.length;h<m;h++)f=o[h],f="cell"===b?c.call(r,l[g],f.row,f.column,g,h):c.call(r,l[g],f,g,h,n),f!==k&&e.push(f)}}return e.length||d?(a=new s(l,a?
+e.concat.apply([],e):e),b=a.selector,b.rows=u.rows,b.cols=u.cols,b.opts=u.opts,a):this},lastIndexOf:w.lastIndexOf||function(a,b){return this.indexOf.apply(this.toArray.reverse(),arguments)},length:0,map:function(a){var b=[];if(w.map)b=w.map.call(this,a,this);else for(var c=0,d=this.length;c<d;c++)b.push(a.call(this,this[c],c));return new s(this.context,b)},pluck:function(a){return this.map(function(b){return b[a]})},pop:w.pop,push:w.push,reduce:w.reduce||function(a,b){return hb(this,a,b,0,this.length,
+1)},reduceRight:w.reduceRight||function(a,b){return hb(this,a,b,this.length-1,-1,-1)},reverse:w.reverse,selector:null,shift:w.shift,slice:function(){return new s(this.context,this)},sort:w.sort,splice:w.splice,toArray:function(){return w.slice.call(this)},to$:function(){return h(this)},toJQuery:function(){return h(this)},unique:function(){return new s(this.context,qa(this))},unshift:w.unshift});s.extend=function(a,b,c){if(c.length&&b&&(b instanceof s||b.__dt_wrapper)){var d,e,f,g=function(a,b,c){return function(){var d=
+b.apply(a,arguments);s.extend(d,d,c.methodExt);return d}};d=0;for(e=c.length;d<e;d++)f=c[d],b[f.name]="function"===typeof f.val?g(a,f.val,f):h.isPlainObject(f.val)?{}:f.val,b[f.name].__dt_wrapper=!0,s.extend(a,b[f.name],f.propExt)}};s.register=o=function(a,b){if(h.isArray(a))for(var c=0,d=a.length;c<d;c++)s.register(a[c],b);else for(var e=a.split("."),f=Rb,g,j,c=0,d=e.length;c<d;c++){g=(j=-1!==e[c].indexOf("()"))?e[c].replace("()",""):e[c];var i;a:{i=0;for(var m=f.length;i<m;i++)if(f[i].name===g){i=
+f[i];break a}i=null}i||(i={name:g,val:{},methodExt:[],propExt:[]},f.push(i));c===d-1?i.val=b:f=j?i.methodExt:i.propExt}};s.registerPlural=u=function(a,b,c){s.register(a,c);s.register(b,function(){var a=c.apply(this,arguments);return a===this?this:a instanceof s?a.length?h.isArray(a[0])?new s(a.context,a[0]):a[0]:k:a})};o("tables()",function(a){var b;if(a){b=s;var c=this.context;if("number"===typeof a)a=[c[a]];else var d=h.map(c,function(a){return a.nTable}),a=h(d).filter(a).map(function(){var a=h.inArray(this,
+d);return c[a]}).toArray();b=new b(a)}else b=this;return b});o("table()",function(a){var a=this.tables(a),b=a.context;return b.length?new s(b[0]):a});u("tables().nodes()","table().node()",function(){return this.iterator("table",function(a){return a.nTable},1)});u("tables().body()","table().body()",function(){return this.iterator("table",function(a){return a.nTBody},1)});u("tables().header()","table().header()",function(){return this.iterator("table",function(a){return a.nTHead},1)});u("tables().footer()",
+"table().footer()",function(){return this.iterator("table",function(a){return a.nTFoot},1)});u("tables().containers()","table().container()",function(){return this.iterator("table",function(a){return a.nTableWrapper},1)});o("draw()",function(a){return this.iterator("table",function(b){"page"===a?P(b):("string"===typeof a&&(a="full-hold"===a?!1:!0),T(b,!1===a))})});o("page()",function(a){return a===k?this.page.info().page:this.iterator("table",function(b){Ta(b,a)})});o("page.info()",function(){if(0===
+this.context.length)return k;var a=this.context[0],b=a._iDisplayStart,c=a.oFeatures.bPaginate?a._iDisplayLength:-1,d=a.fnRecordsDisplay(),e=-1===c;return{page:e?0:Math.floor(b/c),pages:e?1:Math.ceil(d/c),start:b,end:a.fnDisplayEnd(),length:c,recordsTotal:a.fnRecordsTotal(),recordsDisplay:d,serverSide:"ssp"===y(a)}});o("page.len()",function(a){return a===k?0!==this.context.length?this.context[0]._iDisplayLength:k:this.iterator("table",function(b){Ra(b,a)})});var Sb=function(a,b,c){if(c){var d=new s(a);
+d.one("draw",function(){c(d.ajax.json())})}if("ssp"==y(a))T(a,b);else{C(a,!0);var e=a.jqXHR;e&&4!==e.readyState&&e.abort();sa(a,[],function(c){oa(a);for(var c=ta(a,c),d=0,e=c.length;d<e;d++)O(a,c[d]);T(a,b);C(a,!1)})}};o("ajax.json()",function(){var a=this.context;if(0<a.length)return a[0].json});o("ajax.params()",function(){var a=this.context;if(0<a.length)return a[0].oAjaxData});o("ajax.reload()",function(a,b){return this.iterator("table",function(c){Sb(c,!1===b,a)})});o("ajax.url()",function(a){var b=
+this.context;if(a===k){if(0===b.length)return k;b=b[0];return b.ajax?h.isPlainObject(b.ajax)?b.ajax.url:b.ajax:b.sAjaxSource}return this.iterator("table",function(b){h.isPlainObject(b.ajax)?b.ajax.url=a:b.ajax=a})});o("ajax.url().load()",function(a,b){return this.iterator("table",function(c){Sb(c,!1===b,a)})});var ab=function(a,b,c,d,e){var f=[],g,j,i,m,l,n;i=typeof b;if(!b||"string"===i||"function"===i||b.length===k)b=[b];i=0;for(m=b.length;i<m;i++){j=b[i]&&b[i].split&&!b[i].match(/[\[\(:]/)?b[i].split(","):
+[b[i]];l=0;for(n=j.length;l<n;l++)(g=c("string"===typeof j[l]?h.trim(j[l]):j[l]))&&g.length&&(f=f.concat(g))}a=x.selector[a];if(a.length){i=0;for(m=a.length;i<m;i++)f=a[i](d,e,f)}return qa(f)},bb=function(a){a||(a={});a.filter&&a.search===k&&(a.search=a.filter);return h.extend({search:"none",order:"current",page:"all"},a)},cb=function(a){for(var b=0,c=a.length;b<c;b++)if(0<a[b].length)return a[0]=a[b],a[0].length=1,a.length=1,a.context=[a.context[b]],a;a.length=0;return a},Ba=function(a,b){var c,
+d,e,f=[],g=a.aiDisplay;e=a.aiDisplayMaster;var j=b.search;c=b.order;d=b.page;if("ssp"==y(a))return"removed"===j?[]:Y(0,e.length);if("current"==d){c=a._iDisplayStart;for(d=a.fnDisplayEnd();c<d;c++)f.push(g[c])}else if("current"==c||"applied"==c)if("none"==j)f=e.slice();else if("applied"==j)f=g.slice();else{if("removed"==j){var i={};c=0;for(d=g.length;c<d;c++)i[g[c]]=null;f=h.map(e,function(a){return!i.hasOwnProperty(a)?a:null})}}else if("index"==c||"original"==c){c=0;for(d=a.aoData.length;c<d;c++)"none"==
+j?f.push(c):(e=h.inArray(c,g),(-1===e&&"removed"==j||0<=e&&"applied"==j)&&f.push(c))}return f};o("rows()",function(a,b){a===k?a="":h.isPlainObject(a)&&(b=a,a="");var b=bb(b),c=this.iterator("table",function(c){var e=b,f;return ab("row",a,function(a){var b=Nb(a),i=c.aoData;if(b!==null&&!e)return[b];f||(f=Ba(c,e));if(b!==null&&h.inArray(b,f)!==-1)return[b];if(a===null||a===k||a==="")return f;if(typeof a==="function")return h.map(f,function(b){var c=i[b];return a(b,c._aData,c.nTr)?b:null});if(a.nodeName){var b=
+a._DT_RowIndex,m=a._DT_CellIndex;if(b!==k)return i[b]&&i[b].nTr===a?[b]:[];if(m)return i[m.row]&&i[m.row].nTr===a?[m.row]:[];b=h(a).closest("*[data-dt-row]");return b.length?[b.data("dt-row")]:[]}if(typeof a==="string"&&a.charAt(0)==="#"){b=c.aIds[a.replace(/^#/,"")];if(b!==k)return[b.idx]}b=Qb(ja(c.aoData,f,"nTr"));return h(b).filter(a).map(function(){return this._DT_RowIndex}).toArray()},c,e)},1);c.selector.rows=a;c.selector.opts=b;return c});o("rows().nodes()",function(){return this.iterator("row",
+function(a,b){return a.aoData[b].nTr||k},1)});o("rows().data()",function(){return this.iterator(!0,"rows",function(a,b){return ja(a.aoData,b,"_aData")},1)});u("rows().cache()","row().cache()",function(a){return this.iterator("row",function(b,c){var d=b.aoData[c];return"search"===a?d._aFilterData:d._aSortData},1)});u("rows().invalidate()","row().invalidate()",function(a){return this.iterator("row",function(b,c){da(b,c,a)})});u("rows().indexes()","row().index()",function(){return this.iterator("row",
+function(a,b){return b},1)});u("rows().ids()","row().id()",function(a){for(var b=[],c=this.context,d=0,e=c.length;d<e;d++)for(var f=0,g=this[d].length;f<g;f++){var h=c[d].rowIdFn(c[d].aoData[this[d][f]]._aData);b.push((!0===a?"#":"")+h)}return new s(c,b)});u("rows().remove()","row().remove()",function(){var a=this;this.iterator("row",function(b,c,d){var e=b.aoData,f=e[c],g,h,i,m,l;e.splice(c,1);g=0;for(h=e.length;g<h;g++)if(i=e[g],l=i.anCells,null!==i.nTr&&(i.nTr._DT_RowIndex=g),null!==l){i=0;for(m=
+l.length;i<m;i++)l[i]._DT_CellIndex.row=g}pa(b.aiDisplayMaster,c);pa(b.aiDisplay,c);pa(a[d],c,!1);0<b._iRecordsDisplay&&b._iRecordsDisplay--;Sa(b);c=b.rowIdFn(f._aData);c!==k&&delete b.aIds[c]});this.iterator("table",function(a){for(var c=0,d=a.aoData.length;c<d;c++)a.aoData[c].idx=c});return this});o("rows.add()",function(a){var b=this.iterator("table",function(b){var c,f,g,h=[];f=0;for(g=a.length;f<g;f++)c=a[f],c.nodeName&&"TR"===c.nodeName.toUpperCase()?h.push(na(b,c)[0]):h.push(O(b,c));return h},
+1),c=this.rows(-1);c.pop();h.merge(c,b);return c});o("row()",function(a,b){return cb(this.rows(a,b))});o("row().data()",function(a){var b=this.context;if(a===k)return b.length&&this.length?b[0].aoData[this[0]]._aData:k;var c=b[0].aoData[this[0]];c._aData=a;h.isArray(a)&&c.nTr.id&&N(b[0].rowId)(a,c.nTr.id);da(b[0],this[0],"data");return this});o("row().node()",function(){var a=this.context;return a.length&&this.length?a[0].aoData[this[0]].nTr||null:null});o("row.add()",function(a){a instanceof h&&
+a.length&&(a=a[0]);var b=this.iterator("table",function(b){return a.nodeName&&"TR"===a.nodeName.toUpperCase()?na(b,a)[0]:O(b,a)});return this.row(b[0])});var db=function(a,b){var c=a.context;if(c.length&&(c=c[0].aoData[b!==k?b:a[0]])&&c._details)c._details.remove(),c._detailsShow=k,c._details=k},Tb=function(a,b){var c=a.context;if(c.length&&a.length){var d=c[0].aoData[a[0]];if(d._details){(d._detailsShow=b)?d._details.insertAfter(d.nTr):d._details.detach();var e=c[0],f=new s(e),g=e.aoData;f.off("draw.dt.DT_details column-visibility.dt.DT_details destroy.dt.DT_details");
+0<D(g,"_details").length&&(f.on("draw.dt.DT_details",function(a,b){e===b&&f.rows({page:"current"}).eq(0).each(function(a){a=g[a];a._detailsShow&&a._details.insertAfter(a.nTr)})}),f.on("column-visibility.dt.DT_details",function(a,b){if(e===b)for(var c,d=V(b),f=0,h=g.length;f<h;f++)c=g[f],c._details&&c._details.children("td[colspan]").attr("colspan",d)}),f.on("destroy.dt.DT_details",function(a,b){if(e===b)for(var c=0,d=g.length;c<d;c++)g[c]._details&&db(f,c)}))}}};o("row().child()",function(a,b){var c=
+this.context;if(a===k)return c.length&&this.length?c[0].aoData[this[0]]._details:k;if(!0===a)this.child.show();else if(!1===a)db(this);else if(c.length&&this.length){var d=c[0],c=c[0].aoData[this[0]],e=[],f=function(a,b){if(h.isArray(a)||a instanceof h)for(var c=0,k=a.length;c<k;c++)f(a[c],b);else a.nodeName&&"tr"===a.nodeName.toLowerCase()?e.push(a):(c=h("<tr><td/></tr>").addClass(b),h("td",c).addClass(b).html(a)[0].colSpan=V(d),e.push(c[0]))};f(a,b);c._details&&c._details.detach();c._details=h(e);
+c._detailsShow&&c._details.insertAfter(c.nTr)}return this});o(["row().child.show()","row().child().show()"],function(){Tb(this,!0);return this});o(["row().child.hide()","row().child().hide()"],function(){Tb(this,!1);return this});o(["row().child.remove()","row().child().remove()"],function(){db(this);return this});o("row().child.isShown()",function(){var a=this.context;return a.length&&this.length?a[0].aoData[this[0]]._detailsShow||!1:!1});var bc=/^([^:]+):(name|visIdx|visible)$/,Ub=function(a,b,
+c,d,e){for(var c=[],d=0,f=e.length;d<f;d++)c.push(B(a,e[d],b));return c};o("columns()",function(a,b){a===k?a="":h.isPlainObject(a)&&(b=a,a="");var b=bb(b),c=this.iterator("table",function(c){var e=a,f=b,g=c.aoColumns,j=D(g,"sName"),i=D(g,"nTh");return ab("column",e,function(a){var b=Nb(a);if(a==="")return Y(g.length);if(b!==null)return[b>=0?b:g.length+b];if(typeof a==="function"){var e=Ba(c,f);return h.map(g,function(b,f){return a(f,Ub(c,f,0,0,e),i[f])?f:null})}var k=typeof a==="string"?a.match(bc):
+"";if(k)switch(k[2]){case "visIdx":case "visible":b=parseInt(k[1],10);if(b<0){var n=h.map(g,function(a,b){return a.bVisible?b:null});return[n[n.length+b]]}return[aa(c,b)];case "name":return h.map(j,function(a,b){return a===k[1]?b:null});default:return[]}if(a.nodeName&&a._DT_CellIndex)return[a._DT_CellIndex.column];b=h(i).filter(a).map(function(){return h.inArray(this,i)}).toArray();if(b.length||!a.nodeName)return b;b=h(a).closest("*[data-dt-column]");return b.length?[b.data("dt-column")]:[]},c,f)},
+1);c.selector.cols=a;c.selector.opts=b;return c});u("columns().header()","column().header()",function(){return this.iterator("column",function(a,b){return a.aoColumns[b].nTh},1)});u("columns().footer()","column().footer()",function(){return this.iterator("column",function(a,b){return a.aoColumns[b].nTf},1)});u("columns().data()","column().data()",function(){return this.iterator("column-rows",Ub,1)});u("columns().dataSrc()","column().dataSrc()",function(){return this.iterator("column",function(a,b){return a.aoColumns[b].mData},
+1)});u("columns().cache()","column().cache()",function(a){return this.iterator("column-rows",function(b,c,d,e,f){return ja(b.aoData,f,"search"===a?"_aFilterData":"_aSortData",c)},1)});u("columns().nodes()","column().nodes()",function(){return this.iterator("column-rows",function(a,b,c,d,e){return ja(a.aoData,e,"anCells",b)},1)});u("columns().visible()","column().visible()",function(a,b){var c=this.iterator("column",function(b,c){if(a===k)return b.aoColumns[c].bVisible;var f=b.aoColumns,g=f[c],j=b.aoData,
+i,m,l;if(a!==k&&g.bVisible!==a){if(a){var n=h.inArray(!0,D(f,"bVisible"),c+1);i=0;for(m=j.length;i<m;i++)l=j[i].nTr,f=j[i].anCells,l&&l.insertBefore(f[c],f[n]||null)}else h(D(b.aoData,"anCells",c)).detach();g.bVisible=a;fa(b,b.aoHeader);fa(b,b.aoFooter);b.aiDisplay.length||h(b.nTBody).find("td[colspan]").attr("colspan",V(b));xa(b)}});a!==k&&(this.iterator("column",function(c,e){r(c,null,"column-visibility",[c,e,a,b])}),(b===k||b)&&this.columns.adjust());return c});u("columns().indexes()","column().index()",
+function(a){return this.iterator("column",function(b,c){return"visible"===a?ba(b,c):c},1)});o("columns.adjust()",function(){return this.iterator("table",function(a){$(a)},1)});o("column.index()",function(a,b){if(0!==this.context.length){var c=this.context[0];if("fromVisible"===a||"toData"===a)return aa(c,b);if("fromData"===a||"toVisible"===a)return ba(c,b)}});o("column()",function(a,b){return cb(this.columns(a,b))});o("cells()",function(a,b,c){h.isPlainObject(a)&&(a.row===k?(c=a,a=null):(c=b,b=null));
+h.isPlainObject(b)&&(c=b,b=null);if(null===b||b===k)return this.iterator("table",function(b){var d=a,e=bb(c),f=b.aoData,g=Ba(b,e),j=Qb(ja(f,g,"anCells")),i=h([].concat.apply([],j)),l,m=b.aoColumns.length,n,o,u,s,r,v;return ab("cell",d,function(a){var c=typeof a==="function";if(a===null||a===k||c){n=[];o=0;for(u=g.length;o<u;o++){l=g[o];for(s=0;s<m;s++){r={row:l,column:s};if(c){v=f[l];a(r,B(b,l,s),v.anCells?v.anCells[s]:null)&&n.push(r)}else n.push(r)}}return n}if(h.isPlainObject(a))return a.column!==
+k&&a.row!==k&&h.inArray(a.row,g)!==-1?[a]:[];c=i.filter(a).map(function(a,b){return{row:b._DT_CellIndex.row,column:b._DT_CellIndex.column}}).toArray();if(c.length||!a.nodeName)return c;v=h(a).closest("*[data-dt-row]");return v.length?[{row:v.data("dt-row"),column:v.data("dt-column")}]:[]},b,e)});var d=this.columns(b),e=this.rows(a),f,g,j,i,m;this.iterator("table",function(a,b){f=[];g=0;for(j=e[b].length;g<j;g++){i=0;for(m=d[b].length;i<m;i++)f.push({row:e[b][g],column:d[b][i]})}},1);var l=this.cells(f,
+c);h.extend(l.selector,{cols:b,rows:a,opts:c});return l});u("cells().nodes()","cell().node()",function(){return this.iterator("cell",function(a,b,c){return(a=a.aoData[b])&&a.anCells?a.anCells[c]:k},1)});o("cells().data()",function(){return this.iterator("cell",function(a,b,c){return B(a,b,c)},1)});u("cells().cache()","cell().cache()",function(a){a="search"===a?"_aFilterData":"_aSortData";return this.iterator("cell",function(b,c,d){return b.aoData[c][a][d]},1)});u("cells().render()","cell().render()",
+function(a){return this.iterator("cell",function(b,c,d){return B(b,c,d,a)},1)});u("cells().indexes()","cell().index()",function(){return this.iterator("cell",function(a,b,c){return{row:b,column:c,columnVisible:ba(a,c)}},1)});u("cells().invalidate()","cell().invalidate()",function(a){return this.iterator("cell",function(b,c,d){da(b,c,a,d)})});o("cell()",function(a,b,c){return cb(this.cells(a,b,c))});o("cell().data()",function(a){var b=this.context,c=this[0];if(a===k)return b.length&&c.length?B(b[0],
+c[0].row,c[0].column):k;jb(b[0],c[0].row,c[0].column,a);da(b[0],c[0].row,"data",c[0].column);return this});o("order()",function(a,b){var c=this.context;if(a===k)return 0!==c.length?c[0].aaSorting:k;"number"===typeof a?a=[[a,b]]:a.length&&!h.isArray(a[0])&&(a=Array.prototype.slice.call(arguments));return this.iterator("table",function(b){b.aaSorting=a.slice()})});o("order.listener()",function(a,b,c){return this.iterator("table",function(d){Ma(d,a,b,c)})});o("order.fixed()",function(a){if(!a){var b=
+this.context,b=b.length?b[0].aaSortingFixed:k;return h.isArray(b)?{pre:b}:b}return this.iterator("table",function(b){b.aaSortingFixed=h.extend(!0,{},a)})});o(["columns().order()","column().order()"],function(a){var b=this;return this.iterator("table",function(c,d){var e=[];h.each(b[d],function(b,c){e.push([c,a])});c.aaSorting=e})});o("search()",function(a,b,c,d){var e=this.context;return a===k?0!==e.length?e[0].oPreviousSearch.sSearch:k:this.iterator("table",function(e){e.oFeatures.bFilter&&ga(e,
+h.extend({},e.oPreviousSearch,{sSearch:a+"",bRegex:null===b?!1:b,bSmart:null===c?!0:c,bCaseInsensitive:null===d?!0:d}),1)})});u("columns().search()","column().search()",function(a,b,c,d){return this.iterator("column",function(e,f){var g=e.aoPreSearchCols;if(a===k)return g[f].sSearch;e.oFeatures.bFilter&&(h.extend(g[f],{sSearch:a+"",bRegex:null===b?!1:b,bSmart:null===c?!0:c,bCaseInsensitive:null===d?!0:d}),ga(e,e.oPreviousSearch,1))})});o("state()",function(){return this.context.length?this.context[0].oSavedState:
+null});o("state.clear()",function(){return this.iterator("table",function(a){a.fnStateSaveCallback.call(a.oInstance,a,{})})});o("state.loaded()",function(){return this.context.length?this.context[0].oLoadedState:null});o("state.save()",function(){return this.iterator("table",function(a){xa(a)})});n.versionCheck=n.fnVersionCheck=function(a){for(var b=n.version.split("."),a=a.split("."),c,d,e=0,f=a.length;e<f;e++)if(c=parseInt(b[e],10)||0,d=parseInt(a[e],10)||0,c!==d)return c>d;return!0};n.isDataTable=
+n.fnIsDataTable=function(a){var b=h(a).get(0),c=!1;if(a instanceof n.Api)return!0;h.each(n.settings,function(a,e){var f=e.nScrollHead?h("table",e.nScrollHead)[0]:null,g=e.nScrollFoot?h("table",e.nScrollFoot)[0]:null;if(e.nTable===b||f===b||g===b)c=!0});return c};n.tables=n.fnTables=function(a){var b=!1;h.isPlainObject(a)&&(b=a.api,a=a.visible);var c=h.map(n.settings,function(b){if(!a||a&&h(b.nTable).is(":visible"))return b.nTable});return b?new s(c):c};n.camelToHungarian=J;o("$()",function(a,b){var c=
+this.rows(b).nodes(),c=h(c);return h([].concat(c.filter(a).toArray(),c.find(a).toArray()))});h.each(["on","one","off"],function(a,b){o(b+"()",function(){var a=Array.prototype.slice.call(arguments);a[0]=h.map(a[0].split(/\s/),function(a){return!a.match(/\.dt\b/)?a+".dt":a}).join(" ");var d=h(this.tables().nodes());d[b].apply(d,a);return this})});o("clear()",function(){return this.iterator("table",function(a){oa(a)})});o("settings()",function(){return new s(this.context,this.context)});o("init()",function(){var a=
+this.context;return a.length?a[0].oInit:null});o("data()",function(){return this.iterator("table",function(a){return D(a.aoData,"_aData")}).flatten()});o("destroy()",function(a){a=a||!1;return this.iterator("table",function(b){var c=b.nTableWrapper.parentNode,d=b.oClasses,e=b.nTable,f=b.nTBody,g=b.nTHead,j=b.nTFoot,i=h(e),f=h(f),k=h(b.nTableWrapper),l=h.map(b.aoData,function(a){return a.nTr}),o;b.bDestroying=!0;r(b,"aoDestroyCallback","destroy",[b]);a||(new s(b)).columns().visible(!0);k.off(".DT").find(":not(tbody *)").off(".DT");
+h(E).off(".DT-"+b.sInstance);e!=g.parentNode&&(i.children("thead").detach(),i.append(g));j&&e!=j.parentNode&&(i.children("tfoot").detach(),i.append(j));b.aaSorting=[];b.aaSortingFixed=[];wa(b);h(l).removeClass(b.asStripeClasses.join(" "));h("th, td",g).removeClass(d.sSortable+" "+d.sSortableAsc+" "+d.sSortableDesc+" "+d.sSortableNone);f.children().detach();f.append(l);g=a?"remove":"detach";i[g]();k[g]();!a&&c&&(c.insertBefore(e,b.nTableReinsertBefore),i.css("width",b.sDestroyWidth).removeClass(d.sTable),
+(o=b.asDestroyStripes.length)&&f.children().each(function(a){h(this).addClass(b.asDestroyStripes[a%o])}));c=h.inArray(b,n.settings);-1!==c&&n.settings.splice(c,1)})});h.each(["column","row","cell"],function(a,b){o(b+"s().every()",function(a){var d=this.selector.opts,e=this;return this.iterator(b,function(f,g,h,i,m){a.call(e[b](g,"cell"===b?h:d,"cell"===b?d:k),g,h,i,m)})})});o("i18n()",function(a,b,c){var d=this.context[0],a=S(a)(d.oLanguage);a===k&&(a=b);c!==k&&h.isPlainObject(a)&&(a=a[c]!==k?a[c]:
+a._);return a.replace("%d",c)});n.version="1.10.18";n.settings=[];n.models={};n.models.oSearch={bCaseInsensitive:!0,sSearch:"",bRegex:!1,bSmart:!0};n.models.oRow={nTr:null,anCells:null,_aData:[],_aSortData:null,_aFilterData:null,_sFilterRow:null,_sRowStripe:"",src:null,idx:-1};n.models.oColumn={idx:null,aDataSort:null,asSorting:null,bSearchable:null,bSortable:null,bVisible:null,_sManualType:null,_bAttrSrc:!1,fnCreatedCell:null,fnGetData:null,fnSetData:null,mData:null,mRender:null,nTh:null,nTf:null,
+sClass:null,sContentPadding:null,sDefaultContent:null,sName:null,sSortDataType:"std",sSortingClass:null,sSortingClassJUI:null,sTitle:null,sType:null,sWidth:null,sWidthOrig:null};n.defaults={aaData:null,aaSorting:[[0,"asc"]],aaSortingFixed:[],ajax:null,aLengthMenu:[10,25,50,100],aoColumns:null,aoColumnDefs:null,aoSearchCols:[],asStripeClasses:null,bAutoWidth:!0,bDeferRender:!1,bDestroy:!1,bFilter:!0,bInfo:!0,bLengthChange:!0,bPaginate:!0,bProcessing:!1,bRetrieve:!1,bScrollCollapse:!1,bServerSide:!1,
+bSort:!0,bSortMulti:!0,bSortCellsTop:!1,bSortClasses:!0,bStateSave:!1,fnCreatedRow:null,fnDrawCallback:null,fnFooterCallback:null,fnFormatNumber:function(a){return a.toString().replace(/\B(?=(\d{3})+(?!\d))/g,this.oLanguage.sThousands)},fnHeaderCallback:null,fnInfoCallback:null,fnInitComplete:null,fnPreDrawCallback:null,fnRowCallback:null,fnServerData:null,fnServerParams:null,fnStateLoadCallback:function(a){try{return JSON.parse((-1===a.iStateDuration?sessionStorage:localStorage).getItem("DataTables_"+
+a.sInstance+"_"+location.pathname))}catch(b){}},fnStateLoadParams:null,fnStateLoaded:null,fnStateSaveCallback:function(a,b){try{(-1===a.iStateDuration?sessionStorage:localStorage).setItem("DataTables_"+a.sInstance+"_"+location.pathname,JSON.stringify(b))}catch(c){}},fnStateSaveParams:null,iStateDuration:7200,iDeferLoading:null,iDisplayLength:10,iDisplayStart:0,iTabIndex:0,oClasses:{},oLanguage:{oAria:{sSortAscending:": activate to sort column ascending",sSortDescending:": activate to sort column descending"},
+oPaginate:{sFirst:"First",sLast:"Last",sNext:"Next",sPrevious:"Previous"},sEmptyTable:"No data available in table",sInfo:"Showing _START_ to _END_ of _TOTAL_ entries",sInfoEmpty:"Showing 0 to 0 of 0 entries",sInfoFiltered:"(filtered from _MAX_ total entries)",sInfoPostFix:"",sDecimal:"",sThousands:",",sLengthMenu:"Show _MENU_ entries",sLoadingRecords:"Loading...",sProcessing:"Processing...",sSearch:"Search:",sSearchPlaceholder:"",sUrl:"",sZeroRecords:"No matching records found"},oSearch:h.extend({},
+n.models.oSearch),sAjaxDataProp:"data",sAjaxSource:null,sDom:"lfrtip",searchDelay:null,sPaginationType:"simple_numbers",sScrollX:"",sScrollXInner:"",sScrollY:"",sServerMethod:"GET",renderer:null,rowId:"DT_RowId"};Z(n.defaults);n.defaults.column={aDataSort:null,iDataSort:-1,asSorting:["asc","desc"],bSearchable:!0,bSortable:!0,bVisible:!0,fnCreatedCell:null,mData:null,mRender:null,sCellType:"td",sClass:"",sContentPadding:"",sDefaultContent:null,sName:"",sSortDataType:"std",sTitle:null,sType:null,sWidth:null};
+Z(n.defaults.column);n.models.oSettings={oFeatures:{bAutoWidth:null,bDeferRender:null,bFilter:null,bInfo:null,bLengthChange:null,bPaginate:null,bProcessing:null,bServerSide:null,bSort:null,bSortMulti:null,bSortClasses:null,bStateSave:null},oScroll:{bCollapse:null,iBarWidth:0,sX:null,sXInner:null,sY:null},oLanguage:{fnInfoCallback:null},oBrowser:{bScrollOversize:!1,bScrollbarLeft:!1,bBounding:!1,barWidth:0},ajax:null,aanFeatures:[],aoData:[],aiDisplay:[],aiDisplayMaster:[],aIds:{},aoColumns:[],aoHeader:[],
+aoFooter:[],oPreviousSearch:{},aoPreSearchCols:[],aaSorting:null,aaSortingFixed:[],asStripeClasses:null,asDestroyStripes:[],sDestroyWidth:0,aoRowCallback:[],aoHeaderCallback:[],aoFooterCallback:[],aoDrawCallback:[],aoRowCreatedCallback:[],aoPreDrawCallback:[],aoInitComplete:[],aoStateSaveParams:[],aoStateLoadParams:[],aoStateLoaded:[],sTableId:"",nTable:null,nTHead:null,nTFoot:null,nTBody:null,nTableWrapper:null,bDeferLoading:!1,bInitialised:!1,aoOpenRows:[],sDom:null,searchDelay:null,sPaginationType:"two_button",
+iStateDuration:0,aoStateSave:[],aoStateLoad:[],oSavedState:null,oLoadedState:null,sAjaxSource:null,sAjaxDataProp:null,bAjaxDataGet:!0,jqXHR:null,json:k,oAjaxData:k,fnServerData:null,aoServerParams:[],sServerMethod:null,fnFormatNumber:null,aLengthMenu:null,iDraw:0,bDrawing:!1,iDrawError:-1,_iDisplayLength:10,_iDisplayStart:0,_iRecordsTotal:0,_iRecordsDisplay:0,oClasses:{},bFiltered:!1,bSorted:!1,bSortCellsTop:null,oInit:null,aoDestroyCallback:[],fnRecordsTotal:function(){return"ssp"==y(this)?1*this._iRecordsTotal:
+this.aiDisplayMaster.length},fnRecordsDisplay:function(){return"ssp"==y(this)?1*this._iRecordsDisplay:this.aiDisplay.length},fnDisplayEnd:function(){var a=this._iDisplayLength,b=this._iDisplayStart,c=b+a,d=this.aiDisplay.length,e=this.oFeatures,f=e.bPaginate;return e.bServerSide?!1===f||-1===a?b+d:Math.min(b+a,this._iRecordsDisplay):!f||c>d||-1===a?d:c},oInstance:null,sInstance:null,iTabIndex:0,nScrollHead:null,nScrollFoot:null,aLastSort:[],oPlugins:{},rowIdFn:null,rowId:null};n.ext=x={buttons:{},
+classes:{},builder:"-source-",errMode:"alert",feature:[],search:[],selector:{cell:[],column:[],row:[]},internal:{},legacy:{ajax:null},pager:{},renderer:{pageButton:{},header:{}},order:{},type:{detect:[],search:{},order:{}},_unique:0,fnVersionCheck:n.fnVersionCheck,iApiIndex:0,oJUIClasses:{},sVersion:n.version};h.extend(x,{afnFiltering:x.search,aTypes:x.type.detect,ofnSearch:x.type.search,oSort:x.type.order,afnSortData:x.order,aoFeatures:x.feature,oApi:x.internal,oStdClasses:x.classes,oPagination:x.pager});
+h.extend(n.ext.classes,{sTable:"dataTable",sNoFooter:"no-footer",sPageButton:"paginate_button",sPageButtonActive:"current",sPageButtonDisabled:"disabled",sStripeOdd:"odd",sStripeEven:"even",sRowEmpty:"dataTables_empty",sWrapper:"dataTables_wrapper",sFilter:"dataTables_filter",sInfo:"dataTables_info",sPaging:"dataTables_paginate paging_",sLength:"dataTables_length",sProcessing:"dataTables_processing",sSortAsc:"sorting_asc",sSortDesc:"sorting_desc",sSortable:"sorting",sSortableAsc:"sorting_asc_disabled",
+sSortableDesc:"sorting_desc_disabled",sSortableNone:"sorting_disabled",sSortColumn:"sorting_",sFilterInput:"",sLengthSelect:"",sScrollWrapper:"dataTables_scroll",sScrollHead:"dataTables_scrollHead",sScrollHeadInner:"dataTables_scrollHeadInner",sScrollBody:"dataTables_scrollBody",sScrollFoot:"dataTables_scrollFoot",sScrollFootInner:"dataTables_scrollFootInner",sHeaderTH:"",sFooterTH:"",sSortJUIAsc:"",sSortJUIDesc:"",sSortJUI:"",sSortJUIAscAllowed:"",sSortJUIDescAllowed:"",sSortJUIWrapper:"",sSortIcon:"",
+sJUIHeader:"",sJUIFooter:""});var Kb=n.ext.pager;h.extend(Kb,{simple:function(){return["previous","next"]},full:function(){return["first","previous","next","last"]},numbers:function(a,b){return[ia(a,b)]},simple_numbers:function(a,b){return["previous",ia(a,b),"next"]},full_numbers:function(a,b){return["first","previous",ia(a,b),"next","last"]},first_last_numbers:function(a,b){return["first",ia(a,b),"last"]},_numbers:ia,numbers_length:7});h.extend(!0,n.ext.renderer,{pageButton:{_:function(a,b,c,d,e,
+f){var g=a.oClasses,j=a.oLanguage.oPaginate,i=a.oLanguage.oAria.paginate||{},m,l,n=0,o=function(b,d){var k,s,u,r,v=function(b){Ta(a,b.data.action,true)};k=0;for(s=d.length;k<s;k++){r=d[k];if(h.isArray(r)){u=h("<"+(r.DT_el||"div")+"/>").appendTo(b);o(u,r)}else{m=null;l="";switch(r){case "ellipsis":b.append('<span class="ellipsis">&#x2026;</span>');break;case "first":m=j.sFirst;l=r+(e>0?"":" "+g.sPageButtonDisabled);break;case "previous":m=j.sPrevious;l=r+(e>0?"":" "+g.sPageButtonDisabled);break;case "next":m=
+j.sNext;l=r+(e<f-1?"":" "+g.sPageButtonDisabled);break;case "last":m=j.sLast;l=r+(e<f-1?"":" "+g.sPageButtonDisabled);break;default:m=r+1;l=e===r?g.sPageButtonActive:""}if(m!==null){u=h("<a>",{"class":g.sPageButton+" "+l,"aria-controls":a.sTableId,"aria-label":i[r],"data-dt-idx":n,tabindex:a.iTabIndex,id:c===0&&typeof r==="string"?a.sTableId+"_"+r:null}).html(m).appendTo(b);Wa(u,{action:r},v);n++}}}},s;try{s=h(b).find(H.activeElement).data("dt-idx")}catch(u){}o(h(b).empty(),d);s!==k&&h(b).find("[data-dt-idx="+
+s+"]").focus()}}});h.extend(n.ext.type.detect,[function(a,b){var c=b.oLanguage.sDecimal;return $a(a,c)?"num"+c:null},function(a){if(a&&!(a instanceof Date)&&!Zb.test(a))return null;var b=Date.parse(a);return null!==b&&!isNaN(b)||M(a)?"date":null},function(a,b){var c=b.oLanguage.sDecimal;return $a(a,c,!0)?"num-fmt"+c:null},function(a,b){var c=b.oLanguage.sDecimal;return Pb(a,c)?"html-num"+c:null},function(a,b){var c=b.oLanguage.sDecimal;return Pb(a,c,!0)?"html-num-fmt"+c:null},function(a){return M(a)||
+"string"===typeof a&&-1!==a.indexOf("<")?"html":null}]);h.extend(n.ext.type.search,{html:function(a){return M(a)?a:"string"===typeof a?a.replace(Mb," ").replace(Aa,""):""},string:function(a){return M(a)?a:"string"===typeof a?a.replace(Mb," "):a}});var za=function(a,b,c,d){if(0!==a&&(!a||"-"===a))return-Infinity;b&&(a=Ob(a,b));a.replace&&(c&&(a=a.replace(c,"")),d&&(a=a.replace(d,"")));return 1*a};h.extend(x.type.order,{"date-pre":function(a){a=Date.parse(a);return isNaN(a)?-Infinity:a},"html-pre":function(a){return M(a)?
+"":a.replace?a.replace(/<.*?>/g,"").toLowerCase():a+""},"string-pre":function(a){return M(a)?"":"string"===typeof a?a.toLowerCase():!a.toString?"":a.toString()},"string-asc":function(a,b){return a<b?-1:a>b?1:0},"string-desc":function(a,b){return a<b?1:a>b?-1:0}});Da("");h.extend(!0,n.ext.renderer,{header:{_:function(a,b,c,d){h(a.nTable).on("order.dt.DT",function(e,f,g,h){if(a===f){e=c.idx;b.removeClass(c.sSortingClass+" "+d.sSortAsc+" "+d.sSortDesc).addClass(h[e]=="asc"?d.sSortAsc:h[e]=="desc"?d.sSortDesc:
+c.sSortingClass)}})},jqueryui:function(a,b,c,d){h("<div/>").addClass(d.sSortJUIWrapper).append(b.contents()).append(h("<span/>").addClass(d.sSortIcon+" "+c.sSortingClassJUI)).appendTo(b);h(a.nTable).on("order.dt.DT",function(e,f,g,h){if(a===f){e=c.idx;b.removeClass(d.sSortAsc+" "+d.sSortDesc).addClass(h[e]=="asc"?d.sSortAsc:h[e]=="desc"?d.sSortDesc:c.sSortingClass);b.find("span."+d.sSortIcon).removeClass(d.sSortJUIAsc+" "+d.sSortJUIDesc+" "+d.sSortJUI+" "+d.sSortJUIAscAllowed+" "+d.sSortJUIDescAllowed).addClass(h[e]==
+"asc"?d.sSortJUIAsc:h[e]=="desc"?d.sSortJUIDesc:c.sSortingClassJUI)}})}}});var Vb=function(a){return"string"===typeof a?a.replace(/</g,"&lt;").replace(/>/g,"&gt;").replace(/"/g,"&quot;"):a};n.render={number:function(a,b,c,d,e){return{display:function(f){if("number"!==typeof f&&"string"!==typeof f)return f;var g=0>f?"-":"",h=parseFloat(f);if(isNaN(h))return Vb(f);h=h.toFixed(c);f=Math.abs(h);h=parseInt(f,10);f=c?b+(f-h).toFixed(c).substring(2):"";return g+(d||"")+h.toString().replace(/\B(?=(\d{3})+(?!\d))/g,
+a)+f+(e||"")}}},text:function(){return{display:Vb}}};h.extend(n.ext.internal,{_fnExternApiFunc:Lb,_fnBuildAjax:sa,_fnAjaxUpdate:lb,_fnAjaxParameters:ub,_fnAjaxUpdateDraw:vb,_fnAjaxDataSrc:ta,_fnAddColumn:Ea,_fnColumnOptions:ka,_fnAdjustColumnSizing:$,_fnVisibleToColumnIndex:aa,_fnColumnIndexToVisible:ba,_fnVisbleColumns:V,_fnGetColumns:ma,_fnColumnTypes:Ga,_fnApplyColumnDefs:ib,_fnHungarianMap:Z,_fnCamelToHungarian:J,_fnLanguageCompat:Ca,_fnBrowserDetect:gb,_fnAddData:O,_fnAddTr:na,_fnNodeToDataIndex:function(a,
+b){return b._DT_RowIndex!==k?b._DT_RowIndex:null},_fnNodeToColumnIndex:function(a,b,c){return h.inArray(c,a.aoData[b].anCells)},_fnGetCellData:B,_fnSetCellData:jb,_fnSplitObjNotation:Ja,_fnGetObjectDataFn:S,_fnSetObjectDataFn:N,_fnGetDataMaster:Ka,_fnClearTable:oa,_fnDeleteIndex:pa,_fnInvalidate:da,_fnGetRowElements:Ia,_fnCreateTr:Ha,_fnBuildHead:kb,_fnDrawHead:fa,_fnDraw:P,_fnReDraw:T,_fnAddOptionsHtml:nb,_fnDetectHeader:ea,_fnGetUniqueThs:ra,_fnFeatureHtmlFilter:pb,_fnFilterComplete:ga,_fnFilterCustom:yb,
+_fnFilterColumn:xb,_fnFilter:wb,_fnFilterCreateSearch:Pa,_fnEscapeRegex:Qa,_fnFilterData:zb,_fnFeatureHtmlInfo:sb,_fnUpdateInfo:Cb,_fnInfoMacros:Db,_fnInitialise:ha,_fnInitComplete:ua,_fnLengthChange:Ra,_fnFeatureHtmlLength:ob,_fnFeatureHtmlPaginate:tb,_fnPageChange:Ta,_fnFeatureHtmlProcessing:qb,_fnProcessingDisplay:C,_fnFeatureHtmlTable:rb,_fnScrollDraw:la,_fnApplyToChildren:I,_fnCalculateColumnWidths:Fa,_fnThrottle:Oa,_fnConvertToWidth:Eb,_fnGetWidestNode:Fb,_fnGetMaxLenString:Gb,_fnStringToCss:v,
+_fnSortFlatten:X,_fnSort:mb,_fnSortAria:Ib,_fnSortListener:Va,_fnSortAttachListener:Ma,_fnSortingClasses:wa,_fnSortData:Hb,_fnSaveState:xa,_fnLoadState:Jb,_fnSettingsFromNode:ya,_fnLog:K,_fnMap:F,_fnBindAction:Wa,_fnCallbackReg:z,_fnCallbackFire:r,_fnLengthOverflow:Sa,_fnRenderer:Na,_fnDataSource:y,_fnRowAttributes:La,_fnExtend:Xa,_fnCalculateEnd:function(){}});h.fn.dataTable=n;n.$=h;h.fn.dataTableSettings=n.settings;h.fn.dataTableExt=n.ext;h.fn.DataTable=function(a){return h(this).dataTable(a).api()};
+h.each(n,function(a,b){h.fn.DataTable[a]=b});return h.fn.dataTable});

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d36012b6/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/webapps/static/dt-1.10.7/css/demo_page.css
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/webapps/static/dt-1.10.7/css/demo_page.css b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/webapps/static/dt-1.10.7/css/demo_page.css
deleted file mode 100644
index b60ee7d..0000000
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/webapps/static/dt-1.10.7/css/demo_page.css
+++ /dev/null
@@ -1,110 +0,0 @@
-/**
-* Licensed to the Apache Software Foundation (ASF) under one
-* or more contributor license agreements.  See the NOTICE file
-* distributed with this work for additional information
-* regarding copyright ownership.  The ASF licenses this file
-* to you under the Apache License, Version 2.0 (the
-* "License"); you may not use this file except in compliance
-* with the License.  You may obtain a copy of the License at
-*
-*     http://www.apache.org/licenses/LICENSE-2.0
-*
-* Unless required by applicable law or agreed to in writing, software
-* distributed under the License is distributed on an "AS IS" BASIS,
-* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-* See the License for the specific language governing permissions and
-* limitations under the License.
-*/
-
-/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
- * General page setup
- */
-#dt_example {
-	font: 80%/1.45em "Lucida Grande", Verdana, Arial, Helvetica, sans-serif;
-	margin: 0;
-	padding: 0;
-	color: #333;
-	background-color: #fff;
-}
-
-
-#dt_example #container {
-	width: 800px;
-	margin: 30px auto;
-	padding: 0;
-}
-
-
-#dt_example #footer {
-	margin: 50px auto 0 auto;
-	padding: 0;
-}
-
-#dt_example #demo {
-	margin: 30px auto 0 auto;
-}
-
-#dt_example .demo_jui {
-	margin: 30px auto 0 auto;
-}
-
-#dt_example .big {
-	font-size: 1.3em;
-	font-weight: bold;
-	line-height: 1.6em;
-	color: #4E6CA3;
-}
-
-#dt_example .spacer {
-	height: 20px;
-	clear: both;
-}
-
-#dt_example .clear {
-	clear: both;
-}
-
-#dt_example pre {
-	padding: 15px;
-	background-color: #F5F5F5;
-	border: 1px solid #CCCCCC;
-}
-
-#dt_example h1 {
-	margin-top: 2em;
-	font-size: 1.3em;
-	font-weight: normal;
-	line-height: 1.6em;
-	color: #4E6CA3;
-	border-bottom: 1px solid #B0BED9;
-	clear: both;
-}
-
-#dt_example h2 {
-	font-size: 1.2em;
-	font-weight: normal;
-	line-height: 1.6em;
-	color: #4E6CA3;
-	clear: both;
-}
-
-#dt_example a {
-	color: #0063DC;
-	text-decoration: none;
-}
-
-#dt_example a:hover {
-	text-decoration: underline;
-}
-
-#dt_example ul {
-	color: #4E6CA3;
-}
-
-.css_right {
-	float: right;
-}
-
-.css_left {
-	float: left;
-}




[22/50] [abbrv] hadoop git commit: YARN-8915. Update the doc about the default value of maximum-container-assignments for capacity scheduler. Contributed by Zhankun Tang.

Posted by su...@apache.org.
YARN-8915. Update the doc about the default value of maximum-container-assignments for capacity scheduler. Contributed by Zhankun Tang.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/78ea897b
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/78ea897b
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/78ea897b

Branch: refs/heads/HDFS-12943
Commit: 78ea897b6701c82799a6231103ca7ee1f8b4efde
Parents: 1d90a0d
Author: Weiwei Yang <ww...@apache.org>
Authored: Mon Oct 29 12:04:32 2018 +0800
Committer: Weiwei Yang <ww...@apache.org>
Committed: Mon Oct 29 12:04:32 2018 +0800

----------------------------------------------------------------------
 .../hadoop-yarn-site/src/site/markdown/CapacityScheduler.md        | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/78ea897b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/CapacityScheduler.md
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/CapacityScheduler.md b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/CapacityScheduler.md
index c2d4ce4..33d2b13 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/CapacityScheduler.md
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/CapacityScheduler.md
@@ -416,7 +416,7 @@ Note, this feature should be disabled if YARN is deployed separately with the fi
 | Property | Description |
 |:---- |:---- |
 | `yarn.scheduler.capacity.per-node-heartbeat.multiple-assignments-enabled` | Whether to allow multiple container assignments in one NodeManager heartbeat. Defaults to true. |
-| `yarn.scheduler.capacity.per-node-heartbeat.maximum-container-assignments` | If `multiple-assignments-enabled` is true, the maximum amount of containers that can be assigned in one NodeManager heartbeat. Defaults to -1, which sets no limit. |
+| `yarn.scheduler.capacity.per-node-heartbeat.maximum-container-assignments` | If `multiple-assignments-enabled` is true, the maximum amount of containers that can be assigned in one NodeManager heartbeat. Default value is 100, which limits the maximum number of container assignments per heartbeat to 100. Setting this value to -1 will disable this limit. |
 | `yarn.scheduler.capacity.per-node-heartbeat.maximum-offswitch-assignments` | If `multiple-assignments-enabled` is true, the maximum amount of off-switch containers that can be assigned in one NodeManager heartbeat. Defaults to 1, which represents only one off-switch allocation allowed in one heartbeat. |
 
 ###Reviewing the configuration of the CapacityScheduler
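
For context, the three per-node-heartbeat properties in the table above are ordinary CapacityScheduler keys, normally placed in capacity-scheduler.xml. Below is a minimal, hypothetical Java sketch (the class name and the values are illustrative, not taken from the patch) that shows the XML form in comments and sets the same keys programmatically on a plain Hadoop Configuration, as one might do in a test:

import org.apache.hadoop.conf.Configuration;

/** Hypothetical example; not part of the patch. */
public final class HeartbeatAssignmentConfigExample {
  public static void main(String[] args) {
    // In a real deployment the keys go into capacity-scheduler.xml, e.g.:
    //   <property>
    //     <name>yarn.scheduler.capacity.per-node-heartbeat.maximum-container-assignments</name>
    //     <value>100</value>  <!-- documented default; -1 disables the cap -->
    //   </property>
    Configuration conf = new Configuration();
    conf.setBoolean(
        "yarn.scheduler.capacity.per-node-heartbeat.multiple-assignments-enabled", true);
    conf.setInt(
        "yarn.scheduler.capacity.per-node-heartbeat.maximum-container-assignments", 100);
    conf.setInt(
        "yarn.scheduler.capacity.per-node-heartbeat.maximum-offswitch-assignments", 1);

    // Read the value back; per the updated doc text, -1 means "no limit".
    int maxAssignments = conf.getInt(
        "yarn.scheduler.capacity.per-node-heartbeat.maximum-container-assignments", 100);
    System.out.println("Max container assignments per heartbeat: "
        + (maxAssignments == -1 ? "unlimited" : maxAssignments));
  }
}

The defaults shown (true, 100, 1) mirror the table above; only the -1 sentinel changes the behaviour qualitatively.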




[44/50] [abbrv] hadoop git commit: HDFS-13942. [JDK10] Fix javadoc errors in hadoop-hdfs module. Contributed by Dinesh Chitlangia.

Posted by su...@apache.org.
HDFS-13942. [JDK10] Fix javadoc errors in hadoop-hdfs module. Contributed by Dinesh Chitlangia.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/fac9f91b
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/fac9f91b
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/fac9f91b

Branch: refs/heads/HDFS-12943
Commit: fac9f91b2944cee641049fffcafa6b65e0cf68f2
Parents: e4f22b0
Author: Akira Ajisaka <aa...@apache.org>
Authored: Wed Oct 31 14:43:58 2018 +0900
Committer: Akira Ajisaka <aa...@apache.org>
Committed: Wed Oct 31 14:43:58 2018 +0900

----------------------------------------------------------------------
 .../java/org/apache/hadoop/hdfs/DFSUtil.java    | 12 ++--
 .../hadoop/hdfs/protocol/BlockListAsLongs.java  |  2 +-
 .../QJournalProtocolServerSideTranslatorPB.java |  2 +-
 .../token/block/BlockTokenSecretManager.java    |  2 +-
 .../hadoop/hdfs/server/balancer/Balancer.java   | 15 ++---
 .../server/blockmanagement/BlockManager.java    | 26 +++++----
 .../blockmanagement/BlockPlacementPolicy.java   |  1 -
 .../CombinedHostFileManager.java                |  6 +-
 .../blockmanagement/CorruptReplicasMap.java     |  2 +-
 .../blockmanagement/DatanodeAdminManager.java   |  8 +--
 .../server/blockmanagement/HostFileManager.java |  7 +--
 .../hdfs/server/blockmanagement/HostSet.java    |  8 +--
 .../server/blockmanagement/SlowPeerTracker.java |  5 +-
 .../server/datanode/BlockPoolSliceStorage.java  | 60 ++++++++++++--------
 .../server/datanode/BlockRecoveryWorker.java    | 15 +++--
 .../hdfs/server/datanode/BlockScanner.java      |  6 +-
 .../hadoop/hdfs/server/datanode/DataNode.java   | 10 ++--
 .../hdfs/server/datanode/DataStorage.java       |  4 +-
 .../hdfs/server/datanode/DirectoryScanner.java  |  1 -
 .../hdfs/server/datanode/FileIoProvider.java    |  3 -
 .../hdfs/server/datanode/VolumeScanner.java     |  4 +-
 .../server/datanode/checker/AbstractFuture.java | 13 ++---
 .../server/datanode/fsdataset/FsDatasetSpi.java | 12 ++--
 .../server/datanode/fsdataset/FsVolumeSpi.java  | 13 +++--
 .../datanode/metrics/OutlierDetector.java       |  3 +-
 .../diskbalancer/DiskBalancerException.java     |  1 -
 .../datamodel/DiskBalancerCluster.java          | 11 ++--
 .../datamodel/DiskBalancerDataNode.java         | 10 ++--
 .../diskbalancer/planner/GreedyPlanner.java     |  2 +-
 .../hadoop/hdfs/server/namenode/AclStorage.java | 18 +++---
 .../server/namenode/EncryptionZoneManager.java  | 42 +++++++++-----
 .../hdfs/server/namenode/FSDirectory.java       |  8 +--
 .../hdfs/server/namenode/FSNamesystem.java      | 24 ++++++--
 .../hadoop/hdfs/server/namenode/INode.java      |  4 +-
 .../hdfs/server/namenode/INodeReference.java    |  6 +-
 .../hdfs/server/namenode/INodesInPath.java      |  4 +-
 .../hdfs/server/namenode/JournalManager.java    |  2 +-
 .../hdfs/server/namenode/LeaseManager.java      |  2 +-
 .../server/namenode/MetaRecoveryContext.java    |  2 +-
 .../hadoop/hdfs/server/namenode/NameNode.java   |  6 +-
 .../hdfs/server/namenode/NamenodeFsck.java      |  9 ++-
 .../hadoop/hdfs/server/namenode/Quota.java      |  5 +-
 .../server/namenode/ReencryptionHandler.java    |  2 +-
 .../server/namenode/XAttrPermissionFilter.java  |  4 +-
 .../hdfs/server/namenode/XAttrStorage.java      |  8 +--
 .../snapshot/AbstractINodeDiffList.java         |  8 +--
 .../namenode/snapshot/DiffListBySkipList.java   |  9 +--
 .../sps/BlockStorageMovementNeeded.java         |  5 +-
 .../namenode/sps/DatanodeCacheManager.java      |  2 +-
 .../sps/StoragePolicySatisfyManager.java        | 14 +++--
 .../startupprogress/StartupProgressView.java    |  4 +-
 .../server/namenode/top/metrics/TopMetrics.java | 17 ++++--
 .../namenode/top/window/RollingWindow.java      | 18 +++---
 .../top/window/RollingWindowManager.java        |  2 +-
 .../protocol/BlockStorageMovementCommand.java   | 11 ++--
 .../hdfs/server/protocol/DatanodeProtocol.java  |  2 +-
 .../hdfs/server/protocol/NamenodeProtocol.java  |  5 +-
 .../sps/ExternalSPSBlockMoveTaskHandler.java    |  2 +
 .../org/apache/hadoop/hdfs/tools/DFSck.java     | 13 +++--
 .../offlineEditsViewer/OfflineEditsViewer.java  |  4 +-
 .../offlineEditsViewer/OfflineEditsVisitor.java |  2 +-
 .../StatisticsEditsVisitor.java                 |  4 +-
 .../NameDistributionVisitor.java                |  4 +-
 .../java/org/apache/hadoop/hdfs/util/Diff.java  | 16 +++---
 .../org/apache/hadoop/hdfs/util/XMLUtils.java   |  4 +-
 65 files changed, 310 insertions(+), 246 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/fac9f91b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSUtil.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSUtil.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSUtil.java
index 6dd366f..8627268 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSUtil.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSUtil.java
@@ -160,7 +160,8 @@ public class DFSUtil {
   /**
    * Comparator for sorting DataNodeInfo[] based on
    * stale, decommissioned and entering_maintenance states.
-   * Order: live -> stale -> entering_maintenance -> decommissioned
+   * Order: live {@literal ->} stale {@literal ->} entering_maintenance
+   * {@literal ->} decommissioned
    */
   @InterfaceAudience.Private 
   public static class ServiceAndStaleComparator extends ServiceComparator {
@@ -390,7 +391,8 @@ public class DFSUtil {
    * @param conf Configuration
    * @param nsId the nameservice whose NNs addresses we want.
    * @param defaultValue default address to return in case key is not found.
-   * @return A map from nnId -> RPC address of each NN in the nameservice.
+   * @return A map from nnId {@literal ->} RPC address of each NN in the
+   * nameservice.
    */
   public static Map<String, InetSocketAddress> getRpcAddressesForNameserviceId(
       Configuration conf, String nsId, String defaultValue) {
@@ -1289,7 +1291,8 @@ public class DFSUtil {
    * @param conf configuration
    * @param protocol Protocol interface
    * @param service service that implements the protocol
-   * @param server RPC server to which the protocol & implementation is added to
+   * @param server RPC server to which the protocol &amp; implementation is
+   *               added to
    * @throws IOException
    */
   public static void addPBProtocol(Configuration conf, Class<?> protocol,
@@ -1357,7 +1360,8 @@ public class DFSUtil {
    * @param conf Configuration
    * @param nsId the nameservice whose NNs addresses we want.
    * @param defaultValue default address to return in case key is not found.
-   * @return A map from nnId -> Web address of each NN in the nameservice.
+   * @return A map from nnId {@literal ->} Web address of each NN in the
+   * nameservice.
    */
   public static Map<String, InetSocketAddress> getWebAddressesForNameserviceId(
       Configuration conf, String nsId, String defaultValue) {
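
As an aside (not part of this commit): a minimal sketch of calling one of the
DFSUtil lookups whose javadoc is corrected above. The nameservice id "ns1" is
an assumption for the example, not something taken from this patch.

    import java.net.InetSocketAddress;
    import java.util.Map;

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hdfs.DFSUtil;

    public class RpcAddressLookup {
      public static void main(String[] args) {
        Configuration conf = new Configuration();
        // Map from nnId -> RPC address of each NN in nameservice "ns1",
        // as the corrected javadoc describes.
        Map<String, InetSocketAddress> addrs =
            DFSUtil.getRpcAddressesForNameserviceId(conf, "ns1", null);
        addrs.forEach((nnId, addr) -> System.out.println(nnId + " -> " + addr));
      }
    }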

http://git-wip-us.apache.org/repos/asf/hadoop/blob/fac9f91b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/BlockListAsLongs.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/BlockListAsLongs.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/BlockListAsLongs.java
index 8f482e3..77e40b4 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/BlockListAsLongs.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/BlockListAsLongs.java
@@ -201,7 +201,7 @@ public abstract class BlockListAsLongs implements Iterable<BlockReportReplica> {
   /**
    * Very efficient encoding of the block report into a ByteString to avoid
    * the overhead of protobuf repeating fields.  Primitive repeating fields
-   * require re-allocs of an ArrayList<Long> and the associated (un)boxing
+   * require re-allocs of an ArrayList&lt;Long&gt; and the associated (un)boxing
    * overhead which puts pressure on GC.
    * 
    * The structure of the buffer is as follows:
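
For illustration only, a toy sketch (not HDFS code) of the boxing overhead the
javadoc above refers to: an ArrayList<Long> allocates one Long object per
element, while a primitive buffer does not.

    import java.util.ArrayList;
    import java.util.List;

    public class BoxingSketch {
      // One Long object is allocated per block id here, which is the GC
      // pressure the javadoc mentions.
      static List<Long> boxed(long[] blockIds) {
        List<Long> out = new ArrayList<>();
        for (long id : blockIds) {
          out.add(id);
        }
        return out;
      }

      // A primitive buffer avoids the per-element allocations entirely.
      static long[] primitive(long[] blockIds) {
        long[] out = new long[blockIds.length];
        System.arraycopy(blockIds, 0, out, 0, blockIds.length);
        return out;
      }
    }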

http://git-wip-us.apache.org/repos/asf/hadoop/blob/fac9f91b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/protocolPB/QJournalProtocolServerSideTranslatorPB.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/protocolPB/QJournalProtocolServerSideTranslatorPB.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/protocolPB/QJournalProtocolServerSideTranslatorPB.java
index 2ad19da..61e8fa3 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/protocolPB/QJournalProtocolServerSideTranslatorPB.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/protocolPB/QJournalProtocolServerSideTranslatorPB.java
@@ -168,7 +168,7 @@ public class QJournalProtocolServerSideTranslatorPB implements QJournalProtocolP
     return VOID_JOURNAL_RESPONSE;
   }
 
-  /** @see JournalProtocol#heartbeat */
+  /** @see QJournalProtocol#heartbeat */
   @Override
   public HeartbeatResponseProto heartbeat(RpcController controller,
       HeartbeatRequestProto req) throws ServiceException {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/fac9f91b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/security/token/block/BlockTokenSecretManager.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/security/token/block/BlockTokenSecretManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/security/token/block/BlockTokenSecretManager.java
index 3b2e8d2..52bc52d 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/security/token/block/BlockTokenSecretManager.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/security/token/block/BlockTokenSecretManager.java
@@ -220,7 +220,7 @@ public class BlockTokenSecretManager extends
   }
 
   /**
-   * Update block keys if update time > update interval.
+   * Update block keys if update time {@literal >} update interval.
    * @return true if the keys are updated.
    */
   public synchronized boolean updateKeys(final long updateTime) throws IOException {
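
Illustrative sketch of the "update if elapsed time exceeds the interval" check
described above; field names are assumptions, not the real
BlockTokenSecretManager internals.

    public class KeyRotationSketch {
      private long lastKeyUpdateMs;
      private final long keyUpdateIntervalMs;

      KeyRotationSketch(long keyUpdateIntervalMs) {
        this.keyUpdateIntervalMs = keyUpdateIntervalMs;
      }

      // Rotate keys only when more time than the interval has elapsed,
      // mirroring "update block keys if update time > update interval".
      synchronized boolean updateKeys(long nowMs) {
        if (nowMs - lastKeyUpdateMs > keyUpdateIntervalMs) {
          lastKeyUpdateMs = nowMs;
          // generate and roll new block keys here
          return true;
        }
        return false;
      }
    }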

http://git-wip-us.apache.org/repos/asf/hadoop/blob/fac9f91b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/balancer/Balancer.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/balancer/Balancer.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/balancer/Balancer.java
index a58e391..d21d13c 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/balancer/Balancer.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/balancer/Balancer.java
@@ -78,7 +78,7 @@ import com.google.common.base.Preconditions;
  * <p>SYNOPSIS
  * <pre>
  * To start:
- *      bin/start-balancer.sh [-threshold <threshold>]
+ *      bin/start-balancer.sh [-threshold {@literal <threshold>}]
  *      Example: bin/ start-balancer.sh 
  *                     start the balancer with a default threshold of 10%
  *               bin/ start-balancer.sh -threshold 5
@@ -113,13 +113,14 @@ import com.google.common.base.Preconditions;
  * <p>A system property that limits the balancer's use of bandwidth is 
  * defined in the default configuration file:
  * <pre>
- * <property>
- *   <name>dfs.datanode.balance.bandwidthPerSec</name>
- *   <value>1048576</value>
- * <description>  Specifies the maximum bandwidth that each datanode 
+ * &lt;property&gt;
+ *   &lt;name&gt;dfs.datanode.balance.bandwidthPerSec&lt;/name&gt;
+ *   &lt;value&gt;1048576&lt;/value&gt;
+ * &lt;description&gt;  Specifies the maximum bandwidth that each datanode
  * can utilize for the balancing purpose in term of the number of bytes 
- * per second. </description>
- * </property>
+ * per second.
+ * &lt;/description&gt;
+ * &lt;/property&gt;
  * </pre>
  * 
  * <p>This property determines the maximum speed at which a block will be 
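
For illustration, a minimal sketch of setting the bandwidth property quoted in
the javadoc above from code; in a real deployment it is normally configured in
hdfs-site.xml, and 1048576 bytes/s simply matches the example value shown.

    import org.apache.hadoop.conf.Configuration;

    public class BalancerBandwidth {
      public static void main(String[] args) {
        Configuration conf = new Configuration();
        conf.setLong("dfs.datanode.balance.bandwidthPerSec", 1048576L);
        System.out.println(conf.get("dfs.datanode.balance.bandwidthPerSec"));
      }
    }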

http://git-wip-us.apache.org/repos/asf/hadoop/blob/fac9f91b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
index 5e14247..d74b523 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
@@ -143,12 +143,13 @@ import org.slf4j.LoggerFactory;
  * If any of the replica is in maintenance mode, the safety property
  * is extended as follows. These property still apply for the case of zero
  * maintenance replicas, thus we can use these safe property for all scenarios.
- * a. # of live replicas >= # of min replication for maintenance.
- * b. # of live replicas <= # of expected redundancy.
- * c. # of live replicas and maintenance replicas >= # of expected redundancy.
+ * a. # of live replicas &gt;= # of min replication for maintenance.
+ * b. # of live replicas &lt;= # of expected redundancy.
+ * c. # of live replicas and maintenance replicas &gt;= # of expected
+ * redundancy.
  *
  * For regular replication, # of min live replicas for maintenance is determined
- * by DFS_NAMENODE_MAINTENANCE_REPLICATION_MIN_KEY. This number has to <=
+ * by DFS_NAMENODE_MAINTENANCE_REPLICATION_MIN_KEY. This number has to &lt;=
  * DFS_NAMENODE_REPLICATION_MIN_KEY.
  * For erasure encoding, # of min live replicas for maintenance is
  * BlockInfoStriped#getRealDataBlockNum.
@@ -305,7 +306,7 @@ public class BlockManager implements BlockStatsMXBean {
   private final double storageInfoDefragmentRatio;
 
   /**
-   * Mapping: Block -> { BlockCollection, datanodes, self ref }
+   * Mapping: Block {@literal ->} { BlockCollection, datanodes, self ref }
    * Updated only in response to client-sent information.
    */
   final BlocksMap blocksMap;
@@ -321,7 +322,9 @@ public class BlockManager implements BlockStatsMXBean {
   private final BlockReportProcessingThread blockReportThread =
       new BlockReportProcessingThread();
 
-  /** Store blocks -> datanodedescriptor(s) map of corrupt replicas */
+  /**
+   * Store blocks {@literal ->} datanodedescriptor(s) map of corrupt replicas.
+   */
   final CorruptReplicasMap corruptReplicas = new CorruptReplicasMap();
 
   /**
@@ -2105,7 +2108,7 @@ public class BlockManager implements BlockStatsMXBean {
    * Choose target datanodes for creating a new block.
    * 
    * @throws IOException
-   *           if the number of targets < minimum replication.
+   *           if the number of targets {@literal <} minimum replication.
    * @see BlockPlacementPolicy#chooseTarget(String, int, Node,
    *      Set, long, List, BlockStoragePolicy, EnumSet)
    */
@@ -2487,7 +2490,8 @@ public class BlockManager implements BlockStatsMXBean {
 
   /**
    * The given storage is reporting all its blocks.
-   * Update the (storage-->block list) and (block-->storage list) maps.
+   * Update the (storage{@literal -->}block list) and
+   * (block{@literal -->}storage list) maps.
    *
    * @return true if all known storages of the given DN have finished reporting.
    * @throws IOException
@@ -3777,8 +3781,8 @@ public class BlockManager implements BlockStatsMXBean {
   }
 
   /**
-   * Modify (block-->datanode) map. Possibly generate replication tasks, if the
-   * removed block is still valid.
+   * Modify (block{@literal -->}datanode) map. Possibly generate replication
+   * tasks, if the removed block is still valid.
    */
   public void removeStoredBlock(BlockInfo storedBlock, DatanodeDescriptor node) {
     blockLog.debug("BLOCK* removeStoredBlock: {} from {}", storedBlock, node);
@@ -4341,7 +4345,7 @@ public class BlockManager implements BlockStatsMXBean {
   }
 
   /**
-   * Get blocks to invalidate for <i>nodeId</i>
+   * Get blocks to invalidate for {@code nodeId}
    * in {@link #invalidateBlocks}.
    *
    * @return number of blocks scheduled for removal during this iteration.
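
For reference, the three maintenance-mode safety properties (a), (b), (c)
listed in the class javadoc above, written out as a single boolean check;
variable names are illustrative, not the actual BlockManager fields.

    public class MaintenanceSafetySketch {
      static boolean isSafe(int liveReplicas, int maintenanceReplicas,
                            int minReplicationForMaintenance,
                            int expectedRedundancy) {
        return liveReplicas >= minReplicationForMaintenance              // (a)
            && liveReplicas <= expectedRedundancy                        // (b)
            && liveReplicas + maintenanceReplicas >= expectedRedundancy; // (c)
      }
    }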

http://git-wip-us.apache.org/repos/asf/hadoop/blob/fac9f91b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicy.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicy.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicy.java
index 23e3e40..897bf69 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicy.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicy.java
@@ -152,7 +152,6 @@ public abstract class BlockPlacementPolicy {
 
   /**
    * Check if the move is allowed. Used by balancer and other tools.
-   * @
    *
    * @param candidates all replicas including source and target
    * @param source source replica of the move

http://git-wip-us.apache.org/repos/asf/hadoop/blob/fac9f91b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/CombinedHostFileManager.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/CombinedHostFileManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/CombinedHostFileManager.java
index d6a0972..d607789 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/CombinedHostFileManager.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/CombinedHostFileManager.java
@@ -47,13 +47,11 @@ import org.apache.hadoop.hdfs.util.CombinedHostsFileReader;
 /**
  * This class manages datanode configuration using a json file.
  * Please refer to {@link CombinedHostsFileReader} for the json format.
- * <p/>
- * <p/>
+ * <p>
  * Entries may or may not specify a port.  If they don't, we consider
  * them to apply to every DataNode on that host. The code canonicalizes the
  * entries into IP addresses.
- * <p/>
- * <p/>
+ * <p>
  * The code ignores all entries that the DNS fails to resolve their IP
  * addresses. This is okay because by default the NN rejects the registrations
  * of DNs when it fails to do a forward and reverse lookup. Note that DNS

http://git-wip-us.apache.org/repos/asf/hadoop/blob/fac9f91b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/CorruptReplicasMap.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/CorruptReplicasMap.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/CorruptReplicasMap.java
index fe1224c..fc31584 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/CorruptReplicasMap.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/CorruptReplicasMap.java
@@ -38,7 +38,7 @@ import com.google.common.annotations.VisibleForTesting;
  * corrupt. While reporting replicas of a Block, we hide any corrupt
  * copies. These copies are removed once Block is found to have 
  * expected number of good replicas.
- * Mapping: Block -> TreeSet<DatanodeDescriptor> 
+ * Mapping: Block {@literal -> TreeSet<DatanodeDescriptor>}
  */
 
 @InterfaceAudience.Private

http://git-wip-us.apache.org/repos/asf/hadoop/blob/fac9f91b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeAdminManager.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeAdminManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeAdminManager.java
index a1dff08..abc0f7c 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeAdminManager.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeAdminManager.java
@@ -57,7 +57,7 @@ import com.google.common.util.concurrent.ThreadFactoryBuilder;
  * Manages decommissioning and maintenance state for DataNodes. A background
  * monitor thread periodically checks the status of DataNodes that are
  * decommissioning or entering maintenance state.
- * <p/>
+ * <p>
  * A DataNode can be decommissioned in a few situations:
  * <ul>
  * <li>If a DN is dead, it is decommissioned immediately.</li>
@@ -72,11 +72,11 @@ import com.google.common.util.concurrent.ThreadFactoryBuilder;
  * determine if they can be DECOMMISSIONED. The monitor also prunes this list
  * as blocks become replicated, so monitor scans will become more efficient
  * over time.
- * <p/>
+ * <p>
  * DECOMMISSION_INPROGRESS nodes that become dead do not progress to
  * DECOMMISSIONED until they become live again. This prevents potential
  * durability loss for singly-replicated blocks (see HDFS-6791).
- * <p/>
+ * <p>
  * DataNodes can also be put under maintenance state for any short duration
  * maintenance operations. Unlike decommissioning, blocks are not always
  * re-replicated for the DataNodes to enter maintenance state. When the
@@ -88,7 +88,7 @@ import com.google.common.util.concurrent.ThreadFactoryBuilder;
  * of maintenance expiry time. When DataNodes don't transition or join the
  * cluster back by expiry time, blocks are re-replicated just as in
  * decommissioning case as to avoid read or write performance degradation.
- * <p/>
+ * <p>
  * This class depends on the FSNamesystem lock for synchronization.
  */
 @InterfaceAudience.Private

http://git-wip-us.apache.org/repos/asf/hadoop/blob/fac9f91b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/HostFileManager.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/HostFileManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/HostFileManager.java
index b7bf674..4ead0ba 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/HostFileManager.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/HostFileManager.java
@@ -33,17 +33,16 @@ import java.util.HashSet;
 
 /**
  * This class manages the include and exclude files for HDFS.
- * <p/>
+ * <p>
  * These files control which DataNodes the NameNode expects to see in the
  * cluster.  Loosely speaking, the include file, if it exists and is not
  * empty, is a list of everything we expect to see.  The exclude file is
  * a list of everything we want to ignore if we do see it.
- * <p/>
+ * <p>
  * Entries may or may not specify a port.  If they don't, we consider
  * them to apply to every DataNode on that host. The code canonicalizes the
  * entries into IP addresses.
- * <p/>
- * <p/>
+ * <p>
  * The code ignores all entries that the DNS fails to resolve their IP
  * addresses. This is okay because by default the NN rejects the registrations
  * of DNs when it fails to do a forward and reverse lookup. Note that DNS

http://git-wip-us.apache.org/repos/asf/hadoop/blob/fac9f91b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/HostSet.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/HostSet.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/HostSet.java
index 958557b..cf7cfac 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/HostSet.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/HostSet.java
@@ -35,9 +35,9 @@ import java.util.Map;
 
 /**
  * The HostSet allows efficient queries on matching wildcard addresses.
- * <p/>
+ * <p>
  * For InetSocketAddress A and B with the same host address,
- * we define a partial order between A and B, A <= B iff A.getPort() == B
+ * we define a partial order between A and B, A &lt;= B iff A.getPort() == B
  * .getPort() || B.getPort() == 0.
  */
 public class HostSet implements Iterable<InetSocketAddress> {
@@ -46,7 +46,7 @@ public class HostSet implements Iterable<InetSocketAddress> {
 
   /**
    * The function that checks whether there exists an entry foo in the set
-   * so that foo <= addr.
+   * so that foo &lt;= addr.
    */
   boolean matchedBy(InetSocketAddress addr) {
     Collection<Integer> ports = addrs.get(addr.getAddress());
@@ -56,7 +56,7 @@ public class HostSet implements Iterable<InetSocketAddress> {
 
   /**
    * The function that checks whether there exists an entry foo in the set
-   * so that addr <= foo.
+   * so that addr &lt;= foo.
    */
   boolean match(InetSocketAddress addr) {
     int port = addr.getPort();
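
A small self-contained sketch of the partial order described above, where port
0 acts as a wildcard; it is not the HostSet implementation itself and the
addresses are arbitrary examples.

    import java.net.InetSocketAddress;

    public class HostOrderSketch {
      // A <= B iff same host and (A.getPort() == B.getPort() || B.getPort() == 0).
      static boolean lessOrEqual(InetSocketAddress a, InetSocketAddress b) {
        return a.getAddress().equals(b.getAddress())
            && (a.getPort() == b.getPort() || b.getPort() == 0);
      }

      public static void main(String[] args) {
        InetSocketAddress addr = new InetSocketAddress("10.0.0.1", 9866);
        InetSocketAddress wildcard = new InetSocketAddress("10.0.0.1", 0);
        System.out.println(lessOrEqual(addr, wildcard)); // true
        System.out.println(lessOrEqual(wildcard, addr)); // false
      }
    }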

http://git-wip-us.apache.org/repos/asf/hadoop/blob/fac9f91b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/SlowPeerTracker.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/SlowPeerTracker.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/SlowPeerTracker.java
index 22983ea..03a6918 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/SlowPeerTracker.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/SlowPeerTracker.java
@@ -29,6 +29,7 @@ import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
+import org.apache.hadoop.hdfs.server.protocol.SlowPeerReports;
 import org.apache.hadoop.util.Timer;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
@@ -82,7 +83,7 @@ public class SlowPeerTracker {
 
   /**
    * Information about peers that have reported a node as being slow.
-   * Each outer map entry is a map of (DatanodeId) -> (timestamp),
+   * Each outer map entry is a map of (DatanodeId) {@literal ->} (timestamp),
    * mapping reporting nodes to the timestamp of the last report from
    * that node.
    *
@@ -146,7 +147,7 @@ public class SlowPeerTracker {
   /**
    * Retrieve all reports for all nodes. Stale reports are excluded.
    *
-   * @return map from SlowNodeId -> (set of nodes reporting peers).
+   * @return map from SlowNodeId {@literal ->} (set of nodes reporting peers).
    */
   public Map<String, SortedSet<String>> getReportsForAllDataNodes() {
     if (allReports.isEmpty()) {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/fac9f91b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockPoolSliceStorage.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockPoolSliceStorage.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockPoolSliceStorage.java
index 4fc47d8..539baf1 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockPoolSliceStorage.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockPoolSliceStorage.java
@@ -56,9 +56,9 @@ import com.google.common.collect.Lists;
  * block pool id, on this DataNode.
  * 
  * This class supports the following functionality:
- * <ol>
+ * <ul>
  * <li> Formatting a new block pool storage</li>
- * <li> Recovering a storage state to a consistent state (if possible></li>
+ * <li> Recovering a storage state to a consistent state (if possible)</li>
  * <li> Taking a snapshot of the block pool during upgrade</li>
  * <li> Rolling back a block pool to a previous snapshot</li>
  * <li> Finalizing block storage by deletion of a snapshot</li>
@@ -139,11 +139,12 @@ public class BlockPoolSliceStorage extends Storage {
 
   /**
    * Load one storage directory. Recover from previous transitions if required.
-   *
-   * @param nsInfo namespace information
-   * @param dataDir the root path of the storage directory
-   * @param startOpt startup option
-   * @return the StorageDirectory successfully loaded.
+   * @param nsInfo  namespace information
+   * @param location  the root path of the storage directory
+   * @param startOpt  startup option
+   * @param callables list of callable storage directory
+   * @param conf configuration
+   * @return
+   * @return the StorageDirectory successfully loaded.
    * @throws IOException
    */
   private StorageDirectory loadStorageDirectory(NamespaceInfo nsInfo,
@@ -205,8 +206,10 @@ public class BlockPoolSliceStorage extends Storage {
    * data volume.
    *
    * @param nsInfo namespace information
-   * @param dataDirs storage directories of block pool
+   * @param location storage directories of block pool
    * @param startOpt startup option
+   * @param callables list of callable storage directory
+   * @param conf configuration
    * @return an array of loaded block pool directories.
    * @throws IOException on error
    */
@@ -240,8 +243,10 @@ public class BlockPoolSliceStorage extends Storage {
    * data volume.
    *
    * @param nsInfo namespace information
-   * @param dataDirs storage directories of block pool
+   * @param location storage directories of block pool
    * @param startOpt startup option
+   * @param callables list of callable storage directory
+   * @param conf configuration
    * @throws IOException on error
    */
   List<StorageDirectory> recoverTransitionRead(NamespaceInfo nsInfo,
@@ -348,13 +353,18 @@ public class BlockPoolSliceStorage extends Storage {
    * Analyze whether a transition of the BP state is required and
    * perform it if necessary.
    * <br>
-   * Rollback if previousLV >= LAYOUT_VERSION && prevCTime <= namenode.cTime.
-   * Upgrade if this.LV > LAYOUT_VERSION || this.cTime < namenode.cTime Regular
-   * startup if this.LV = LAYOUT_VERSION && this.cTime = namenode.cTime
+   * Rollback if:
+   * previousLV &gt;= LAYOUT_VERSION && prevCTime &lt;= namenode.cTime.
+   * Upgrade if:
+   * this.LV &gt; LAYOUT_VERSION || this.cTime &lt; namenode.cTime
+   * Regular startup if:
+   * this.LV = LAYOUT_VERSION && this.cTime = namenode.cTime
    * 
-   * @param sd storage directory <SD>/current/<bpid>
+   * @param sd storage directory {@literal <SD>/current/<bpid>}
    * @param nsInfo namespace info
    * @param startOpt startup option
+   * @param callables list of callable storage directory
+   * @param conf configuration
    * @return true if the new properties has been written.
    */
   private boolean doTransition(StorageDirectory sd, NamespaceInfo nsInfo,
@@ -416,20 +426,20 @@ public class BlockPoolSliceStorage extends Storage {
   }
 
   /**
-   * Upgrade to any release after 0.22 (0.22 included) release e.g. 0.22 => 0.23
+   * Upgrade to any release after 0.22 (0.22 included) release
+   * e.g. 0.22 =&gt; 0.23
    * Upgrade procedure is as follows:
    * <ol>
-   * <li>If <SD>/current/<bpid>/previous exists then delete it</li>
-   * <li>Rename <SD>/current/<bpid>/current to
-   * <SD>/current/bpid/current/previous.tmp</li>
-   * <li>Create new <SD>current/<bpid>/current directory</li>
-   * <ol>
+   * <li>If {@literal <SD>/current/<bpid>/previous} exists then delete it</li>
+   * <li>Rename {@literal <SD>/current/<bpid>/current} to
+   * {@literal <SD>/current/bpid/current/previous.tmp}</li>
+   * <li>Create new {@literal <SD>current/<bpid>/current} directory</li>
    * <li>Hard links for block files are created from previous.tmp to current</li>
    * <li>Save new version file in current directory</li>
+   * <li>Rename previous.tmp to previous</li>
    * </ol>
-   * <li>Rename previous.tmp to previous</li> </ol>
    * 
-   * @param bpSd storage directory <SD>/current/<bpid>
+   * @param bpSd storage directory {@literal <SD>/current/<bpid>}
    * @param nsInfo Namespace Info from the namenode
    * @throws IOException on error
    */
@@ -777,12 +787,12 @@ public class BlockPoolSliceStorage extends Storage {
   }
 
   /**
-   * Get a target subdirectory under current/ for a given block file that is being
-   * restored from trash.
+   * Get a target subdirectory under current/ for a given block file that is
+   * being restored from trash.
    *
    * The subdirectory structure under trash/ mirrors that under current/ to keep
    * implicit memory of where the files are to be restored.
-   *
+   * @param blockFile  block file that is being restored from trash.
    * @return the target directory to restore a previously deleted block file.
    */
   @VisibleForTesting
@@ -847,6 +857,7 @@ public class BlockPoolSliceStorage extends Storage {
   /**
    * Create a rolling upgrade marker file for each BP storage root, if it
    * does not exist already.
+   * @param dnStorageDirs
    */
   public void setRollingUpgradeMarkers(List<StorageDirectory> dnStorageDirs)
       throws IOException {
@@ -872,6 +883,7 @@ public class BlockPoolSliceStorage extends Storage {
    * Check whether the rolling upgrade marker file exists for each BP storage
    * root. If it does exist, then the marker file is cleared and more
    * importantly the layout upgrade is finalized.
+   * @param dnStorageDirs
    */
   public void clearRollingUpgradeMarkers(List<StorageDirectory> dnStorageDirs)
       throws IOException {
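
A rough sketch of the rollback/upgrade/regular-startup decision table quoted in
the javadoc above; it ignores startup options and the sign convention of HDFS
layout versions, so it is only an illustration, not the real doTransition()
logic.

    public class TransitionSketch {
      enum Action { ROLLBACK, UPGRADE, REGULAR_STARTUP }

      static Action decide(int prevLV, long prevCTime,
                           int thisLV, long thisCTime,
                           int layoutVersion, long namenodeCTime) {
        if (prevLV >= layoutVersion && prevCTime <= namenodeCTime) {
          return Action.ROLLBACK;
        }
        if (thisLV > layoutVersion || thisCTime < namenodeCTime) {
          return Action.UPGRADE;
        }
        return Action.REGULAR_STARTUP;
      }
    }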

http://git-wip-us.apache.org/repos/asf/hadoop/blob/fac9f91b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockRecoveryWorker.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockRecoveryWorker.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockRecoveryWorker.java
index 34f6c33..fe0c7f7 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockRecoveryWorker.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockRecoveryWorker.java
@@ -338,19 +338,24 @@ public class BlockRecoveryWorker {
 
   /**
    * blk_0  blk_1  blk_2  blk_3  blk_4  blk_5  blk_6  blk_7  blk_8
-   *  64k    64k    64k    64k    64k    64k    64k    64k    64k   <-- stripe_0
+   *  64k    64k    64k    64k    64k    64k    64k    64k    64k   &lt;--
+   *  stripe_0
    *  64k    64k    64k    64k    64k    64k    64k    64k    64k
-   *  64k    64k    64k    64k    64k    64k    64k    61k    <-- startStripeIdx
+   *  64k    64k    64k    64k    64k    64k    64k    61k    &lt;--
+   *  startStripeIdx
    *  64k    64k    64k    64k    64k    64k    64k
    *  64k    64k    64k    64k    64k    64k    59k
    *  64k    64k    64k    64k    64k    64k
-   *  64k    64k    64k    64k    64k    64k                <-- last full stripe
-   *  64k    64k    13k    64k    55k     3k              <-- target last stripe
+   *  64k    64k    64k    64k    64k    64k                &lt;--
+   *  last full stripe
+   *  64k    64k    13k    64k    55k     3k              &lt;--
+   *  target last stripe
    *  64k    64k           64k     1k
    *  64k    64k           58k
    *  64k    64k
    *  64k    19k
-   *  64k                                               <-- total visible stripe
+   *  64k                                               &lt;--
+   *  total visible stripe
    *
    *  Due to different speed of streamers, the internal blocks in a block group
    *  could have different lengths when the block group isn't ended normally.

http://git-wip-us.apache.org/repos/asf/hadoop/blob/fac9f91b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockScanner.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockScanner.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockScanner.java
index 8081895..6b1b96f 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockScanner.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockScanner.java
@@ -189,7 +189,7 @@ public class BlockScanner {
   }
 
   /**
-   * Returns true if the block scanner is enabled.<p/>
+   * Returns true if the block scanner is enabled.
    *
    * If the block scanner is disabled, no volume scanners will be created, and
    * no threads will start.
@@ -234,7 +234,7 @@ public class BlockScanner {
   }
 
   /**
-   * Stops and removes a volume scanner.<p/>
+   * Stops and removes a volume scanner.
    *
    * This function will block until the volume scanner has stopped.
    *
@@ -260,7 +260,7 @@ public class BlockScanner {
   }
 
   /**
-   * Stops and removes all volume scanners.<p/>
+   * Stops and removes all volume scanners.
    *
    * This function will block until all the volume scanners have stopped.
    */

http://git-wip-us.apache.org/repos/asf/hadoop/blob/fac9f91b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
index 40f80a9..99c0a87 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
@@ -242,7 +242,7 @@ import org.slf4j.LoggerFactory;
  * DataNodes.
  *
  * The DataNode maintains just one critical table:
- *   block-> stream of bytes (of BLOCK_SIZE or less)
+ *   block{@literal ->} stream of bytes (of BLOCK_SIZE or less)
  *
  * This info is stored on a local disk.  The DataNode
  * reports the table's contents to the NameNode upon startup
@@ -527,7 +527,7 @@ public class DataNode extends ReconfigurableBase
   }
 
   /**
-   * {@inheritdoc}.
+   * {@inheritDoc }.
    */
   @Override
   public String reconfigurePropertyImpl(String property, String newVal)
@@ -2713,7 +2713,8 @@ public class DataNode extends ReconfigurableBase
     return locations;
   }
 
-  /** Instantiate & Start a single datanode daemon and wait for it to finish.
+  /** Instantiate &amp; Start a single datanode daemon and wait for it to
+   * finish.
    *  If this thread is specifically interrupted, it will stop waiting.
    */
   @VisibleForTesting
@@ -2722,7 +2723,8 @@ public class DataNode extends ReconfigurableBase
     return createDataNode(args, conf, null);
   }
   
-  /** Instantiate & Start a single datanode daemon and wait for it to finish.
+  /** Instantiate &amp; Start a single datanode daemon and wait for it to
+   * finish.
    *  If this thread is specifically interrupted, it will stop waiting.
    */
   @VisibleForTesting

http://git-wip-us.apache.org/repos/asf/hadoop/blob/fac9f91b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataStorage.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataStorage.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataStorage.java
index a85ae32..a803c0b 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataStorage.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataStorage.java
@@ -215,7 +215,9 @@ public class DataStorage extends Storage {
 
   /**
    * VolumeBuilder holds the metadata (e.g., the storage directories) of the
-   * prepared volume returned from {@link prepareVolume()}. Calling {@link build()}
+   * prepared volume returned from
+   * {@link #prepareVolume(DataNode, StorageLocation, List)}.
+   * Calling {@link VolumeBuilder#build()}
    * to add the metadata to {@link DataStorage} so that this prepared volume can
    * be active.
    */

http://git-wip-us.apache.org/repos/asf/hadoop/blob/fac9f91b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DirectoryScanner.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DirectoryScanner.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DirectoryScanner.java
index 7ae9e45..445e021 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DirectoryScanner.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DirectoryScanner.java
@@ -280,7 +280,6 @@ public class DirectoryScanner implements Runnable {
   /**
    * Create a new directory scanner, but don't cycle it running yet.
    *
-   * @param datanode the parent datanode
    * @param dataset the dataset to scan
    * @param conf the Configuration object
    */

http://git-wip-us.apache.org/repos/asf/hadoop/blob/fac9f91b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/FileIoProvider.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/FileIoProvider.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/FileIoProvider.java
index b8e08d0..6349062 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/FileIoProvider.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/FileIoProvider.java
@@ -178,7 +178,6 @@ public class FileIoProvider {
    * Call sync_file_range on the given file descriptor.
    *
    * @param  volume target volume. null if unavailable.
-   * @throws IOException
    */
   public void syncFileRange(
       @Nullable FsVolumeSpi volume, FileDescriptor outFd,
@@ -198,7 +197,6 @@ public class FileIoProvider {
    * Call posix_fadvise on the given file descriptor.
    *
    * @param  volume target volume. null if unavailable.
-   * @throws IOException
    */
   public void posixFadvise(
       @Nullable FsVolumeSpi volume, String identifier, FileDescriptor outFd,
@@ -394,7 +392,6 @@ public class FileIoProvider {
    * @param volume  target volume. null if unavailable.
    * @param fd  File descriptor object.
    * @return  FileOutputStream to the given file object.
-   * @throws  FileNotFoundException
    */
   public FileOutputStream getFileOutputStream(
       @Nullable FsVolumeSpi volume, FileDescriptor fd) {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/fac9f91b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/VolumeScanner.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/VolumeScanner.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/VolumeScanner.java
index 181ef80..e0afb9b 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/VolumeScanner.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/VolumeScanner.java
@@ -45,8 +45,8 @@ import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
 /**
- * VolumeScanner scans a single volume.  Each VolumeScanner has its own thread.<p/>
- * They are all managed by the DataNode's BlockScanner.
+ * VolumeScanner scans a single volume.  Each VolumeScanner has its own thread.
+ * <p>They are all managed by the DataNode's BlockScanner.
  */
 public class VolumeScanner extends Thread {
   public static final Logger LOG =

http://git-wip-us.apache.org/repos/asf/hadoop/blob/fac9f91b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/checker/AbstractFuture.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/checker/AbstractFuture.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/checker/AbstractFuture.java
index 2719f71..ec2b656 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/checker/AbstractFuture.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/checker/AbstractFuture.java
@@ -24,7 +24,10 @@ import com.google.common.annotations.Beta;
 import com.google.common.annotations.GwtCompatible;
 import com.google.common.base.Preconditions;
 import static com.google.common.base.Preconditions.checkNotNull;
+import com.google.common.util.concurrent.Futures;
+import com.google.common.util.concurrent.ListeningExecutorService;
 import com.google.common.util.concurrent.ListenableFuture;
+import com.google.common.util.concurrent.SettableFuture;
 import com.google.common.util.concurrent.Uninterruptibles;
 import static java.util.concurrent.atomic.AtomicReferenceFieldUpdater
     .newUpdater;
@@ -52,9 +55,7 @@ import java.util.logging.Logger;
  * {@link ListeningExecutorService}, and deriving a {@code Future} from an
  * existing one, typically using methods like {@link Futures#transform
  * (ListenableFuture, com.google.common.base.Function) Futures.transform}
- * and {@link Futures#catching(ListenableFuture, Class,
- * com.google.common.base.Function, java.util.concurrent.Executor)
- * Futures.catching}.
+ * and its overloaded versions.
  * <p>
  * <p>This class implements all methods in {@code ListenableFuture}.
  * Subclasses should provide a way to set the result of the computation
@@ -1265,12 +1266,6 @@ public abstract class AbstractFuture<V> implements ListenableFuture<V> {
    *       r.run();
    *     }
    *   }}</pre>
-   * <p>
-   * <p>This should be preferred to {@link #newDirectExecutorService()}
-   * because implementing the {@link ExecutorService} subinterface
-   * necessitates significant performance overhead.
-   *
-   * @since 18.0
    */
   public static Executor directExecutor() {
     return DirectExecutor.INSTANCE;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/fac9f91b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/FsDatasetSpi.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/FsDatasetSpi.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/FsDatasetSpi.java
index f4bf839..bd9ed7f 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/FsDatasetSpi.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/FsDatasetSpi.java
@@ -192,7 +192,7 @@ public interface FsDatasetSpi<V extends FsVolumeSpi> extends FSDatasetMBean {
   FsVolumeReferences getFsVolumeReferences();
 
   /**
-   * Add a new volume to the FsDataset.<p/>
+   * Add a new volume to the FsDataset.
    *
    * If the FSDataset supports block scanning, this function registers
    * the new volume with the block scanner.
@@ -226,7 +226,7 @@ public interface FsDatasetSpi<V extends FsVolumeSpi> extends FSDatasetMBean {
   /** @return the volume that contains a replica of the block. */
   V getVolume(ExtendedBlock b);
 
-  /** @return a volume information map (name => info). */
+  /** @return a volume information map (name {@literal =>} info). */
   Map<String, Object> getVolumeInfoMap();
 
   /**
@@ -273,7 +273,8 @@ public interface FsDatasetSpi<V extends FsVolumeSpi> extends FSDatasetMBean {
 
   /**
    * Get reference to the replica meta info in the replicasMap. 
-   * To be called from methods that are synchronized on {@link FSDataset}
+   * To be called from methods that are synchronized on
+   * implementations of {@link FsDatasetSpi}
    * @return replica from the replicas map
    */
   @Deprecated
@@ -394,7 +395,7 @@ public interface FsDatasetSpi<V extends FsVolumeSpi> extends FSDatasetMBean {
    * Finalizes the block previously opened for writing using writeToBlock.
    * The block size is what is in the parameter b and it must match the amount
    *  of data written
-   * @param block Block to be finalized
+   * @param b Block to be finalized
    * @param fsyncDir whether to sync the directory changes to durable device.
    * @throws IOException
    * @throws ReplicaNotFoundException if the replica can not be found when the
@@ -488,14 +489,13 @@ public interface FsDatasetSpi<V extends FsVolumeSpi> extends FSDatasetMBean {
   /**
    * Determine if the specified block is cached.
    * @param bpid Block pool id
-   * @param blockIds - block id
+   * @param blockId - block id
    * @return true if the block is cached
    */
   boolean isCached(String bpid, long blockId);
 
     /**
      * Check if all the data directories are healthy
-     * @return A set of unhealthy data directories.
      * @param failedVolumes
      */
   void handleVolumeFailures(Set<FsVolumeSpi> failedVolumes);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/fac9f91b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/FsVolumeSpi.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/FsVolumeSpi.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/FsVolumeSpi.java
index 7329ba3..be978d7 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/FsVolumeSpi.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/FsVolumeSpi.java
@@ -98,17 +98,17 @@ public interface FsVolumeSpi
 
   /**
    * BlockIterator will return ExtendedBlock entries from a block pool in
-   * this volume.  The entries will be returned in sorted order.<p/>
+   * this volume.  The entries will be returned in sorted order.<p>
    *
    * BlockIterator objects themselves do not always have internal
    * synchronization, so they can only safely be used by a single thread at a
-   * time.<p/>
+   * time.<p>
    *
    * Closing the iterator does not save it.  You must call save to save it.
    */
   interface BlockIterator extends Closeable {
     /**
-     * Get the next block.<p/>
+     * Get the next block.<p>
      *
      * Note that this block may be removed in between the time we list it,
      * and the time the caller tries to use it, or it may represent a stale
@@ -146,7 +146,7 @@ public interface FsVolumeSpi
     void save() throws IOException;
 
     /**
-     * Set the maximum staleness of entries that we will return.<p/>
+     * Set the maximum staleness of entries that we will return.<p>
      *
      * A maximum staleness of 0 means we will never return stale entries; a
      * larger value will allow us to reduce resource consumption in exchange
@@ -211,12 +211,12 @@ public interface FsVolumeSpi
    * Because millions of these structures may be created, we try to save
    * memory here.  So instead of storing full paths, we store path suffixes.
    * The block file, if it exists, will have a path like this:
-   * <volume_base_path>/<block_path>
+   * {@literal <volume_base_path>/<block_path>}
    * So we don't need to store the volume path, since we already know what the
    * volume is.
    *
    * The metadata file, if it exists, will have a path like this:
-   * <volume_base_path>/<block_path>_<genstamp>.meta
+   * {@literal <volume_base_path>/<block_path>_<genstamp>.meta}
    * So if we have a block file, there isn't any need to store the block path
    * again.
    *
@@ -439,6 +439,7 @@ public interface FsVolumeSpi
    * @param bpid block pool id to scan
    * @param report the list onto which blocks reports are placed
    * @param reportCompiler
+   * @throws InterruptedException
    * @throws IOException
    */
   void compileReport(String bpid,

http://git-wip-us.apache.org/repos/asf/hadoop/blob/fac9f91b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/metrics/OutlierDetector.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/metrics/OutlierDetector.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/metrics/OutlierDetector.java
index 771a17b..401fc8b 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/metrics/OutlierDetector.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/metrics/OutlierDetector.java
@@ -94,7 +94,8 @@ public class OutlierDetector {
 
   /**
    * Return a set of nodes/ disks whose latency is much higher than
-   * their counterparts. The input is a map of (resource -> aggregate latency)
+   * their counterparts. The input is a map of (resource {@literal ->} aggregate
+   * latency)
    * entries.
    *
    * The aggregate may be an arithmetic mean or a percentile e.g.
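
Illustrative only: one simple way to flag resources that are "much higher than
their counterparts" from a (resource -> aggregate latency) map, using a median
threshold. This is not the actual OutlierDetector algorithm.

    import java.util.ArrayList;
    import java.util.Collections;
    import java.util.HashMap;
    import java.util.List;
    import java.util.Map;

    public class OutlierSketch {
      // Flag every resource whose latency exceeds `multiplier` times the
      // median of all reported latencies.
      static Map<String, Double> outliers(Map<String, Double> latencies,
                                          double multiplier) {
        Map<String, Double> slow = new HashMap<>();
        if (latencies.isEmpty()) {
          return slow;
        }
        List<Double> sorted = new ArrayList<>(latencies.values());
        Collections.sort(sorted);
        double median = sorted.get(sorted.size() / 2);
        latencies.forEach((resource, latency) -> {
          if (latency > multiplier * median) {
            slow.put(resource, latency);
          }
        });
        return slow;
      }
    }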

http://git-wip-us.apache.org/repos/asf/hadoop/blob/fac9f91b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/DiskBalancerException.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/DiskBalancerException.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/DiskBalancerException.java
index 642cf21..7824f95 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/DiskBalancerException.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/DiskBalancerException.java
@@ -60,7 +60,6 @@ public class DiskBalancerException extends IOException {
   /**
    * Constructs an {@code IOException} with the specified detail message and
    * cause.
-   * <p/>
    * <p> Note that the detail message associated with {@code cause} is
    * <i>not</i>
    * automatically incorporated into this exception's detail message.

http://git-wip-us.apache.org/repos/asf/hadoop/blob/fac9f91b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/datamodel/DiskBalancerCluster.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/datamodel/DiskBalancerCluster.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/datamodel/DiskBalancerCluster.java
index 8de19aa..1307983 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/datamodel/DiskBalancerCluster.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/datamodel/DiskBalancerCluster.java
@@ -50,21 +50,20 @@ import java.util.concurrent.Future;
 
 /**
  * DiskBalancerCluster represents the nodes that we are working against.
- * <p/>
+ * <p>
  * Please Note :
- * <p/>
  * Semantics of inclusionList and exclusionLists.
- * <p/>
+ * <p>
  * If a non-empty inclusionList is specified then the diskBalancer assumes that
  * the user is only interested in processing that list of nodes. This node list
  * is checked against the exclusionList and only the nodes in inclusionList but
  * not in exclusionList is processed.
- * <p/>
+ * <p>
  * if inclusionList is empty, then we assume that all live nodes in the nodes is
  * to be processed by diskBalancer. In that case diskBalancer will avoid any
  * nodes specified in the exclusionList but will process all nodes in the
  * cluster.
- * <p/>
+ * <p>
  * In other words, an empty inclusionList is means all the nodes otherwise
  * only a given list is processed and ExclusionList is always honored.
  */
@@ -291,7 +290,7 @@ public class DiskBalancerCluster {
   /**
    * Compute plan takes a node and constructs a planner that creates a plan that
    * we would like to follow.
-   * <p/>
+   * <p>
    * This function creates a thread pool and executes a planner on each node
    * that we are supposed to plan for. Each of these planners return a NodePlan
    * that we can persist or schedule for execution with a diskBalancer
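
A compact sketch of the inclusion/exclusion semantics spelled out in the class
javadoc above; the set names are illustrative.

    import java.util.HashSet;
    import java.util.Set;

    public class NodeSelectionSketch {
      // If the inclusion list is non-empty, only those nodes are considered;
      // in either case the exclusion list is always honored.
      static Set<String> nodesToProcess(Set<String> allLiveNodes,
                                        Set<String> inclusionList,
                                        Set<String> exclusionList) {
        Set<String> base = inclusionList.isEmpty() ? allLiveNodes : inclusionList;
        Set<String> result = new HashSet<>(base);
        result.removeAll(exclusionList);
        return result;
      }
    }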

http://git-wip-us.apache.org/repos/asf/hadoop/blob/fac9f91b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/datamodel/DiskBalancerDataNode.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/datamodel/DiskBalancerDataNode.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/datamodel/DiskBalancerDataNode.java
index a200f4d..6cf244b 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/datamodel/DiskBalancerDataNode.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/datamodel/DiskBalancerDataNode.java
@@ -186,7 +186,7 @@ public class DiskBalancerDataNode implements Comparable<DiskBalancerDataNode> {
   }
 
   /**
-   * returns NodeDataDensity Metric.
+   * Returns NodeDataDensity Metric.
    *
    * @return float
    */
@@ -195,8 +195,8 @@ public class DiskBalancerDataNode implements Comparable<DiskBalancerDataNode> {
   }
 
   /**
-   * computes nodes data density.
-   * <p/>
+   * Computes nodes data density.
+   *
    * This metric allows us to compare different  nodes and how well the data is
    * spread across a set of volumes inside the node.
    */
@@ -231,8 +231,8 @@ public class DiskBalancerDataNode implements Comparable<DiskBalancerDataNode> {
 
   /**
    * Adds a volume to the DataNode.
-   * <p/>
-   * it is assumed that we have one thread per node hence this call is not
+   *
+   * It is assumed that we have one thread per node hence this call is not
    * synchronised neither is the map is protected.
    *
    * @param volume - volume

http://git-wip-us.apache.org/repos/asf/hadoop/blob/fac9f91b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/planner/GreedyPlanner.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/planner/GreedyPlanner.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/planner/GreedyPlanner.java
index fb83eeb..568c1e6 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/planner/GreedyPlanner.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/planner/GreedyPlanner.java
@@ -34,7 +34,7 @@ import java.util.TreeSet;
 /**
  * Greedy Planner is a simple planner that computes the largest possible move at
  * any point of time given a volumeSet.
- * <p/>
+ * <p>
  * This is done by choosing the disks with largest  amount of data above and
  * below the idealStorage and then a move is scheduled between them.
  */

http://git-wip-us.apache.org/repos/asf/hadoop/blob/fac9f91b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/AclStorage.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/AclStorage.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/AclStorage.java
index 10ecc23..fa268c7 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/AclStorage.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/AclStorage.java
@@ -156,7 +156,7 @@ public final class AclStorage {
    *
    * @param inode INode to read
    * @param snapshotId int ID of snapshot to read
-   * @return List<AclEntry> containing extended inode ACL entries
+   * @return {@literal List<AclEntry>} containing extended inode ACL entries
    */
   public static List<AclEntry> readINodeAcl(INode inode, int snapshotId) {
     AclFeature f = inode.getAclFeature(snapshotId);
@@ -167,7 +167,7 @@ public final class AclStorage {
    * Reads the existing extended ACL entries of an INodeAttribute object.
    *
    * @param inodeAttr INode to read
-   * @return List<AclEntry> containing extended inode ACL entries
+   * @return {@code List<AclEntry>} containing extended inode ACL entries
    */
   public static List<AclEntry> readINodeAcl(INodeAttributes inodeAttr) {
     AclFeature f = inodeAttr.getAclFeature();
@@ -175,7 +175,7 @@ public final class AclStorage {
   }
 
   /**
-   * Build list of AclEntries from the AclFeature
+   * Build list of AclEntries from the {@link AclFeature}
    * @param aclFeature AclFeature
    * @return List of entries
    */
@@ -204,7 +204,7 @@ public final class AclStorage {
    * ACL modification APIs, which always apply a delta on top of current state.
    *
    * @param inode INode to read
-   * @return List<AclEntry> containing all logical inode ACL entries
+   * @return {@code List<AclEntry>} containing all logical inode ACL entries
    */
   public static List<AclEntry> readINodeLogicalAcl(INode inode) {
     FsPermission perm = inode.getFsPermission();
@@ -262,7 +262,7 @@ public final class AclStorage {
    * {@link AclFeature}.
    *
    * @param inode INode to update
-   * @param newAcl List<AclEntry> containing new ACL entries
+   * @param newAcl {@code List<AclEntry>} containing new ACL entries
    * @param snapshotId int latest snapshot ID of inode
    * @throws AclException if the ACL is invalid for the given inode
    * @throws QuotaExceededException if quota limit is exceeded
@@ -312,8 +312,8 @@ public final class AclStorage {
   /**
    * Creates an AclFeature from the given ACL entries.
    *
-   * @param accessEntries List<AclEntry> access ACL entries
-   * @param defaultEntries List<AclEntry> default ACL entries
+   * @param accessEntries {@code List<AclEntry>} access ACL entries
+   * @param defaultEntries {@code List<AclEntry>} default ACL entries
    * @return AclFeature containing the required ACL entries
    */
   private static AclFeature createAclFeature(List<AclEntry> accessEntries,
@@ -347,7 +347,7 @@ public final class AclStorage {
    * POSIX ACLs model, which presents the mask as the permissions of the group
    * class.
    *
-   * @param accessEntries List<AclEntry> access ACL entries
+   * @param accessEntries {@code List<AclEntry>} access ACL entries
    * @param existingPerm FsPermission existing permissions
    * @return FsPermission new permissions
    */
@@ -365,7 +365,7 @@ public final class AclStorage {
    * group and other permissions are in order.  Also preserve sticky bit and
    * toggle ACL bit off.
    *
-   * @param accessEntries List<AclEntry> access ACL entries
+   * @param accessEntries {@code List<AclEntry>} access ACL entries
    * @param existingPerm FsPermission existing permissions
    * @return FsPermission new permissions
    */

http://git-wip-us.apache.org/repos/asf/hadoop/blob/fac9f91b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/EncryptionZoneManager.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/EncryptionZoneManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/EncryptionZoneManager.java
index 5604a21..8fa9578 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/EncryptionZoneManager.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/EncryptionZoneManager.java
@@ -67,7 +67,7 @@ import static org.apache.hadoop.hdfs.server.common.HdfsServerConstants
 
 /**
  * Manages the list of encryption zones in the filesystem.
- * <p/>
+ * <p>
  * The EncryptionZoneManager has its own lock, but relies on the FSDirectory
  * lock being held for many operations. The FSDirectory lock should not be
  * taken if the manager lock is already held.
@@ -294,7 +294,7 @@ public class EncryptionZoneManager {
 
   /**
    * Add a new encryption zone.
-   * <p/>
+   * <p>
    * Called while holding the FSDirectory lock.
    *
    * @param inodeId of the encryption zone
@@ -308,7 +308,7 @@ public class EncryptionZoneManager {
 
   /**
    * Add a new encryption zone.
-   * <p/>
+   * <p>
    * Does not assume that the FSDirectory lock is held.
    *
    * @param inodeId of the encryption zone
@@ -326,7 +326,7 @@ public class EncryptionZoneManager {
 
   /**
    * Remove an encryption zone.
-   * <p/>
+   * <p>
    * Called while holding the FSDirectory lock.
    */
   void removeEncryptionZone(Long inodeId) {
@@ -344,7 +344,7 @@ public class EncryptionZoneManager {
 
   /**
    * Returns true if an IIP is within an encryption zone.
-   * <p/>
+   * <p>
    * Called while holding the FSDirectory lock.
    */
   boolean isInAnEZ(INodesInPath iip) throws UnresolvedLinkException,
@@ -355,7 +355,7 @@ public class EncryptionZoneManager {
 
   /**
    * Returns the full path from an INode id.
-   * <p/>
+   * <p>
    * Called while holding the FSDirectory lock.
    */
   String getFullPathName(Long nodeId) {
@@ -370,7 +370,7 @@ public class EncryptionZoneManager {
   /**
    * Get the key name for an encryption zone. Returns null if <tt>iip</tt> is
    * not within an encryption zone.
-   * <p/>
+   * <p>
    * Called while holding the FSDirectory lock.
    */
   String getKeyName(final INodesInPath iip) throws IOException {
@@ -385,7 +385,7 @@ public class EncryptionZoneManager {
   /**
    * Looks up the EncryptionZoneInt for a path within an encryption zone.
    * Returns null if path is not within an EZ.
-   * <p/>
+   * <p>
    * Called while holding the FSDirectory lock.
    */
   private EncryptionZoneInt getEncryptionZoneForPath(INodesInPath iip)
@@ -434,7 +434,7 @@ public class EncryptionZoneManager {
    * Looks up the nearest ancestor EncryptionZoneInt that contains the given
    * path (excluding itself).
    * Returns null if path is not within an EZ, or the path is the root dir '/'
-   * <p/>
+   * <p>
    * Called while holding the FSDirectory lock.
    */
   private EncryptionZoneInt getParentEncryptionZoneForPath(INodesInPath iip)
@@ -467,7 +467,7 @@ public class EncryptionZoneManager {
   /**
    * Throws an exception if the provided path cannot be renamed into the
    * destination because of differing parent encryption zones.
-   * <p/>
+   * <p>
    * Called while holding the FSDirectory lock.
    *
    * @param srcIIP source IIP
@@ -529,7 +529,7 @@ public class EncryptionZoneManager {
 
   /**
    * Create a new encryption zone.
-   * <p/>
+   * <p>
    * Called while holding the FSDirectory lock.
    */
   XAttr createEncryptionZone(INodesInPath srcIIP, CipherSuite suite,
@@ -573,7 +573,7 @@ public class EncryptionZoneManager {
 
   /**
    * Cursor-based listing of encryption zones.
-   * <p/>
+   * <p>
    * Called while holding the FSDirectory lock.
    */
   BatchedListEntries<EncryptionZone> listEncryptionZones(long prevId)
@@ -621,6 +621,8 @@ public class EncryptionZoneManager {
    * @param zoneId
    * @param zonePath
    * @return true if path resolve to the id, false if not.
+   * @throws AccessControlException
+   * @throws ParentNotDirectoryException
    * @throws UnresolvedLinkException
    */
   private boolean pathResolvesToId(final long zoneId, final String zonePath)
@@ -645,6 +647,9 @@ public class EncryptionZoneManager {
   /**
    * Re-encrypts the given encryption zone path. If the given path is not the
    * root of an encryption zone, an exception is thrown.
+   * @param zoneIIP
+   * @param keyVersionName
+   * @throws IOException
    */
   List<XAttr> reencryptEncryptionZone(final INodesInPath zoneIIP,
       final String keyVersionName) throws IOException {
@@ -673,7 +678,9 @@ public class EncryptionZoneManager {
   /**
    * Cancels the currently-running re-encryption of the given encryption zone.
    * If the given path is not the root of an encryption zone,
-   * * an exception is thrown.
+   * an exception is thrown.
+   * @param zoneIIP
+   * @throws IOException
    */
   List<XAttr> cancelReencryptEncryptionZone(final INodesInPath zoneIIP)
       throws IOException {
@@ -693,8 +700,10 @@ public class EncryptionZoneManager {
 
   /**
    * Cursor-based listing of zone re-encryption status.
-   * <p/>
+   * <p>
    * Called while holding the FSDirectory lock.
+   * @param prevId
+   * @throws IOException
    */
   BatchedListEntries<ZoneReencryptionStatus> listReencryptionStatus(
       final long prevId) throws IOException {
@@ -735,6 +744,10 @@ public class EncryptionZoneManager {
 
   /**
    * Return whether an INode is an encryption zone root.
+   * @param inode
+   * @param name
+   * @return true when INode is an encryption zone root else false
+   * @throws FileNotFoundException
    */
   boolean isEncryptionZoneRoot(final INode inode, final String name)
       throws FileNotFoundException {
@@ -756,6 +769,7 @@ public class EncryptionZoneManager {
    * Return whether an INode is an encryption zone root.
    *
    * @param inode the zone inode
+   * @param name
    * @throws IOException if the inode is not a directory,
    *                     or is a directory but not the root of an EZ.
    */

http://git-wip-us.apache.org/repos/asf/hadoop/blob/fac9f91b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java
index 0140912..712a327 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java
@@ -634,12 +634,10 @@ public class FSDirectory implements Closeable {
    *            no permission checks.
    * @param src The path to resolve.
    * @param dirOp The {@link DirOp} that controls additional checks.
-   * @param resolveLink If false, only ancestor symlinks will be checked.  If
-   *         true, the last inode will also be checked.
    * @return if the path indicates an inode, return path after replacing up to
-   *         <inodeid> with the corresponding path of the inode, else the path
-   *         in {@code src} as is. If the path refers to a path in the "raw"
-   *         directory, return the non-raw pathname.
+   *        {@code <inodeid>} with the corresponding path of the inode, else
+   *        the path in {@code src} as is. If the path refers to a path in
+   *        the "raw" directory, return the non-raw pathname.
    * @throws FileNotFoundException
    * @throws AccessControlException
    * @throws ParentNotDirectoryException

http://git-wip-us.apache.org/repos/asf/hadoop/blob/fac9f91b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
index eda1164..d1904fa 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
@@ -341,10 +341,11 @@ import org.slf4j.LoggerFactory;
  *
  * This class and its contents keep:
  *
- * 1)  Valid fsname --> blocklist  (kept on disk, logged)
+ * 1)  Valid fsname {@literal -->} blocklist  (kept on disk, logged)
  * 2)  Set of all valid blocks (inverted #1)
- * 3)  block --> machinelist (kept in memory, rebuilt dynamically from reports)
- * 4)  machine --> blocklist (inverted #2)
+ * 3)  block {@literal -->} machinelist (kept in memory, rebuilt dynamically
+ *     from reports)
+ * 4)  machine {@literal -->} blocklist (inverted #2)
  * 5)  LRU cache of updated-heartbeat machines
  */
 @InterfaceAudience.Private
@@ -1732,11 +1733,12 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,
   }
 
   /**
-   * return a list of blocks & their locations on <code>datanode</code> whose
-   * total size is <code>size</code>
+   * return a list of blocks &amp; their locations on {@code datanode} whose
+   * total size is {@code size}
    *
    * @param datanode on which blocks are located
    * @param size total size of blocks
+   * @param minimumBlockSize
    */
   public BlocksWithLocations getBlocks(DatanodeID datanode, long size, long
       minimumBlockSize) throws IOException {
@@ -1753,6 +1755,7 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,
 
   /**
    * Dump all metadata into specified file
+   * @param filename
    */
   void metaSave(String filename) throws IOException {
     String operationName = "metaSave";
@@ -1884,6 +1887,8 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,
   /////////////////////////////////////////////////////////
   /**
    * Set permissions for an existing file.
+   * @param src
+   * @param permission
    * @throws IOException
    */
   void setPermission(String src, FsPermission permission) throws IOException {
@@ -1908,6 +1913,9 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,
 
   /**
    * Set owner for an existing file.
+   * @param src
+   * @param group
+   * @param username
    * @throws IOException
    */
   void setOwner(String src, String username, String group)
@@ -2188,6 +2196,7 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,
    * @param replication new replication
    * @return true if successful; 
    *         false if file does not exist or is a directory
+   * @throws  IOException
    */
   boolean setReplication(final String src, final short replication)
       throws IOException {
@@ -2219,6 +2228,7 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,
    *
    * @param src file/directory path
    * @param policyName storage policy name
+   * @throws  IOException
    */
   void setStoragePolicy(String src, String policyName) throws IOException {
     final String operationName = "setStoragePolicy";
@@ -2245,6 +2255,7 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,
    * Satisfy the storage policy for a file or a directory.
    *
    * @param src file/directory path
+   * @throws  IOException
    */
   void satisfyStoragePolicy(String src, boolean logRetryCache)
       throws IOException {
@@ -2295,6 +2306,7 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,
    * unset storage policy set for a given file or a directory.
    *
    * @param src file/directory path
+   * @throws  IOException
    */
   void unsetStoragePolicy(String src) throws IOException {
     final String operationName = "unsetStoragePolicy";
@@ -2321,6 +2333,7 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,
    * @param src
    *          file/directory path
    * @return storage policy object
+   * @throws  IOException
    */
   BlockStoragePolicy getStoragePolicy(String src) throws IOException {
     checkOperation(OperationCategory.READ);
@@ -2336,6 +2349,7 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,
 
   /**
    * @return All the existing block storage policies
+   * @throws  IOException
    */
   BlockStoragePolicy[] getStoragePolicies() throws IOException {
     checkOperation(OperationCategory.READ);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/fac9f91b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INode.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INode.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INode.java
index 2123f4e..03b1ca3 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INode.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INode.java
@@ -523,8 +523,8 @@ public abstract class INode implements INodeAttributes, Diff.Element<byte[]> {
    * 2. For a {@link WithName} node, since the node must be in a snapshot, we 
    * only count the quota usage for those nodes that still existed at the 
    * creation time of the snapshot associated with the {@link WithName} node.
-   * We do not count in the size of the diff list.  
-   * <pre>
+   * We do not count in the size of the diff list.
+   * </pre>
    *
    * @param bsps Block storage policy suite to calculate intended storage type usage
    * @param blockStoragePolicyId block storage policy id of the current INode

http://git-wip-us.apache.org/repos/asf/hadoop/blob/fac9f91b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeReference.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeReference.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeReference.java
index e4e14f7..8655bb3 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeReference.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeReference.java
@@ -669,10 +669,10 @@ public abstract class INodeReference extends INode {
     
     /**
      * {@inheritDoc}
-     * <br/>
+     * <br>
      * To destroy a DstReference node, we first remove its link with the 
-     * referred node. If the reference number of the referred node is <= 0, we 
-     * destroy the subtree of the referred node. Otherwise, we clean the 
+     * referred node. If the reference number of the referred node is &lt;= 0,
+     * we destroy the subtree of the referred node. Otherwise, we clean the
      * referred node's subtree and delete everything created after the last 
      * rename operation, i.e., everything outside of the scope of the prior 
      * WithName nodes.

http://git-wip-us.apache.org/repos/asf/hadoop/blob/fac9f91b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodesInPath.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodesInPath.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodesInPath.java
index 04fb50e..f072220 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodesInPath.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodesInPath.java
@@ -329,8 +329,8 @@ public class INodesInPath {
   }
 
   /**
-   * @return the i-th inode if i >= 0;
-   *         otherwise, i < 0, return the (length + i)-th inode.
+   * @return the i-th inode if i {@literal >=} 0;
+   *         otherwise, i {@literal <} 0, return the (length + i)-th inode.
    */
   public INode getINode(int i) {
     return inodes[(i < 0) ? inodes.length + i : i];

http://git-wip-us.apache.org/repos/asf/hadoop/blob/fac9f91b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/JournalManager.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/JournalManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/JournalManager.java
index d6d2094..7331676 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/JournalManager.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/JournalManager.java
@@ -112,7 +112,7 @@ public interface JournalManager extends Closeable, FormatConfirmable,
   void doRollback() throws IOException;
 
   /**
-   * Discard the segments whose first txid is >= the given txid.
+   * Discard the segments whose first txid is {@literal >=} the given txid.
    * @param startTxId The given txid should be right at the segment boundary, 
    * i.e., it should be the first txid of some segment, if segment corresponding
    * to the txid exists.

http://git-wip-us.apache.org/repos/asf/hadoop/blob/fac9f91b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/LeaseManager.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/LeaseManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/LeaseManager.java
index 31fb2bb..75db8de 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/LeaseManager.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/LeaseManager.java
@@ -208,7 +208,7 @@ public class LeaseManager {
    * read or write lock.
    *
    * @param ancestorDir the ancestor {@link INodeDirectory}
-   * @return Set<INodesInPath>
+   * @return {@code Set<INodesInPath>}
    */
   public Set<INodesInPath> getINodeWithLeases(final INodeDirectory
       ancestorDir) throws IOException {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/fac9f91b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/MetaRecoveryContext.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/MetaRecoveryContext.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/MetaRecoveryContext.java
index dad5779..ead56a9 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/MetaRecoveryContext.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/MetaRecoveryContext.java
@@ -52,7 +52,7 @@ public final class MetaRecoveryContext  {
    * Display a prompt to the user and get his or her choice.
    *  
    * @param prompt      The prompt to display
-   * @param default     First choice (will be taken if autoChooseDefault is
+   * @param firstChoice First choice (will be taken if autoChooseDefault is
    *                    true)
    * @param choices     Other choices
    *

http://git-wip-us.apache.org/repos/asf/hadoop/blob/fac9f91b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java
index 9f82cbd..b91e7ba 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java
@@ -176,8 +176,8 @@ import static org.apache.hadoop.fs.CommonConfigurationKeys.IPC_BACKOFF_ENABLE_DE
  * is a second backup/failover NameNode, or when using federated NameNodes.)
  *
  * The NameNode controls two critical tables:
- *   1)  filename->blocksequence (namespace)
- *   2)  block->machinelist ("inodes")
+ *   1)  filename{@literal ->}blocksequence (namespace)
+ *   2)  block{@literal ->}machinelist ("inodes")
  *
  * The first table is stored on disk and is very precious.
  * The second table is rebuilt every time the NameNode comes up.
@@ -1111,7 +1111,7 @@ public class NameNode extends ReconfigurableBase implements
   }
 
   /**
-   * @return NameNodeHttpServer, used by unit tests to ensure a full shutdown,
+   * NameNodeHttpServer, used by unit tests to ensure a full shutdown,
    * so that no bind exception is thrown during restart.
    */
   @VisibleForTesting

http://git-wip-us.apache.org/repos/asf/hadoop/blob/fac9f91b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NamenodeFsck.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NamenodeFsck.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NamenodeFsck.java
index 5d664cb..56607f0 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NamenodeFsck.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NamenodeFsck.java
@@ -94,14 +94,13 @@ import com.google.common.annotations.VisibleForTesting;
  * <p>The tool scans all files and directories, starting from an indicated
  *  root path. The following abnormal conditions are detected and handled:</p>
  * <ul>
- * <li>files with blocks that are completely missing from all datanodes.<br/>
+ * <li>files with blocks that are completely missing from all datanodes.<br>
  * In this case the tool can perform one of the following actions:
  *  <ul>
- *      <li>none ({@link #FIXING_NONE})</li>
  *      <li>move corrupted files to /lost+found directory on DFS
- *      ({@link #FIXING_MOVE}). Remaining data blocks are saved as a
+ *      ({@link #doMove}). Remaining data blocks are saved as a
  *      block chains, representing longest consecutive series of valid blocks.</li>
- *      <li>delete corrupted files ({@link #FIXING_DELETE})</li>
+ *      <li>delete corrupted files ({@link #doDelete})</li>
  *  </ul>
  *  </li>
  *  <li>detect files with under-replicated or over-replicated blocks</li>
@@ -201,7 +200,7 @@ public class NamenodeFsck implements DataEncryptionKeyFactory {
    */
   NamenodeFsck(Configuration conf, NameNode namenode,
       NetworkTopology networktopology,
-      Map<String,String[]> pmap, PrintWriter out,
+      Map<String, String[]> pmap, PrintWriter out,
       int totalDatanodes, InetAddress remoteAddress) {
     this.conf = conf;
     this.namenode = namenode;
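
The pattern applied throughout this patch is uniform: anything the javadoc tool
would try to parse as an HTML tag (generic types such as List<AclEntry>, arrows
such as -->, comparisons such as >=) is wrapped in {@code ...} or {@literal ...},
and the self-closing <p/> and <br/> forms are replaced by plain <p> and <br>.
A minimal illustration of the convention; the method below is hypothetical and
not part of the patch:

    /**
     * Returns the extended ACL entries of an inode.
     *
     * @return {@code List<AclEntry>} for the inode, or an empty list if the
     *         entry count is {@literal <=} 0
     */
    List<AclEntry> getEntries(INode inode);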




[21/50] [abbrv] hadoop git commit: YARN-8944. TestContainerAllocation.testUserLimitAllocationMultipleContainers failure after YARN-8896. Contributed by Wilfred Spiegelenburg.

Posted by su...@apache.org.
YARN-8944. TestContainerAllocation.testUserLimitAllocationMultipleContainers failure after YARN-8896. Contributed by Wilfred Spiegelenburg.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/1d90a0dd
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/1d90a0dd
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/1d90a0dd

Branch: refs/heads/HDFS-12943
Commit: 1d90a0dd23c5d1bd52d04c303506806d0dc61cd2
Parents: 63e7134
Author: Weiwei Yang <ww...@apache.org>
Authored: Mon Oct 29 11:53:10 2018 +0800
Committer: Weiwei Yang <ww...@apache.org>
Committed: Mon Oct 29 11:53:10 2018 +0800

----------------------------------------------------------------------
 .../scheduler/capacity/TestContainerAllocation.java              | 4 ++++
 1 file changed, 4 insertions(+)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/1d90a0dd/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestContainerAllocation.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestContainerAllocation.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestContainerAllocation.java
index d5f6ab1..93c35e4 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestContainerAllocation.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestContainerAllocation.java
@@ -70,6 +70,7 @@ import org.junit.Before;
 import org.junit.Test;
 
 import static org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.CapacitySchedulerConfiguration.MAXIMUM_ALLOCATION_MB;
+import static org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.CapacitySchedulerConfiguration.MAX_ASSIGN_PER_HEARTBEAT;
 
 public class TestContainerAllocation {
 
@@ -906,6 +907,9 @@ public class TestContainerAllocation {
     CapacitySchedulerConfiguration newConf =
         (CapacitySchedulerConfiguration) TestUtils
             .getConfigurationWithMultipleQueues(conf);
+    // make sure an unlimited number of containers can be assigned,
+    // overriding the default of 100 after YARN-8896
+    newConf.set(MAX_ASSIGN_PER_HEARTBEAT, "-1");
     newConf.setUserLimit("root.c", 50);
     MockRM rm1 = new MockRM(newConf);
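
The test drives a large number of container allocations from node heartbeats,
and after YARN-8896 the capacity scheduler caps assignments at 100 per heartbeat
by default; setting the limit to -1 removes that cap so only the user-limit
behaviour under test is exercised. A minimal sketch of the override, illustrative
only and reusing the constant statically imported above:

    CapacitySchedulerConfiguration conf = new CapacitySchedulerConfiguration();
    // -1 disables the per-heartbeat assignment cap introduced by YARN-8896
    conf.set(CapacitySchedulerConfiguration.MAX_ASSIGN_PER_HEARTBEAT, "-1");
    // the user limit on root.c is what the test actually asserts against
    conf.setUserLimit("root.c", 50);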
 




[14/50] [abbrv] hadoop git commit: HDFS-13959. TestUpgradeDomainBlockPlacementPolicy is flaky. Contributed by Ayush Saxena.

Posted by su...@apache.org.
HDFS-13959. TestUpgradeDomainBlockPlacementPolicy is flaky. Contributed by Ayush Saxena.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/1851d06e
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/1851d06e
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/1851d06e

Branch: refs/heads/HDFS-12943
Commit: 1851d06eb3b70f39f3054a7c06f0ad2bc664aaec
Parents: dce4ebe
Author: Inigo Goiri <in...@apache.org>
Authored: Fri Oct 26 09:29:12 2018 -0700
Committer: Inigo Goiri <in...@apache.org>
Committed: Fri Oct 26 09:29:12 2018 -0700

----------------------------------------------------------------------
 .../TestUpgradeDomainBlockPlacementPolicy.java  | 22 ++++++++++++--------
 1 file changed, 13 insertions(+), 9 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/1851d06e/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestUpgradeDomainBlockPlacementPolicy.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestUpgradeDomainBlockPlacementPolicy.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestUpgradeDomainBlockPlacementPolicy.java
index 8460b6f..3383c4e 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestUpgradeDomainBlockPlacementPolicy.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestUpgradeDomainBlockPlacementPolicy.java
@@ -65,14 +65,8 @@ public class TestUpgradeDomainBlockPlacementPolicy {
   private static final int DEFAULT_BLOCK_SIZE = 1024;
   static final String[] racks =
       { "/RACK1", "/RACK1", "/RACK1", "/RACK2", "/RACK2", "/RACK2" };
-  /**
-   *  Use host names that can be resolved (
-   *  InetSocketAddress#isUnresolved == false). Otherwise,
-   *  CombinedHostFileManager won't allow those hosts.
-   */
   static final String[] hosts =
-      {"127.0.0.1", "127.0.0.1", "127.0.0.1", "127.0.0.1",
-          "127.0.0.1", "127.0.0.1"};
+      {"host1", "host2", "host3", "host4", "host5", "host6"};
   static final String[] upgradeDomains =
       {"ud5", "ud2", "ud3", "ud1", "ud2", "ud4"};
   static final Set<DatanodeID> expectedDatanodeIDs = new HashSet<>();
@@ -134,7 +128,12 @@ public class TestUpgradeDomainBlockPlacementPolicy {
     for (int i = 0; i < hosts.length; i++) {
       datanodes[i] = new DatanodeAdminProperties();
       DatanodeID datanodeID = cluster.getDataNodes().get(i).getDatanodeId();
-      datanodes[i].setHostName(datanodeID.getHostName());
+      /*
+       *  Use host names that can be resolved (
+       *  InetSocketAddress#isUnresolved == false). Otherwise,
+       *  CombinedHostFileManager won't allow those hosts.
+       */
+      datanodes[i].setHostName(datanodeID.getIpAddr());
       datanodes[i].setPort(datanodeID.getXferPort());
       datanodes[i].setUpgradeDomain(upgradeDomains[i]);
     }
@@ -168,7 +167,12 @@ public class TestUpgradeDomainBlockPlacementPolicy {
     for (int i = 0; i < hosts.length; i++) {
       datanodes[i] = new DatanodeAdminProperties();
       DatanodeID datanodeID = cluster.getDataNodes().get(i).getDatanodeId();
-      datanodes[i].setHostName(datanodeID.getHostName());
+      /*
+       *  Use host names that can be resolved (
+       *  InetSocketAddress#isUnresolved == false). Otherwise,
+       *  CombinedHostFileManager won't allow those hosts.
+       */
+      datanodes[i].setHostName(datanodeID.getIpAddr());
       datanodes[i].setPort(datanodeID.getXferPort());
       datanodes[i].setUpgradeDomain(upgradeDomains[i]);
     }
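
The flakiness traces back to the host names the test registers through
DatanodeAdminProperties: CombinedHostFileManager rejects any entry whose address
is unresolved (InetSocketAddress#isUnresolved == true), and bare names such as
host1..host6 generally do not resolve, so the test now registers each datanode's
IP address instead. A rough, runnable illustration; the port number is arbitrary
and not taken from the test:

    import java.net.InetSocketAddress;

    public class ResolveCheck {
      public static void main(String[] args) {
        // a bare, unresolvable host name: the corresponding admin entry would be dropped
        System.out.println(new InetSocketAddress("host1", 9866).isUnresolved());      // true on most machines
        // the datanode's own IP address always resolves, so the entry is accepted
        System.out.println(new InetSocketAddress("127.0.0.1", 9866).isUnresolved());  // false
      }
    }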




[13/50] [abbrv] hadoop git commit: HDDS-694. Plugin new Pipeline management code in SCM. Contributed by Lokesh Jain.

Posted by su...@apache.org.
HDDS-694. Plugin new Pipeline management code in SCM.
Contributed by Lokesh Jain.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/dce4ebe8
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/dce4ebe8
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/dce4ebe8

Branch: refs/heads/HDFS-12943
Commit: dce4ebe81471fa2c1ef913a1a2c8acffcbdaa6f8
Parents: e28c00c
Author: Nanda kumar <na...@apache.org>
Authored: Fri Oct 26 17:53:47 2018 +0530
Committer: Nanda kumar <na...@apache.org>
Committed: Fri Oct 26 17:55:04 2018 +0530

----------------------------------------------------------------------
 .../hadoop/hdds/scm/XceiverClientGrpc.java      |  15 +-
 .../hadoop/hdds/scm/XceiverClientManager.java   |   6 +-
 .../hadoop/hdds/scm/XceiverClientRatis.java     |  31 +-
 .../scm/client/ContainerOperationClient.java    |  40 +-
 .../hadoop/hdds/scm/XceiverClientSpi.java       |   2 +-
 .../hadoop/hdds/scm/client/ScmClient.java       |   2 +-
 .../hdds/scm/container/ContainerInfo.java       |   2 +-
 .../common/helpers/AllocatedBlock.java          |   1 +
 .../common/helpers/ContainerWithPipeline.java   |   3 +-
 .../scm/container/common/helpers/Pipeline.java  | 319 ------------
 .../container/common/helpers/PipelineID.java    |  97 ----
 .../hadoop/hdds/scm/pipeline/Pipeline.java      |  41 +-
 .../scm/pipeline/PipelineNotFoundException.java |  28 ++
 .../StorageContainerLocationProtocol.java       |   2 +-
 ...kLocationProtocolClientSideTranslatorPB.java |   4 +-
 ...rLocationProtocolClientSideTranslatorPB.java |   4 +-
 .../scm/storage/ContainerProtocolCalls.java     |  22 +-
 .../main/java/org/apache/ratis/RatisHelper.java |  15 +-
 .../transport/server/XceiverServerGrpc.java     |   2 +-
 .../server/ratis/XceiverServerRatis.java        |  10 +-
 .../commands/CloseContainerCommand.java         |   2 +-
 .../hadoop/hdds/scm/block/BlockManagerImpl.java |   2 +-
 .../block/DatanodeDeletedBlockTransactions.java |   4 +-
 .../hdds/scm/block/DeletedBlockLogImpl.java     |   5 +-
 .../container/CloseContainerEventHandler.java   |   4 +-
 .../hdds/scm/container/ContainerManager.java    |   6 +-
 .../scm/container/ContainerStateManager.java    |  25 +-
 .../hdds/scm/container/SCMContainerManager.java |  59 +--
 .../hadoop/hdds/scm/events/SCMEvents.java       |   9 -
 .../hadoop/hdds/scm/node/NodeManager.java       |   4 +-
 .../hadoop/hdds/scm/node/NodeStateManager.java  |   4 +-
 .../hadoop/hdds/scm/node/SCMNodeManager.java    |   4 +-
 .../hadoop/hdds/scm/node/StaleNodeHandler.java  |  30 +-
 .../hdds/scm/node/states/Node2PipelineMap.java  |   8 +-
 .../hdds/scm/pipeline/PipelineFactory.java      |   6 +-
 .../hdds/scm/pipeline/PipelineManager.java      |  10 +-
 .../hdds/scm/pipeline/PipelineProvider.java     |   2 +-
 .../scm/pipeline/PipelineReportHandler.java     |  16 +-
 .../hdds/scm/pipeline/PipelineStateManager.java |  27 +-
 .../hdds/scm/pipeline/PipelineStateMap.java     |  91 +++-
 .../scm/pipeline/RatisPipelineProvider.java     |  15 +-
 .../hdds/scm/pipeline/SCMPipelineManager.java   |  37 +-
 .../scm/pipeline/SimplePipelineProvider.java    |  13 +-
 .../pipelines/PipelineActionEventHandler.java   |  62 ---
 .../scm/pipelines/PipelineCloseHandler.java     |  52 --
 .../hdds/scm/pipelines/PipelineManager.java     | 171 -------
 .../scm/pipelines/PipelineReportHandler.java    |  59 ---
 .../hdds/scm/pipelines/PipelineSelector.java    | 481 -------------------
 .../scm/pipelines/PipelineStateManager.java     | 136 ------
 .../hadoop/hdds/scm/pipelines/package-info.java |  38 --
 .../scm/pipelines/ratis/RatisManagerImpl.java   | 129 -----
 .../hdds/scm/pipelines/ratis/package-info.java  |  18 -
 .../standalone/StandaloneManagerImpl.java       | 122 -----
 .../scm/pipelines/standalone/package-info.java  |  18 -
 .../scm/server/SCMClientProtocolServer.java     |   2 +-
 .../scm/server/StorageContainerManager.java     |  39 +-
 .../hadoop/hdds/scm/block/TestBlockManager.java |   9 +-
 .../hdds/scm/block/TestDeletedBlockLog.java     |  32 +-
 .../hdds/scm/container/MockNodeManager.java     |   4 +-
 .../TestCloseContainerEventHandler.java         |  20 +-
 .../container/TestContainerReportHandler.java   |   7 +-
 .../container/TestContainerStateManager.java    |  29 +-
 .../scm/container/TestSCMContainerManager.java  |  24 +-
 .../replication/TestReplicationManager.java     |  29 +-
 .../hdds/scm/node/TestContainerPlacement.java   |   8 +-
 .../hdds/scm/node/TestDeadNodeHandler.java      |   7 +-
 .../hadoop/hdds/scm/node/TestNodeManager.java   |   2 +-
 .../ozone/container/common/TestEndPoint.java    |   2 +-
 .../testutils/ReplicationNodeManagerMock.java   |   4 +-
 .../hdds/scm/cli/container/InfoSubcommand.java  |   5 +-
 .../ozone/client/io/ChunkGroupInputStream.java  |   7 +-
 .../hdds/scm/pipeline/TestNode2PipelineMap.java |  29 +-
 .../hdds/scm/pipeline/TestNodeFailure.java      |  22 +-
 .../hdds/scm/pipeline/TestPipelineClose.java    |  41 +-
 .../scm/pipeline/TestPipelineStateManager.java  | 171 ++++---
 .../scm/pipeline/TestRatisPipelineProvider.java |  13 +-
 .../scm/pipeline/TestSCMPipelineManager.java    |  45 +-
 .../hdds/scm/pipeline/TestSCMRestart.java       |  23 +-
 .../pipeline/TestSimplePipelineProvider.java    |  13 +-
 .../apache/hadoop/ozone/RatisTestHelper.java    |   2 +-
 .../TestContainerStateMachineIdempotency.java   |   2 +-
 .../hadoop/ozone/TestMiniOzoneCluster.java      |  23 +-
 .../ozone/TestStorageContainerManager.java      |   4 +-
 .../TestStorageContainerManagerHelper.java      |   5 +-
 .../ozone/client/rest/TestOzoneRestClient.java  |   4 +-
 .../rpc/TestCloseContainerHandlingByClient.java |  10 +-
 .../ozone/client/rpc/TestOzoneRpcClient.java    |   6 +-
 .../ozone/container/ContainerTestHelper.java    |  70 +--
 .../container/TestContainerReplication.java     |   6 +-
 .../common/impl/TestCloseContainerHandler.java  |  10 +-
 .../TestCloseContainerByPipeline.java           |   8 +-
 .../TestCloseContainerHandler.java              |   2 +-
 .../transport/server/ratis/TestCSMMetrics.java  |   4 +-
 .../container/metrics/TestContainerMetrics.java |   4 +-
 .../container/ozoneimpl/TestOzoneContainer.java |   9 +-
 .../ozoneimpl/TestOzoneContainerRatis.java      |   6 +-
 .../container/server/TestContainerServer.java   |  21 +-
 .../hadoop/ozone/scm/TestAllocateContainer.java |   2 +-
 .../TestGetCommittedBlockLengthAndPutKey.java   |   2 +-
 .../ozone/om/ScmBlockLocationTestIngClient.java |  19 +-
 .../genesis/BenchMarkContainerStateMap.java     |  27 +-
 .../hadoop/ozone/scm/TestContainerSQLCli.java   |   6 +-
 102 files changed, 791 insertions(+), 2363 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/dce4ebe8/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientGrpc.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientGrpc.java b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientGrpc.java
index 9526be3..cc34e27 100644
--- a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientGrpc.java
+++ b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientGrpc.java
@@ -29,7 +29,7 @@ import org.apache.hadoop.hdds.protocol.datanode.proto.XceiverClientProtocolServi
 import org.apache.hadoop.hdds.protocol.datanode.proto.XceiverClientProtocolServiceGrpc.XceiverClientProtocolServiceStub;
 import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
 import org.apache.hadoop.hdds.scm.client.HddsClientUtils;
-import org.apache.hadoop.hdds.scm.container.common.helpers.Pipeline;
+import org.apache.hadoop.hdds.scm.pipeline.Pipeline;
 import org.apache.hadoop.ozone.OzoneConfigKeys;
 import org.apache.hadoop.util.Time;
 import org.apache.ratis.thirdparty.io.grpc.ManagedChannel;
@@ -39,6 +39,7 @@ import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
 import java.io.IOException;
+import java.util.List;
 import java.util.concurrent.CompletableFuture;
 import java.util.concurrent.ExecutionException;
 import java.util.UUID;
@@ -84,9 +85,9 @@ public class XceiverClientGrpc extends XceiverClientSpi {
   public void connect() throws Exception {
 
     // leader by default is the 1st datanode in the datanode list of pipleline
-    DatanodeDetails leader = this.pipeline.getLeader();
+    DatanodeDetails dn = this.pipeline.getFirstNode();
     // just make a connection to the 1st datanode at the beginning
-    connectToDatanode(leader);
+    connectToDatanode(dn);
   }
 
   private void connectToDatanode(DatanodeDetails dn) {
@@ -148,18 +149,16 @@ public class XceiverClientGrpc extends XceiverClientSpi {
 
   public ContainerCommandResponseProto sendCommandWithRetry(
       ContainerCommandRequestProto request) throws IOException {
-    int size = pipeline.getMachines().size();
     ContainerCommandResponseProto responseProto = null;
-    DatanodeDetails dn = null;
 
     // In case of an exception or an error, we will try to read from the
     // datanodes in the pipeline in a round robin fashion.
 
     // TODO: cache the correct leader info in here, so that any subsequent calls
     // should first go to leader
-    for (int dnIndex = 0; dnIndex < size; dnIndex++) {
+    List<DatanodeDetails> dns = pipeline.getNodes();
+    for (DatanodeDetails dn : dns) {
       try {
-        dn = pipeline.getMachines().get(dnIndex);
         LOG.debug("Executing command " + request + " on datanode " + dn);
         // In case the command gets retried on a 2nd datanode,
         // sendCommandAsyncCall will create a new channel and async stub
@@ -201,7 +200,7 @@ public class XceiverClientGrpc extends XceiverClientSpi {
   public CompletableFuture<ContainerCommandResponseProto> sendCommandAsync(
       ContainerCommandRequestProto request)
       throws IOException, ExecutionException, InterruptedException {
-    return sendCommandAsync(request, pipeline.getLeader());
+    return sendCommandAsync(request, pipeline.getFirstNode());
   }
 
   private CompletableFuture<ContainerCommandResponseProto> sendCommandAsync(
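
With the leader notion gone from the client-side Pipeline, the retry path above
simply walks pipeline.getNodes() and falls through to the next datanode when a
call fails. The shape of that loop, heavily simplified; sendTo is a hypothetical
stand-in for the per-datanode send in the hunk above:

    for (DatanodeDetails dn : pipeline.getNodes()) {
      try {
        return sendTo(dn, request);   // first successful response wins
      } catch (IOException e) {
        // remember the failure and try the next datanode in the pipeline
      }
    }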

http://git-wip-us.apache.org/repos/asf/hadoop/blob/dce4ebe8/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientManager.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientManager.java b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientManager.java
index 83b5a4c..1973c1d 100644
--- a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientManager.java
+++ b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientManager.java
@@ -25,7 +25,7 @@ import com.google.common.cache.CacheBuilder;
 import com.google.common.cache.RemovalListener;
 import com.google.common.cache.RemovalNotification;
 import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hdds.scm.container.common.helpers.Pipeline;
+import org.apache.hadoop.hdds.scm.pipeline.Pipeline;
 import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
 
 import java.io.Closeable;
@@ -115,8 +115,8 @@ public class XceiverClientManager implements Closeable {
   public XceiverClientSpi acquireClient(Pipeline pipeline)
       throws IOException {
     Preconditions.checkNotNull(pipeline);
-    Preconditions.checkArgument(pipeline.getMachines() != null);
-    Preconditions.checkArgument(!pipeline.getMachines().isEmpty());
+    Preconditions.checkArgument(pipeline.getNodes() != null);
+    Preconditions.checkArgument(!pipeline.getNodes().isEmpty());
 
     synchronized (clientCache) {
       XceiverClientSpi info = getClient(pipeline);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/dce4ebe8/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientRatis.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientRatis.java b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientRatis.java
index d2eb68b..f38fd3b 100644
--- a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientRatis.java
+++ b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientRatis.java
@@ -19,7 +19,6 @@
 package org.apache.hadoop.hdds.scm;
 
 import org.apache.hadoop.hdds.HddsUtils;
-import org.apache.hadoop.hdds.scm.container.common.helpers.PipelineID;
 import org.apache.hadoop.io.MultipleIOException;
 import org.apache.ratis.proto.RaftProtos;
 import org.apache.ratis.retry.RetryPolicy;
@@ -27,7 +26,7 @@ import org.apache.ratis.thirdparty.com.google.protobuf
     .InvalidProtocolBufferException;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hdds.scm.client.HddsClientUtils;
-import org.apache.hadoop.hdds.scm.container.common.helpers.Pipeline;
+import org.apache.hadoop.hdds.scm.pipeline.Pipeline;
 import org.apache.hadoop.hdds.protocol.DatanodeDetails;
 import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos
     .ContainerCommandRequestProto;
@@ -64,19 +63,6 @@ public final class XceiverClientRatis extends XceiverClientSpi {
   static final Logger LOG = LoggerFactory.getLogger(XceiverClientRatis.class);
 
   public static XceiverClientRatis newXceiverClientRatis(
-      Pipeline pipeline, Configuration ozoneConf) {
-    final String rpcType = ozoneConf.get(
-        ScmConfigKeys.DFS_CONTAINER_RATIS_RPC_TYPE_KEY,
-        ScmConfigKeys.DFS_CONTAINER_RATIS_RPC_TYPE_DEFAULT);
-    final int maxOutstandingRequests =
-        HddsClientUtils.getMaxOutstandingRequests(ozoneConf);
-    final RetryPolicy retryPolicy = RatisHelper.createRetryPolicy(ozoneConf);
-    return new XceiverClientRatis(pipeline,
-        SupportedRpcType.valueOfIgnoreCase(rpcType), maxOutstandingRequests,
-        retryPolicy);
-  }
-
-  public static XceiverClientRatis newXceiverClientRatis(
       org.apache.hadoop.hdds.scm.pipeline.Pipeline pipeline,
       Configuration ozoneConf) {
     final String rpcType = ozoneConf
@@ -85,11 +71,7 @@ public final class XceiverClientRatis extends XceiverClientSpi {
     final int maxOutstandingRequests =
         HddsClientUtils.getMaxOutstandingRequests(ozoneConf);
     final RetryPolicy retryPolicy = RatisHelper.createRetryPolicy(ozoneConf);
-    Pipeline pipeline1 =
-        new Pipeline(pipeline.getNodes().get(0).getUuidString(),
-            HddsProtos.LifeCycleState.OPEN, pipeline.getType(),
-            pipeline.getFactor(), PipelineID.valueOf(pipeline.getID().getId()));
-    return new XceiverClientRatis(pipeline1,
+    return new XceiverClientRatis(pipeline,
         SupportedRpcType.valueOfIgnoreCase(rpcType), maxOutstandingRequests,
         retryPolicy);
   }
@@ -118,7 +100,7 @@ public final class XceiverClientRatis extends XceiverClientSpi {
   public void createPipeline() throws IOException {
     final RaftGroup group = RatisHelper.newRaftGroup(pipeline);
     LOG.debug("creating pipeline:{} with {}", pipeline.getId(), group);
-    callRatisRpc(pipeline.getMachines(),
+    callRatisRpc(pipeline.getNodes(),
         (raftClient, peer) -> raftClient.groupAdd(group, peer.getId()));
   }
 
@@ -128,7 +110,7 @@ public final class XceiverClientRatis extends XceiverClientSpi {
   public void destroyPipeline() throws IOException {
     final RaftGroup group = RatisHelper.newRaftGroup(pipeline);
     LOG.debug("destroying pipeline:{} with {}", pipeline.getId(), group);
-    callRatisRpc(pipeline.getMachines(), (raftClient, peer) -> raftClient
+    callRatisRpc(pipeline.getNodes(), (raftClient, peer) -> raftClient
         .groupRemove(group.getGroupId(), true, peer.getId()));
   }
 
@@ -174,9 +156,8 @@ public final class XceiverClientRatis extends XceiverClientSpi {
 
   @Override
   public void connect() throws Exception {
-    LOG.debug("Connecting to pipeline:{} leader:{}",
-        getPipeline().getId(),
-        RatisHelper.toRaftPeerId(pipeline.getLeader()));
+    LOG.debug("Connecting to pipeline:{} datanode:{}", getPipeline().getId(),
+        RatisHelper.toRaftPeerId(pipeline.getFirstNode()));
     // TODO : XceiverClient ratis should pass the config value of
     // maxOutstandingRequests so as to set the upper bound on max no of async
     // requests to be handled by raft client

http://git-wip-us.apache.org/repos/asf/hadoop/blob/dce4ebe8/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/client/ContainerOperationClient.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/client/ContainerOperationClient.java b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/client/ContainerOperationClient.java
index c635df4..25a71df 100644
--- a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/client/ContainerOperationClient.java
+++ b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/client/ContainerOperationClient.java
@@ -22,7 +22,7 @@ import org.apache.hadoop.hdds.scm.XceiverClientManager;
 import org.apache.hadoop.hdds.scm.XceiverClientSpi;
 import org.apache.hadoop.hdds.scm.container.common.helpers.ContainerWithPipeline;
 import org.apache.hadoop.hdds.scm.container.ContainerInfo;
-import org.apache.hadoop.hdds.scm.container.common.helpers.Pipeline;
+import org.apache.hadoop.hdds.scm.pipeline.Pipeline;
 import org.apache.hadoop.hdds.scm.protocolPB
     .StorageContainerLocationProtocolClientSideTranslatorPB;
 import org.apache.hadoop.hdds.scm.storage.ContainerProtocolCalls;
@@ -40,11 +40,6 @@ import java.io.IOException;
 import java.util.List;
 import java.util.UUID;
 
-import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.LifeCycleState
-    .ALLOCATED;
-import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.LifeCycleState
-    .OPEN;
-
 /**
  * This class provides the client-facing APIs of container operations.
  */
@@ -98,14 +93,10 @@ public class ContainerOperationClient implements ScmClient {
       Pipeline pipeline = containerWithPipeline.getPipeline();
       client = xceiverClientManager.acquireClient(pipeline);
 
-      // Allocated State means that SCM has allocated this pipeline in its
-      // namespace. The client needs to create the pipeline on the machines
-      // which was choosen by the SCM.
-      Preconditions.checkState(pipeline.getLifeCycleState() == ALLOCATED ||
-          pipeline.getLifeCycleState() == OPEN, "Unexpected pipeline state");
-      if (pipeline.getLifeCycleState() == ALLOCATED) {
-        createPipeline(client, pipeline);
-      }
+      Preconditions.checkState(pipeline.isOpen(), String
+          .format("Unexpected state=%s for pipeline=%s, expected state=%s",
+              pipeline.getPipelineState(), pipeline.getId(),
+              Pipeline.PipelineState.OPEN));
       createContainer(client,
           containerWithPipeline.getContainerInfo().getContainerID());
       return containerWithPipeline;
@@ -142,8 +133,7 @@ public class ContainerOperationClient implements ScmClient {
     // creation state.
     if (LOG.isDebugEnabled()) {
       LOG.debug("Created container " + containerId
-          + " leader:" + client.getPipeline().getLeader()
-          + " machines:" + client.getPipeline().getMachines());
+          + " machines:" + client.getPipeline().getNodes());
     }
   }
 
@@ -208,12 +198,6 @@ public class ContainerOperationClient implements ScmClient {
       Pipeline pipeline = containerWithPipeline.getPipeline();
       client = xceiverClientManager.acquireClient(pipeline);
 
-      // Allocated State means that SCM has allocated this pipeline in its
-      // namespace. The client needs to create the pipeline on the machines
-      // which was choosen by the SCM.
-      if (pipeline.getLifeCycleState() == ALLOCATED) {
-        createPipeline(client, pipeline);
-      }
       // connect to pipeline leader and allocate container on leader datanode.
       client = xceiverClientManager.acquireClient(pipeline);
       createContainer(client,
@@ -283,10 +267,8 @@ public class ContainerOperationClient implements ScmClient {
       storageContainerLocationClient
           .deleteContainer(containerId);
       if (LOG.isDebugEnabled()) {
-        LOG.debug("Deleted container {}, leader: {}, machines: {} ",
-            containerId,
-            pipeline.getLeader(),
-            pipeline.getMachines());
+        LOG.debug("Deleted container {}, machines: {} ", containerId,
+            pipeline.getNodes());
       }
     } finally {
       if (client != null) {
@@ -336,10 +318,8 @@ public class ContainerOperationClient implements ScmClient {
       ReadContainerResponseProto response =
           ContainerProtocolCalls.readContainer(client, containerID, traceID);
       if (LOG.isDebugEnabled()) {
-        LOG.debug("Read container {}, leader: {}, machines: {} ",
-            containerID,
-            pipeline.getLeader(),
-            pipeline.getMachines());
+        LOG.debug("Read container {}, machines: {} ", containerID,
+            pipeline.getNodes());
       }
       return response.getContainerData();
     } finally {

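Taken together, the client-side contract is simpler after this change: SCM only hands out pipelines that are already OPEN, so the client merely verifies the state instead of creating the pipeline itself. A minimal sketch under that assumption (the ClientSketch class and its acquire() helper are illustrative, not part of the patch):

import java.io.IOException;

import com.google.common.base.Preconditions;
import org.apache.hadoop.hdds.scm.XceiverClientManager;
import org.apache.hadoop.hdds.scm.XceiverClientSpi;
import org.apache.hadoop.hdds.scm.container.common.helpers.ContainerWithPipeline;
import org.apache.hadoop.hdds.scm.pipeline.Pipeline;

public final class ClientSketch {
  // Acquire an XceiverClient only when SCM reports the pipeline OPEN; the
  // client no longer calls createPipeline() for the ALLOCATED state.
  static XceiverClientSpi acquire(XceiverClientManager manager,
      ContainerWithPipeline cwp) throws IOException {
    Pipeline pipeline = cwp.getPipeline();
    Preconditions.checkState(pipeline.isOpen(),
        "Unexpected state=%s for pipeline=%s",
        pipeline.getPipelineState(), pipeline.getId());
    return manager.acquireClient(pipeline);
  }
}
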
http://git-wip-us.apache.org/repos/asf/hadoop/blob/dce4ebe8/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientSpi.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientSpi.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientSpi.java
index 571d148..b36315e 100644
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientSpi.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientSpi.java
@@ -19,7 +19,7 @@
 package org.apache.hadoop.hdds.scm;
 
 import com.google.common.annotations.VisibleForTesting;
-import org.apache.hadoop.hdds.scm.container.common.helpers.Pipeline;
+import org.apache.hadoop.hdds.scm.pipeline.Pipeline;
 import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos
     .ContainerCommandRequestProto;
 import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos

http://git-wip-us.apache.org/repos/asf/hadoop/blob/dce4ebe8/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/client/ScmClient.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/client/ScmClient.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/client/ScmClient.java
index c37f42c..3d5d56c 100644
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/client/ScmClient.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/client/ScmClient.java
@@ -20,7 +20,7 @@ package org.apache.hadoop.hdds.scm.client;
 import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.hdds.scm.container.common.helpers.ContainerWithPipeline;
 import org.apache.hadoop.hdds.scm.container.ContainerInfo;
-import org.apache.hadoop.hdds.scm.container.common.helpers.Pipeline;
+import org.apache.hadoop.hdds.scm.pipeline.Pipeline;
 import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos
     .ContainerData;
 import org.apache.hadoop.hdds.protocol.proto.HddsProtos;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/dce4ebe8/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerInfo.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerInfo.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerInfo.java
index a5ea3e3..64407a7 100644
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerInfo.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerInfo.java
@@ -36,7 +36,7 @@ import org.apache.commons.lang3.builder.HashCodeBuilder;
 import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
 import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor;
 import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationType;
-import org.apache.hadoop.hdds.scm.container.common.helpers.PipelineID;
+import org.apache.hadoop.hdds.scm.pipeline.PipelineID;
 import org.apache.hadoop.util.Time;
 
 /**

http://git-wip-us.apache.org/repos/asf/hadoop/blob/dce4ebe8/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/container/common/helpers/AllocatedBlock.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/container/common/helpers/AllocatedBlock.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/container/common/helpers/AllocatedBlock.java
index 63781a8..f657b74 100644
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/container/common/helpers/AllocatedBlock.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/container/common/helpers/AllocatedBlock.java
@@ -18,6 +18,7 @@
 
 package org.apache.hadoop.hdds.scm.container.common.helpers;
 
+import org.apache.hadoop.hdds.scm.pipeline.Pipeline;
 import org.apache.hadoop.hdds.client.BlockID;
 
 /**

http://git-wip-us.apache.org/repos/asf/hadoop/blob/dce4ebe8/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/container/common/helpers/ContainerWithPipeline.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/container/common/helpers/ContainerWithPipeline.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/container/common/helpers/ContainerWithPipeline.java
index af74a7d..8f49255 100644
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/container/common/helpers/ContainerWithPipeline.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/container/common/helpers/ContainerWithPipeline.java
@@ -23,6 +23,7 @@ import org.apache.commons.lang3.builder.EqualsBuilder;
 import org.apache.commons.lang3.builder.HashCodeBuilder;
 import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
 import org.apache.hadoop.hdds.scm.container.ContainerInfo;
+import org.apache.hadoop.hdds.scm.pipeline.Pipeline;
 
 /**
  * Class wraps ozone container info.
@@ -50,7 +51,7 @@ public class ContainerWithPipeline implements Comparator<ContainerWithPipeline>,
       HddsProtos.ContainerWithPipeline allocatedContainer) {
     return new ContainerWithPipeline(
         ContainerInfo.fromProtobuf(allocatedContainer.getContainerInfo()),
-        Pipeline.getFromProtoBuf(allocatedContainer.getPipeline()));
+        Pipeline.getFromProtobuf(allocatedContainer.getPipeline()));
   }
 
   public HddsProtos.ContainerWithPipeline getProtobuf() {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/dce4ebe8/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/container/common/helpers/Pipeline.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/container/common/helpers/Pipeline.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/container/common/helpers/Pipeline.java
deleted file mode 100644
index b0817f7..0000000
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/container/common/helpers/Pipeline.java
+++ /dev/null
@@ -1,319 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- *  with the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- *  Unless required by applicable law or agreed to in writing, software
- *  distributed under the License is distributed on an "AS IS" BASIS,
- *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- *  See the License for the specific language governing permissions and
- *  limitations under the License.
- */
-
-package org.apache.hadoop.hdds.scm.container.common.helpers;
-
-import com.fasterxml.jackson.annotation.JsonAutoDetect;
-import com.fasterxml.jackson.annotation.JsonFilter;
-import com.fasterxml.jackson.annotation.JsonIgnore;
-import com.fasterxml.jackson.annotation.PropertyAccessor;
-import com.fasterxml.jackson.databind.ObjectMapper;
-import com.fasterxml.jackson.databind.ObjectWriter;
-import com.fasterxml.jackson.databind.ser.FilterProvider;
-import com.fasterxml.jackson.databind.ser.impl.SimpleBeanPropertyFilter;
-import com.fasterxml.jackson.databind.ser.impl.SimpleFilterProvider;
-import com.google.common.base.Preconditions;
-import org.apache.hadoop.hdds.protocol.DatanodeDetails;
-import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
-
-import java.io.IOException;
-import java.util.ArrayList;
-import java.util.Map;
-import java.util.concurrent.ConcurrentHashMap;
-import java.util.List;
-
-/**
- * A pipeline represents the group of machines over which a container lives.
- */
-public class Pipeline {
-  static final String PIPELINE_INFO = "PIPELINE_INFO_FILTER";
-  private static final ObjectWriter WRITER;
-
-  static {
-    ObjectMapper mapper = new ObjectMapper();
-    String[] ignorableFieldNames = {"leaderID", "datanodes"};
-    FilterProvider filters = new SimpleFilterProvider()
-        .addFilter(PIPELINE_INFO, SimpleBeanPropertyFilter
-            .serializeAllExcept(ignorableFieldNames));
-    mapper.setVisibility(PropertyAccessor.FIELD,
-        JsonAutoDetect.Visibility.ANY);
-    mapper.addMixIn(Object.class, MixIn.class);
-
-    WRITER = mapper.writer(filters);
-  }
-
-  @JsonIgnore
-  private String leaderID;
-  @JsonIgnore
-  private Map<String, DatanodeDetails> datanodes;
-  private HddsProtos.LifeCycleState lifeCycleState;
-  private HddsProtos.ReplicationType type;
-  private HddsProtos.ReplicationFactor factor;
-  private PipelineID id;
-
-  /**
-   * Constructs a new pipeline data structure.
-   *
-   * @param leaderID       -  Leader datanode id
-   * @param lifeCycleState  - Pipeline State
-   * @param replicationType - Replication protocol
-   * @param replicationFactor - replication count on datanodes
-   * @param id  - pipeline ID
-   */
-  public Pipeline(String leaderID, HddsProtos.LifeCycleState lifeCycleState,
-      HddsProtos.ReplicationType replicationType,
-      HddsProtos.ReplicationFactor replicationFactor, PipelineID id) {
-    this.leaderID = leaderID;
-    this.lifeCycleState = lifeCycleState;
-    this.type = replicationType;
-    this.factor = replicationFactor;
-    this.id = id;
-    datanodes = new ConcurrentHashMap<>();
-  }
-
-  @Override
-  public int hashCode() {
-    return id.hashCode();
-  }
-
-  @Override
-  public boolean equals(Object o) {
-    if (this == o) {
-      return true;
-    }
-    if (o == null || getClass() != o.getClass()) {
-      return false;
-    }
-
-    Pipeline that = (Pipeline) o;
-
-    return id.equals(that.id)
-            && factor.equals(that.factor)
-            && type.equals(that.type)
-            && lifeCycleState.equals(that.lifeCycleState)
-            && leaderID.equals(that.leaderID);
-
-  }
-
-  /**
-   * Gets pipeline object from protobuf.
-   *
-   * @param pipelineProto - ProtoBuf definition for the pipeline.
-   * @return Pipeline Object
-   */
-  public static Pipeline getFromProtoBuf(
-      HddsProtos.Pipeline pipelineProto) {
-    Preconditions.checkNotNull(pipelineProto);
-    Pipeline pipeline =
-        new Pipeline(pipelineProto.getLeaderID(),
-            pipelineProto.getState(),
-            pipelineProto.getType(),
-            pipelineProto.getFactor(),
-            PipelineID.getFromProtobuf(pipelineProto.getId()));
-
-    for (HddsProtos.DatanodeDetailsProto dataID :
-        pipelineProto.getMembersList()) {
-      pipeline.addMember(DatanodeDetails.getFromProtoBuf(dataID));
-    }
-    return pipeline;
-  }
-
-  /**
-   * returns the replication count.
-   * @return Replication Factor
-   */
-  public HddsProtos.ReplicationFactor getFactor() {
-    return factor;
-  }
-
-  /**
-   * Returns the first machine in the set of datanodes.
-   *
-   * @return First Machine.
-   */
-  @JsonIgnore
-  public DatanodeDetails getLeader() {
-    return getDatanodes().get(leaderID);
-  }
-
-  /**
-   * Adds a datanode to pipeline
-   * @param datanodeDetails datanode to be added.
-   * @return true if the dn was not earlier present, false otherwise
-   */
-  public boolean addMember(DatanodeDetails datanodeDetails) {
-    return datanodes.put(datanodeDetails.getUuid().toString(),
-        datanodeDetails) == null;
-
-  }
-
-  public void resetPipeline() {
-    // reset datanodes in pipeline and learn about them through
-    // pipeline reports on SCM restart
-    datanodes.clear();
-  }
-
-  public Map<String, DatanodeDetails> getDatanodes() {
-    return datanodes;
-  }
-
-  public boolean isEmpty() {
-    return datanodes.isEmpty();
-  }
-  /**
-   * Returns the leader host.
-   *
-   * @return First Machine.
-   */
-  public String getLeaderHost() {
-    return getDatanodes()
-        .get(leaderID).getHostName();
-  }
-
-  /**
-   *
-   * @return lead
-   */
-  public String getLeaderID() {
-    return leaderID;
-  }
-  /**
-   * Returns all machines that make up this pipeline.
-   *
-   * @return List of Machines.
-   */
-  @JsonIgnore
-  public List<DatanodeDetails> getMachines() {
-    return new ArrayList<>(getDatanodes().values());
-  }
-
-  /**
-   * Returns all machines that make up this pipeline.
-   *
-   * @return List of Machines.
-   */
-  public List<String> getDatanodeHosts() {
-    List<String> dataHosts = new ArrayList<>();
-    for (DatanodeDetails datanode : getDatanodes().values()) {
-      dataHosts.add(datanode.getHostName());
-    }
-    return dataHosts;
-  }
-
-  /**
-   * Return a Protobuf Pipeline message from pipeline.
-   *
-   * @return Protobuf message
-   */
-  @JsonIgnore
-  public HddsProtos.Pipeline getProtobufMessage() {
-    HddsProtos.Pipeline.Builder builder =
-        HddsProtos.Pipeline.newBuilder();
-    for (DatanodeDetails datanode : datanodes.values()) {
-      builder.addMembers(datanode.getProtoBufMessage());
-    }
-    builder.setLeaderID(leaderID);
-
-    if (lifeCycleState != null) {
-      builder.setState(lifeCycleState);
-    }
-    if (type != null) {
-      builder.setType(type);
-    }
-
-    if (factor != null) {
-      builder.setFactor(factor);
-    }
-
-    if (id != null) {
-      builder.setId(id.getProtobuf());
-    }
-    return builder.build();
-  }
-
-  /**
-   * Gets the State of the pipeline.
-   *
-   * @return - LifeCycleStates.
-   */
-  public HddsProtos.LifeCycleState getLifeCycleState() {
-    return lifeCycleState;
-  }
-
-  /**
-   * Update the State of the pipeline.
-   */
-  public void setLifeCycleState(HddsProtos.LifeCycleState nextState) {
-    lifeCycleState = nextState;
-  }
-
-  /**
-   * Gets the pipeline id.
-   *
-   * @return - Id of the pipeline
-   */
-  public PipelineID getId() {
-    return id;
-  }
-
-  /**
-   * Returns the type.
-   *
-   * @return type - Standalone, Ratis, Chained.
-   */
-  public HddsProtos.ReplicationType getType() {
-    return type;
-  }
-
-  @Override
-  public String toString() {
-    final StringBuilder b = new StringBuilder(getClass().getSimpleName())
-        .append("[");
-    getDatanodes().keySet().forEach(
-        node -> b.append(node.endsWith(getLeaderID()) ? "*" + id : id));
-    b.append(" id:").append(id);
-    if (getType() != null) {
-      b.append(" type:").append(getType().toString());
-    }
-    if (getFactor() != null) {
-      b.append(" factor:").append(getFactor().toString());
-    }
-    if (getLifeCycleState() != null) {
-      b.append(" State:").append(getLifeCycleState().toString());
-    }
-    return b.toString();
-  }
-
-  public void setType(HddsProtos.ReplicationType type) {
-    this.type = type;
-  }
-
-  /**
-   * Returns a JSON string of this object.
-   *
-   * @return String - json string
-   * @throws IOException
-   */
-  public String toJsonString() throws IOException {
-    return WRITER.writeValueAsString(this);
-  }
-
-  @JsonFilter(PIPELINE_INFO)
-  class MixIn {
-  }
-}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/dce4ebe8/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/container/common/helpers/PipelineID.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/container/common/helpers/PipelineID.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/container/common/helpers/PipelineID.java
deleted file mode 100644
index 6e27a71..0000000
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/container/common/helpers/PipelineID.java
+++ /dev/null
@@ -1,97 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- *  with the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- *  Unless required by applicable law or agreed to in writing, software
- *  distributed under the License is distributed on an "AS IS" BASIS,
- *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- *  See the License for the specific language governing permissions and
- *  limitations under the License.
- */
-
-package org.apache.hadoop.hdds.scm.container.common.helpers;
-
-import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
-import org.apache.ratis.protocol.RaftGroupId;
-
-import java.util.UUID;
-
-/**
- * ID for the pipeline, the ID is based on UUID so that it can be used
- * in Ratis as RaftGroupId, GroupID is used by the datanodes to initialize
- * the ratis group they are part of.
- */
-public final class PipelineID implements Comparable<PipelineID> {
-
-  private UUID id;
-  private RaftGroupId groupId;
-
-  private PipelineID(UUID id) {
-    this.id = id;
-    this.groupId = RaftGroupId.valueOf(id);
-  }
-
-  public static PipelineID randomId() {
-    return new PipelineID(UUID.randomUUID());
-  }
-
-  public static PipelineID valueOf(UUID id) {
-    return new PipelineID(id);
-  }
-
-  public static PipelineID valueOf(RaftGroupId groupId) {
-    return valueOf(groupId.getUuid());
-  }
-
-  public RaftGroupId getRaftGroupID() {
-    return groupId;
-  }
-
-  public UUID getId() {
-    return id;
-  }
-
-  public HddsProtos.PipelineID getProtobuf() {
-    return HddsProtos.PipelineID.newBuilder().setId(id.toString()).build();
-  }
-
-  public static PipelineID getFromProtobuf(HddsProtos.PipelineID protos) {
-    return new PipelineID(UUID.fromString(protos.getId()));
-  }
-
-  @Override
-  public String toString() {
-    return "pipelineId=" + id;
-  }
-
-  @Override
-  public int compareTo(PipelineID o) {
-    return this.id.compareTo(o.id);
-  }
-
-  @Override
-  public boolean equals(Object o) {
-    if (this == o) {
-      return true;
-    }
-    if (o == null || getClass() != o.getClass()) {
-      return false;
-    }
-
-    PipelineID that = (PipelineID) o;
-
-    return id.equals(that.id);
-  }
-
-  @Override
-  public int hashCode() {
-    return id.hashCode();
-  }
-}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/dce4ebe8/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/pipeline/Pipeline.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/pipeline/Pipeline.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/pipeline/Pipeline.java
index b22a0c6..ef055a1 100644
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/pipeline/Pipeline.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/pipeline/Pipeline.java
@@ -60,7 +60,7 @@ public final class Pipeline {
    *
    * @return PipelineID
    */
-  public PipelineID getID() {
+  public PipelineID getId() {
     return id;
   }
 
@@ -87,11 +87,26 @@ public final class Pipeline {
    *
    * @return - LifeCycleStates.
    */
-  PipelineState getPipelineState() {
-    // TODO: See if we need to expose this.
+  public PipelineState getPipelineState() {
     return state;
   }
 
+  /**
+   * Returns the list of nodes which form this pipeline.
+   *
+   * @return List of DatanodeDetails
+   */
+  public List<DatanodeDetails> getNodes() {
+    return new ArrayList<>(nodeStatus.keySet());
+  }
+
+  public DatanodeDetails getFirstNode() throws IOException {
+    if (nodeStatus.isEmpty()) {
+      throw new IOException(String.format("Pipeline=%s is empty", id));
+    }
+    return nodeStatus.keySet().iterator().next();
+  }
+
   public boolean isClosed() {
     return state == PipelineState.CLOSED;
   }
@@ -117,13 +132,8 @@ public final class Pipeline {
     return true;
   }
 
-  /**
-   * Returns the list of nodes which form this pipeline.
-   *
-   * @return List of DatanodeDetails
-   */
-  public List<DatanodeDetails> getNodes() {
-    return new ArrayList<>(nodeStatus.keySet());
+  public boolean isEmpty() {
+    return nodeStatus.isEmpty();
   }
 
   public HddsProtos.Pipeline getProtobufMessage() {
@@ -138,7 +148,7 @@ public final class Pipeline {
     return builder.build();
   }
 
-  public static Pipeline fromProtobuf(HddsProtos.Pipeline pipeline) {
+  public static Pipeline getFromProtobuf(HddsProtos.Pipeline pipeline) {
     return new Builder().setId(PipelineID.getFromProtobuf(pipeline.getId()))
         .setFactor(pipeline.getFactor())
         .setType(pipeline.getType())
@@ -164,8 +174,7 @@ public final class Pipeline {
         .append(id, that.id)
         .append(type, that.type)
         .append(factor, that.factor)
-        .append(state, that.state)
-        .append(nodeStatus, that.nodeStatus)
+        .append(getNodes(), that.getNodes())
         .isEquals();
   }
 
@@ -175,7 +184,6 @@ public final class Pipeline {
         .append(id)
         .append(type)
         .append(factor)
-        .append(state)
         .append(nodeStatus)
         .toHashCode();
   }
@@ -244,7 +252,10 @@ public final class Pipeline {
     }
   }
 
-  enum PipelineState {
+  /**
+   * Possible Pipeline states in SCM.
+   */
+  public enum PipelineState {
     ALLOCATED, OPEN, CLOSED
   }
 }

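For reference, a minimal sketch of how callers consume the relocated org.apache.hadoop.hdds.scm.pipeline.Pipeline after this hunk (PipelineUsageSketch is illustrative only; the accessors are the ones made public above):

import java.io.IOException;
import java.util.List;

import org.apache.hadoop.hdds.protocol.DatanodeDetails;
import org.apache.hadoop.hdds.scm.pipeline.Pipeline;

public final class PipelineUsageSketch {
  static void describe(Pipeline pipeline) throws IOException {
    // State is now Pipeline.PipelineState (ALLOCATED, OPEN, CLOSED) rather
    // than HddsProtos.LifeCycleState.
    if (!pipeline.isOpen()) {
      System.out.println("Pipeline " + pipeline.getId()
          + " is in state " + pipeline.getPipelineState());
    }
    // getNodes() replaces the old getMachines()/getDatanodes() accessors.
    List<DatanodeDetails> nodes = pipeline.getNodes();
    // getFirstNode() replaces getLeader() and throws when the pipeline is empty.
    DatanodeDetails first = pipeline.getFirstNode();
    System.out.println(nodes.size() + " node(s), first: " + first);
  }
}
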
http://git-wip-us.apache.org/repos/asf/hadoop/blob/dce4ebe8/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/pipeline/PipelineNotFoundException.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/pipeline/PipelineNotFoundException.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/pipeline/PipelineNotFoundException.java
new file mode 100644
index 0000000..4568379
--- /dev/null
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/pipeline/PipelineNotFoundException.java
@@ -0,0 +1,28 @@
+package org.apache.hadoop.hdds.scm.pipeline;
+
+import java.io.IOException;
+
+/**
+ * Signals that a pipeline is missing from PipelineManager.
+ */
+public class PipelineNotFoundException extends IOException {
+  /**
+   * Constructs a {@code PipelineNotFoundException} with {@code null}
+   * as its error detail message.
+   */
+  public PipelineNotFoundException() {
+    super();
+  }
+
+  /**
+   * Constructs a {@code PipelineNotFoundException} with the specified
+   * detail message.
+   *
+   * @param message
+   *        The detail message (which is saved for later retrieval
+   *        by the {@link #getMessage()} method)
+   */
+  public PipelineNotFoundException(String message) {
+    super(message);
+  }
+}

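A short sketch of handling the new exception (PipelineLookupSketch is illustrative; PipelineManager#getPipeline is assumed to surface PipelineNotFoundException, matching the ContainerManager contract updated later in this patch):

import java.io.IOException;

import org.apache.hadoop.hdds.scm.pipeline.Pipeline;
import org.apache.hadoop.hdds.scm.pipeline.PipelineID;
import org.apache.hadoop.hdds.scm.pipeline.PipelineManager;
import org.apache.hadoop.hdds.scm.pipeline.PipelineNotFoundException;

public final class PipelineLookupSketch {
  // Translate a missing pipeline into a caller-friendly error message.
  static Pipeline lookup(PipelineManager manager, PipelineID id)
      throws IOException {
    try {
      return manager.getPipeline(id);
    } catch (PipelineNotFoundException e) {
      throw new IOException("Pipeline " + id + " is not tracked by SCM", e);
    }
  }
}
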
http://git-wip-us.apache.org/repos/asf/hadoop/blob/dce4ebe8/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/protocol/StorageContainerLocationProtocol.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/protocol/StorageContainerLocationProtocol.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/protocol/StorageContainerLocationProtocol.java
index 712fb7e..82dfe16 100644
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/protocol/StorageContainerLocationProtocol.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/protocol/StorageContainerLocationProtocol.java
@@ -20,7 +20,7 @@ package org.apache.hadoop.hdds.scm.protocol;
 import org.apache.hadoop.hdds.scm.ScmInfo;
 import org.apache.hadoop.hdds.scm.container.common.helpers.ContainerWithPipeline;
 import org.apache.hadoop.hdds.scm.container.ContainerInfo;
-import org.apache.hadoop.hdds.scm.container.common.helpers.Pipeline;
+import org.apache.hadoop.hdds.scm.pipeline.Pipeline;
 import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
 import org.apache.hadoop.hdds.protocol.proto
     .StorageContainerLocationProtocolProtos.ObjectStageChangeRequestProto;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/dce4ebe8/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/protocolPB/ScmBlockLocationProtocolClientSideTranslatorPB.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/protocolPB/ScmBlockLocationProtocolClientSideTranslatorPB.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/protocolPB/ScmBlockLocationProtocolClientSideTranslatorPB.java
index aed0fb7..e684ae3 100644
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/protocolPB/ScmBlockLocationProtocolClientSideTranslatorPB.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/protocolPB/ScmBlockLocationProtocolClientSideTranslatorPB.java
@@ -23,7 +23,7 @@ import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.hdds.client.BlockID;
 import org.apache.hadoop.hdds.scm.ScmInfo;
 import org.apache.hadoop.hdds.scm.container.common.helpers.AllocatedBlock;
-import org.apache.hadoop.hdds.scm.container.common.helpers.Pipeline;
+import org.apache.hadoop.hdds.scm.pipeline.Pipeline;
 import org.apache.hadoop.hdds.scm.protocol.ScmBlockLocationProtocol;
 import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
 import org.apache.hadoop.hdds.protocol.proto.ScmBlockLocationProtocolProtos
@@ -103,7 +103,7 @@ public final class ScmBlockLocationProtocolClientSideTranslatorPB
     }
     AllocatedBlock.Builder builder = new AllocatedBlock.Builder()
         .setBlockID(BlockID.getFromProtobuf(response.getBlockID()))
-        .setPipeline(Pipeline.getFromProtoBuf(response.getPipeline()))
+        .setPipeline(Pipeline.getFromProtobuf(response.getPipeline()))
         .setShouldCreateContainer(response.getCreateContainer());
     return builder.build();
   }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/dce4ebe8/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/protocolPB/StorageContainerLocationProtocolClientSideTranslatorPB.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/protocolPB/StorageContainerLocationProtocolClientSideTranslatorPB.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/protocolPB/StorageContainerLocationProtocolClientSideTranslatorPB.java
index 8e723e6..d19efc1 100644
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/protocolPB/StorageContainerLocationProtocolClientSideTranslatorPB.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/protocolPB/StorageContainerLocationProtocolClientSideTranslatorPB.java
@@ -29,7 +29,7 @@ import org.apache.hadoop.hdds.protocol.proto.StorageContainerLocationProtocolPro
 import org.apache.hadoop.hdds.scm.ScmInfo;
 import org.apache.hadoop.hdds.scm.container.common.helpers.ContainerWithPipeline;
 import org.apache.hadoop.hdds.scm.container.ContainerInfo;
-import org.apache.hadoop.hdds.scm.container.common.helpers.Pipeline;
+import org.apache.hadoop.hdds.scm.pipeline.Pipeline;
 import org.apache.hadoop.hdds.scm.protocol.StorageContainerLocationProtocol;
 import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
 import org.apache.hadoop.hdds.protocol.proto
@@ -292,7 +292,7 @@ public final class StorageContainerLocationProtocolClientSideTranslatorPB
           PipelineResponseProto.Error.success) {
         Preconditions.checkState(response.hasPipeline(), "With success, " +
             "must come a pipeline");
-        return Pipeline.getFromProtoBuf(response.getPipeline());
+        return Pipeline.getFromProtobuf(response.getPipeline());
       } else {
         String errorMessage = String.format("create replication pipeline " +
                 "failed. code : %s Message: %s", response.getErrorCode(),

http://git-wip-us.apache.org/repos/asf/hadoop/blob/dce4ebe8/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/storage/ContainerProtocolCalls.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/storage/ContainerProtocolCalls.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/storage/ContainerProtocolCalls.java
index 9bf0241..df1467b 100644
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/storage/ContainerProtocolCalls.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/storage/ContainerProtocolCalls.java
@@ -92,7 +92,7 @@ public final class ContainerProtocolCalls  {
         .newBuilder()
         .setBlockID(datanodeBlockID)
         .setBlockCommitSequenceId(blockCommitSequenceId);
-    String id = xceiverClient.getPipeline().getLeader().getUuidString();
+    String id = xceiverClient.getPipeline().getFirstNode().getUuidString();
 
     ContainerCommandRequestProto request = ContainerCommandRequestProto
         .newBuilder()
@@ -125,7 +125,7 @@ public final class ContainerProtocolCalls  {
         getBlockLengthRequestBuilder =
         ContainerProtos.GetCommittedBlockLengthRequestProto.newBuilder().
             setBlockID(blockID.getDatanodeBlockIDProtobuf());
-    String id = xceiverClient.getPipeline().getLeader().getUuidString();
+    String id = xceiverClient.getPipeline().getFirstNode().getUuidString();
     ContainerCommandRequestProto request =
         ContainerCommandRequestProto.newBuilder()
             .setCmdType(Type.GetCommittedBlockLength)
@@ -152,7 +152,7 @@ public final class ContainerProtocolCalls  {
       String traceID) throws IOException {
     PutBlockRequestProto.Builder createBlockRequest =
         PutBlockRequestProto.newBuilder().setBlockData(containerBlockData);
-    String id = xceiverClient.getPipeline().getLeader().getUuidString();
+    String id = xceiverClient.getPipeline().getFirstNode().getUuidString();
     ContainerCommandRequestProto request =
         ContainerCommandRequestProto.newBuilder().setCmdType(Type.PutBlock)
             .setContainerID(containerBlockData.getBlockID().getContainerID())
@@ -179,7 +179,7 @@ public final class ContainerProtocolCalls  {
         .newBuilder()
         .setBlockID(blockID.getDatanodeBlockIDProtobuf())
         .setChunkData(chunk);
-    String id = xceiverClient.getPipeline().getLeader().getUuidString();
+    String id = xceiverClient.getPipeline().getFirstNode().getUuidString();
     ContainerCommandRequestProto request = ContainerCommandRequestProto
         .newBuilder()
         .setCmdType(Type.ReadChunk)
@@ -211,7 +211,7 @@ public final class ContainerProtocolCalls  {
         .setBlockID(blockID.getDatanodeBlockIDProtobuf())
         .setChunkData(chunk)
         .setData(data);
-    String id = xceiverClient.getPipeline().getLeader().getUuidString();
+    String id = xceiverClient.getPipeline().getFirstNode().getUuidString();
     ContainerCommandRequestProto request = ContainerCommandRequestProto
         .newBuilder()
         .setCmdType(Type.WriteChunk)
@@ -260,7 +260,7 @@ public final class ContainerProtocolCalls  {
             .setBlock(createBlockRequest).setData(ByteString.copyFrom(data))
             .build();
 
-    String id = client.getPipeline().getLeader().getUuidString();
+    String id = client.getPipeline().getFirstNode().getUuidString();
     ContainerCommandRequestProto request =
         ContainerCommandRequestProto.newBuilder()
             .setCmdType(Type.PutSmallFile)
@@ -288,7 +288,7 @@ public final class ContainerProtocolCalls  {
     createRequest.setContainerType(ContainerProtos.ContainerType
         .KeyValueContainer);
 
-    String id = client.getPipeline().getLeader().getUuidString();
+    String id = client.getPipeline().getFirstNode().getUuidString();
     ContainerCommandRequestProto.Builder request =
         ContainerCommandRequestProto.newBuilder();
     request.setCmdType(ContainerProtos.Type.CreateContainer);
@@ -314,7 +314,7 @@ public final class ContainerProtocolCalls  {
     ContainerProtos.DeleteContainerRequestProto.Builder deleteRequest =
         ContainerProtos.DeleteContainerRequestProto.newBuilder();
     deleteRequest.setForceDelete(force);
-    String id = client.getPipeline().getLeader().getUuidString();
+    String id = client.getPipeline().getFirstNode().getUuidString();
 
     ContainerCommandRequestProto.Builder request =
         ContainerCommandRequestProto.newBuilder();
@@ -338,7 +338,7 @@ public final class ContainerProtocolCalls  {
    */
   public static void closeContainer(XceiverClientSpi client,
       long containerID, String traceID) throws IOException {
-    String id = client.getPipeline().getLeader().getUuidString();
+    String id = client.getPipeline().getFirstNode().getUuidString();
 
     ContainerCommandRequestProto.Builder request =
         ContainerCommandRequestProto.newBuilder();
@@ -362,7 +362,7 @@ public final class ContainerProtocolCalls  {
   public static ReadContainerResponseProto readContainer(
       XceiverClientSpi client, long containerID,
       String traceID) throws IOException {
-    String id = client.getPipeline().getLeader().getUuidString();
+    String id = client.getPipeline().getFirstNode().getUuidString();
 
     ContainerCommandRequestProto.Builder request =
         ContainerCommandRequestProto.newBuilder();
@@ -396,7 +396,7 @@ public final class ContainerProtocolCalls  {
         GetSmallFileRequestProto
             .newBuilder().setBlock(getBlock)
             .build();
-    String id = client.getPipeline().getLeader().getUuidString();
+    String id = client.getPipeline().getFirstNode().getUuidString();
 
     ContainerCommandRequestProto request = ContainerCommandRequestProto
         .newBuilder()

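The pattern repeated across these calls can be read as one small helper; a sketch (DatanodeIdSketch is illustrative only):

import java.io.IOException;

import org.apache.hadoop.hdds.scm.XceiverClientSpi;

public final class DatanodeIdSketch {
  // The datanode UUID attached to container commands now comes from the
  // first node of the pipeline instead of a designated leader.
  static String targetDatanodeId(XceiverClientSpi client) throws IOException {
    // getFirstNode() throws IOException if the pipeline has no members.
    return client.getPipeline().getFirstNode().getUuidString();
  }
}
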
http://git-wip-us.apache.org/repos/asf/hadoop/blob/dce4ebe8/hadoop-hdds/common/src/main/java/org/apache/ratis/RatisHelper.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/common/src/main/java/org/apache/ratis/RatisHelper.java b/hadoop-hdds/common/src/main/java/org/apache/ratis/RatisHelper.java
index 2dbe2e6..1ff7695 100644
--- a/hadoop-hdds/common/src/main/java/org/apache/ratis/RatisHelper.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/ratis/RatisHelper.java
@@ -19,7 +19,7 @@
 package org.apache.ratis;
 
 import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hdds.scm.container.common.helpers.Pipeline;
+import org.apache.hadoop.hdds.scm.pipeline.Pipeline;
 import org.apache.hadoop.hdds.protocol.DatanodeDetails;
 import org.apache.hadoop.ozone.OzoneConfigKeys;
 import org.apache.ratis.client.RaftClient;
@@ -40,6 +40,7 @@ import org.apache.ratis.util.TimeDuration;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
+import java.io.IOException;
 import java.util.ArrayList;
 import java.util.Arrays;
 import java.util.Collection;
@@ -88,7 +89,7 @@ public interface RatisHelper {
   }
 
   static List<RaftPeer> toRaftPeers(Pipeline pipeline) {
-    return toRaftPeers(pipeline.getMachines());
+    return toRaftPeers(pipeline.getNodes());
   }
 
   static <E extends DatanodeDetails> List<RaftPeer> toRaftPeers(
@@ -125,15 +126,15 @@ public interface RatisHelper {
   }
 
   static RaftGroup newRaftGroup(Pipeline pipeline) {
-    return RaftGroup.valueOf(pipeline.getId().getRaftGroupID(),
+    return RaftGroup.valueOf(RaftGroupId.valueOf(pipeline.getId().getId()),
         toRaftPeers(pipeline));
   }
 
   static RaftClient newRaftClient(RpcType rpcType, Pipeline pipeline,
-      RetryPolicy retryPolicy) {
-    return newRaftClient(rpcType, toRaftPeerId(pipeline.getLeader()),
-        newRaftGroup(pipeline.getId().getRaftGroupID(), pipeline.getMachines()),
-        retryPolicy);
+      RetryPolicy retryPolicy) throws IOException {
+    return newRaftClient(rpcType, toRaftPeerId(pipeline.getFirstNode()),
+        newRaftGroup(RaftGroupId.valueOf(pipeline.getId().getId()),
+            pipeline.getNodes()), retryPolicy);
   }
 
   static RaftClient newRaftClient(RpcType rpcType, RaftPeer leader,

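A sketch of the resulting conversion (RaftGroupSketch is illustrative; it reuses the toRaftPeers helper shown above and derives the RaftGroupId on demand, because PipelineID no longer carries one):

import java.util.List;

import org.apache.hadoop.hdds.scm.pipeline.Pipeline;
import org.apache.ratis.RatisHelper;
import org.apache.ratis.protocol.RaftGroup;
import org.apache.ratis.protocol.RaftGroupId;
import org.apache.ratis.protocol.RaftPeer;

public final class RaftGroupSketch {
  // Build the Ratis group for a pipeline from its UUID and member nodes.
  static RaftGroup toRaftGroup(Pipeline pipeline) {
    RaftGroupId groupId = RaftGroupId.valueOf(pipeline.getId().getId());
    List<RaftPeer> peers = RatisHelper.toRaftPeers(pipeline.getNodes());
    return RaftGroup.valueOf(groupId, peers);
  }
}
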
http://git-wip-us.apache.org/repos/asf/hadoop/blob/dce4ebe8/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/XceiverServerGrpc.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/XceiverServerGrpc.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/XceiverServerGrpc.java
index 8ebfe49..ab9f42f 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/XceiverServerGrpc.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/XceiverServerGrpc.java
@@ -27,7 +27,7 @@ import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos
 import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
 import org.apache.hadoop.hdds.protocol.proto
         .StorageContainerDatanodeProtocolProtos.PipelineReport;
-import org.apache.hadoop.hdds.scm.container.common.helpers.PipelineID;
+import org.apache.hadoop.hdds.scm.pipeline.PipelineID;
 import org.apache.hadoop.hdds.scm.container.common.helpers.
     StorageContainerException;
 import org.apache.hadoop.ozone.OzoneConfigKeys;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/dce4ebe8/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/XceiverServerRatis.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/XceiverServerRatis.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/XceiverServerRatis.java
index f0c2845..b5092d9 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/XceiverServerRatis.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/XceiverServerRatis.java
@@ -31,7 +31,7 @@ import org.apache.hadoop.hdds.protocol.proto
 import org.apache.hadoop.hdds.protocol.proto
     .StorageContainerDatanodeProtocolProtos.PipelineAction;
 import org.apache.hadoop.hdds.scm.HddsServerUtil;
-import org.apache.hadoop.hdds.scm.container.common.helpers.PipelineID;
+import org.apache.hadoop.hdds.scm.pipeline.PipelineID;
 import org.apache.hadoop.ozone.OzoneConfigKeys;
 import org.apache.hadoop.ozone.container.common.interfaces.ContainerDispatcher;
 import org.apache.hadoop.ozone.container.common.statemachine.StateContext;
@@ -372,7 +372,7 @@ public final class XceiverServerRatis implements XceiverServerSpi {
       ContainerCommandRequestProto request, HddsProtos.PipelineID pipelineID,
       RaftClientRequest.Type type) {
     return new RaftClientRequest(clientId, server.getId(),
-        PipelineID.getFromProtobuf(pipelineID).getRaftGroupID(),
+        RaftGroupId.valueOf(PipelineID.getFromProtobuf(pipelineID).getId()),
         nextCallId(), 0, Message.valueOf(request.toByteString()), type);
   }
 
@@ -405,7 +405,7 @@ public final class XceiverServerRatis implements XceiverServerSpi {
           + roleInfoProto.getRole());
     }
 
-    PipelineID pipelineID = PipelineID.valueOf(groupId);
+    PipelineID pipelineID = PipelineID.valueOf(groupId.getUuid());
     ClosePipelineInfo.Builder closePipelineInfo =
         ClosePipelineInfo.newBuilder()
             .setPipelineID(pipelineID.getProtobuf())
@@ -429,8 +429,8 @@ public final class XceiverServerRatis implements XceiverServerSpi {
       List<PipelineReport> reports = new ArrayList<>();
       for (RaftGroupId groupId : gids) {
         reports.add(PipelineReport.newBuilder()
-                .setPipelineID(PipelineID.valueOf(groupId).getProtobuf())
-                .build());
+            .setPipelineID(PipelineID.valueOf(groupId.getUuid()).getProtobuf())
+            .build());
       }
       return reports;
     } catch (Exception e) {

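A sketch of the two id conversions the datanode now performs explicitly, since PipelineID and RaftGroupId share only the underlying UUID (IdConversionSketch is illustrative only):

import org.apache.hadoop.hdds.scm.pipeline.PipelineID;
import org.apache.ratis.protocol.RaftGroupId;

public final class IdConversionSketch {
  static RaftGroupId toRaftGroupId(PipelineID pipelineID) {
    // Outgoing: address the Ratis group that backs this pipeline.
    return RaftGroupId.valueOf(pipelineID.getId());
  }

  static PipelineID toPipelineID(RaftGroupId groupId) {
    // Incoming: report the Ratis group back to SCM as a pipeline id.
    return PipelineID.valueOf(groupId.getUuid());
  }
}
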
http://git-wip-us.apache.org/repos/asf/hadoop/blob/dce4ebe8/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/protocol/commands/CloseContainerCommand.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/protocol/commands/CloseContainerCommand.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/protocol/commands/CloseContainerCommand.java
index c2c20a4..7849bcd 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/protocol/commands/CloseContainerCommand.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/protocol/commands/CloseContainerCommand.java
@@ -22,7 +22,7 @@ import org.apache.hadoop.hdds.protocol.proto
     .StorageContainerDatanodeProtocolProtos.SCMCommandProto;
 import org.apache.hadoop.hdds.protocol.proto
     .StorageContainerDatanodeProtocolProtos.CloseContainerCommandProto;
-import org.apache.hadoop.hdds.scm.container.common.helpers.PipelineID;
+import org.apache.hadoop.hdds.scm.pipeline.PipelineID;
 
 /**
  * Asks datanode to close a container.

http://git-wip-us.apache.org/repos/asf/hadoop/blob/dce4ebe8/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/block/BlockManagerImpl.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/block/BlockManagerImpl.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/block/BlockManagerImpl.java
index 049aa3f..681d021 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/block/BlockManagerImpl.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/block/BlockManagerImpl.java
@@ -305,7 +305,7 @@ public class BlockManagerImpl implements EventHandler<Boolean>,
   private AllocatedBlock newBlock(ContainerWithPipeline containerWithPipeline,
       HddsProtos.LifeCycleState state) throws IOException {
     ContainerInfo containerInfo = containerWithPipeline.getContainerInfo();
-    if (containerWithPipeline.getPipeline().getDatanodes().size() == 0) {
+    if (containerWithPipeline.getPipeline().getNodes().size() == 0) {
       LOG.error("Pipeline Machine count is zero.");
       return null;
     }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/dce4ebe8/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/block/DatanodeDeletedBlockTransactions.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/block/DatanodeDeletedBlockTransactions.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/block/DatanodeDeletedBlockTransactions.java
index 5c112a0..70e9b5d 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/block/DatanodeDeletedBlockTransactions.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/block/DatanodeDeletedBlockTransactions.java
@@ -31,7 +31,7 @@ import java.util.UUID;
 import java.util.stream.Collectors;
 
 import org.apache.hadoop.hdds.scm.container.common.helpers.ContainerWithPipeline;
-import org.apache.hadoop.hdds.scm.container.common.helpers.Pipeline;
+import org.apache.hadoop.hdds.scm.pipeline.Pipeline;
 
 /**
  * A wrapper class to hold info about datanode and all deleted block
@@ -74,7 +74,7 @@ public class DatanodeDeletedBlockTransactions {
     }
 
     boolean success = false;
-    for (DatanodeDetails dd : pipeline.getMachines()) {
+    for (DatanodeDetails dd : pipeline.getNodes()) {
       UUID dnID = dd.getUuid();
       if (dnsWithTransactionCommitted == null ||
           !dnsWithTransactionCommitted.contains(dnID)) {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/dce4ebe8/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/block/DeletedBlockLogImpl.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/block/DeletedBlockLogImpl.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/block/DeletedBlockLogImpl.java
index 51790be..2a8a3e3 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/block/DeletedBlockLogImpl.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/block/DeletedBlockLogImpl.java
@@ -31,7 +31,7 @@ import org.apache.hadoop.hdds.scm.command
     .CommandStatusReportHandler.DeleteBlockStatus;
 import org.apache.hadoop.hdds.scm.container.ContainerID;
 import org.apache.hadoop.hdds.scm.container.ContainerManager;
-import org.apache.hadoop.hdds.scm.container.common.helpers.Pipeline;
+import org.apache.hadoop.hdds.scm.pipeline.Pipeline;
 import org.apache.hadoop.hdds.server.events.EventHandler;
 import org.apache.hadoop.hdds.server.events.EventPublisher;
 import org.apache.hadoop.hdfs.DFSUtil;
@@ -261,8 +261,7 @@ public class DeletedBlockLogImpl
           Pipeline pipeline =
               containerManager.getContainerWithPipeline(
                   ContainerID.valueof(containerId)).getPipeline();
-          Collection<DatanodeDetails> containerDnsDetails =
-              pipeline.getDatanodes().values();
+          Collection<DatanodeDetails> containerDnsDetails = pipeline.getNodes();
           // The delete entry can be safely removed from the log if all the
           // corresponding nodes commit the txn. It is required to check that
           // the nodes returned in the pipeline match the replication factor.

http://git-wip-us.apache.org/repos/asf/hadoop/blob/dce4ebe8/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/CloseContainerEventHandler.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/CloseContainerEventHandler.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/CloseContainerEventHandler.java
index 74edbc2..69574a9 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/CloseContainerEventHandler.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/CloseContainerEventHandler.java
@@ -19,7 +19,7 @@ package org.apache.hadoop.hdds.scm.container;
 import java.io.IOException;
 import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
 import org.apache.hadoop.hdds.scm.container.common.helpers.ContainerWithPipeline;
-import org.apache.hadoop.hdds.scm.container.common.helpers.Pipeline;
+import org.apache.hadoop.hdds.scm.pipeline.Pipeline;
 import org.apache.hadoop.hdds.server.events.EventHandler;
 import org.apache.hadoop.hdds.server.events.EventPublisher;
 import org.apache.hadoop.hdds.server.events.IdentifiableEventPayload;
@@ -123,7 +123,7 @@ public class CloseContainerEventHandler implements EventHandler<ContainerID> {
             info.getReplicationType(), info.getPipelineID());
 
     Pipeline pipeline = containerWithPipeline.getPipeline();
-    pipeline.getMachines().stream()
+    pipeline.getNodes().stream()
         .map(node ->
             new CommandForDatanode<>(node.getUuid(), closeContainerCommand))
         .forEach(command -> publisher.fireEvent(DATANODE_COMMAND, command));

http://git-wip-us.apache.org/repos/asf/hadoop/blob/dce4ebe8/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerManager.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerManager.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerManager.java
index 5dba8fd..0a48915 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerManager.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerManager.java
@@ -24,7 +24,7 @@ import org.apache.hadoop.hdds.scm.container.common.helpers.ContainerWithPipeline
 import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
 import org.apache.hadoop.hdds.protocol.proto
     .StorageContainerDatanodeProtocolProtos.ContainerReportsProto;
-import org.apache.hadoop.hdds.scm.pipelines.PipelineSelector;
+import org.apache.hadoop.hdds.scm.pipeline.PipelineNotFoundException;
 
 import java.io.Closeable;
 import java.io.IOException;
@@ -73,7 +73,7 @@ public interface ContainerManager extends Closeable {
    * @throws IOException
    */
   ContainerWithPipeline getContainerWithPipeline(ContainerID containerID)
-      throws ContainerNotFoundException;
+      throws ContainerNotFoundException, PipelineNotFoundException;
 
   /**
    * Returns containers under certain conditions.
@@ -175,6 +175,4 @@ public interface ContainerManager extends Closeable {
   ContainerWithPipeline getMatchingContainerWithPipeline(long size,
       String owner, ReplicationType type, ReplicationFactor factor,
       LifeCycleState state) throws IOException;
-
-  PipelineSelector getPipelineSelector();
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/dce4ebe8/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerStateManager.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerStateManager.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerStateManager.java
index 42b39f9..87505c3 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerStateManager.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerStateManager.java
@@ -22,11 +22,11 @@ import com.google.common.base.Preconditions;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.conf.StorageUnit;
 import org.apache.hadoop.hdds.scm.ScmConfigKeys;
-import org.apache.hadoop.hdds.scm.container.common.helpers.Pipeline;
+import org.apache.hadoop.hdds.scm.pipeline.Pipeline;
 import org.apache.hadoop.hdds.scm.container.states.ContainerState;
 import org.apache.hadoop.hdds.scm.container.states.ContainerStateMap;
 import org.apache.hadoop.hdds.scm.exceptions.SCMException;
-import org.apache.hadoop.hdds.scm.pipelines.PipelineSelector;
+import org.apache.hadoop.hdds.scm.pipeline.PipelineManager;
 import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
 import org.apache.hadoop.hdds.protocol.proto.HddsProtos.LifeCycleEvent;
 import org.apache.hadoop.hdds.protocol.proto.HddsProtos.LifeCycleState;
@@ -42,6 +42,7 @@ import org.slf4j.LoggerFactory;
 
 import java.io.IOException;
 import java.util.HashSet;
+import java.util.List;
 import java.util.Map;
 import java.util.NavigableSet;
 import java.util.Set;
@@ -232,19 +233,28 @@ public class ContainerStateManager {
   /**
    * Allocates a new container based on the type, replication etc.
    *
-   * @param selector -- Pipeline selector class.
+   * @param pipelineManager -- Pipeline Manager class.
    * @param type -- Replication type.
    * @param replicationFactor - Replication replicationFactor.
    * @return ContainerWithPipeline
    * @throws IOException  on Failure.
    */
-  ContainerInfo allocateContainer(final PipelineSelector selector,
+  ContainerInfo allocateContainer(final PipelineManager pipelineManager,
       final HddsProtos.ReplicationType type,
       final HddsProtos.ReplicationFactor replicationFactor, final String owner)
       throws IOException {
 
-    final Pipeline pipeline = selector.getReplicationPipeline(type,
-        replicationFactor);
+    Pipeline pipeline;
+    try {
+      pipeline = pipelineManager.createPipeline(type, replicationFactor);
+    } catch (IOException e) {
+      final List<Pipeline> pipelines =
+          pipelineManager.getPipelines(type, replicationFactor);
+      if (pipelines.isEmpty()) {
+        throw new IOException("Could not allocate container");
+      }
+      pipeline = pipelines.get((int) containerCount.get() % pipelines.size());
+    }
 
     Preconditions.checkNotNull(pipeline, "Pipeline type=%s/"
         + "replication=%s couldn't be found for the new container. "
@@ -263,7 +273,8 @@ public class ContainerStateManager {
         .setReplicationFactor(replicationFactor)
         .setReplicationType(pipeline.getType())
         .build();
-    selector.addContainerToPipeline(pipeline.getId(), containerID);
+    pipelineManager.addContainerToPipeline(pipeline.getId(),
+        ContainerID.valueof(containerID));
     Preconditions.checkNotNull(containerInfo);
     containers.addContainer(containerInfo);
     LOG.trace("New container allocated: {}", containerInfo);

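A sketch of the new allocation flow in isolation (AllocationSketch and its index parameter are illustrative; the real code above uses the containerCount counter for the round-robin choice):

import java.io.IOException;
import java.util.List;

import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
import org.apache.hadoop.hdds.scm.pipeline.Pipeline;
import org.apache.hadoop.hdds.scm.pipeline.PipelineManager;

public final class AllocationSketch {
  // Prefer a freshly created pipeline; if creation fails, fall back to an
  // existing pipeline of the same type and replication factor.
  static Pipeline choosePipeline(PipelineManager pipelineManager,
      HddsProtos.ReplicationType type, HddsProtos.ReplicationFactor factor,
      long index) throws IOException {
    try {
      return pipelineManager.createPipeline(type, factor);
    } catch (IOException e) {
      List<Pipeline> pipelines = pipelineManager.getPipelines(type, factor);
      if (pipelines.isEmpty()) {
        throw new IOException("Could not allocate container", e);
      }
      return pipelines.get((int) (index % pipelines.size()));
    }
  }
}
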
http://git-wip-us.apache.org/repos/asf/hadoop/blob/dce4ebe8/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/SCMContainerManager.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/SCMContainerManager.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/SCMContainerManager.java
index 96ad731..1666b7c 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/SCMContainerManager.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/SCMContainerManager.java
@@ -26,13 +26,13 @@ import org.apache.hadoop.hdds.protocol.proto.HddsProtos.LifeCycleEvent;
 import org.apache.hadoop.hdds.protocol.proto.HddsProtos.LifeCycleState;
 import org.apache.hadoop.hdds.scm.block.PendingDeleteStatusList;
 import org.apache.hadoop.hdds.scm.container.common.helpers.ContainerWithPipeline;
-import org.apache.hadoop.hdds.scm.container.common.helpers.Pipeline;
+import org.apache.hadoop.hdds.scm.pipeline.Pipeline;
 import org.apache.hadoop.hdds.scm.ScmConfigKeys;
-import org.apache.hadoop.hdds.scm.container.common.helpers.PipelineID;
 import org.apache.hadoop.hdds.scm.events.SCMEvents;
 import org.apache.hadoop.hdds.scm.exceptions.SCMException;
 import org.apache.hadoop.hdds.scm.node.NodeManager;
-import org.apache.hadoop.hdds.scm.pipelines.PipelineSelector;
+import org.apache.hadoop.hdds.scm.pipeline.PipelineManager;
+import org.apache.hadoop.hdds.scm.pipeline.PipelineNotFoundException;
 import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
 import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor;
 import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationType;
@@ -86,7 +86,7 @@ public class SCMContainerManager implements ContainerManager {
 
   private final Lock lock;
   private final MetadataStore containerStore;
-  private final PipelineSelector pipelineSelector;
+  private final PipelineManager pipelineManager;
   private final ContainerStateManager containerStateManager;
   private final LeaseManager<ContainerInfo> containerLeaseManager;
   private final EventPublisher eventPublisher;
@@ -102,12 +102,13 @@ public class SCMContainerManager implements ContainerManager {
    * passed to LevelDB and this memory is allocated in Native code space.
    * CacheSize is specified
    * in MB.
+   * @param pipelineManager - PipelineManager
    * @throws IOException on Failure.
    */
   @SuppressWarnings("unchecked")
   public SCMContainerManager(final Configuration conf,
-      final NodeManager nodeManager, final EventPublisher eventPublisher)
-      throws IOException {
+      final NodeManager nodeManager, PipelineManager pipelineManager,
+      final EventPublisher eventPublisher) throws IOException {
 
     final File metaDir = getOzoneMetaDirPath(conf);
     final File containerDBPath = new File(metaDir, SCM_CONTAINER_DB);
@@ -123,8 +124,7 @@ public class SCMContainerManager implements ContainerManager {
     this.lock = new ReentrantLock();
     this.size = (long) conf.getStorageSize(OZONE_SCM_CONTAINER_SIZE,
         OZONE_SCM_CONTAINER_SIZE_DEFAULT, StorageUnit.BYTES);
-    this.pipelineSelector = new PipelineSelector(nodeManager,
-            conf, eventPublisher, cacheSize);
+    this.pipelineManager = pipelineManager;
     this.containerStateManager = new ContainerStateManager(conf);
     this.eventPublisher = eventPublisher;
 
@@ -147,8 +147,10 @@ public class SCMContainerManager implements ContainerManager {
           HddsProtos.SCMContainerInfo.PARSER.parseFrom(entry.getValue()));
       Preconditions.checkNotNull(container);
       containerStateManager.loadContainer(container);
-      pipelineSelector.addContainerToPipeline(
-          container.getPipelineID(), container.getContainerID());
+      if (container.isOpen()) {
+        pipelineManager.addContainerToPipeline(container.getPipelineID(),
+            ContainerID.valueof(container.getContainerID()));
+      }
     }
   }
 
@@ -214,28 +216,23 @@ public class SCMContainerManager implements ContainerManager {
    */
   @Override
   public ContainerWithPipeline getContainerWithPipeline(ContainerID containerID)
-      throws ContainerNotFoundException {
+      throws ContainerNotFoundException, PipelineNotFoundException {
     lock.lock();
     try {
       final ContainerInfo contInfo = getContainer(containerID);
       Pipeline pipeline;
-      String leaderId = "";
       if (contInfo.isOpen()) {
         // If pipeline with given pipeline Id already exist return it
-        pipeline = pipelineSelector.getPipeline(contInfo.getPipelineID());
+        pipeline = pipelineManager.getPipeline(contInfo.getPipelineID());
       } else {
         // For close containers create pipeline from datanodes with replicas
         Set<ContainerReplica> dnWithReplicas = containerStateManager
             .getContainerReplicas(contInfo.containerID());
-        if (!dnWithReplicas.isEmpty()) {
-          leaderId = dnWithReplicas.iterator().next()
-              .getDatanodeDetails().getUuidString();
-        }
-        pipeline = new Pipeline(leaderId, contInfo.getState(),
-            ReplicationType.STAND_ALONE, contInfo.getReplicationFactor(),
-            PipelineID.randomId());
-        dnWithReplicas.stream().map(ContainerReplica::getDatanodeDetails).
-            forEach(pipeline::addMember);
+        List<DatanodeDetails> dns =
+            dnWithReplicas.stream().map(ContainerReplica::getDatanodeDetails)
+                .collect(Collectors.toList());
+        pipeline = pipelineManager.createPipeline(ReplicationType.STAND_ALONE,
+            contInfo.getReplicationFactor(), dns);
       }
       return new ContainerWithPipeline(contInfo, pipeline);
     } finally {
@@ -290,8 +287,8 @@ public class SCMContainerManager implements ContainerManager {
     lock.lock();
     try {
       final ContainerInfo containerInfo; containerInfo = containerStateManager
-          .allocateContainer(pipelineSelector, type, replicationFactor, owner);
-      final Pipeline pipeline = pipelineSelector.getPipeline(
+          .allocateContainer(pipelineManager, type, replicationFactor, owner);
+      final Pipeline pipeline = pipelineManager.getPipeline(
           containerInfo.getPipelineID());
 
       try {
@@ -360,8 +357,8 @@ public class SCMContainerManager implements ContainerManager {
       ContainerInfo updatedContainer =
           updateContainerStateInternal(containerID, event);
       if (!updatedContainer.isOpen()) {
-        pipelineSelector.removeContainerFromPipeline(
-            updatedContainer.getPipelineID(), containerID.getId());
+        pipelineManager.removeContainerFromPipeline(
+            updatedContainer.getPipelineID(), containerID);
       }
       final byte[] dbKey = Longs.toByteArray(containerID.getId());
       containerStore.put(dbKey, updatedContainer.getProtobuf().toByteArray());
@@ -485,7 +482,7 @@ public class SCMContainerManager implements ContainerManager {
     if (containerInfo == null) {
       return null;
     }
-    Pipeline pipeline = pipelineSelector
+    Pipeline pipeline = pipelineManager
         .getPipeline(containerInfo.getPipelineID());
     return new ContainerWithPipeline(containerInfo, pipeline);
   }
@@ -647,13 +644,5 @@ public class SCMContainerManager implements ContainerManager {
     if (containerStore != null) {
       containerStore.close();
     }
-
-    if (pipelineSelector != null) {
-      pipelineSelector.shutdown();
-    }
-  }
-
-  public PipelineSelector getPipelineSelector() {
-    return pipelineSelector;
   }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/dce4ebe8/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/events/SCMEvents.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/events/SCMEvents.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/events/SCMEvents.java
index 77b8713..30a7c34 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/events/SCMEvents.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/events/SCMEvents.java
@@ -29,7 +29,6 @@ import org.apache.hadoop.hdds.scm.command.CommandStatusReportHandler
 import org.apache.hadoop.hdds.scm.container.CloseContainerEventHandler
         .CloseContainerRetryableReq;
 import org.apache.hadoop.hdds.scm.container.ContainerID;
-import org.apache.hadoop.hdds.scm.container.common.helpers.PipelineID;
 import org.apache.hadoop.hdds.scm.server.SCMDatanodeHeartbeatDispatcher
         .PipelineReportFromDatanode;
 import org.apache.hadoop.hdds.scm.server.SCMDatanodeHeartbeatDispatcher
@@ -103,14 +102,6 @@ public final class SCMEvents {
       "Pipeline_Actions");
 
   /**
-   * Pipeline close event are triggered to close pipeline because of failure,
-   * stale node, decommissioning etc.
-   */
-  public static final TypedEvent<PipelineID>
-      PIPELINE_CLOSE = new TypedEvent<>(PipelineID.class,
-      "Pipeline_Close");
-
-  /**
   * A Command status report will be sent by datanodes. This report is received
    * by SCMDatanodeHeartbeatDispatcher and CommandReport event is generated.
    */

http://git-wip-us.apache.org/repos/asf/hadoop/blob/dce4ebe8/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/NodeManager.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/NodeManager.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/NodeManager.java
index 5f6a2e4..d55ff98 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/NodeManager.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/NodeManager.java
@@ -19,8 +19,8 @@ package org.apache.hadoop.hdds.scm.node;
 
 import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.NodeReportProto;
 import org.apache.hadoop.hdds.scm.container.ContainerID;
-import org.apache.hadoop.hdds.scm.container.common.helpers.Pipeline;
-import org.apache.hadoop.hdds.scm.container.common.helpers.PipelineID;
+import org.apache.hadoop.hdds.scm.pipeline.Pipeline;
+import org.apache.hadoop.hdds.scm.pipeline.PipelineID;
 import org.apache.hadoop.hdds.scm.container.placement.metrics.SCMNodeMetric;
 import org.apache.hadoop.hdds.scm.container.placement.metrics.SCMNodeStat;
 import org.apache.hadoop.hdds.scm.exceptions.SCMException;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/dce4ebe8/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/NodeStateManager.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/NodeStateManager.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/NodeStateManager.java
index 88f984b..588756c 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/NodeStateManager.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/NodeStateManager.java
@@ -25,8 +25,8 @@ import org.apache.hadoop.hdds.protocol.DatanodeDetails;
 import org.apache.hadoop.hdds.protocol.proto.HddsProtos.NodeState;
 import org.apache.hadoop.hdds.scm.HddsServerUtil;
 import org.apache.hadoop.hdds.scm.container.ContainerID;
-import org.apache.hadoop.hdds.scm.container.common.helpers.Pipeline;
-import org.apache.hadoop.hdds.scm.container.common.helpers.PipelineID;
+import org.apache.hadoop.hdds.scm.pipeline.Pipeline;
+import org.apache.hadoop.hdds.scm.pipeline.PipelineID;
 import org.apache.hadoop.hdds.scm.container.placement.metrics.SCMNodeStat;
 import org.apache.hadoop.hdds.scm.events.SCMEvents;
 import org.apache.hadoop.hdds.scm.exceptions.SCMException;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/dce4ebe8/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/SCMNodeManager.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/SCMNodeManager.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/SCMNodeManager.java
index acec6aa..35c22f3 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/SCMNodeManager.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/SCMNodeManager.java
@@ -22,8 +22,8 @@ import com.google.common.base.Preconditions;
 import org.apache.hadoop.hdds.protocol.proto
         .StorageContainerDatanodeProtocolProtos.PipelineReportsProto;
 import org.apache.hadoop.hdds.scm.container.ContainerID;
-import org.apache.hadoop.hdds.scm.container.common.helpers.Pipeline;
-import org.apache.hadoop.hdds.scm.container.common.helpers.PipelineID;
+import org.apache.hadoop.hdds.scm.pipeline.Pipeline;
+import org.apache.hadoop.hdds.scm.pipeline.PipelineID;
 import org.apache.hadoop.hdds.scm.exceptions.SCMException;
 import org.apache.hadoop.hdds.scm.node.states.NodeAlreadyExistsException;
 import org.apache.hadoop.hdds.scm.node.states.NodeNotFoundException;




[17/50] [abbrv] hadoop git commit: HDDS-744. Fix ASF license warning in PipelineNotFoundException class. Contributed by Lokesh Jain.

Posted by su...@apache.org.
HDDS-744. Fix ASF license warning in PipelineNotFoundException class.
Contributed by Lokesh Jain.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/2fa01f82
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/2fa01f82
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/2fa01f82

Branch: refs/heads/HDFS-12943
Commit: 2fa01f823c631c9a8617aa32937e1a0ba8fc5317
Parents: d07e873
Author: Nanda kumar <na...@apache.org>
Authored: Sat Oct 27 16:51:08 2018 +0530
Committer: Nanda kumar <na...@apache.org>
Committed: Sat Oct 27 16:51:08 2018 +0530

----------------------------------------------------------------------
 .../scm/pipeline/PipelineNotFoundException.java   | 18 ++++++++++++++++++
 1 file changed, 18 insertions(+)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/2fa01f82/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/pipeline/PipelineNotFoundException.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/pipeline/PipelineNotFoundException.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/pipeline/PipelineNotFoundException.java
index 4568379..2a89aab 100644
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/pipeline/PipelineNotFoundException.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/pipeline/PipelineNotFoundException.java
@@ -1,3 +1,21 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ *  with the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ *  Unless required by applicable law or agreed to in writing, software
+ *  distributed under the License is distributed on an "AS IS" BASIS,
+ *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ *  See the License for the specific language governing permissions and
+ *  limitations under the License.
+ */
+
 package org.apache.hadoop.hdds.scm.pipeline;
 
 import java.io.IOException;




[18/50] [abbrv] hadoop git commit: HADOOP-9567. Provide auto-renewal for keytab based logins. Contributed by Hrishikesh Gadre, Gary Helmling and Harsh J.

Posted by su...@apache.org.
HADOOP-9567. Provide auto-renewal for keytab based logins. Contributed by Hrishikesh Gadre, Gary Helmling and Harsh J.

Signed-off-by: Wei-Chiu Chuang <we...@apache.org>


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/bfb9adc2
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/bfb9adc2
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/bfb9adc2

Branch: refs/heads/HDFS-12943
Commit: bfb9adc2b9e6e97f1036bcf8ea4cee6893a782b2
Parents: 2fa01f8
Author: Hrishikesh Gadre <hg...@apache.org>
Authored: Sat Oct 27 08:58:10 2018 -0700
Committer: Wei-Chiu Chuang <we...@apache.org>
Committed: Sat Oct 27 08:59:47 2018 -0700

----------------------------------------------------------------------
 .../fs/CommonConfigurationKeysPublic.java       |  12 ++
 .../hadoop/security/UserGroupInformation.java   | 192 ++++++++++++++++---
 .../src/main/resources/core-default.xml         |   8 +
 .../hadoop/security/TestUGILoginFromKeytab.java |  56 ++++++
 .../security/TestUserGroupInformation.java      |   2 +-
 5 files changed, 245 insertions(+), 25 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/bfb9adc2/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CommonConfigurationKeysPublic.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CommonConfigurationKeysPublic.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CommonConfigurationKeysPublic.java
index 8523423..7410c39 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CommonConfigurationKeysPublic.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CommonConfigurationKeysPublic.java
@@ -636,6 +636,18 @@ public class CommonConfigurationKeysPublic {
   /** Default value for HADOOP_KERBEROS_MIN_SECONDS_BEFORE_RELOGIN */
   public static final int HADOOP_KERBEROS_MIN_SECONDS_BEFORE_RELOGIN_DEFAULT =
           60;
+
+  /**
+   * @see
+   * <a href="{@docRoot}/../hadoop-project-dist/hadoop-common/core-default.xml">
+   * core-default.xml</a>
+   */
+  public static final String HADOOP_KERBEROS_KEYTAB_LOGIN_AUTORENEWAL_ENABLED =
+          "hadoop.kerberos.keytab.login.autorenewal.enabled";
+  /** Default value for HADOOP_KERBEROS_KEYTAB_LOGIN_AUTORENEWAL_ENABLED. */
+  public static final boolean
+          HADOOP_KERBEROS_KEYTAB_LOGIN_AUTORENEWAL_ENABLED_DEFAULT = false;
+
   /**
    * @see
    * <a href="{@docRoot}/../hadoop-project-dist/hadoop-common/core-default.xml">

http://git-wip-us.apache.org/repos/asf/hadoop/blob/bfb9adc2/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/UserGroupInformation.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/UserGroupInformation.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/UserGroupInformation.java
index 915d6df..60a85c1 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/UserGroupInformation.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/UserGroupInformation.java
@@ -20,6 +20,8 @@ package org.apache.hadoop.security;
 import static org.apache.hadoop.fs.CommonConfigurationKeys.HADOOP_USER_GROUP_METRICS_PERCENTILES_INTERVALS;
 import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.HADOOP_KERBEROS_MIN_SECONDS_BEFORE_RELOGIN;
 import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.HADOOP_KERBEROS_MIN_SECONDS_BEFORE_RELOGIN_DEFAULT;
+import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.HADOOP_KERBEROS_KEYTAB_LOGIN_AUTORENEWAL_ENABLED;
+import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.HADOOP_KERBEROS_KEYTAB_LOGIN_AUTORENEWAL_ENABLED_DEFAULT;
 import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.HADOOP_TOKEN_FILES;
 import static org.apache.hadoop.security.UGIExceptionMessages.*;
 import static org.apache.hadoop.util.PlatformName.IBM_JAVA;
@@ -46,7 +48,11 @@ import java.util.HashMap;
 import java.util.Iterator;
 import java.util.List;
 import java.util.Map;
+import java.util.Optional;
 import java.util.Set;
+import java.util.concurrent.ExecutorService;
+import java.util.concurrent.Executors;
+import java.util.concurrent.ThreadFactory;
 import java.util.concurrent.TimeUnit;
 import java.util.concurrent.atomic.AtomicBoolean;
 import java.util.concurrent.atomic.AtomicReference;
@@ -280,6 +286,11 @@ public class UserGroupInformation {
   private static Groups groups;
   /** Min time (in seconds) before relogin for Kerberos */
   private static long kerberosMinSecondsBeforeRelogin;
+  /** Boolean flag to enable auto-renewal for keytab based login. */
+  private static boolean kerberosKeyTabLoginRenewalEnabled;
+  /** A reference to Kerberos login auto renewal thread. */
+  private static Optional<ExecutorService> kerberosLoginRenewalExecutor =
+          Optional.empty();
   /** The configuration to use */
 
   private static Configuration conf;
@@ -332,6 +343,11 @@ public class UserGroupInformation {
                 HADOOP_KERBEROS_MIN_SECONDS_BEFORE_RELOGIN + " of " +
                 conf.get(HADOOP_KERBEROS_MIN_SECONDS_BEFORE_RELOGIN));
     }
+
+    kerberosKeyTabLoginRenewalEnabled = conf.getBoolean(
+            HADOOP_KERBEROS_KEYTAB_LOGIN_AUTORENEWAL_ENABLED,
+            HADOOP_KERBEROS_KEYTAB_LOGIN_AUTORENEWAL_ENABLED_DEFAULT);
+
     // If we haven't set up testing groups, use the configuration to find it
     if (!(groups instanceof TestingGroups)) {
       groups = Groups.getUserToGroupsMappingService(conf);
@@ -372,6 +388,8 @@ public class UserGroupInformation {
     conf = null;
     groups = null;
     kerberosMinSecondsBeforeRelogin = 0;
+    kerberosKeyTabLoginRenewalEnabled = false;
+    kerberosLoginRenewalExecutor = Optional.empty();
     setLoginUser(null);
     HadoopKerberosName.setRules(null);
   }
@@ -392,7 +410,23 @@ public class UserGroupInformation {
     ensureInitialized();
     return (authenticationMethod == method);
   }
-  
+
+  @InterfaceAudience.Private
+  @InterfaceStability.Evolving
+  @VisibleForTesting
+  static boolean isKerberosKeyTabLoginRenewalEnabled() {
+    ensureInitialized();
+    return kerberosKeyTabLoginRenewalEnabled;
+  }
+
+  @InterfaceAudience.Private
+  @InterfaceStability.Evolving
+  @VisibleForTesting
+  static Optional<ExecutorService> getKerberosLoginRenewalExecutor() {
+    ensureInitialized();
+    return kerberosLoginRenewalExecutor;
+  }
+
   /**
    * Information about the logged in user.
    */
@@ -838,14 +872,16 @@ public class UserGroupInformation {
     return hasKerberosCredentials() && isHadoopLogin();
   }
 
-  @InterfaceAudience.Private
-  @InterfaceStability.Unstable
-  @VisibleForTesting
   /**
-   * Spawn a thread to do periodic renewals of kerberos credentials from
-   * a ticket cache.  NEVER directly call this method.
+   * Spawn a thread to do periodic renewals of kerberos credentials. NEVER
+   * directly call this method. This method should only be used for ticket cache
+   * based kerberos credentials.
+   *
    * @param force - used by tests to forcibly spawn thread
    */
+  @InterfaceAudience.Private
+  @InterfaceStability.Unstable
+  @VisibleForTesting
   void spawnAutoRenewalThreadForUserCreds(boolean force) {
     if (!force && (!shouldRelogin() || isFromKeytab())) {
       return;
@@ -858,25 +894,71 @@ public class UserGroupInformation {
     }
     String cmd = conf.get("hadoop.kerberos.kinit.command", "kinit");
     long nextRefresh = getRefreshTime(tgt);
-    Thread t =
-        new Thread(new AutoRenewalForUserCredsRunnable(tgt, cmd, nextRefresh));
-    t.setDaemon(true);
-    t.setName("TGT Renewer for " + getUserName());
-    t.start();
+    executeAutoRenewalTask(getUserName(),
+            new TicketCacheRenewalRunnable(tgt, cmd, nextRefresh));
+  }
+
+  /**
+   * Spawn a thread to do periodic renewals of kerberos credentials from a
+   * keytab file.
+   */
+  private void spawnAutoRenewalThreadForKeytab() {
+    if (!shouldRelogin() || isFromTicket()) {
+      return;
+    }
+
+    // spawn thread only if we have kerb credentials
+    KerberosTicket tgt = getTGT();
+    if (tgt == null) {
+      return;
+    }
+    long nextRefresh = getRefreshTime(tgt);
+    executeAutoRenewalTask(getUserName(),
+            new KeytabRenewalRunnable(tgt, nextRefresh));
   }
 
+  /**
+   * Run the given credential renewal task on a single-threaded daemon
+   * executor. NEVER directly call this method.
+   *
+   * @param userName Name of the user for which login needs to be renewed.
+   * @param task  The reference of the login renewal task.
+   */
+  private void executeAutoRenewalTask(final String userName,
+                                      AutoRenewalForUserCredsRunnable task) {
+    kerberosLoginRenewalExecutor = Optional.of(
+            Executors.newSingleThreadExecutor(
+                  new ThreadFactory() {
+                    @Override
+                    public Thread newThread(Runnable r) {
+                      Thread t = new Thread(r);
+                      t.setDaemon(true);
+                      t.setName("TGT Renewer for " + userName);
+                      return t;
+                    }
+                  }
+            ));
+    kerberosLoginRenewalExecutor.get().submit(task);
+  }
+
+  /**
+   * An abstract class which encapsulates the functionality required to
+   * auto renew Kerberos TGT. The concrete implementations of this class
+   * are expected to provide implementation required to perform actual
+   * TGT renewal (see {@code TicketCacheRenewalRunnable} and
+   * {@code KeytabRenewalRunnable}).
+   */
+  @InterfaceAudience.Private
+  @InterfaceStability.Unstable
   @VisibleForTesting
-  class AutoRenewalForUserCredsRunnable implements Runnable {
+  abstract class AutoRenewalForUserCredsRunnable implements Runnable {
     private KerberosTicket tgt;
     private RetryPolicy rp;
-    private String kinitCmd;
     private long nextRefresh;
     private boolean runRenewalLoop = true;
 
-    AutoRenewalForUserCredsRunnable(KerberosTicket tgt, String kinitCmd,
-        long nextRefresh){
+    AutoRenewalForUserCredsRunnable(KerberosTicket tgt, long nextRefresh) {
       this.tgt = tgt;
-      this.kinitCmd = kinitCmd;
       this.nextRefresh = nextRefresh;
       this.rp = null;
     }
@@ -885,6 +967,13 @@ public class UserGroupInformation {
       this.runRenewalLoop = runRenewalLoop;
     }
 
+    /**
+     * This method is used to perform renewal of kerberos login ticket.
+     * The concrete implementations of this class should provide specific
+     * logic required to perform renewal as part of this method.
+     */
+    protected abstract void relogin() throws IOException;
+
     @Override
     public void run() {
       do {
@@ -897,11 +986,7 @@ public class UserGroupInformation {
           if (now < nextRefresh) {
             Thread.sleep(nextRefresh - now);
           }
-          String output = Shell.execCommand(kinitCmd, "-R");
-          if (LOG.isDebugEnabled()) {
-            LOG.debug("Renewed ticket. kinit output: {}", output);
-          }
-          reloginFromTicketCache();
+          relogin();
           tgt = getTGT();
           if (tgt == null) {
             LOG.warn("No TGT after renewal. Aborting renew thread for " +
@@ -972,6 +1057,52 @@ public class UserGroupInformation {
   }
 
   /**
+   * A concrete implementation of {@code AutoRenewalForUserCredsRunnable} class
+   * which performs TGT renewal using kinit command.
+   */
+  @InterfaceAudience.Private
+  @InterfaceStability.Unstable
+  @VisibleForTesting
+  final class TicketCacheRenewalRunnable
+      extends AutoRenewalForUserCredsRunnable {
+    private String kinitCmd;
+
+    TicketCacheRenewalRunnable(KerberosTicket tgt, String kinitCmd,
+        long nextRefresh) {
+      super(tgt, nextRefresh);
+      this.kinitCmd = kinitCmd;
+    }
+
+    @Override
+    public void relogin() throws IOException {
+      String output = Shell.execCommand(kinitCmd, "-R");
+      if (LOG.isDebugEnabled()) {
+        LOG.debug("Renewed ticket. kinit output: {}", output);
+      }
+      reloginFromTicketCache();
+    }
+  }
+
+  /**
+   * A concrete implementation of {@code AutoRenewalForUserCredsRunnable} class
+   * which performs TGT renewal using specified keytab.
+   */
+  @InterfaceAudience.Private
+  @InterfaceStability.Unstable
+  @VisibleForTesting
+  final class KeytabRenewalRunnable extends AutoRenewalForUserCredsRunnable {
+
+    KeytabRenewalRunnable(KerberosTicket tgt, long nextRefresh) {
+      super(tgt, nextRefresh);
+    }
+
+    @Override
+    public void relogin() throws IOException {
+      reloginFromKeytab();
+    }
+  }
+
+  /**
    * Get time for next login retry. This will allow the thread to retry with
    * exponential back-off, until tgt endtime.
    * Last retry is {@link #kerberosMinSecondsBeforeRelogin} before endtime.
@@ -1007,9 +1138,16 @@ public class UserGroupInformation {
     if (!isSecurityEnabled())
       return;
 
-    setLoginUser(loginUserFromKeytabAndReturnUGI(user, path));
-    LOG.info("Login successful for user " + user
-        + " using keytab file " + path);
+    UserGroupInformation u = loginUserFromKeytabAndReturnUGI(user, path);
+    if (isKerberosKeyTabLoginRenewalEnabled()) {
+      u.spawnAutoRenewalThreadForKeytab();
+    }
+
+    setLoginUser(u);
+
+    LOG.info("Login successful for user {} using keytab file {}. Keytab auto" +
+            " renewal enabled : {}",
+            user, path, isKerberosKeyTabLoginRenewalEnabled());
   }
 
   /**
@@ -1027,6 +1165,12 @@ public class UserGroupInformation {
     if (!hasKerberosCredentials()) {
       return;
     }
+
+    // Shutdown the background task performing login renewal.
+    if (getKerberosLoginRenewalExecutor().isPresent()) {
+      getKerberosLoginRenewalExecutor().get().shutdownNow();
+    }
+
     HadoopLoginContext login = getLogin();
     String keytabFile = getKeytab();
     if (login == null || keytabFile == null) {

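The UserGroupInformation changes above split renewal into a template-method hierarchy: AutoRenewalForUserCredsRunnable owns the retry loop, while TicketCacheRenewalRunnable and KeytabRenewalRunnable supply the strategy-specific relogin() step, and the task runs on a single daemon thread created through an executor. A standalone sketch of that shape, with a fixed sleep interval and no retry policy, using placeholder class names rather than the UGI internals:

import java.io.IOException;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;

// Shared renewal loop; subclasses supply the strategy-specific relogin() step.
abstract class RenewalTask implements Runnable {
  private final long intervalMillis;

  RenewalTask(long intervalMillis) {
    this.intervalMillis = intervalMillis;
  }

  protected abstract void relogin() throws IOException;   // template-method hook

  @Override
  public void run() {
    while (!Thread.currentThread().isInterrupted()) {
      try {
        Thread.sleep(intervalMillis);
        relogin();
      } catch (InterruptedException ie) {
        Thread.currentThread().interrupt();                // honours shutdownNow()
      } catch (IOException e) {
        System.err.println("renewal failed, will retry: " + e);
      }
    }
  }
}

// One concrete strategy; a ticket-cache variant would differ only in relogin().
final class KeytabRenewal extends RenewalTask {
  KeytabRenewal(long intervalMillis) {
    super(intervalMillis);
  }

  @Override
  protected void relogin() {
    // in the real code this step would call reloginFromKeytab()
  }
}

final class RenewalDemo {
  public static void main(String[] args) {
    ExecutorService renewer = Executors.newSingleThreadExecutor(r -> {
      Thread t = new Thread(r);
      t.setDaemon(true);                                   // daemon thread, as in the patch
      t.setName("TGT Renewer (sketch)");
      return t;
    });
    renewer.submit(new KeytabRenewal(60_000L));
    // in a long-running service the daemon keeps renewing in the background
  }
}
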
http://git-wip-us.apache.org/repos/asf/hadoop/blob/bfb9adc2/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml b/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml
index ce3a407..b243a9c 100644
--- a/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml
+++ b/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml
@@ -657,6 +657,14 @@
 </property>
 
 <property>
+  <name>hadoop.kerberos.keytab.login.autorenewal.enabled</name>
+  <value>false</value>
+  <description>Used to enable automatic renewal of keytab based kerberos login.
+    By default the automatic renewal is disabled for keytab based kerberos login.
+  </description>
+</property>
+
+<property>
   <name>hadoop.security.auth_to_local</name>
   <value></value>
   <description>Maps kerberos principals to local user names</description>

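Taken together with the keytab login API, the new property is meant to be used roughly like this (a usage sketch, not code from the patch; the principal and keytab path are placeholders):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.security.SecurityUtil;
import org.apache.hadoop.security.UserGroupInformation;

public final class KeytabAutoRenewalExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    // New switch introduced by this patch; it defaults to false.
    conf.setBoolean("hadoop.kerberos.keytab.login.autorenewal.enabled", true);
    SecurityUtil.setAuthenticationMethod(
        UserGroupInformation.AuthenticationMethod.KERBEROS, conf);
    UserGroupInformation.setConfiguration(conf);

    // With the flag on, this login also spawns the background "TGT Renewer" thread.
    UserGroupInformation.loginUserFromKeytab(
        "service/host@EXAMPLE.COM", "/etc/security/keytabs/service.keytab");
    System.out.println("Logged in as "
        + UserGroupInformation.getLoginUser().getUserName());
  }
}
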
http://git-wip-us.apache.org/repos/asf/hadoop/blob/bfb9adc2/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/TestUGILoginFromKeytab.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/TestUGILoginFromKeytab.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/TestUGILoginFromKeytab.java
index 826e4b2..8ede451 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/TestUGILoginFromKeytab.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/TestUGILoginFromKeytab.java
@@ -22,6 +22,7 @@ import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.CommonConfigurationKeys;
 import org.apache.hadoop.minikdc.MiniKdc;
 import org.apache.hadoop.security.UserGroupInformation.AuthenticationMethod;
+import org.apache.hadoop.test.GenericTestUtils;
 import org.junit.After;
 import org.junit.Assert;
 import org.junit.Before;
@@ -31,7 +32,10 @@ import org.junit.rules.TemporaryFolder;
 import org.mockito.Mockito;
 import org.mockito.invocation.InvocationOnMock;
 import org.mockito.stubbing.Answer;
+import org.slf4j.event.Level;
 
+import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.HADOOP_KERBEROS_KEYTAB_LOGIN_AUTORENEWAL_ENABLED;
+import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.HADOOP_KERBEROS_MIN_SECONDS_BEFORE_RELOGIN;
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertFalse;
 import static org.junit.Assert.assertTrue;
@@ -199,6 +203,58 @@ public class TestUGILoginFromKeytab {
     Assert.assertSame(dummyLogin, user.getLogin());
   }
 
+  @Test
+  public void testUGIRefreshFromKeytab() throws Exception {
+    final Configuration conf = new Configuration();
+    conf.setBoolean(HADOOP_KERBEROS_KEYTAB_LOGIN_AUTORENEWAL_ENABLED, true);
+    SecurityUtil.setAuthenticationMethod(
+            UserGroupInformation.AuthenticationMethod.KERBEROS, conf);
+    UserGroupInformation.setConfiguration(conf);
+
+    String principal = "bar";
+    File keytab = new File(workDir, "bar.keytab");
+    kdc.createPrincipal(keytab, principal);
+
+    UserGroupInformation.loginUserFromKeytab(principal, keytab.getPath());
+
+    UserGroupInformation ugi = UserGroupInformation.getLoginUser();
+
+    Assert.assertEquals(UserGroupInformation.AuthenticationMethod.KERBEROS,
+        ugi.getAuthenticationMethod());
+    Assert.assertTrue(ugi.isFromKeytab());
+    Assert.assertTrue(
+            UserGroupInformation.isKerberosKeyTabLoginRenewalEnabled());
+    Assert.assertTrue(
+            UserGroupInformation.getKerberosLoginRenewalExecutor()
+                    .isPresent());
+  }
+
+  @Test
+  public void testUGIRefreshFromKeytabDisabled() throws Exception {
+    GenericTestUtils.setLogLevel(UserGroupInformation.LOG, Level.DEBUG);
+    final Configuration conf = new Configuration();
+    conf.setLong(HADOOP_KERBEROS_MIN_SECONDS_BEFORE_RELOGIN, 1);
+    conf.setBoolean(HADOOP_KERBEROS_KEYTAB_LOGIN_AUTORENEWAL_ENABLED, false);
+    SecurityUtil.setAuthenticationMethod(
+            UserGroupInformation.AuthenticationMethod.KERBEROS, conf);
+    UserGroupInformation.setConfiguration(conf);
+
+    String principal = "bar";
+    File keytab = new File(workDir, "bar.keytab");
+    kdc.createPrincipal(keytab, principal);
+
+    UserGroupInformation.loginUserFromKeytab(principal, keytab.getPath());
+
+    UserGroupInformation ugi = UserGroupInformation.getLoginUser();
+    Assert.assertEquals(UserGroupInformation.AuthenticationMethod.KERBEROS,
+            ugi.getAuthenticationMethod());
+    Assert.assertTrue(ugi.isFromKeytab());
+    Assert.assertFalse(
+            UserGroupInformation.isKerberosKeyTabLoginRenewalEnabled());
+    Assert.assertFalse(
+            UserGroupInformation.getKerberosLoginRenewalExecutor()
+                    .isPresent());
+  }
 
   private static KerberosTicket getTicket(UserGroupInformation ugi) {
     Set<KerberosTicket> tickets =

http://git-wip-us.apache.org/repos/asf/hadoop/blob/bfb9adc2/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/TestUserGroupInformation.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/TestUserGroupInformation.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/TestUserGroupInformation.java
index 011e930..3020f9b 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/TestUserGroupInformation.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/TestUserGroupInformation.java
@@ -1239,7 +1239,7 @@ public class TestUserGroupInformation {
 
     // run AutoRenewalForUserCredsRunnable with this
     UserGroupInformation.AutoRenewalForUserCredsRunnable userCredsRunnable =
-        ugi.new AutoRenewalForUserCredsRunnable(tgt,
+        ugi.new TicketCacheRenewalRunnable(tgt,
             Boolean.toString(Boolean.TRUE), 100);
 
     // Set the runnable to not to run in a loop




[12/50] [abbrv] hadoop git commit: HDDS-694. Plugin new Pipeline management code in SCM. Contributed by Lokesh Jain.

Posted by su...@apache.org.
http://git-wip-us.apache.org/repos/asf/hadoop/blob/dce4ebe8/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/StaleNodeHandler.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/StaleNodeHandler.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/StaleNodeHandler.java
index 48939f1..9df9dff 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/StaleNodeHandler.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/StaleNodeHandler.java
@@ -19,24 +19,44 @@
 package org.apache.hadoop.hdds.scm.node;
 
 import org.apache.hadoop.hdds.protocol.DatanodeDetails;
-import org.apache.hadoop.hdds.scm.pipelines.PipelineSelector;
+import org.apache.hadoop.hdds.scm.pipeline.PipelineID;
+import org.apache.hadoop.hdds.scm.pipeline.PipelineManager;
 import org.apache.hadoop.hdds.server.events.EventHandler;
 import org.apache.hadoop.hdds.server.events.EventPublisher;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.io.IOException;
+import java.util.Set;
 
 /**
  * Handles Stale node event.
  */
 public class StaleNodeHandler implements EventHandler<DatanodeDetails> {
+  private static final Logger LOG =
+      LoggerFactory.getLogger(StaleNodeHandler.class);
 
-  private final PipelineSelector pipelineSelector;
+  private final NodeManager nodeManager;
+  private final PipelineManager pipelineManager;
 
-  public StaleNodeHandler(PipelineSelector pipelineSelector) {
-    this.pipelineSelector = pipelineSelector;
+  public StaleNodeHandler(NodeManager nodeManager,
+      PipelineManager pipelineManager) {
+    this.nodeManager = nodeManager;
+    this.pipelineManager = pipelineManager;
   }
 
   @Override
   public void onMessage(DatanodeDetails datanodeDetails,
                         EventPublisher publisher) {
-    pipelineSelector.handleStaleNode(datanodeDetails);
+    Set<PipelineID> pipelineIds =
+        nodeManager.getPipelineByDnID(datanodeDetails.getUuid());
+    for (PipelineID pipelineID : pipelineIds) {
+      try {
+        pipelineManager.finalizePipeline(pipelineID);
+      } catch (IOException e) {
+        LOG.info("Could not finalize pipeline={} for dn={}", pipelineID,
+            datanodeDetails);
+      }
+    }
   }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/dce4ebe8/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/states/Node2PipelineMap.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/states/Node2PipelineMap.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/states/Node2PipelineMap.java
index 87f2222..bf19261 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/states/Node2PipelineMap.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/states/Node2PipelineMap.java
@@ -19,8 +19,8 @@
 package org.apache.hadoop.hdds.scm.node.states;
 
 import org.apache.hadoop.hdds.protocol.DatanodeDetails;
-import org.apache.hadoop.hdds.scm.container.common.helpers.Pipeline;
-import org.apache.hadoop.hdds.scm.container.common.helpers.PipelineID;
+import org.apache.hadoop.hdds.scm.pipeline.Pipeline;
+import org.apache.hadoop.hdds.scm.pipeline.PipelineID;
 
 import java.util.HashSet;
 import java.util.Set;
@@ -55,7 +55,7 @@ public class Node2PipelineMap extends Node2ObjectsMap<PipelineID> {
    * @param pipeline Pipeline to be added
    */
   public synchronized void addPipeline(Pipeline pipeline) {
-    for (DatanodeDetails details : pipeline.getDatanodes().values()) {
+    for (DatanodeDetails details : pipeline.getNodes()) {
       UUID dnId = details.getUuid();
       dn2ObjectMap.computeIfAbsent(dnId, k -> new HashSet<>())
           .add(pipeline.getId());
@@ -63,7 +63,7 @@ public class Node2PipelineMap extends Node2ObjectsMap<PipelineID> {
   }
 
   public synchronized void removePipeline(Pipeline pipeline) {
-    for (DatanodeDetails details : pipeline.getDatanodes().values()) {
+    for (DatanodeDetails details : pipeline.getNodes()) {
       UUID dnId = details.getUuid();
       dn2ObjectMap.computeIfPresent(dnId,
           (k, v) -> {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/dce4ebe8/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/PipelineFactory.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/PipelineFactory.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/PipelineFactory.java
index 261c544..c06a3bd 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/PipelineFactory.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/PipelineFactory.java
@@ -50,8 +50,8 @@ public final class PipelineFactory {
     return providers.get(type).create(factor);
   }
 
-  public Pipeline create(ReplicationType type, List<DatanodeDetails> nodes)
-      throws IOException {
-    return providers.get(type).create(nodes);
+  public Pipeline create(ReplicationType type, ReplicationFactor factor,
+      List<DatanodeDetails> nodes) {
+    return providers.get(type).create(factor, nodes);
   }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/dce4ebe8/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/PipelineManager.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/PipelineManager.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/PipelineManager.java
index 51f9e86..04ec535 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/PipelineManager.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/PipelineManager.java
@@ -36,14 +36,14 @@ public interface PipelineManager extends Closeable {
   Pipeline createPipeline(ReplicationType type, ReplicationFactor factor)
       throws IOException;
 
-  Pipeline createPipeline(ReplicationType type, List<DatanodeDetails> nodes)
-      throws IOException;
+  Pipeline createPipeline(ReplicationType type, ReplicationFactor factor,
+      List<DatanodeDetails> nodes);
 
-  Pipeline getPipeline(PipelineID pipelineID) throws IOException;
+  Pipeline getPipeline(PipelineID pipelineID) throws PipelineNotFoundException;
 
-  List<Pipeline> getPipelinesByType(ReplicationType type);
+  List<Pipeline> getPipelines(ReplicationType type);
 
-  List<Pipeline> getPipelinesByTypeAndFactor(ReplicationType type,
+  List<Pipeline> getPipelines(ReplicationType type,
       ReplicationFactor factor);
 
   void addContainerToPipeline(PipelineID pipelineID, ContainerID containerID)

http://git-wip-us.apache.org/repos/asf/hadoop/blob/dce4ebe8/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/PipelineProvider.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/PipelineProvider.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/PipelineProvider.java
index 2fc2e0e..84b6375 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/PipelineProvider.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/PipelineProvider.java
@@ -31,5 +31,5 @@ public interface PipelineProvider {
 
   Pipeline create(ReplicationFactor factor) throws IOException;
 
-  Pipeline create(List<DatanodeDetails> nodes) throws IOException;
+  Pipeline create(ReplicationFactor factor, List<DatanodeDetails> nodes);
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/dce4ebe8/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/PipelineReportHandler.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/PipelineReportHandler.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/PipelineReportHandler.java
index ad11b47..6c31a12 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/PipelineReportHandler.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/PipelineReportHandler.java
@@ -76,7 +76,13 @@ public class PipelineReportHandler implements
   private void processPipelineReport(PipelineReport report, DatanodeDetails dn)
       throws IOException {
     PipelineID pipelineID = PipelineID.getFromProtobuf(report.getPipelineID());
-    Pipeline pipeline = pipelineManager.getPipeline(pipelineID);
+    Pipeline pipeline = null;
+    try {
+      pipeline = pipelineManager.getPipeline(pipelineID);
+    } catch (PipelineNotFoundException e) {
+      //TODO: introduce per datanode command for pipeline destroy
+      return;
+    }
 
     if (pipeline.getPipelineState() == Pipeline.PipelineState.ALLOCATED) {
       pipeline.reportDatanode(dn);
@@ -87,14 +93,14 @@ public class PipelineReportHandler implements
     } else if (pipeline.isClosed()) {
       int numContainers = pipelineManager.getNumberOfContainers(pipelineID);
       if (numContainers == 0) {
-        // if all the containers have been closed the pipeline can be destroyed
+        // remove the pipeline from the pipeline manager
+        pipelineManager.removePipeline(pipelineID);
+        // since all the containers have been closed the pipeline can be
+        // destroyed
         try (XceiverClientRatis client =
             XceiverClientRatis.newXceiverClientRatis(pipeline, conf)) {
           client.destroyPipeline();
         }
-        // after successfully destroying the pipeline, the pipeline can be
-        // removed from the pipeline manager
-        pipelineManager.removePipeline(pipelineID);
       }
     } else {
       // In OPEN state case just report the datanode

http://git-wip-us.apache.org/repos/asf/hadoop/blob/dce4ebe8/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/PipelineStateManager.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/PipelineStateManager.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/PipelineStateManager.java
index 8f5f89a..67f74d3 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/PipelineStateManager.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/PipelineStateManager.java
@@ -23,8 +23,6 @@ import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationType;
 import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor;
 import org.apache.hadoop.hdds.scm.container.ContainerID;
 import org.apache.hadoop.hdds.scm.pipeline.Pipeline.PipelineState;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
 
 import java.io.IOException;
 import java.util.List;
@@ -39,9 +37,6 @@ import java.util.Set;
  */
 class PipelineStateManager {
 
-  private static final Logger LOG = LoggerFactory.getLogger(
-      org.apache.hadoop.hdds.scm.pipelines.PipelineStateManager.class);
-
   private final PipelineStateMap pipelineStateMap;
 
   PipelineStateManager(Configuration conf) {
@@ -57,17 +52,20 @@ class PipelineStateManager {
     pipelineStateMap.addContainerToPipeline(pipelineId, containerID);
   }
 
-  Pipeline getPipeline(PipelineID pipelineID) throws IOException {
+  Pipeline getPipeline(PipelineID pipelineID) throws PipelineNotFoundException {
     return pipelineStateMap.getPipeline(pipelineID);
   }
 
-  List<Pipeline> getPipelinesByType(ReplicationType type) {
-    return pipelineStateMap.getPipelinesByType(type);
+  List<Pipeline> getPipelines(ReplicationType type) {
+    return pipelineStateMap.getPipelines(type);
+  }
+
+  List<Pipeline> getPipelines(ReplicationType type, ReplicationFactor factor) {
+    return pipelineStateMap.getPipelines(type, factor);
   }
 
-  List<Pipeline> getPipelinesByTypeAndFactor(ReplicationType type,
-      ReplicationFactor factor) {
-    return pipelineStateMap.getPipelinesByTypeAndFactor(type, factor);
+  List<Pipeline> getPipelines(ReplicationType type, PipelineState... states) {
+    return pipelineStateMap.getPipelines(type, states);
   }
 
   Set<ContainerID> getContainers(PipelineID pipelineID) throws IOException {
@@ -78,8 +76,8 @@ class PipelineStateManager {
     return pipelineStateMap.getNumberOfContainers(pipelineID);
   }
 
-  void removePipeline(PipelineID pipelineID) throws IOException {
-    pipelineStateMap.removePipeline(pipelineID);
+  Pipeline removePipeline(PipelineID pipelineID) throws IOException {
+    return pipelineStateMap.removePipeline(pipelineID);
   }
 
   void removeContainerFromPipeline(PipelineID pipelineID,
@@ -87,7 +85,8 @@ class PipelineStateManager {
     pipelineStateMap.removeContainerFromPipeline(pipelineID, containerID);
   }
 
-  Pipeline finalizePipeline(PipelineID pipelineId) throws IOException {
+  Pipeline finalizePipeline(PipelineID pipelineId)
+      throws PipelineNotFoundException {
     Pipeline pipeline = pipelineStateMap.getPipeline(pipelineId);
     if (!pipeline.isClosed()) {
       pipeline = pipelineStateMap

http://git-wip-us.apache.org/repos/asf/hadoop/blob/dce4ebe8/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/PipelineStateMap.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/PipelineStateMap.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/PipelineStateMap.java
index 110d26b..7b69491 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/PipelineStateMap.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/PipelineStateMap.java
@@ -31,8 +31,7 @@ import java.util.stream.Collectors;
 
 /**
  * Holds the data structures which maintain the information about pipeline and
- * its state. All the read write operations in this class are protected by a
- * lock.
+ * its state.
  * Invariant: If a pipeline exists in PipelineStateMap, both pipelineMap and
  * pipeline2container would have a non-null mapping for it.
  */
@@ -65,12 +64,12 @@ class PipelineStateMap {
         String.format("Nodes size=%d, replication factor=%d do not match ",
                 pipeline.getNodes().size(), pipeline.getFactor().getNumber()));
 
-    if (pipelineMap.putIfAbsent(pipeline.getID(), pipeline) != null) {
-      LOG.warn("Duplicate pipeline ID detected. {}", pipeline.getID());
+    if (pipelineMap.putIfAbsent(pipeline.getId(), pipeline) != null) {
+      LOG.warn("Duplicate pipeline ID detected. {}", pipeline.getId());
       throw new IOException(String
-          .format("Duplicate pipeline ID %s detected.", pipeline.getID()));
+          .format("Duplicate pipeline ID %s detected.", pipeline.getId()));
     }
-    pipeline2container.put(pipeline.getID(), new TreeSet<>());
+    pipeline2container.put(pipeline.getId(), new TreeSet<>());
   }
 
   /**
@@ -85,12 +84,13 @@ class PipelineStateMap {
     Preconditions.checkNotNull(pipelineID,
         "Pipeline Id cannot be null");
     Preconditions.checkNotNull(containerID,
-        "container Id cannot be null");
+        "Container Id cannot be null");
 
     Pipeline pipeline = getPipeline(pipelineID);
-    if (!pipeline.isOpen()) {
-      throw new IOException(
-          String.format("%s is not in open state", pipelineID));
+    if (pipeline.isClosed()) {
+      throw new IOException(String
+          .format("Cannot add container to pipeline=%s in closed state",
+              pipelineID));
     }
     pipeline2container.get(pipelineID).add(containerID);
   }
@@ -102,10 +102,14 @@ class PipelineStateMap {
    * @return Pipeline
    * @throws IOException if pipeline is not found
    */
-  Pipeline getPipeline(PipelineID pipelineID) throws IOException {
+  Pipeline getPipeline(PipelineID pipelineID) throws PipelineNotFoundException {
+    Preconditions.checkNotNull(pipelineID,
+        "Pipeline Id cannot be null");
+
     Pipeline pipeline = pipelineMap.get(pipelineID);
     if (pipeline == null) {
-      throw new IOException(String.format("%s not found", pipelineID));
+      throw new PipelineNotFoundException(
+          String.format("%s not found", pipelineID));
     }
     return pipeline;
   }
@@ -116,29 +120,52 @@ class PipelineStateMap {
    * @param type - ReplicationType
    * @return List of pipelines which have the specified replication type
    */
-  List<Pipeline> getPipelinesByType(ReplicationType type) {
+  List<Pipeline> getPipelines(ReplicationType type) {
     Preconditions.checkNotNull(type, "Replication type cannot be null");
 
-    return pipelineMap.values().stream().filter(p -> p.getType().equals(type))
+    return pipelineMap.values().stream()
+        .filter(p -> p.getType().equals(type))
         .collect(Collectors.toList());
   }
 
   /**
-   * Get open pipeline corresponding to specified replication type and factor.
+   * Get pipeline corresponding to specified replication type and factor.
    *
    * @param type - ReplicationType
    * @param factor - ReplicationFactor
-   * @return List of open pipelines with specified replication type and factor
+   * @return List of pipelines with specified replication type and factor
    */
-  List<Pipeline> getPipelinesByTypeAndFactor(ReplicationType type,
-      ReplicationFactor factor) {
+  List<Pipeline> getPipelines(ReplicationType type, ReplicationFactor factor) {
+    Preconditions.checkNotNull(type, "Replication type cannot be null");
+    Preconditions.checkNotNull(factor, "Replication factor cannot be null");
+
     return pipelineMap.values().stream()
-        .filter(pipeline -> pipeline.isOpen() && pipeline.getType() == type
+        .filter(pipeline -> pipeline.getType() == type
             && pipeline.getFactor() == factor)
         .collect(Collectors.toList());
   }
 
   /**
+   * Get list of pipeline corresponding to specified replication type and
+   * pipeline states.
+   *
+   * @param type - ReplicationType
+   * @param states - Array of required PipelineState
+   * @return List of pipelines with specified replication type and states
+   */
+  List<Pipeline> getPipelines(ReplicationType type, PipelineState... states) {
+    Preconditions.checkNotNull(type, "Replication type cannot be null");
+    Preconditions.checkNotNull(states, "Pipeline state cannot be null");
+
+    Set<PipelineState> pipelineStates = new HashSet<>();
+    pipelineStates.addAll(Arrays.asList(states));
+    return pipelineMap.values().stream().filter(
+        pipeline -> pipeline.getType() == type && pipelineStates
+            .contains(pipeline.getPipelineState()))
+        .collect(Collectors.toList());
+  }
+
+  /**
    * Get set of containerIDs corresponding to a pipeline.
    *
    * @param pipelineID - PipelineID
@@ -146,10 +173,14 @@ class PipelineStateMap {
    * @throws IOException if pipeline is not found
    */
   Set<ContainerID> getContainers(PipelineID pipelineID)
-      throws IOException {
+      throws PipelineNotFoundException {
+    Preconditions.checkNotNull(pipelineID,
+        "Pipeline Id cannot be null");
+
     Set<ContainerID> containerIDs = pipeline2container.get(pipelineID);
     if (containerIDs == null) {
-      throw new IOException(String.format("%s not found", pipelineID));
+      throw new PipelineNotFoundException(
+          String.format("%s not found", pipelineID));
     }
     return new HashSet<>(containerIDs);
   }
@@ -161,10 +192,15 @@ class PipelineStateMap {
    * @return Number of containers belonging to the pipeline
    * @throws IOException if pipeline is not found
    */
-  int getNumberOfContainers(PipelineID pipelineID) throws IOException {
+  int getNumberOfContainers(PipelineID pipelineID)
+      throws PipelineNotFoundException {
+    Preconditions.checkNotNull(pipelineID,
+        "Pipeline Id cannot be null");
+
     Set<ContainerID> containerIDs = pipeline2container.get(pipelineID);
     if (containerIDs == null) {
-      throw new IOException(String.format("%s not found", pipelineID));
+      throw new PipelineNotFoundException(
+          String.format("%s not found", pipelineID));
     }
     return containerIDs.size();
   }
@@ -175,7 +211,7 @@ class PipelineStateMap {
    * @param pipelineID - PipelineID of the pipeline to be removed
    * @throws IOException if the pipeline is not empty or does not exist
    */
-  void removePipeline(PipelineID pipelineID) throws IOException {
+  Pipeline removePipeline(PipelineID pipelineID) throws IOException {
     Preconditions.checkNotNull(pipelineID, "Pipeline Id cannot be null");
 
     Pipeline pipeline = getPipeline(pipelineID);
@@ -192,6 +228,7 @@ class PipelineStateMap {
 
     pipelineMap.remove(pipelineID);
     pipeline2container.remove(pipelineID);
+    return pipeline;
   }
 
   /**
@@ -210,6 +247,10 @@ class PipelineStateMap {
         "container Id cannot be null");
 
     Set<ContainerID> containerIDs = pipeline2container.get(pipelineID);
+    if (containerIDs == null) {
+      throw new PipelineNotFoundException(
+          String.format("%s not found", pipelineID));
+    }
     containerIDs.remove(containerID);
   }
 
@@ -223,7 +264,7 @@ class PipelineStateMap {
    * @throws IOException if pipeline does not exist
    */
   Pipeline updatePipelineState(PipelineID pipelineID, PipelineState state)
-      throws IOException {
+      throws PipelineNotFoundException {
     Preconditions.checkNotNull(pipelineID, "Pipeline Id cannot be null");
     Preconditions.checkNotNull(state, "Pipeline LifeCycleState cannot be null");
 

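For reference, a minimal caller-side sketch of the new state-aware lookup added above. The PipelineStateMap methods are package-private, so the sketch assumes code living in the same org.apache.hadoop.hdds.scm.pipeline package; the wrapper class and method name are illustrative, not part of this commit, and the imports for the new Pipeline/PipelineState types are assumed to follow the package layout of the files in this diff.

    import java.util.List;

    import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationType;

    class PipelineQuerySketch {
      // Callers now state which lifecycle states they want instead of relying
      // on the old implicit "open pipelines only" filter.
      static List<Pipeline> openOrAllocatedRatis(PipelineStateMap stateMap) {
        return stateMap.getPipelines(ReplicationType.RATIS,
            Pipeline.PipelineState.OPEN, Pipeline.PipelineState.ALLOCATED);
      }
    }
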
http://git-wip-us.apache.org/repos/asf/hadoop/blob/dce4ebe8/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/RatisPipelineProvider.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/RatisPipelineProvider.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/RatisPipelineProvider.java
index 400ab24..590cd27 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/RatisPipelineProvider.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/RatisPipelineProvider.java
@@ -93,7 +93,7 @@ public class RatisPipelineProvider implements PipelineProvider {
   public Pipeline create(ReplicationFactor factor) throws IOException {
     // Get set of datanodes already used for ratis pipeline
     Set<DatanodeDetails> dnsUsed = new HashSet<>();
-    stateManager.getPipelinesByType(ReplicationType.RATIS)
+    stateManager.getPipelines(ReplicationType.RATIS)
         .forEach(p -> dnsUsed.addAll(p.getNodes()));
 
     // Get list of healthy nodes
@@ -112,7 +112,7 @@ public class RatisPipelineProvider implements PipelineProvider {
 
     Pipeline pipeline = Pipeline.newBuilder()
         .setId(PipelineID.randomId())
-        .setState(PipelineState.ALLOCATED)
+        .setState(PipelineState.OPEN)
         .setType(ReplicationType.RATIS)
         .setFactor(factor)
         .setNodes(dns)
@@ -122,16 +122,11 @@ public class RatisPipelineProvider implements PipelineProvider {
   }
 
   @Override
-  public Pipeline create(List<DatanodeDetails> nodes) throws IOException {
-    ReplicationFactor factor = ReplicationFactor.valueOf(nodes.size());
-    if (factor == null) {
-      throw new IOException(String
-          .format("Nodes size=%d does not match any replication factor",
-              nodes.size()));
-    }
+  public Pipeline create(ReplicationFactor factor,
+      List<DatanodeDetails> nodes) {
     return Pipeline.newBuilder()
         .setId(PipelineID.randomId())
-        .setState(PipelineState.ALLOCATED)
+        .setState(PipelineState.OPEN)
         .setType(ReplicationType.RATIS)
         .setFactor(factor)
         .setNodes(nodes)

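With this change both pipeline providers hand out pipelines that are already OPEN, and create(...) takes the replication factor explicitly instead of deriving it from nodes.size(). For reference, a minimal sketch of the builder calls the new RatisPipelineProvider#create relies on; the wrapper class and method are illustrative, the caller is assumed to pass a factor consistent with the node list, and imports for the new Pipeline, PipelineID and PipelineState types are assumed to match the package layout above.

    import java.util.List;

    import org.apache.hadoop.hdds.protocol.DatanodeDetails;
    import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor;
    import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationType;

    class OpenPipelineSketch {
      // Mirrors RatisPipelineProvider#create(factor, nodes): the pipeline is
      // born OPEN rather than ALLOCATED, so no CREATE/CREATED lifecycle
      // transition is needed before it can serve containers.
      static Pipeline openRatisPipeline(ReplicationFactor factor,
          List<DatanodeDetails> nodes) {
        return Pipeline.newBuilder()
            .setId(PipelineID.randomId())
            .setState(Pipeline.PipelineState.OPEN)
            .setType(ReplicationType.RATIS)
            .setFactor(factor)
            .setNodes(nodes)
            .build();
      }
    }
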
http://git-wip-us.apache.org/repos/asf/hadoop/blob/dce4ebe8/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/SCMPipelineManager.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/SCMPipelineManager.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/SCMPipelineManager.java
index 6a9c783..a853693 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/SCMPipelineManager.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/SCMPipelineManager.java
@@ -78,14 +78,14 @@ public class SCMPipelineManager implements PipelineManager {
     File pipelineDBPath = new File(metaDir, SCM_PIPELINE_DB);
     this.pipelineStore =
         MetadataStoreBuilder.newBuilder()
+            .setCreateIfMissing(true)
             .setConf(conf)
             .setDbFile(pipelineDBPath)
             .setCacheSize(cacheSize * OzoneConsts.MB)
             .build();
-    initializePipelineState();
-
     this.eventPublisher = eventPublisher;
     this.nodeManager = nodeManager;
+    initializePipelineState();
   }
 
   private void initializePipelineState() throws IOException {
@@ -97,12 +97,11 @@ public class SCMPipelineManager implements PipelineManager {
         pipelineStore.getSequentialRangeKVs(null, Integer.MAX_VALUE, null);
 
     for (Map.Entry<byte[], byte[]> entry : pipelines) {
-      Pipeline pipeline = Pipeline
-          .fromProtobuf(HddsProtos.Pipeline.PARSER.parseFrom(entry.getValue()));
+      Pipeline pipeline = Pipeline.getFromProtobuf(
+          HddsProtos.Pipeline.PARSER.parseFrom(entry.getValue()));
       Preconditions.checkNotNull(pipeline);
       stateManager.addPipeline(pipeline);
-      // TODO: add pipeline to node manager
-      // nodeManager.addPipeline(pipeline);
+      nodeManager.addPipeline(pipeline);
     }
   }
 
@@ -112,10 +111,10 @@ public class SCMPipelineManager implements PipelineManager {
     lock.writeLock().lock();
     try {
       Pipeline pipeline =  pipelineFactory.create(type, factor);
-      pipelineStore.put(pipeline.getID().getProtobuf().toByteArray(),
+      pipelineStore.put(pipeline.getId().getProtobuf().toByteArray(),
           pipeline.getProtobufMessage().toByteArray());
       stateManager.addPipeline(pipeline);
-      // TODO: add pipeline to node manager
+      nodeManager.addPipeline(pipeline);
       return pipeline;
     } finally {
       lock.writeLock().unlock();
@@ -123,20 +122,20 @@ public class SCMPipelineManager implements PipelineManager {
   }
 
   @Override
-  public Pipeline createPipeline(ReplicationType type,
-                                 List<DatanodeDetails> nodes)
-      throws IOException {
+  public Pipeline createPipeline(ReplicationType type, ReplicationFactor factor,
+                                 List<DatanodeDetails> nodes) {
     // This will mostly be used to create dummy pipeline for SimplePipelines.
     lock.writeLock().lock();
     try {
-      return pipelineFactory.create(type, nodes);
+      return pipelineFactory.create(type, factor, nodes);
     } finally {
       lock.writeLock().unlock();
     }
   }
 
   @Override
-  public Pipeline getPipeline(PipelineID pipelineID) throws IOException {
+  public Pipeline getPipeline(PipelineID pipelineID)
+      throws PipelineNotFoundException {
     lock.readLock().lock();
     try {
       return stateManager.getPipeline(pipelineID);
@@ -146,21 +145,21 @@ public class SCMPipelineManager implements PipelineManager {
   }
 
   @Override
-  public List<Pipeline> getPipelinesByType(ReplicationType type) {
+  public List<Pipeline> getPipelines(ReplicationType type) {
     lock.readLock().lock();
     try {
-      return stateManager.getPipelinesByType(type);
+      return stateManager.getPipelines(type);
     } finally {
       lock.readLock().unlock();
     }
   }
 
   @Override
-  public List<Pipeline> getPipelinesByTypeAndFactor(ReplicationType type,
+  public List<Pipeline> getPipelines(ReplicationType type,
       ReplicationFactor factor) {
     lock.readLock().lock();
     try {
-      return stateManager.getPipelinesByTypeAndFactor(type, factor);
+      return stateManager.getPipelines(type, factor);
     } finally {
       lock.readLock().unlock();
     }
@@ -232,9 +231,9 @@ public class SCMPipelineManager implements PipelineManager {
   public void removePipeline(PipelineID pipelineID) throws IOException {
     lock.writeLock().lock();
     try {
-      stateManager.removePipeline(pipelineID);
       pipelineStore.delete(pipelineID.getProtobuf().toByteArray());
-      // TODO: remove pipeline from node manager
+      Pipeline pipeline = stateManager.removePipeline(pipelineID);
+      nodeManager.removePipeline(pipeline);
     } finally {
       lock.writeLock().unlock();
     }

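SCMPipelineManager now creates its pipeline DB on demand (setCreateIfMissing), keys each entry by the PipelineID protobuf, and registers pipelines with the node manager both when they are created and when they are reloaded at startup. For reference, a minimal sketch of the serialize/deserialize round-trip used by createPipeline and initializePipelineState above; the wrapper class is illustrative, imports for the new Pipeline type are assumed from the package layout in this diff, and only the checked exceptions the underlying calls already declare are propagated.

    import java.io.IOException;

    import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
    import org.apache.hadoop.utils.MetadataStore;

    class PipelinePersistenceSketch {
      // Write path, as in createPipeline: key = PipelineID protobuf bytes,
      // value = the full Pipeline protobuf message.
      static void persist(MetadataStore pipelineStore, Pipeline pipeline)
          throws IOException {
        pipelineStore.put(pipeline.getId().getProtobuf().toByteArray(),
            pipeline.getProtobufMessage().toByteArray());
      }

      // Read path, as in initializePipelineState: parse the stored bytes back
      // into a Pipeline via getFromProtobuf.
      static Pipeline load(byte[] storedValue) throws IOException {
        return Pipeline.getFromProtobuf(
            HddsProtos.Pipeline.PARSER.parseFrom(storedValue));
      }
    }
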
http://git-wip-us.apache.org/repos/asf/hadoop/blob/dce4ebe8/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/SimplePipelineProvider.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/SimplePipelineProvider.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/SimplePipelineProvider.java
index c95fcfb..3e42df3 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/SimplePipelineProvider.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/SimplePipelineProvider.java
@@ -54,7 +54,7 @@ public class SimplePipelineProvider implements PipelineProvider {
     Collections.shuffle(dns);
     return Pipeline.newBuilder()
         .setId(PipelineID.randomId())
-        .setState(PipelineState.ALLOCATED)
+        .setState(PipelineState.OPEN)
         .setType(ReplicationType.STAND_ALONE)
         .setFactor(factor)
         .setNodes(dns.subList(0, factor.getNumber()))
@@ -62,16 +62,11 @@ public class SimplePipelineProvider implements PipelineProvider {
   }
 
   @Override
-  public Pipeline create(List<DatanodeDetails> nodes) throws IOException {
-    ReplicationFactor factor = ReplicationFactor.valueOf(nodes.size());
-    if (factor == null) {
-      throw new IOException(String
-          .format("Nodes size=%d does not match any replication factor",
-              nodes.size()));
-    }
+  public Pipeline create(ReplicationFactor factor,
+      List<DatanodeDetails> nodes) {
     return Pipeline.newBuilder()
         .setId(PipelineID.randomId())
-        .setState(PipelineState.ALLOCATED)
+        .setState(PipelineState.OPEN)
         .setType(ReplicationType.STAND_ALONE)
         .setFactor(factor)
         .setNodes(nodes)

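The removed create(List<DatanodeDetails>) overloads derived the replication factor from nodes.size() and threw an IOException on a mismatch; after this change that check belongs to whoever calls create(factor, nodes). For reference, a small sketch of that caller-side mapping, with the valueOf lookup and error message copied from the removed provider code; the helper class and method name are illustrative.

    import java.io.IOException;
    import java.util.List;

    import org.apache.hadoop.hdds.protocol.DatanodeDetails;
    import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor;

    class FactorFromNodesSketch {
      // Maps a concrete node list to a ReplicationFactor before invoking
      // PipelineProvider#create(factor, nodes); fails if the list size matches
      // no known factor, exactly as the old overload did.
      static ReplicationFactor factorFor(List<DatanodeDetails> nodes)
          throws IOException {
        ReplicationFactor factor = ReplicationFactor.valueOf(nodes.size());
        if (factor == null) {
          throw new IOException(String.format(
              "Nodes size=%d does not match any replication factor",
              nodes.size()));
        }
        return factor;
      }
    }
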
http://git-wip-us.apache.org/repos/asf/hadoop/blob/dce4ebe8/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipelines/PipelineActionEventHandler.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipelines/PipelineActionEventHandler.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipelines/PipelineActionEventHandler.java
deleted file mode 100644
index 1053149..0000000
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipelines/PipelineActionEventHandler.java
+++ /dev/null
@@ -1,62 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with this
- * work for additional information regarding copyright ownership.  The ASF
- * licenses this file to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations under
- * the License.
- */
-
-package org.apache.hadoop.hdds.scm.pipelines;
-
-import org.apache.hadoop.hdds.protocol.proto
-    .StorageContainerDatanodeProtocolProtos.PipelineAction;
-import org.apache.hadoop.hdds.scm.container.common.helpers.PipelineID;
-import org.apache.hadoop.hdds.scm.events.SCMEvents;
-import org.apache.hadoop.hdds.scm.server.SCMDatanodeHeartbeatDispatcher
-    .PipelineActionsFromDatanode;
-
-import org.apache.hadoop.hdds.server.events.EventHandler;
-import org.apache.hadoop.hdds.server.events.EventPublisher;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-/**
- * Handles pipeline actions from datanode.
- */
-public class PipelineActionEventHandler implements
-    EventHandler<PipelineActionsFromDatanode> {
-
-  public static final Logger LOG = LoggerFactory.getLogger(
-      PipelineActionEventHandler.class);
-
-  public PipelineActionEventHandler() {
-
-  }
-
-  @Override
-  public void onMessage(PipelineActionsFromDatanode report,
-      EventPublisher publisher) {
-    for (PipelineAction action : report.getReport().getPipelineActionsList()) {
-      switch (action.getAction()) {
-      case CLOSE:
-        PipelineID pipelineID = PipelineID.
-            getFromProtobuf(action.getClosePipeline().getPipelineID());
-        LOG.info("Closing pipeline " + pipelineID + " for reason:" + action
-            .getClosePipeline().getDetailedReason());
-        publisher.fireEvent(SCMEvents.PIPELINE_CLOSE, pipelineID);
-        break;
-      default:
-        LOG.error("unknown pipeline action:{}" + action.getAction());
-      }
-    }
-  }
-}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/dce4ebe8/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipelines/PipelineCloseHandler.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipelines/PipelineCloseHandler.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipelines/PipelineCloseHandler.java
deleted file mode 100644
index e49678f..0000000
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipelines/PipelineCloseHandler.java
+++ /dev/null
@@ -1,52 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with this
- * work for additional information regarding copyright ownership.  The ASF
- * licenses this file to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations under
- * the License.
- */
-
-package org.apache.hadoop.hdds.scm.pipelines;
-
-import org.apache.hadoop.hdds.scm.container.common.helpers.Pipeline;
-import org.apache.hadoop.hdds.scm.container.common.helpers.PipelineID;
-import org.apache.hadoop.hdds.server.events.EventHandler;
-import org.apache.hadoop.hdds.server.events.EventPublisher;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-/**
- * Handles pipeline close event.
- */
-public class PipelineCloseHandler implements EventHandler<PipelineID> {
-  private static final Logger LOG = LoggerFactory
-          .getLogger(PipelineCloseHandler.class);
-
-  private final PipelineSelector pipelineSelector;
-  public PipelineCloseHandler(PipelineSelector pipelineSelector) {
-    this.pipelineSelector = pipelineSelector;
-  }
-
-  @Override
-  public void onMessage(PipelineID pipelineID, EventPublisher publisher) {
-    Pipeline pipeline = pipelineSelector.getPipeline(pipelineID);
-    try {
-      if (pipeline != null) {
-        pipelineSelector.finalizePipeline(pipeline);
-      } else {
-        LOG.debug("pipeline:{} not found", pipelineID);
-      }
-    } catch (Exception e) {
-      LOG.info("failed to close pipeline:{}", pipelineID, e);
-    }
-  }
-}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/dce4ebe8/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipelines/PipelineManager.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipelines/PipelineManager.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipelines/PipelineManager.java
deleted file mode 100644
index ca2e878..0000000
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipelines/PipelineManager.java
+++ /dev/null
@@ -1,171 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with this
- * work for additional information regarding copyright ownership.  The ASF
- * licenses this file to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- * <p/>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p/>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations under
- * the License.
- */
-package org.apache.hadoop.hdds.scm.pipelines;
-
-import java.util.ArrayList;
-import java.util.LinkedList;
-
-import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
-import org.apache.hadoop.hdds.scm.container.common.helpers.Pipeline;
-import org.apache.hadoop.hdds.protocol.DatanodeDetails;
-import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor;
-import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationType;
-import org.apache.hadoop.hdds.scm.container.common.helpers.PipelineID;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import java.io.IOException;
-import java.util.List;
-import java.util.concurrent.atomic.AtomicInteger;
-
-/**
- * Manage Ozone pipelines.
- */
-public abstract class PipelineManager {
-  private static final Logger LOG =
-      LoggerFactory.getLogger(PipelineManager.class);
-  protected final ArrayList<ActivePipelines> activePipelines;
-
-  public PipelineManager() {
-    activePipelines = new ArrayList<>();
-    for (ReplicationFactor factor : ReplicationFactor.values()) {
-      activePipelines.add(factor.ordinal(), new ActivePipelines());
-    }
-  }
-
-  /**
-   * List of active pipelines.
-   */
-  public static class ActivePipelines {
-    private final List<PipelineID> activePipelines;
-    private final AtomicInteger pipelineIndex;
-
-    ActivePipelines() {
-      activePipelines = new LinkedList<>();
-      pipelineIndex = new AtomicInteger(0);
-    }
-
-    void addPipeline(PipelineID pipelineID) {
-      if (!activePipelines.contains(pipelineID)) {
-        activePipelines.add(pipelineID);
-      }
-    }
-
-    public void removePipeline(PipelineID pipelineID) {
-      activePipelines.remove(pipelineID);
-    }
-
-    /**
-     * Find a Pipeline that is operational.
-     *
-     * @return - Pipeline or null
-     */
-    PipelineID findOpenPipeline() {
-      if (activePipelines.size() == 0) {
-        LOG.error("No Operational pipelines found. Returning null.");
-        return null;
-      }
-      return activePipelines.get(getNextIndex());
-    }
-
-    /**
-     * gets the next index of the Pipeline to get.
-     *
-     * @return index in the link list to get.
-     */
-    private int getNextIndex() {
-      return pipelineIndex.incrementAndGet() % activePipelines.size();
-    }
-  }
-
-  /**
-   * This function is called by the Container Manager while allocating a new
-   * container. The client specifies what kind of replication pipeline is
-   * needed and based on the replication type in the request appropriate
-   * Interface is invoked.
-   *
-   * @param replicationFactor - Replication Factor
-   * @return a Pipeline.
-   */
-  public synchronized final PipelineID getPipeline(
-      ReplicationFactor replicationFactor, ReplicationType replicationType) {
-    PipelineID id =
-        activePipelines.get(replicationFactor.ordinal()).findOpenPipeline();
-    if (id != null) {
-      LOG.debug("re-used pipeline:{} for container with " +
-              "replicationType:{} replicationFactor:{}",
-          id, replicationType, replicationFactor);
-    }
-    if (id == null) {
-      LOG.error("Get pipeline call failed. We are not able to find" +
-              " operational pipeline.");
-      return null;
-    } else {
-      return id;
-    }
-  }
-
-  void addOpenPipeline(Pipeline pipeline) {
-    activePipelines.get(pipeline.getFactor().ordinal())
-            .addPipeline(pipeline.getId());
-  }
-
-  public abstract Pipeline allocatePipeline(
-      ReplicationFactor replicationFactor);
-
-  /**
-   * Initialize the pipeline.
-   * TODO: move the initialization to Ozone Client later
-   */
-  public abstract void initializePipeline(Pipeline pipeline) throws IOException;
-
-  public void processPipelineReport(Pipeline pipeline, DatanodeDetails dn) {
-    if (pipeline.addMember(dn)
-        &&(pipeline.getDatanodes().size() == pipeline.getFactor().getNumber())
-        && pipeline.getLifeCycleState() == HddsProtos.LifeCycleState.OPEN) {
-      addOpenPipeline(pipeline);
-    }
-  }
-
-  /**
-   * Creates a pipeline with a specified replication factor and type.
-   * @param replicationFactor - Replication Factor.
-   * @param replicationType - Replication Type.
-   */
-  public Pipeline createPipeline(ReplicationFactor replicationFactor,
-      ReplicationType replicationType) throws IOException {
-    Pipeline pipeline = allocatePipeline(replicationFactor);
-    if (pipeline != null) {
-      LOG.debug("created new pipeline:{} for container with "
-              + "replicationType:{} replicationFactor:{}",
-          pipeline.getId(), replicationType, replicationFactor);
-    }
-    return pipeline;
-  }
-
-  /**
-   * Remove the pipeline from active allocation.
-   * @param pipeline pipeline to be finalized
-   */
-  public abstract boolean finalizePipeline(Pipeline pipeline);
-
-  /**
-   *
-   * @param pipeline
-   */
-  public abstract void closePipeline(Pipeline pipeline) throws IOException;
-}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/dce4ebe8/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipelines/PipelineReportHandler.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipelines/PipelineReportHandler.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipelines/PipelineReportHandler.java
deleted file mode 100644
index 933792b..0000000
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipelines/PipelineReportHandler.java
+++ /dev/null
@@ -1,59 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.hdds.scm.pipelines;
-
-import com.google.common.base.Preconditions;
-import org.apache.hadoop.hdds.protocol.DatanodeDetails;
-import org.apache.hadoop.hdds.protocol.proto
-        .StorageContainerDatanodeProtocolProtos.PipelineReportsProto;
-import org.apache.hadoop.hdds.scm.server
-        .SCMDatanodeHeartbeatDispatcher.PipelineReportFromDatanode;
-import org.apache.hadoop.hdds.server.events.EventHandler;
-import org.apache.hadoop.hdds.server.events.EventPublisher;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-/**
- * Handles Node Reports from datanode.
- */
-public class PipelineReportHandler implements
-        EventHandler<PipelineReportFromDatanode> {
-
-  private static final Logger LOGGER = LoggerFactory
-      .getLogger(PipelineReportHandler.class);
-  private final PipelineSelector pipelineSelector;
-
-  public PipelineReportHandler(PipelineSelector pipelineSelector) {
-    Preconditions.checkNotNull(pipelineSelector);
-    this.pipelineSelector = pipelineSelector;
-  }
-
-  @Override
-  public void onMessage(PipelineReportFromDatanode pipelineReportFromDatanode,
-      EventPublisher publisher) {
-    Preconditions.checkNotNull(pipelineReportFromDatanode);
-    DatanodeDetails dn = pipelineReportFromDatanode.getDatanodeDetails();
-    PipelineReportsProto pipelineReport =
-            pipelineReportFromDatanode.getReport();
-    Preconditions.checkNotNull(dn, "Pipeline Report is "
-        + "missing DatanodeDetails.");
-    LOGGER.trace("Processing pipeline report for dn: {}", dn);
-    pipelineSelector.processPipelineReport(dn, pipelineReport);
-  }
-}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/dce4ebe8/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipelines/PipelineSelector.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipelines/PipelineSelector.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipelines/PipelineSelector.java
deleted file mode 100644
index c8d22ff..0000000
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipelines/PipelineSelector.java
+++ /dev/null
@@ -1,481 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with this
- * work for additional information regarding copyright ownership.  The ASF
- * licenses this file to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- * <p/>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p/>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations under
- * the License.
- */
-package org.apache.hadoop.hdds.scm.pipelines;
-
-import com.google.common.annotations.VisibleForTesting;
-import com.google.common.base.Preconditions;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.conf.StorageUnit;
-import org.apache.hadoop.hdds.protocol.proto
-        .StorageContainerDatanodeProtocolProtos.PipelineReportsProto;
-import org.apache.hadoop.hdds.scm.ScmConfigKeys;
-import org.apache.hadoop.hdds.scm.container.ContainerID;
-import org.apache.hadoop.hdds.scm.container.common.helpers.Pipeline;
-import org.apache.hadoop.hdds.scm.container.common.helpers.PipelineID;
-import org.apache.hadoop.hdds.scm.container.placement.algorithms
-    .ContainerPlacementPolicy;
-import org.apache.hadoop.hdds.scm.container.placement.algorithms
-    .SCMContainerPlacementRandom;
-import org.apache.hadoop.hdds.scm.events.SCMEvents;
-import org.apache.hadoop.hdds.scm.exceptions.SCMException;
-import org.apache.hadoop.hdds.scm.node.NodeManager;
-import org.apache.hadoop.hdds.scm.pipelines.ratis.RatisManagerImpl;
-import org.apache.hadoop.hdds.scm.pipelines.standalone.StandaloneManagerImpl;
-import org.apache.hadoop.hdds.protocol.DatanodeDetails;
-import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
-import org.apache.hadoop.hdds.protocol.proto.HddsProtos.LifeCycleState;
-import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor;
-import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationType;
-import org.apache.hadoop.hdds.server.events.EventPublisher;
-import org.apache.hadoop.ozone.OzoneConsts;
-import org.apache.hadoop.ozone.lease.Lease;
-import org.apache.hadoop.ozone.lease.LeaseException;
-import org.apache.hadoop.ozone.lease.LeaseManager;
-import org.apache.hadoop.utils.MetadataStore;
-import org.apache.hadoop.utils.MetadataStoreBuilder;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import java.io.File;
-import java.io.IOException;
-import java.lang.reflect.Constructor;
-import java.lang.reflect.InvocationTargetException;
-import java.util.HashSet;
-import java.util.List;
-import java.util.HashMap;
-import java.util.Set;
-import java.util.Map;
-import java.util.concurrent.ConcurrentHashMap;
-import java.util.concurrent.TimeUnit;
-
-import static org.apache.hadoop.hdds.scm.exceptions.SCMException.ResultCodes
-    .FAILED_TO_CHANGE_PIPELINE_STATE;
-import static org.apache.hadoop.hdds.scm.exceptions.SCMException.ResultCodes.FAILED_TO_FIND_ACTIVE_PIPELINE;
-import static org.apache.hadoop.hdds.server
-        .ServerUtils.getOzoneMetaDirPath;
-import static org.apache.hadoop.ozone
-        .OzoneConsts.SCM_PIPELINE_DB;
-
-/**
- * Sends the request to the right pipeline manager.
- */
-public class PipelineSelector {
-  private static final Logger LOG =
-      LoggerFactory.getLogger(PipelineSelector.class);
-  private final ContainerPlacementPolicy placementPolicy;
-  private final Map<ReplicationType, PipelineManager> pipelineManagerMap;
-  private final Configuration conf;
-  private final EventPublisher eventPublisher;
-  private final long containerSize;
-  private final MetadataStore pipelineStore;
-  private final PipelineStateManager stateManager;
-  private final NodeManager nodeManager;
-  private final Map<PipelineID, HashSet<ContainerID>> pipeline2ContainerMap;
-  private final Map<PipelineID, Pipeline> pipelineMap;
-  private final LeaseManager<Pipeline> pipelineLeaseManager;
-
-  /**
-   * Constructs a pipeline Selector.
-   *
-   * @param nodeManager - node manager
-   * @param conf - Ozone Config
-   */
-  public PipelineSelector(NodeManager nodeManager, Configuration conf,
-      EventPublisher eventPublisher, int cacheSizeMB) throws IOException {
-    this.conf = conf;
-    this.eventPublisher = eventPublisher;
-    this.placementPolicy = createContainerPlacementPolicy(nodeManager, conf);
-    this.containerSize = (long)this.conf.getStorageSize(
-        ScmConfigKeys.OZONE_SCM_CONTAINER_SIZE,
-        ScmConfigKeys.OZONE_SCM_CONTAINER_SIZE_DEFAULT,
-        StorageUnit.BYTES);
-    pipelineMap = new ConcurrentHashMap<>();
-    pipelineManagerMap = new HashMap<>();
-
-    pipelineManagerMap.put(ReplicationType.STAND_ALONE,
-            new StandaloneManagerImpl(nodeManager, placementPolicy,
-            containerSize));
-    pipelineManagerMap.put(ReplicationType.RATIS,
-            new RatisManagerImpl(nodeManager, placementPolicy,
-                    containerSize, conf));
-    long pipelineCreationLeaseTimeout = conf.getTimeDuration(
-        ScmConfigKeys.OZONE_SCM_PIPELINE_CREATION_LEASE_TIMEOUT,
-        ScmConfigKeys.OZONE_SCM_PIPELINE_CREATION_LEASE_TIMEOUT_DEFAULT,
-        TimeUnit.MILLISECONDS);
-    pipelineLeaseManager = new LeaseManager<>("PipelineCreation",
-        pipelineCreationLeaseTimeout);
-    pipelineLeaseManager.start();
-
-    stateManager = new PipelineStateManager();
-    this.nodeManager = nodeManager;
-    pipeline2ContainerMap = new HashMap<>();
-
-    // Write the container name to pipeline mapping.
-    File metaDir = getOzoneMetaDirPath(conf);
-    File containerDBPath = new File(metaDir, SCM_PIPELINE_DB);
-    pipelineStore = MetadataStoreBuilder.newBuilder()
-            .setConf(conf)
-            .setDbFile(containerDBPath)
-            .setCacheSize(cacheSizeMB * OzoneConsts.MB)
-            .build();
-
-    reloadExistingPipelines();
-  }
-
-  private void reloadExistingPipelines() throws IOException {
-    if (pipelineStore.isEmpty()) {
-      // Nothing to do just return
-      return;
-    }
-
-    List<Map.Entry<byte[], byte[]>> range =
-            pipelineStore.getSequentialRangeKVs(null, Integer.MAX_VALUE, null);
-
-    // Transform the values into the pipelines.
-    // TODO: filter by pipeline state
-    for (Map.Entry<byte[], byte[]> entry : range) {
-      Pipeline pipeline = Pipeline.getFromProtoBuf(
-                HddsProtos.Pipeline.PARSER.parseFrom(entry.getValue()));
-      Preconditions.checkNotNull(pipeline);
-      addExistingPipeline(pipeline);
-    }
-  }
-
-  @VisibleForTesting
-  public Set<ContainerID> getOpenContainerIDsByPipeline(PipelineID pipelineID) {
-    return pipeline2ContainerMap.get(pipelineID);
-  }
-
-  public void addContainerToPipeline(PipelineID pipelineID, long containerID) {
-    pipeline2ContainerMap.get(pipelineID)
-            .add(ContainerID.valueof(containerID));
-  }
-
-  public void removeContainerFromPipeline(PipelineID pipelineID,
-                                          long containerID) throws IOException {
-    pipeline2ContainerMap.get(pipelineID)
-            .remove(ContainerID.valueof(containerID));
-    closePipelineIfNoOpenContainers(pipelineMap.get(pipelineID));
-  }
-
-  /**
-   * Translates a list of nodes, ordered such that the first is the leader, into
-   * a corresponding {@link Pipeline} object.
-   *
-   * @param nodes - list of datanodes on which we will allocate the container.
-   * The first of the list will be the leader node.
-   * @return pipeline corresponding to nodes
-   */
-  public static Pipeline newPipelineFromNodes(
-      List<DatanodeDetails> nodes, ReplicationType replicationType,
-      ReplicationFactor replicationFactor, PipelineID id) {
-    Preconditions.checkNotNull(nodes);
-    Preconditions.checkArgument(nodes.size() > 0);
-    String leaderId = nodes.get(0).getUuidString();
-    // A new pipeline always starts in allocated state
-    Pipeline pipeline = new Pipeline(leaderId, LifeCycleState.ALLOCATED,
-        replicationType, replicationFactor, id);
-    for (DatanodeDetails node : nodes) {
-      pipeline.addMember(node);
-    }
-    return pipeline;
-  }
-
-  /**
-   * Create pluggable container placement policy implementation instance.
-   *
-   * @param nodeManager - SCM node manager.
-   * @param conf - configuration.
-   * @return SCM container placement policy implementation instance.
-   */
-  @SuppressWarnings("unchecked")
-  private static ContainerPlacementPolicy createContainerPlacementPolicy(
-      final NodeManager nodeManager, final Configuration conf) {
-    Class<? extends ContainerPlacementPolicy> implClass =
-        (Class<? extends ContainerPlacementPolicy>) conf.getClass(
-            ScmConfigKeys.OZONE_SCM_CONTAINER_PLACEMENT_IMPL_KEY,
-            SCMContainerPlacementRandom.class);
-
-    try {
-      Constructor<? extends ContainerPlacementPolicy> ctor =
-          implClass.getDeclaredConstructor(NodeManager.class,
-              Configuration.class);
-      return ctor.newInstance(nodeManager, conf);
-    } catch (RuntimeException e) {
-      throw e;
-    } catch (InvocationTargetException e) {
-      throw new RuntimeException(implClass.getName()
-          + " could not be constructed.", e.getCause());
-    } catch (Exception e) {
-      LOG.error("Unhandled exception occurred, Placement policy will not be " +
-          "functional.");
-      throw new IllegalArgumentException("Unable to load " +
-          "ContainerPlacementPolicy", e);
-    }
-  }
-
-  /**
-   * This function is called by the Container Manager while allocating a new
-   * container. The client specifies what kind of replication pipeline is needed
-   * and based on the replication type in the request appropriate Interface is
-   * invoked.
-   */
-
-  public Pipeline getReplicationPipeline(ReplicationType replicationType,
-      HddsProtos.ReplicationFactor replicationFactor)
-      throws IOException {
-    PipelineManager manager = pipelineManagerMap.get(replicationType);
-    Preconditions.checkNotNull(manager, "Found invalid pipeline manager");
-    LOG.debug("Getting replication pipeline forReplicationType {} :" +
-            " ReplicationFactor {}", replicationType.toString(),
-        replicationFactor.toString());
-
-    /**
-     * In the Ozone world, we have a very simple policy.
-     *
-     * 1. Try to create a pipeline if there are enough free nodes.
-     *
-     * 2. This allows all nodes to part of a pipeline quickly.
-     *
-     * 3. if there are not enough free nodes, return already allocated pipeline
-     * in a round-robin fashion.
-     *
-     * TODO: Might have to come up with a better algorithm than this.
-     * Create a new placement policy that returns pipelines in round robin
-     * fashion.
-     */
-    Pipeline pipeline =
-        manager.createPipeline(replicationFactor, replicationType);
-    if (pipeline == null) {
-      // try to return a pipeline from already allocated pipelines
-      PipelineID pipelineId =
-              manager.getPipeline(replicationFactor, replicationType);
-      if (pipelineId == null) {
-        throw new SCMException(FAILED_TO_FIND_ACTIVE_PIPELINE);
-      }
-      pipeline = pipelineMap.get(pipelineId);
-      Preconditions.checkArgument(pipeline.getLifeCycleState() ==
-              LifeCycleState.OPEN);
-    } else {
-      pipelineStore.put(pipeline.getId().getProtobuf().toByteArray(),
-              pipeline.getProtobufMessage().toByteArray());
-      // if a new pipeline is created, initialize its state machine
-      updatePipelineState(pipeline, HddsProtos.LifeCycleEvent.CREATE);
-
-      //TODO: move the initialization of pipeline to Ozone Client
-      manager.initializePipeline(pipeline);
-      updatePipelineState(pipeline, HddsProtos.LifeCycleEvent.CREATED);
-    }
-    return pipeline;
-  }
-
-  /**
-   * This function to return pipeline for given pipeline id.
-   */
-  public Pipeline getPipeline(PipelineID pipelineID) {
-    return pipelineMap.get(pipelineID);
-  }
-
-  /**
-   * Finalize a given pipeline.
-   */
-  public void finalizePipeline(Pipeline pipeline) throws IOException {
-    PipelineManager manager = pipelineManagerMap.get(pipeline.getType());
-    Preconditions.checkNotNull(manager, "Found invalid pipeline manager");
-    if (pipeline.getLifeCycleState() == LifeCycleState.CLOSING ||
-        pipeline.getLifeCycleState() == LifeCycleState.CLOSED) {
-      LOG.debug("pipeline:{} already in closing state, skipping",
-          pipeline.getId());
-      // already in closing/closed state
-      return;
-    }
-
-    // Remove the pipeline from active allocation
-    if (manager.finalizePipeline(pipeline)) {
-      LOG.info("Finalizing pipeline. pipelineID: {}", pipeline.getId());
-      updatePipelineState(pipeline, HddsProtos.LifeCycleEvent.FINALIZE);
-      closePipelineIfNoOpenContainers(pipeline);
-    }
-  }
-
-  /**
-   * Close a given pipeline.
-   */
-  private void closePipelineIfNoOpenContainers(Pipeline pipeline)
-      throws IOException {
-    if (pipeline.getLifeCycleState() != LifeCycleState.CLOSING) {
-      return;
-    }
-    HashSet<ContainerID> containerIDS =
-            pipeline2ContainerMap.get(pipeline.getId());
-    if (containerIDS.size() == 0) {
-      updatePipelineState(pipeline, HddsProtos.LifeCycleEvent.CLOSE);
-      LOG.info("Closing pipeline. pipelineID: {}", pipeline.getId());
-    }
-  }
-
-  /**
-   * Close a given pipeline.
-   */
-  private void closePipeline(Pipeline pipeline) throws IOException {
-    PipelineManager manager = pipelineManagerMap.get(pipeline.getType());
-    Preconditions.checkNotNull(manager, "Found invalid pipeline manager");
-    LOG.debug("Closing pipeline. pipelineID: {}", pipeline.getId());
-    HashSet<ContainerID> containers =
-            pipeline2ContainerMap.get(pipeline.getId());
-    Preconditions.checkArgument(containers.size() == 0);
-    manager.closePipeline(pipeline);
-  }
-
-  /**
-   * Add to a given pipeline.
-   */
-  private void addOpenPipeline(Pipeline pipeline) {
-    PipelineManager manager = pipelineManagerMap.get(pipeline.getType());
-    Preconditions.checkNotNull(manager, "Found invalid pipeline manager");
-    LOG.debug("Adding Open pipeline. pipelineID: {}", pipeline.getId());
-    manager.addOpenPipeline(pipeline);
-  }
-
-  private void closeContainersByPipeline(Pipeline pipeline) {
-    HashSet<ContainerID> containers =
-            pipeline2ContainerMap.get(pipeline.getId());
-    for (ContainerID id : containers) {
-      eventPublisher.fireEvent(SCMEvents.CLOSE_CONTAINER, id);
-    }
-  }
-
-  private void addExistingPipeline(Pipeline pipeline) throws IOException {
-    LifeCycleState state = pipeline.getLifeCycleState();
-    switch (state) {
-    case ALLOCATED:
-      // a pipeline in allocated state is only present in SCM and does not exist
-      // on datanode, on SCM restart, this pipeline can be ignored.
-      break;
-    case CREATING:
-    case OPEN:
-    case CLOSING:
-      //TODO: process pipeline report and move pipeline to active queue
-      // when all the nodes have reported.
-      pipelineMap.put(pipeline.getId(), pipeline);
-      pipeline2ContainerMap.put(pipeline.getId(), new HashSet<>());
-      nodeManager.addPipeline(pipeline);
-      // reset the datanodes in the pipeline
-      // they will be reset on
-      pipeline.resetPipeline();
-      break;
-    case CLOSED:
-      // if the pipeline is in closed state, nothing to do.
-      break;
-    default:
-      throw new IOException("invalid pipeline state:" + state);
-    }
-  }
-
-  public void handleStaleNode(DatanodeDetails dn) {
-    Set<PipelineID> pipelineIDs = nodeManager.getPipelineByDnID(dn.getUuid());
-    for (PipelineID id : pipelineIDs) {
-      LOG.info("closing pipeline {}.", id);
-      eventPublisher.fireEvent(SCMEvents.PIPELINE_CLOSE, id);
-    }
-  }
-
-  void processPipelineReport(DatanodeDetails dn,
-                                    PipelineReportsProto pipelineReport) {
-    Set<PipelineID> reportedPipelines = new HashSet<>();
-    pipelineReport.getPipelineReportList().
-            forEach(p ->
-                    reportedPipelines.add(
-                            processPipelineReport(p.getPipelineID(), dn)));
-
-    //TODO: handle missing pipelines and new pipelines later
-  }
-
-  private PipelineID processPipelineReport(
-          HddsProtos.PipelineID id, DatanodeDetails dn) {
-    PipelineID pipelineID = PipelineID.getFromProtobuf(id);
-    Pipeline pipeline = pipelineMap.get(pipelineID);
-    if (pipeline != null) {
-      pipelineManagerMap.get(pipeline.getType())
-              .processPipelineReport(pipeline, dn);
-    }
-    return pipelineID;
-  }
-
-  /**
-   * Update the Pipeline State to the next state.
-   *
-   * @param pipeline - Pipeline
-   * @param event - LifeCycle Event
-   * @throws SCMException  on Failure.
-   */
-  public void updatePipelineState(Pipeline pipeline,
-      HddsProtos.LifeCycleEvent event) throws IOException {
-    try {
-      switch (event) {
-      case CREATE:
-        pipelineMap.put(pipeline.getId(), pipeline);
-        pipeline2ContainerMap.put(pipeline.getId(), new HashSet<>());
-        nodeManager.addPipeline(pipeline);
-        // Acquire lease on pipeline
-        Lease<Pipeline> pipelineLease = pipelineLeaseManager.acquire(pipeline);
-        // Register callback to be executed in case of timeout
-        pipelineLease.registerCallBack(() -> {
-          updatePipelineState(pipeline, HddsProtos.LifeCycleEvent.TIMEOUT);
-          return null;
-        });
-        break;
-      case CREATED:
-        // Release the lease on pipeline
-        pipelineLeaseManager.release(pipeline);
-        addOpenPipeline(pipeline);
-        break;
-
-      case FINALIZE:
-        closeContainersByPipeline(pipeline);
-        break;
-
-      case CLOSE:
-      case TIMEOUT:
-        closePipeline(pipeline);
-        pipeline2ContainerMap.remove(pipeline.getId());
-        nodeManager.removePipeline(pipeline);
-        pipelineMap.remove(pipeline.getId());
-        break;
-      default:
-        throw new SCMException("Unsupported pipeline LifeCycleEvent.",
-            FAILED_TO_CHANGE_PIPELINE_STATE);
-      }
-
-      stateManager.updatePipelineState(pipeline, event);
-      pipelineStore.put(pipeline.getId().getProtobuf().toByteArray(),
-              pipeline.getProtobufMessage().toByteArray());
-    } catch (LeaseException e) {
-      throw new IOException("Lease Exception.", e);
-    }
-  }
-
-  public void shutdown() throws IOException {
-    if (pipelineLeaseManager != null) {
-      pipelineLeaseManager.shutdown();
-    }
-
-    if (pipelineStore != null) {
-      pipelineStore.close();
-    }
-  }
-}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/dce4ebe8/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipelines/PipelineStateManager.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipelines/PipelineStateManager.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipelines/PipelineStateManager.java
deleted file mode 100644
index 6054f16..0000000
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipelines/PipelineStateManager.java
+++ /dev/null
@@ -1,136 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with this
- * work for additional information regarding copyright ownership.  The ASF
- * licenses this file to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- * <p/>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p/>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations under
- * the License.
- */
-package org.apache.hadoop.hdds.scm.pipelines;
-
-import com.google.common.base.Preconditions;
-import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
-import org.apache.hadoop.hdds.scm.container.common.helpers.Pipeline;
-import org.apache.hadoop.hdds.scm.exceptions.SCMException;
-import org.apache.hadoop.ozone.common.statemachine
-    .InvalidStateTransitionException;
-import org.apache.hadoop.ozone.common.statemachine.StateMachine;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import java.io.IOException;
-import java.util.HashSet;
-import java.util.Set;
-
-import static org.apache.hadoop.hdds.scm.exceptions.SCMException.ResultCodes
-    .FAILED_TO_CHANGE_PIPELINE_STATE;
-
-/**
- * Manages Pipeline states.
- */
-public class PipelineStateManager {
-  private static final Logger LOG =
-      LoggerFactory.getLogger(PipelineStateManager.class);
-
-  private final StateMachine<HddsProtos.LifeCycleState,
-      HddsProtos.LifeCycleEvent> stateMachine;
-
-  PipelineStateManager() {
-    // Initialize the container state machine.
-    Set<HddsProtos.LifeCycleState> finalStates = new HashSet<>();
-    // These are the steady states of a container.
-    finalStates.add(HddsProtos.LifeCycleState.OPEN);
-    finalStates.add(HddsProtos.LifeCycleState.CLOSED);
-
-    this.stateMachine = new StateMachine<>(HddsProtos.LifeCycleState.ALLOCATED,
-        finalStates);
-    initializeStateMachine();
-  }
-
-  /**
-   * Event and State Transition Mapping.
-   *
-   * State: ALLOCATED ---------------> CREATING
-   * Event:                CREATE
-   *
-   * State: CREATING  ---------------> OPEN
-   * Event:               CREATED
-   *
-   * State: OPEN      ---------------> CLOSING
-   * Event:               FINALIZE
-   *
-   * State: CLOSING   ---------------> CLOSED
-   * Event:                CLOSE
-   *
-   * State: CREATING  ---------------> CLOSED
-   * Event:               TIMEOUT
-   *
-   *
-   * Container State Flow:
-   *
-   * [ALLOCATED]---->[CREATING]------>[OPEN]-------->[CLOSING]
-   *            (CREATE)     | (CREATED)     (FINALIZE)   |
-   *                         |                            |
-   *                         |                            |
-   *                         |(TIMEOUT)                   |(CLOSE)
-   *                         |                            |
-   *                         +--------> [CLOSED] <--------+
-   */
-  private void initializeStateMachine() {
-    stateMachine.addTransition(HddsProtos.LifeCycleState.ALLOCATED,
-        HddsProtos.LifeCycleState.CREATING,
-        HddsProtos.LifeCycleEvent.CREATE);
-
-    stateMachine.addTransition(HddsProtos.LifeCycleState.CREATING,
-        HddsProtos.LifeCycleState.OPEN,
-        HddsProtos.LifeCycleEvent.CREATED);
-
-    stateMachine.addTransition(HddsProtos.LifeCycleState.OPEN,
-        HddsProtos.LifeCycleState.CLOSING,
-        HddsProtos.LifeCycleEvent.FINALIZE);
-
-    stateMachine.addTransition(HddsProtos.LifeCycleState.CLOSING,
-        HddsProtos.LifeCycleState.CLOSED,
-        HddsProtos.LifeCycleEvent.CLOSE);
-
-    stateMachine.addTransition(HddsProtos.LifeCycleState.CREATING,
-        HddsProtos.LifeCycleState.CLOSED,
-        HddsProtos.LifeCycleEvent.TIMEOUT);
-  }
-
-
-  /**
-   * Update the Pipeline State to the next state.
-   *
-   * @param pipeline - Pipeline
-   * @param event - LifeCycle Event
-   * @throws SCMException  on Failure.
-   */
-  public void updatePipelineState(Pipeline pipeline,
-      HddsProtos.LifeCycleEvent event) throws IOException {
-    HddsProtos.LifeCycleState newState;
-    try {
-      newState = stateMachine.getNextState(pipeline.getLifeCycleState(), event);
-    } catch (InvalidStateTransitionException ex) {
-      String error = String.format("Failed to update pipeline state %s, " +
-              "reason: invalid state transition from state: %s upon " +
-              "event: %s.",
-          pipeline.getId(), pipeline.getLifeCycleState(), event);
-      LOG.error(error);
-      throw new SCMException(error, FAILED_TO_CHANGE_PIPELINE_STATE);
-    }
-
-    // This is a post condition after executing getNextState.
-    Preconditions.checkNotNull(newState);
-    Preconditions.checkNotNull(pipeline);
-    pipeline.setLifeCycleState(newState);
-  }
-}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/dce4ebe8/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipelines/package-info.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipelines/package-info.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipelines/package-info.java
deleted file mode 100644
index ea24c58..0000000
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipelines/package-info.java
+++ /dev/null
@@ -1,38 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hdds.scm.pipelines;
-/**
- Ozone supports the notion of different kind of pipelines.
- That means that we can have a replication pipeline build on
- Ratis, Standalone or some other protocol. All Pipeline managers
- the entities in charge of pipelines reside in the package.
-
- Here is the high level Arch.
-
- 1. A pipeline selector class is instantiated in the Container manager class.
-
- 2. A client when creating a container -- will specify what kind of
- replication type it wants to use. We support 2 types now, Ratis and StandAlone.
-
- 3. Based on the replication type, the pipeline selector class asks the
- corresponding pipeline manager for a pipeline.
-
- 4. We have supported the ability for clients to specify a set of nodes in
- the pipeline or rely in the pipeline manager to select the datanodes if they
- are not specified.
- */
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hadoop/blob/dce4ebe8/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipelines/ratis/RatisManagerImpl.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipelines/ratis/RatisManagerImpl.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipelines/ratis/RatisManagerImpl.java
deleted file mode 100644
index 905a5b5..0000000
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipelines/ratis/RatisManagerImpl.java
+++ /dev/null
@@ -1,129 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with this
- * work for additional information regarding copyright ownership.  The ASF
- * licenses this file to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- * <p/>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p/>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations under
- * the License.
- */
-package org.apache.hadoop.hdds.scm.pipelines.ratis;
-
-import com.google.common.base.Preconditions;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hdds.scm.XceiverClientRatis;
-import org.apache.hadoop.hdds.scm.container.common.helpers.Pipeline;
-import org.apache.hadoop.hdds.scm.container.common.helpers.PipelineID;
-import org.apache.hadoop.hdds.scm.container.placement.algorithms
-    .ContainerPlacementPolicy;
-import org.apache.hadoop.hdds.scm.node.NodeManager;
-import org.apache.hadoop.hdds.scm.pipelines.PipelineManager;
-import org.apache.hadoop.hdds.scm.pipelines.PipelineSelector;
-import org.apache.hadoop.hdds.protocol.DatanodeDetails;
-import org.apache.hadoop.hdds.protocol.proto.HddsProtos.NodeState;
-import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor;
-import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationType;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import java.io.IOException;
-import java.util.HashSet;
-import java.util.LinkedList;
-import java.util.List;
-import java.util.Set;
-
-/**
- * Implementation of {@link PipelineManager}.
- *
- * TODO : Introduce a state machine.
- */
-public class RatisManagerImpl extends PipelineManager {
-  private static final Logger LOG =
-      LoggerFactory.getLogger(RatisManagerImpl.class);
-  private final Configuration conf;
-  private final NodeManager nodeManager;
-  private final Set<DatanodeDetails> ratisMembers;
-
-  /**
-   * Constructs a Ratis Pipeline Manager.
-   *
-   * @param nodeManager
-   */
-  public RatisManagerImpl(NodeManager nodeManager,
-      ContainerPlacementPolicy placementPolicy, long size, Configuration conf) {
-    super();
-    this.conf = conf;
-    this.nodeManager = nodeManager;
-    ratisMembers = new HashSet<>();
-  }
-
-  /**
-   * Allocates a new ratis Pipeline from the free nodes.
-   *
-   * @param factor - One or Three
-   * @return Pipeline.
-   */
-  public Pipeline allocatePipeline(ReplicationFactor factor) {
-    List<DatanodeDetails> newNodesList = new LinkedList<>();
-    List<DatanodeDetails> datanodes = nodeManager.getNodes(NodeState.HEALTHY);
-    //TODO: Add Raft State to the Nodes, so we can query and skip nodes from
-    // data from datanode instead of maintaining a set.
-    for (DatanodeDetails datanode : datanodes) {
-      Preconditions.checkNotNull(datanode);
-      if (!ratisMembers.contains(datanode)) {
-        newNodesList.add(datanode);
-        if (newNodesList.size() == factor.getNumber()) {
-          // once a datanode has been added to a pipeline, exclude it from
-          // further allocations
-          ratisMembers.addAll(newNodesList);
-          PipelineID pipelineID = PipelineID.randomId();
-          LOG.info("Allocating a new ratis pipeline of size: {} id: {}",
-                  factor.getNumber(), pipelineID);
-          return PipelineSelector.newPipelineFromNodes(newNodesList,
-              ReplicationType.RATIS, factor, pipelineID);
-        }
-      }
-    }
-    return null;
-  }
-
-  public void initializePipeline(Pipeline pipeline) throws IOException {
-    //TODO:move the initialization from SCM to client
-    try (XceiverClientRatis client =
-        XceiverClientRatis.newXceiverClientRatis(pipeline, conf)) {
-      client.createPipeline();
-    }
-  }
-
-  public void processPipelineReport(Pipeline pipeline, DatanodeDetails dn) {
-    super.processPipelineReport(pipeline, dn);
-    ratisMembers.add(dn);
-  }
-
-  public synchronized boolean finalizePipeline(Pipeline pipeline) {
-    activePipelines.get(pipeline.getFactor().ordinal())
-            .removePipeline(pipeline.getId());
-    return true;
-  }
-
-  /**
-   * Close the pipeline.
-   */
-  public void closePipeline(Pipeline pipeline) throws IOException {
-    try (XceiverClientRatis client =
-        XceiverClientRatis.newXceiverClientRatis(pipeline, conf)) {
-      client.destroyPipeline();
-    }
-    for (DatanodeDetails node : pipeline.getMachines()) {
-      // A node should always be the in ratis members list.
-      Preconditions.checkArgument(ratisMembers.remove(node));
-    }
-  }
-}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/dce4ebe8/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipelines/ratis/package-info.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipelines/ratis/package-info.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipelines/ratis/package-info.java
deleted file mode 100644
index 2970fb3..0000000
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipelines/ratis/package-info.java
+++ /dev/null
@@ -1,18 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hdds.scm.pipelines.ratis;
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hadoop/blob/dce4ebe8/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipelines/standalone/StandaloneManagerImpl.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipelines/standalone/StandaloneManagerImpl.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipelines/standalone/StandaloneManagerImpl.java
deleted file mode 100644
index 045afb6..0000000
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipelines/standalone/StandaloneManagerImpl.java
+++ /dev/null
@@ -1,122 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with this
- * work for additional information regarding copyright ownership.  The ASF
- * licenses this file to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations under
- * the License.
- */
-package org.apache.hadoop.hdds.scm.pipelines.standalone;
-
-import com.google.common.base.Preconditions;
-import org.apache.hadoop.hdds.scm.container.common.helpers.Pipeline;
-import org.apache.hadoop.hdds.scm.container.common.helpers.PipelineID;
-import org.apache.hadoop.hdds.scm.container.placement.algorithms
-    .ContainerPlacementPolicy;
-import org.apache.hadoop.hdds.scm.node.NodeManager;
-import org.apache.hadoop.hdds.scm.pipelines.PipelineManager;
-import org.apache.hadoop.hdds.scm.pipelines.PipelineSelector;
-import org.apache.hadoop.hdds.protocol.DatanodeDetails;
-import org.apache.hadoop.hdds.protocol.proto.HddsProtos.NodeState;
-import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor;
-import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationType;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import java.io.IOException;
-import java.util.HashSet;
-import java.util.LinkedList;
-import java.util.List;
-import java.util.Set;
-
-/**
- * Standalone Manager Impl to prove that pluggable interface
- * works with current tests.
- */
-public class StandaloneManagerImpl extends PipelineManager {
-  private static final Logger LOG =
-      LoggerFactory.getLogger(StandaloneManagerImpl.class);
-  private final NodeManager nodeManager;
-  private final ContainerPlacementPolicy placementPolicy;
-  private final long containerSize;
-  private final Set<DatanodeDetails> standAloneMembers;
-
-  /**
-   * Constructor for Standalone Node Manager Impl.
-   * @param nodeManager - Node Manager.
-   * @param placementPolicy - Placement Policy
-   * @param containerSize - Container Size.
-   */
-  public StandaloneManagerImpl(NodeManager nodeManager,
-      ContainerPlacementPolicy placementPolicy, long containerSize) {
-    super();
-    this.nodeManager = nodeManager;
-    this.placementPolicy = placementPolicy;
-    this.containerSize =  containerSize;
-    this.standAloneMembers = new HashSet<>();
-  }
-
-
-  /**
-   * Allocates a new standalone Pipeline from the free nodes.
-   *
-   * @param factor - One
-   * @return Pipeline.
-   */
-  public Pipeline allocatePipeline(ReplicationFactor factor) {
-    List<DatanodeDetails> newNodesList = new LinkedList<>();
-    List<DatanodeDetails> datanodes = nodeManager.getNodes(NodeState.HEALTHY);
-    for (DatanodeDetails datanode : datanodes) {
-      Preconditions.checkNotNull(datanode);
-      if (!standAloneMembers.contains(datanode)) {
-        newNodesList.add(datanode);
-        if (newNodesList.size() == factor.getNumber()) {
-          // once a datanode has been added to a pipeline, exclude it from
-          // further allocations
-          standAloneMembers.addAll(newNodesList);
-          // Standalone pipeline use node id as pipeline
-          PipelineID pipelineID =
-                  PipelineID.valueOf(newNodesList.get(0).getUuid());
-          LOG.info("Allocating a new standalone pipeline of size: {} id: {}",
-              factor.getNumber(), pipelineID);
-          return PipelineSelector.newPipelineFromNodes(newNodesList,
-              ReplicationType.STAND_ALONE, ReplicationFactor.ONE, pipelineID);
-        }
-      }
-    }
-    return null;
-  }
-
-  public void initializePipeline(Pipeline pipeline) {
-    // Nothing to be done for standalone pipeline
-  }
-
-  public void processPipelineReport(Pipeline pipeline, DatanodeDetails dn) {
-    super.processPipelineReport(pipeline, dn);
-    standAloneMembers.add(dn);
-  }
-
-  public synchronized boolean finalizePipeline(Pipeline pipeline) {
-    activePipelines.get(pipeline.getFactor().ordinal())
-            .removePipeline(pipeline.getId());
-    return false;
-  }
-
-  /**
-   * Close the pipeline.
-   */
-  public void closePipeline(Pipeline pipeline) throws IOException {
-    for (DatanodeDetails node : pipeline.getMachines()) {
-      // A node should always be the in standalone members list.
-      Preconditions.checkArgument(standAloneMembers.remove(node));
-    }
-  }
-}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/dce4ebe8/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipelines/standalone/package-info.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipelines/standalone/package-info.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipelines/standalone/package-info.java
deleted file mode 100644
index b2c3ca40..0000000
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipelines/standalone/package-info.java
+++ /dev/null
@@ -1,18 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hdds.scm.pipelines.standalone;
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hadoop/blob/dce4ebe8/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMClientProtocolServer.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMClientProtocolServer.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMClientProtocolServer.java
index 89a6c81..e92200a 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMClientProtocolServer.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMClientProtocolServer.java
@@ -37,7 +37,7 @@ import org.apache.hadoop.hdds.scm.container.ContainerID;
 import org.apache.hadoop.hdds.scm.container.ContainerNotFoundException;
 import org.apache.hadoop.hdds.scm.container.common.helpers.ContainerWithPipeline;
 import org.apache.hadoop.hdds.scm.container.ContainerInfo;
-import org.apache.hadoop.hdds.scm.container.common.helpers.Pipeline;
+import org.apache.hadoop.hdds.scm.pipeline.Pipeline;
 import org.apache.hadoop.hdds.scm.exceptions.SCMException;
 import org.apache.hadoop.hdds.scm.exceptions.SCMException.ResultCodes;
 import org.apache.hadoop.hdds.scm.protocol.StorageContainerLocationProtocol;




[50/50] [abbrv] hadoop git commit: HDFS-14016. [SBN read] ObserverReadProxyProvider should enable observer read by default. Contributed by Chen Liang.

Posted by su...@apache.org.
HDFS-14016. [SBN read] ObserverReadProxyProvider should enable observer read by default. Contributed by Chen Liang.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/8b5277fd
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/8b5277fd
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/8b5277fd

Branch: refs/heads/HDFS-12943
Commit: 8b5277fd1184802fcdf1e4673d56d238ce36a826
Parents: 32551b4
Author: Chao Sun <su...@apache.org>
Authored: Wed Oct 31 11:22:02 2018 -0700
Committer: Chao Sun <su...@apache.org>
Committed: Wed Oct 31 11:22:11 2018 -0700

----------------------------------------------------------------------
 .../hadoop/hdfs/server/namenode/ha/ObserverReadProxyProvider.java  | 2 ++
 1 file changed, 2 insertions(+)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/8b5277fd/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/ObserverReadProxyProvider.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/ObserverReadProxyProvider.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/ObserverReadProxyProvider.java
index 87ca718..17bad65 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/ObserverReadProxyProvider.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/ObserverReadProxyProvider.java
@@ -141,6 +141,8 @@ public class ObserverReadProxyProvider<T extends ClientProtocol>
         ObserverReadInvocationHandler.class.getClassLoader(),
         new Class<?>[] { xface }, new ObserverReadInvocationHandler());
     combinedProxy = new ProxyInfo<>(wrappedProxy, combinedInfo.toString());
+    // TODO : make this configurable or remove this variable
+    this.observerReadEnabled = true;
   }
 
   public AlignmentContext getAlignmentContext() {
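
For context, here is a minimal sketch of a client that picks up this provider, so that with this patch observer reads are on without any extra client-side flag. Only the provider class name comes from the diff above; the nameservice id "mycluster", the path, and the surrounding code are illustrative assumptions, with dfs.client.failover.proxy.provider.<nameservice> being the usual HDFS HA client setting:

    import java.net.URI;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    public class ObserverReadClientSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        // "mycluster" is a hypothetical nameservice id; substitute your own.
        conf.set("dfs.client.failover.proxy.provider.mycluster",
            "org.apache.hadoop.hdfs.server.namenode.ha.ObserverReadProxyProvider");
        try (FileSystem fs = FileSystem.get(URI.create("hdfs://mycluster"), conf)) {
          // With observerReadEnabled now defaulting to true, reads issued through
          // this client may be served by an Observer NameNode; writes still go
          // to the Active.
          fs.getFileStatus(new Path("/"));
        }
      }
    }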




[38/50] [abbrv] hadoop git commit: YARN-6729. Clarify documentation on how to enable cgroup support. Contributed by Zhankun Tang

Posted by su...@apache.org.
YARN-6729. Clarify documentation on how to enable cgroup support. Contributed by Zhankun Tang


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/277a3d8d
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/277a3d8d
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/277a3d8d

Branch: refs/heads/HDFS-12943
Commit: 277a3d8d9fe1127c75452d083ff7859c603e686d
Parents: d36012b
Author: Shane Kumpf <sk...@apache.org>
Authored: Tue Oct 30 11:36:55 2018 -0600
Committer: Shane Kumpf <sk...@apache.org>
Committed: Tue Oct 30 11:36:55 2018 -0600

----------------------------------------------------------------------
 .../hadoop-yarn-site/src/site/markdown/NodeManagerCgroups.md     | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/277a3d8d/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/NodeManagerCgroups.md
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/NodeManagerCgroups.md b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/NodeManagerCgroups.md
index 4a83dce..7a48f6d 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/NodeManagerCgroups.md
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/NodeManagerCgroups.md
@@ -29,13 +29,13 @@ The following settings are related to setting up CGroups. These need to be set i
 |Configuration Name | Description |
 |:---- |:---- |
 | `yarn.nodemanager.container-executor.class` | This should be set to "org.apache.hadoop.yarn.server.nodemanager.LinuxContainerExecutor". CGroups is a Linux kernel feature and is exposed via the LinuxContainerExecutor. |
-| `yarn.nodemanager.linux-container-executor.resources-handler.class` | This should be set to "org.apache.hadoop.yarn.server.nodemanager.util.CgroupsLCEResourcesHandler". Using the LinuxContainerExecutor doesn't force you to use CGroups. If you wish to use CGroups, the resource-handler-class must be set to CGroupsLCEResourceHandler. |
+| `yarn.nodemanager.linux-container-executor.resources-handler.class` | This should be set to "org.apache.hadoop.yarn.server.nodemanager.util.CgroupsLCEResourcesHandler". Using the LinuxContainerExecutor doesn't force you to use CGroups. If you wish to use CGroups, the resource-handler-class must be set to CGroupsLCEResourceHandler. DefaultLCEResourcesHandler won't work. |
 | `yarn.nodemanager.linux-container-executor.cgroups.hierarchy` | The cgroups hierarchy under which to place YARN proccesses(cannot contain commas). If yarn.nodemanager.linux-container-executor.cgroups.mount is false (that is, if cgroups have been pre-configured) and the YARN user has write access to the parent directory, then the directory will be created. If the directory already exists, the administrator has to give YARN write permissions to it recursively. |
 | `yarn.nodemanager.linux-container-executor.cgroups.mount` | Whether the LCE should attempt to mount cgroups if not found - can be true or false. |
 | `yarn.nodemanager.linux-container-executor.cgroups.mount-path` | Optional. Where CGroups are located. LCE will try to mount them here, if `yarn.nodemanager.linux-container-executor.cgroups.mount` is true. LCE will try to use CGroups from this location, if `yarn.nodemanager.linux-container-executor.cgroups.mount` is false. If specified, this path and its subdirectories (CGroup hierarchies) must exist and they should be readable and writable by YARN before the NodeManager is launched. See CGroups mount options below for details. |
 | `yarn.nodemanager.linux-container-executor.group` | The Unix group of the NodeManager. It should match the setting in "container-executor.cfg". This configuration is required for validating the secure access of the container-executor binary. |
 
-The following settings are related to limiting resource usage of YARN containers:
+Once CGroups are enabled, the following settings related to limiting resource usage of YARN containers can work:
 
 |Configuration Name | Description |
 |:---- |:---- |
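
As a quick illustration of the setup settings listed above, the hedged sketch below sets the same keys programmatically on a YarnConfiguration. In a real deployment they belong in yarn-site.xml on every NodeManager, and the hierarchy name and group used here ("/hadoop-yarn", "hadoop") are assumptions rather than values from this patch:

    import org.apache.hadoop.yarn.conf.YarnConfiguration;

    public class CgroupSettingsSketch {
      public static void main(String[] args) {
        YarnConfiguration conf = new YarnConfiguration();
        // Required pair: LinuxContainerExecutor plus the cgroups resource handler;
        // per the doc change above, DefaultLCEResourcesHandler will not work.
        conf.set("yarn.nodemanager.container-executor.class",
            "org.apache.hadoop.yarn.server.nodemanager.LinuxContainerExecutor");
        conf.set("yarn.nodemanager.linux-container-executor.resources-handler.class",
            "org.apache.hadoop.yarn.server.nodemanager.util.CgroupsLCEResourcesHandler");
        // Hypothetical hierarchy and group, shown only to make the sketch complete.
        conf.set("yarn.nodemanager.linux-container-executor.cgroups.hierarchy",
            "/hadoop-yarn");
        conf.setBoolean("yarn.nodemanager.linux-container-executor.cgroups.mount", false);
        conf.set("yarn.nodemanager.linux-container-executor.group", "hadoop");
        System.out.println(conf.get("yarn.nodemanager.container-executor.class"));
      }
    }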




[10/50] [abbrv] hadoop git commit: HDDS-694. Plugin new Pipeline management code in SCM. Contributed by Lokesh Jain.

Posted by su...@apache.org.
http://git-wip-us.apache.org/repos/asf/hadoop/blob/dce4ebe8/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestStorageContainerManager.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestStorageContainerManager.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestStorageContainerManager.java
index e260924..52340a9 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestStorageContainerManager.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestStorageContainerManager.java
@@ -140,7 +140,7 @@ public class TestStorageContainerManager {
         if (expectPermissionDenied) {
           fail("Operation should fail, expecting an IOException here.");
         } else {
-          Assert.assertEquals(1, container2.getPipeline().getMachines().size());
+          Assert.assertEquals(1, container2.getPipeline().getNodes().size());
         }
       } catch (Exception e) {
         verifyPermissionDeniedException(e, fakeRemoteUsername);
@@ -153,7 +153,7 @@ public class TestStorageContainerManager {
         if (expectPermissionDenied) {
           fail("Operation should fail, expecting an IOException here.");
         } else {
-          Assert.assertEquals(1, container3.getPipeline().getMachines().size());
+          Assert.assertEquals(1, container3.getPipeline().getNodes().size());
         }
       } catch (Exception e) {
         verifyPermissionDeniedException(e, fakeRemoteUsername);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/dce4ebe8/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestStorageContainerManagerHelper.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestStorageContainerManagerHelper.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestStorageContainerManagerHelper.java
index 217d3f4..0051ecb 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestStorageContainerManagerHelper.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestStorageContainerManagerHelper.java
@@ -162,9 +162,10 @@ public class TestStorageContainerManagerHelper {
         .getStorageContainerManager().getClientProtocolServer()
         .getContainerWithPipeline(containerID);
 
-    DatanodeDetails leadDN = containerWithPipeline.getPipeline().getLeader();
+    DatanodeDetails dn =
+        containerWithPipeline.getPipeline().getFirstNode();
     OzoneContainer containerServer =
-        getContainerServerByDatanodeUuid(leadDN.getUuidString());
+        getContainerServerByDatanodeUuid(dn.getUuidString());
     KeyValueContainerData containerData =
         (KeyValueContainerData) containerServer.getContainerSet()
         .getContainer(containerID).getContainerData();

http://git-wip-us.apache.org/repos/asf/hadoop/blob/dce4ebe8/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rest/TestOzoneRestClient.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rest/TestOzoneRestClient.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rest/TestOzoneRestClient.java
index 26ece8b..fe060a6 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rest/TestOzoneRestClient.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rest/TestOzoneRestClient.java
@@ -24,7 +24,7 @@ import org.apache.hadoop.hdds.protocol.StorageType;
 import org.apache.hadoop.hdds.protocol.DatanodeDetails;
 import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos;
 import org.apache.hadoop.hdds.scm.container.ContainerID;
-import org.apache.hadoop.hdds.scm.container.common.helpers.Pipeline;
+import org.apache.hadoop.hdds.scm.pipeline.Pipeline;
 import org.apache.hadoop.ozone.*;
 import org.apache.hadoop.ozone.client.*;
 import org.apache.hadoop.hdds.client.OzoneQuota;
@@ -449,7 +449,7 @@ public class TestOzoneRestClient {
         .getContainerManager().getContainerWithPipeline(
             ContainerID.valueof(containerID))
         .getPipeline();
-    List<DatanodeDetails> datanodes = pipeline.getMachines();
+    List<DatanodeDetails> datanodes = pipeline.getNodes();
     Assert.assertEquals(datanodes.size(), 1);
 
     DatanodeDetails datanodeDetails = datanodes.get(0);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/dce4ebe8/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestCloseContainerHandlingByClient.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestCloseContainerHandlingByClient.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestCloseContainerHandlingByClient.java
index ee9919d..76f6f8c 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestCloseContainerHandlingByClient.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestCloseContainerHandlingByClient.java
@@ -28,7 +28,7 @@ import org.apache.hadoop.hdds.scm.container.common.helpers.
     StorageContainerException;
 import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos;
 import org.apache.hadoop.hdds.scm.ScmConfigKeys;
-import org.apache.hadoop.hdds.scm.container.common.helpers.Pipeline;
+import org.apache.hadoop.hdds.scm.pipeline.Pipeline;
 import org.apache.hadoop.ozone.MiniOzoneCluster;
 import org.apache.hadoop.ozone.OzoneConfigKeys;
 import org.apache.hadoop.ozone.OzoneConsts;
@@ -306,7 +306,7 @@ public class TestCloseContainerHandlingByClient {
               .getContainerWithPipeline(ContainerID.valueof(containerID))
               .getPipeline();
       pipelineList.add(pipeline);
-      List<DatanodeDetails> datanodes = pipeline.getMachines();
+      List<DatanodeDetails> datanodes = pipeline.getNodes();
       for (DatanodeDetails details : datanodes) {
         Assert.assertFalse(ContainerTestHelper
             .isContainerClosed(cluster, containerID, details));
@@ -319,7 +319,7 @@ public class TestCloseContainerHandlingByClient {
     int index = 0;
     for (long containerID : containerIdList) {
       Pipeline pipeline = pipelineList.get(index);
-      List<DatanodeDetails> datanodes = pipeline.getMachines();
+      List<DatanodeDetails> datanodes = pipeline.getNodes();
       for (DatanodeDetails datanodeDetails : datanodes) {
         GenericTestUtils.waitFor(() -> ContainerTestHelper
                 .isContainerClosed(cluster, containerID, datanodeDetails), 500,
@@ -352,7 +352,7 @@ public class TestCloseContainerHandlingByClient {
     List<DatanodeDetails> datanodes =
         cluster.getStorageContainerManager().getContainerManager()
             .getContainerWithPipeline(ContainerID.valueof(containerID))
-            .getPipeline().getMachines();
+            .getPipeline().getNodes();
     Assert.assertEquals(1, datanodes.size());
     waitForContainerClose(keyName, key, HddsProtos.ReplicationType.STAND_ALONE);
     dataString = fixedLengthString(keyString, (1 * blockSize));
@@ -455,7 +455,7 @@ public class TestCloseContainerHandlingByClient {
     List<DatanodeDetails> datanodes =
         cluster.getStorageContainerManager().getContainerManager()
             .getContainerWithPipeline(ContainerID.valueof(containerID))
-            .getPipeline().getMachines();
+            .getPipeline().getNodes();
     Assert.assertEquals(1, datanodes.size());
     // move the container on the datanode to Closing state, this will ensure
     // closing the key will hit BLOCK_NOT_COMMITTED_EXCEPTION while trying

http://git-wip-us.apache.org/repos/asf/hadoop/blob/dce4ebe8/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneRpcClient.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneRpcClient.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneRpcClient.java
index d507303..0b51bb3 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneRpcClient.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneRpcClient.java
@@ -30,7 +30,7 @@ import org.apache.hadoop.hdds.scm.XceiverClientRatis;
 import org.apache.hadoop.hdds.scm.XceiverClientSpi;
 import org.apache.hadoop.hdds.scm.container.common.helpers.ContainerWithPipeline;
 
-import org.apache.hadoop.hdds.scm.container.common.helpers.Pipeline;
+import org.apache.hadoop.hdds.scm.pipeline.Pipeline;
 import org.apache.hadoop.ozone.*;
 import org.apache.hadoop.hdds.conf.OzoneConfiguration;
 import org.apache.hadoop.ozone.client.*;
@@ -649,7 +649,7 @@ public class TestOzoneRpcClient {
         cluster.getStorageContainerManager().getContainerManager()
             .getContainerWithPipeline(new ContainerID(containerID));
     Pipeline pipeline = container.getPipeline();
-    List<DatanodeDetails> datanodes = pipeline.getMachines();
+    List<DatanodeDetails> datanodes = pipeline.getNodes();
 
     DatanodeDetails datanodeDetails = datanodes.get(0);
     Assert.assertNotNull(datanodeDetails);
@@ -754,7 +754,7 @@ public class TestOzoneRpcClient {
         .getContainerManager().getContainerWithPipeline(
             ContainerID.valueof(containerID))
         .getPipeline();
-    List<DatanodeDetails> datanodes = pipeline.getMachines();
+    List<DatanodeDetails> datanodes = pipeline.getNodes();
     Assert.assertEquals(datanodes.size(), 1);
 
     DatanodeDetails datanodeDetails = datanodes.get(0);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/dce4ebe8/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/ContainerTestHelper.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/ContainerTestHelper.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/ContainerTestHelper.java
index 3969ddd..bde3bc9 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/ContainerTestHelper.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/ContainerTestHelper.java
@@ -21,7 +21,8 @@ package org.apache.hadoop.ozone.container;
 import com.google.common.base.Preconditions;
 import org.apache.hadoop.conf.StorageUnit;
 import org.apache.hadoop.hdds.HddsUtils;
-import org.apache.hadoop.hdds.scm.container.common.helpers.PipelineID;
+import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
+import org.apache.hadoop.hdds.scm.pipeline.PipelineID;
 import org.apache.hadoop.ozone.HddsDatanodeService;
 import org.apache.hadoop.ozone.MiniOzoneCluster;
 import org.apache.hadoop.ozone.container.common.impl.ContainerData;
@@ -35,14 +36,12 @@ import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos
     .ContainerCommandRequestProto;
 import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos
     .ContainerCommandResponseProto;
-import org.apache.hadoop.hdds.protocol.proto.HddsProtos.LifeCycleState;
 import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor;
-import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationType;
 import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.KeyValue;
 import org.apache.hadoop.ozone.OzoneConsts;
 import org.apache.hadoop.ozone.container.common.helpers.ChunkInfo;
 import org.apache.hadoop.ozone.container.common.helpers.BlockData;
-import org.apache.hadoop.hdds.scm.container.common.helpers.Pipeline;
+import org.apache.hadoop.hdds.scm.pipeline.Pipeline;
 import org.junit.Assert;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
@@ -128,17 +127,16 @@ public final class ContainerTestHelper {
   public static Pipeline createPipeline(
       Iterable<DatanodeDetails> ids) throws IOException {
     Objects.requireNonNull(ids, "ids == null");
-    final Iterator<DatanodeDetails> i = ids.iterator();
-    Preconditions.checkArgument(i.hasNext());
-    final DatanodeDetails leader = i.next();
-    final Pipeline pipeline =
-        new Pipeline(leader.getUuidString(), LifeCycleState.OPEN,
-            ReplicationType.STAND_ALONE, ReplicationFactor.ONE,
-            PipelineID.randomId());
-    pipeline.addMember(leader);
-    for(; i.hasNext();) {
-      pipeline.addMember(i.next());
-    }
+    Preconditions.checkArgument(ids.iterator().hasNext());
+    List<DatanodeDetails> dns = new ArrayList<>();
+    ids.forEach(dns::add);
+    Pipeline pipeline = Pipeline.newBuilder()
+        .setState(Pipeline.PipelineState.OPEN)
+        .setId(PipelineID.randomId())
+        .setType(HddsProtos.ReplicationType.STAND_ALONE)
+        .setFactor(ReplicationFactor.ONE)
+        .setNodes(dns)
+        .build();
     return pipeline;
   }
 
@@ -218,7 +216,7 @@ public final class ContainerTestHelper {
     request.setContainerID(blockID.getContainerID());
     request.setWriteChunk(writeRequest);
     request.setTraceID(UUID.randomUUID().toString());
-    request.setDatanodeUuid(pipeline.getLeader().getUuidString());
+    request.setDatanodeUuid(pipeline.getFirstNode().getUuidString());
 
     return request.build();
   }
@@ -260,7 +258,7 @@ public final class ContainerTestHelper {
     request.setContainerID(blockID.getContainerID());
     request.setPutSmallFile(smallFileRequest);
     request.setTraceID(UUID.randomUUID().toString());
-    request.setDatanodeUuid(pipeline.getLeader().getUuidString());
+    request.setDatanodeUuid(pipeline.getFirstNode().getUuidString());
     return request.build();
   }
 
@@ -279,7 +277,7 @@ public final class ContainerTestHelper {
     request.setContainerID(getKey.getGetBlock().getBlockID().getContainerID());
     request.setGetSmallFile(smallFileRequest);
     request.setTraceID(UUID.randomUUID().toString());
-    request.setDatanodeUuid(pipeline.getLeader().getUuidString());
+    request.setDatanodeUuid(pipeline.getFirstNode().getUuidString());
     return request.build();
   }
 
@@ -309,7 +307,7 @@ public final class ContainerTestHelper {
     newRequest.setContainerID(readRequest.getBlockID().getContainerID());
     newRequest.setReadChunk(readRequest);
     newRequest.setTraceID(UUID.randomUUID().toString());
-    newRequest.setDatanodeUuid(pipeline.getLeader().getUuidString());
+    newRequest.setDatanodeUuid(pipeline.getFirstNode().getUuidString());
     return newRequest.build();
   }
 
@@ -342,7 +340,7 @@ public final class ContainerTestHelper {
     request.setContainerID(writeRequest.getBlockID().getContainerID());
     request.setDeleteChunk(deleteRequest);
     request.setTraceID(UUID.randomUUID().toString());
-    request.setDatanodeUuid(pipeline.getLeader().getUuidString());
+    request.setDatanodeUuid(pipeline.getFirstNode().getUuidString());
     return request.build();
   }
 
@@ -363,7 +361,7 @@ public final class ContainerTestHelper {
     request.setCreateContainer(
         ContainerProtos.CreateContainerRequestProto.getDefaultInstance());
     request.setTraceID(UUID.randomUUID().toString());
-    request.setDatanodeUuid(pipeline.getLeader().getUuidString());
+    request.setDatanodeUuid(pipeline.getFirstNode().getUuidString());
 
     return request.build();
   }
@@ -398,7 +396,7 @@ public final class ContainerTestHelper {
     request.setContainerID(containerID);
     request.setUpdateContainer(updateRequestBuilder.build());
     request.setTraceID(UUID.randomUUID().toString());
-    request.setDatanodeUuid(pipeline.getLeader().getUuidString());
+    request.setDatanodeUuid(pipeline.getFirstNode().getUuidString());
     return request.build();
   }
   /**
@@ -427,7 +425,8 @@ public final class ContainerTestHelper {
    * @return - Request
    */
   public static ContainerCommandRequestProto getPutBlockRequest(
-      Pipeline pipeline, ContainerProtos.WriteChunkRequestProto writeRequest) {
+      Pipeline pipeline, ContainerProtos.WriteChunkRequestProto writeRequest)
+      throws IOException {
     LOG.trace("putBlock: {} to pipeline={}",
         writeRequest.getBlockID());
 
@@ -448,7 +447,7 @@ public final class ContainerTestHelper {
     request.setContainerID(blockData.getContainerID());
     request.setPutBlock(putRequest);
     request.setTraceID(UUID.randomUUID().toString());
-    request.setDatanodeUuid(pipeline.getLeader().getUuidString());
+    request.setDatanodeUuid(pipeline.getFirstNode().getUuidString());
     return request.build();
   }
 
@@ -460,7 +459,8 @@ public final class ContainerTestHelper {
    * immediately.
    */
   public static ContainerCommandRequestProto getBlockRequest(
-      Pipeline pipeline, ContainerProtos.PutBlockRequestProto putBlockRequest) {
+      Pipeline pipeline, ContainerProtos.PutBlockRequestProto putBlockRequest)
+      throws IOException {
     ContainerProtos.DatanodeBlockID blockID =
         putBlockRequest.getBlockData().getBlockID();
     LOG.trace("getKey: blockID={}", blockID);
@@ -475,7 +475,7 @@ public final class ContainerTestHelper {
     request.setContainerID(blockID.getContainerID());
     request.setGetBlock(getRequest);
     request.setTraceID(UUID.randomUUID().toString());
-    request.setDatanodeUuid(pipeline.getLeader().getUuidString());
+    request.setDatanodeUuid(pipeline.getFirstNode().getUuidString());
     return request.build();
   }
 
@@ -499,7 +499,8 @@ public final class ContainerTestHelper {
    * @return - Request
    */
   public static ContainerCommandRequestProto getDeleteBlockRequest(
-      Pipeline pipeline, ContainerProtos.PutBlockRequestProto putBlockRequest) {
+      Pipeline pipeline, ContainerProtos.PutBlockRequestProto putBlockRequest)
+      throws IOException {
     ContainerProtos.DatanodeBlockID blockID = putBlockRequest.getBlockData()
         .getBlockID();
     LOG.trace("deleteBlock: name={}", blockID);
@@ -512,7 +513,7 @@ public final class ContainerTestHelper {
     request.setContainerID(blockID.getContainerID());
     request.setDeleteBlock(delRequest);
     request.setTraceID(UUID.randomUUID().toString());
-    request.setDatanodeUuid(pipeline.getLeader().getUuidString());
+    request.setDatanodeUuid(pipeline.getFirstNode().getUuidString());
     return request.build();
   }
 
@@ -523,7 +524,7 @@ public final class ContainerTestHelper {
    * @return ContainerCommandRequestProto.
    */
   public static ContainerCommandRequestProto getCloseContainer(
-      Pipeline pipeline, long containerID) {
+      Pipeline pipeline, long containerID) throws IOException {
     ContainerProtos.ContainerCommandRequestProto cmd =
         ContainerCommandRequestProto.newBuilder()
             .setCmdType(ContainerProtos.Type.CloseContainer)
@@ -531,7 +532,7 @@ public final class ContainerTestHelper {
             .setCloseContainer(
                 ContainerProtos.CloseContainerRequestProto.getDefaultInstance())
             .setTraceID(UUID.randomUUID().toString())
-            .setDatanodeUuid(pipeline.getLeader().getUuidString())
+            .setDatanodeUuid(pipeline.getFirstNode().getUuidString())
             .build();
 
     return cmd;
@@ -544,7 +545,7 @@ public final class ContainerTestHelper {
    * @return ContainerCommandRequestProto without traceId.
    */
   public static ContainerCommandRequestProto getRequestWithoutTraceId(
-      Pipeline pipeline, long containerID) {
+      Pipeline pipeline, long containerID) throws IOException {
     Preconditions.checkNotNull(pipeline);
     ContainerProtos.ContainerCommandRequestProto cmd =
         ContainerCommandRequestProto.newBuilder()
@@ -552,7 +553,7 @@ public final class ContainerTestHelper {
             .setContainerID(containerID)
             .setCloseContainer(
                 ContainerProtos.CloseContainerRequestProto.getDefaultInstance())
-            .setDatanodeUuid(pipeline.getLeader().getUuidString())
+            .setDatanodeUuid(pipeline.getFirstNode().getUuidString())
             .build();
     return cmd;
   }
@@ -563,7 +564,8 @@ public final class ContainerTestHelper {
    * @return ContainerCommandRequestProto.
    */
   public static ContainerCommandRequestProto getDeleteContainer(
-      Pipeline pipeline, long containerID, boolean forceDelete) {
+      Pipeline pipeline, long containerID, boolean forceDelete)
+      throws IOException {
     Preconditions.checkNotNull(pipeline);
     ContainerProtos.DeleteContainerRequestProto deleteRequest =
         ContainerProtos.DeleteContainerRequestProto.newBuilder().
@@ -575,7 +577,7 @@ public final class ContainerTestHelper {
             ContainerProtos.DeleteContainerRequestProto.getDefaultInstance())
         .setDeleteContainer(deleteRequest)
         .setTraceID(UUID.randomUUID().toString())
-        .setDatanodeUuid(pipeline.getLeader().getUuidString())
+        .setDatanodeUuid(pipeline.getFirstNode().getUuidString())
         .build();
   }
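
Taken together, the hunks above move the test helpers from the old mutable Pipeline (addMember/getLeader/getMachines) to the immutable, builder-based Pipeline in org.apache.hadoop.hdds.scm.pipeline. The sketch below uses only the calls that appear in this diff; the datanode list is a placeholder, since in the tests it comes from the MiniOzoneCluster/TestUtils helpers:

    import java.io.IOException;
    import java.util.List;
    import org.apache.hadoop.hdds.protocol.DatanodeDetails;
    import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
    import org.apache.hadoop.hdds.scm.pipeline.Pipeline;
    import org.apache.hadoop.hdds.scm.pipeline.PipelineID;

    public final class PipelineBuilderSketch {

      static Pipeline buildStandalonePipeline(List<DatanodeDetails> dns) {
        // Pipelines are now built once through the builder instead of being
        // mutated with addMember().
        return Pipeline.newBuilder()
            .setId(PipelineID.randomId())
            .setState(Pipeline.PipelineState.OPEN)
            .setType(HddsProtos.ReplicationType.STAND_ALONE)
            .setFactor(HddsProtos.ReplicationFactor.ONE)
            .setNodes(dns)
            .build();
      }

      static String firstNodeUuid(Pipeline pipeline) throws IOException {
        // getFirstNode() replaces getLeader() and can throw IOException, which is
        // presumably why several helper signatures above gained "throws IOException".
        return pipeline.getFirstNode().getUuidString();
      }
    }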
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/dce4ebe8/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/TestContainerReplication.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/TestContainerReplication.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/TestContainerReplication.java
index d8a7d53..1789e55 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/TestContainerReplication.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/TestContainerReplication.java
@@ -36,7 +36,7 @@ import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos
     .DatanodeBlockID;
 import org.apache.hadoop.hdds.scm.XceiverClientGrpc;
 import org.apache.hadoop.hdds.scm.XceiverClientSpi;
-import org.apache.hadoop.hdds.scm.container.common.helpers.Pipeline;
+import org.apache.hadoop.hdds.scm.pipeline.Pipeline;
 import org.apache.hadoop.ozone.HddsDatanodeService;
 import org.apache.hadoop.ozone.MiniOzoneCluster;
 import org.apache.hadoop.ozone.container.common.helpers.BlockData;
@@ -123,7 +123,7 @@ public class TestContainerReplication {
     cluster.getStorageContainerManager().getScmNodeManager()
         .addDatanodeCommand(destinationDatanode.getDatanodeDetails().getUuid(),
             new ReplicateContainerCommand(containerId,
-                sourcePipelines.getMachines()));
+                sourcePipelines.getNodes()));
 
     Thread.sleep(3000);
 
@@ -163,7 +163,7 @@ public class TestContainerReplication {
   private HddsDatanodeService chooseDatanodeWithoutContainer(Pipeline pipeline,
       List<HddsDatanodeService> dataNodes) {
     for (HddsDatanodeService datanode : dataNodes) {
-      if (!pipeline.getMachines().contains(datanode.getDatanodeDetails())) {
+      if (!pipeline.getNodes().contains(datanode.getDatanodeDetails())) {
         return datanode;
       }
     }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/dce4ebe8/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/impl/TestCloseContainerHandler.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/impl/TestCloseContainerHandler.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/impl/TestCloseContainerHandler.java
index 85148e1..360b683 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/impl/TestCloseContainerHandler.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/impl/TestCloseContainerHandler.java
@@ -31,7 +31,7 @@ import org.apache.hadoop.ozone.container.common.volume.HddsVolume;
 import org.apache.hadoop.ozone.container.common.volume.VolumeSet;
 import org.apache.hadoop.ozone.container.keyvalue.KeyValueHandler;
 import org.apache.hadoop.ozone.container.common.helpers.ChunkInfo;
-import org.apache.hadoop.hdds.scm.container.common.helpers.Pipeline;
+import org.apache.hadoop.hdds.scm.pipeline.Pipeline;
 import org.apache.ratis.thirdparty.com.google.protobuf.ByteString;
 import org.junit.AfterClass;
 import org.junit.BeforeClass;
@@ -144,7 +144,7 @@ public class TestCloseContainerHandler {
       request.setContainerID(blockID.getContainerID());
       request.setWriteChunk(writeRequest);
       request.setTraceID(UUID.randomUUID().toString());
-      request.setDatanodeUuid(pipeline.getLeader().getUuidString());
+      request.setDatanodeUuid(pipeline.getFirstNode().getUuidString());
       dispatcher.dispatch(request.build());
       chunkList.add(info);
     }
@@ -179,7 +179,7 @@ public class TestCloseContainerHandler {
     request.setContainerID(blockID.getContainerID());
     request.setPutBlock(putBlockRequestProto);
     request.setTraceID(UUID.randomUUID().toString());
-    request.setDatanodeUuid(pipeline.getLeader().getUuidString());
+    request.setDatanodeUuid(pipeline.getFirstNode().getUuidString());
     dispatcher.dispatch(request.build());
 
     //the open block should be removed from Map
@@ -217,7 +217,7 @@ public class TestCloseContainerHandler {
     request.setDeleteChunk(deleteChunkProto);
     request.setWriteChunk(writeRequest);
     request.setTraceID(UUID.randomUUID().toString());
-    request.setDatanodeUuid(pipeline.getLeader().getUuidString());
+    request.setDatanodeUuid(pipeline.getFirstNode().getUuidString());
     dispatcher.dispatch(request.build());
     Assert.assertTrue(
         openContainerBlockMap.getBlockDataMap(testContainerID)
@@ -250,7 +250,7 @@ public class TestCloseContainerHandler {
     request.setCloseContainer(
         ContainerProtos.CloseContainerRequestProto.getDefaultInstance());
     request.setTraceID(UUID.randomUUID().toString());
-    request.setDatanodeUuid(pipeline.getLeader().getUuidString());
+    request.setDatanodeUuid(pipeline.getFirstNode().getUuidString());
     dispatcher.dispatch(request.build());
     Assert.assertNull(
         openContainerBlockMap.getBlockDataMap(testContainerID));

http://git-wip-us.apache.org/repos/asf/hadoop/blob/dce4ebe8/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestCloseContainerByPipeline.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestCloseContainerByPipeline.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestCloseContainerByPipeline.java
index 62cc5b2..aada723 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestCloseContainerByPipeline.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestCloseContainerByPipeline.java
@@ -23,7 +23,7 @@ import org.apache.hadoop.hdds.conf.OzoneConfiguration;
 import org.apache.hadoop.hdds.protocol.DatanodeDetails;
 import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
 import org.apache.hadoop.hdds.scm.container.ContainerID;
-import org.apache.hadoop.hdds.scm.container.common.helpers.Pipeline;
+import org.apache.hadoop.hdds.scm.pipeline.Pipeline;
 import org.apache.hadoop.ozone.HddsDatanodeService;
 import org.apache.hadoop.ozone.MiniOzoneCluster;
 import org.apache.hadoop.ozone.client.ObjectStore;
@@ -106,7 +106,7 @@ public class TestCloseContainerByPipeline {
         .getContainerManager().getContainerWithPipeline(
             ContainerID.valueof(containerID))
         .getPipeline();
-    List<DatanodeDetails> datanodes = pipeline.getMachines();
+    List<DatanodeDetails> datanodes = pipeline.getNodes();
     Assert.assertEquals(datanodes.size(), 1);
 
     DatanodeDetails datanodeDetails = datanodes.get(0);
@@ -162,7 +162,7 @@ public class TestCloseContainerByPipeline {
         .getContainerManager().getContainerWithPipeline(
             ContainerID.valueof(containerID))
         .getPipeline();
-    List<DatanodeDetails> datanodes = pipeline.getMachines();
+    List<DatanodeDetails> datanodes = pipeline.getNodes();
     Assert.assertEquals(datanodes.size(), 1);
 
     DatanodeDetails datanodeDetails = datanodes.get(0);
@@ -220,7 +220,7 @@ public class TestCloseContainerByPipeline {
         .getContainerManager().getContainerWithPipeline(
             ContainerID.valueof(containerID))
         .getPipeline();
-    List<DatanodeDetails> datanodes = pipeline.getMachines();
+    List<DatanodeDetails> datanodes = pipeline.getNodes();
     Assert.assertEquals(3, datanodes.size());
 
     GenericTestUtils.LogCapturer logCapturer =

http://git-wip-us.apache.org/repos/asf/hadoop/blob/dce4ebe8/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestCloseContainerHandler.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestCloseContainerHandler.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestCloseContainerHandler.java
index 4cd42ab..f3ce899 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestCloseContainerHandler.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestCloseContainerHandler.java
@@ -20,7 +20,7 @@ package org.apache.hadoop.ozone.container.common.statemachine.commandhandler;
 import org.apache.hadoop.hdds.conf.OzoneConfiguration;
 import org.apache.hadoop.hdds.protocol.DatanodeDetails;
 import org.apache.hadoop.hdds.scm.container.ContainerID;
-import org.apache.hadoop.hdds.scm.container.common.helpers.Pipeline;
+import org.apache.hadoop.hdds.scm.pipeline.Pipeline;
 import org.apache.hadoop.ozone.MiniOzoneCluster;
 import org.apache.hadoop.ozone.client.ObjectStore;
 import org.apache.hadoop.ozone.client.OzoneClient;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/dce4ebe8/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/TestCSMMetrics.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/TestCSMMetrics.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/TestCSMMetrics.java
index 2c94f3b..a5a9641 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/TestCSMMetrics.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/TestCSMMetrics.java
@@ -33,7 +33,7 @@ import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos
 import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos
     .ContainerCommandResponseProto;
 import org.apache.hadoop.hdds.scm.*;
-import org.apache.hadoop.hdds.scm.container.common.helpers.Pipeline;
+import org.apache.hadoop.hdds.scm.pipeline.Pipeline;
 import org.apache.hadoop.metrics2.MetricsRecordBuilder;
 import org.apache.hadoop.ozone.OzoneConfigKeys;
 import org.apache.hadoop.ozone.RatisTestHelper;
@@ -93,7 +93,7 @@ public class TestCSMMetrics {
       final OzoneConfiguration conf = new OzoneConfiguration();
       initConf.accept(pipeline, conf);
 
-      for (DatanodeDetails dn : pipeline.getMachines()) {
+      for (DatanodeDetails dn : pipeline.getNodes()) {
         final XceiverServerSpi s = createServer.apply(dn, conf);
         servers.add(s);
         s.start();

http://git-wip-us.apache.org/repos/asf/hadoop/blob/dce4ebe8/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/metrics/TestContainerMetrics.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/metrics/TestContainerMetrics.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/metrics/TestContainerMetrics.java
index aac908d..d4f7ae5 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/metrics/TestContainerMetrics.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/metrics/TestContainerMetrics.java
@@ -41,7 +41,7 @@ import org.apache.hadoop.ozone.container.common.impl.HddsDispatcher;
 import org.apache.hadoop.ozone.container.common.transport.server.XceiverServerGrpc;
 import org.apache.hadoop.ozone.container.common.volume.VolumeSet;
 import org.apache.hadoop.hdds.scm.TestUtils;
-import org.apache.hadoop.hdds.scm.container.common.helpers.Pipeline;
+import org.apache.hadoop.hdds.scm.pipeline.Pipeline;
 import org.apache.hadoop.ozone.container.replication.GrpcReplicationService;
 import org.apache.hadoop.ozone.container.replication.OnDemandContainerReplicationSource;
 import org.apache.hadoop.test.GenericTestUtils;
@@ -75,7 +75,7 @@ public class TestContainerMetrics {
           .createSingleNodePipeline();
       OzoneConfiguration conf = new OzoneConfiguration();
       conf.setInt(OzoneConfigKeys.DFS_CONTAINER_IPC_PORT,
-          pipeline.getLeader()
+          pipeline.getFirstNode()
               .getPort(DatanodeDetails.Port.Name.STANDALONE).getValue());
       conf.setInt(DFSConfigKeys.DFS_METRICS_PERCENTILES_INTERVALS_KEY,
           interval);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/dce4ebe8/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestOzoneContainer.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestOzoneContainer.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestOzoneContainer.java
index a3c92fb..f7ba979 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestOzoneContainer.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestOzoneContainer.java
@@ -28,7 +28,7 @@ import org.apache.hadoop.ozone.container.ContainerTestHelper;
 import org.apache.hadoop.hdds.scm.TestUtils;
 import org.apache.hadoop.hdds.scm.XceiverClientGrpc;
 import org.apache.hadoop.hdds.scm.XceiverClientSpi;
-import org.apache.hadoop.hdds.scm.container.common.helpers.Pipeline;
+import org.apache.hadoop.hdds.scm.pipeline.Pipeline;
 import org.junit.Assert;
 import org.junit.Rule;
 import org.junit.Test;
@@ -66,7 +66,8 @@ public class TestOzoneContainer {
       // independently in our test path.
       Pipeline pipeline = ContainerTestHelper.createSingleNodePipeline();
       conf.set(HDDS_DATANODE_DIR_KEY, tempFolder.getRoot().getPath());
-      conf.setInt(OzoneConfigKeys.DFS_CONTAINER_IPC_PORT, pipeline.getLeader()
+      conf.setInt(OzoneConfigKeys.DFS_CONTAINER_IPC_PORT,
+          pipeline.getFirstNode()
               .getPort(DatanodeDetails.Port.Name.STANDALONE).getValue());
       conf.setBoolean(
           OzoneConfigKeys.DFS_CONTAINER_IPC_RANDOM_PORT, false);
@@ -108,7 +109,7 @@ public class TestOzoneContainer {
       Pipeline pipeline =
           ContainerTestHelper.createSingleNodePipeline();
       conf.setInt(OzoneConfigKeys.DFS_CONTAINER_IPC_PORT,
-          pipeline.getLeader()
+          pipeline.getFirstNode()
               .getPort(DatanodeDetails.Port.Name.STANDALONE).getValue());
 
       cluster = MiniOzoneCluster.newBuilder(conf)
@@ -514,7 +515,7 @@ public class TestOzoneContainer {
     Pipeline pipeline =
         ContainerTestHelper.createSingleNodePipeline();
     conf.setInt(OzoneConfigKeys.DFS_CONTAINER_IPC_PORT,
-        pipeline.getLeader()
+        pipeline.getFirstNode()
             .getPort(DatanodeDetails.Port.Name.STANDALONE).getValue());
 
     // This client talks to ozone container via datanode.

http://git-wip-us.apache.org/repos/asf/hadoop/blob/dce4ebe8/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestOzoneContainerRatis.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestOzoneContainerRatis.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestOzoneContainerRatis.java
index 0f5d21f..13e41e2 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestOzoneContainerRatis.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestOzoneContainerRatis.java
@@ -25,7 +25,7 @@ import org.apache.hadoop.ozone.RatisTestHelper;
 import org.apache.hadoop.ozone.container.ContainerTestHelper;
 import org.apache.hadoop.ozone.web.utils.OzoneUtils;
 import org.apache.hadoop.hdds.scm.XceiverClientSpi;
-import org.apache.hadoop.hdds.scm.container.common.helpers.Pipeline;
+import org.apache.hadoop.hdds.scm.pipeline.Pipeline;
 import org.apache.ratis.rpc.RpcType;
 import org.apache.ratis.rpc.SupportedRpcType;
 import org.apache.ratis.util.CheckedBiConsumer;
@@ -88,12 +88,12 @@ public class TestOzoneContainerRatis {
       // Create Ratis cluster
 //      final String ratisId = "ratis1";
 //      final PipelineManager manager = RatisManagerImpl.newRatisManager(conf);
-//      manager.createPipeline(ratisId, pipeline.getMachines());
+//      manager.createPipeline(ratisId, pipeline.getNodes());
 //      LOG.info("Created RatisCluster " + ratisId);
 //
 //      // check Ratis cluster members
 //      final List<DatanodeDetails> dns = manager.getMembers(ratisId);
-//      Assert.assertEquals(pipeline.getMachines(), dns);
+//      Assert.assertEquals(pipeline.getNodes(), dns);
 //
 //      // run test
 //      final XceiverClientSpi client = XceiverClientRatis

http://git-wip-us.apache.org/repos/asf/hadoop/blob/dce4ebe8/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/server/TestContainerServer.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/server/TestContainerServer.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/server/TestContainerServer.java
index e6ebbf1..33e3e1a 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/server/TestContainerServer.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/server/TestContainerServer.java
@@ -44,7 +44,7 @@ import org.apache.hadoop.ozone.web.utils.OzoneUtils;
 import org.apache.hadoop.hdds.scm.XceiverClientGrpc;
 import org.apache.hadoop.hdds.scm.XceiverClientRatis;
 import org.apache.hadoop.hdds.scm.XceiverClientSpi;
-import org.apache.hadoop.hdds.scm.container.common.helpers.Pipeline;
+import org.apache.hadoop.hdds.scm.pipeline.Pipeline;
 import org.apache.hadoop.test.GenericTestUtils;
 import org.apache.ratis.rpc.RpcType;
 import org.apache.ratis.util.CheckedBiConsumer;
@@ -56,7 +56,6 @@ import java.io.File;
 import java.io.IOException;
 import java.util.ArrayList;
 import java.util.List;
-import java.util.function.BiConsumer;
 
 import static org.apache.ratis.rpc.SupportedRpcType.GRPC;
 import static org.apache.ratis.rpc.SupportedRpcType.NETTY;
@@ -80,15 +79,15 @@ public class TestContainerServer {
   public void testClientServer() throws Exception {
     DatanodeDetails datanodeDetails = TestUtils.randomDatanodeDetails();
     ContainerSet containerSet = new ContainerSet();
-    runTestClientServer(1,
-        (pipeline, conf) -> conf.setInt(OzoneConfigKeys.DFS_CONTAINER_IPC_PORT,
-            pipeline.getLeader()
-                .getPort(DatanodeDetails.Port.Name.STANDALONE).getValue()),
+    runTestClientServer(1, (pipeline, conf) -> conf
+            .setInt(OzoneConfigKeys.DFS_CONTAINER_IPC_PORT,
+                pipeline.getFirstNode()
+                    .getPort(DatanodeDetails.Port.Name.STANDALONE).getValue()),
         XceiverClientGrpc::new,
         (dn, conf) -> new XceiverServerGrpc(datanodeDetails, conf,
             new TestContainerDispatcher(),
-            createReplicationService(containerSet)),
-        (dn, p) -> {});
+            createReplicationService(containerSet)), (dn, p) -> {
+        });
   }
 
   @FunctionalInterface
@@ -131,7 +130,7 @@ public class TestContainerServer {
 
   static void runTestClientServer(
       int numDatanodes,
-      BiConsumer<Pipeline, OzoneConfiguration> initConf,
+      CheckedBiConsumer<Pipeline, OzoneConfiguration, IOException> initConf,
       CheckedBiFunction<Pipeline, OzoneConfiguration, XceiverClientSpi,
           IOException> createClient,
       CheckedBiFunction<DatanodeDetails, OzoneConfiguration, XceiverServerSpi,
@@ -146,7 +145,7 @@ public class TestContainerServer {
       final OzoneConfiguration conf = new OzoneConfiguration();
       initConf.accept(pipeline, conf);
 
-      for(DatanodeDetails dn : pipeline.getMachines()) {
+      for (DatanodeDetails dn : pipeline.getNodes()) {
         final XceiverServerSpi s = createServer.apply(dn, conf);
         servers.add(s);
         s.start();
@@ -181,7 +180,7 @@ public class TestContainerServer {
       Pipeline pipeline = ContainerTestHelper.createSingleNodePipeline();
       OzoneConfiguration conf = new OzoneConfiguration();
       conf.setInt(OzoneConfigKeys.DFS_CONTAINER_IPC_PORT,
-          pipeline.getLeader()
+          pipeline.getFirstNode()
               .getPort(DatanodeDetails.Port.Name.STANDALONE).getValue());
 
       ContainerSet containerSet = new ContainerSet();

http://git-wip-us.apache.org/repos/asf/hadoop/blob/dce4ebe8/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestAllocateContainer.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestAllocateContainer.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestAllocateContainer.java
index dfe7894..f067292 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestAllocateContainer.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestAllocateContainer.java
@@ -69,7 +69,7 @@ public class TestAllocateContainer {
         xceiverClientManager.getFactor(),
         containerOwner);
     Assert.assertNotNull(container);
-    Assert.assertNotNull(container.getPipeline().getLeader());
+    Assert.assertNotNull(container.getPipeline().getFirstNode());
 
   }
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/dce4ebe8/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestGetCommittedBlockLengthAndPutKey.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestGetCommittedBlockLengthAndPutKey.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestGetCommittedBlockLengthAndPutKey.java
index 03a0b8a..834dff0 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestGetCommittedBlockLengthAndPutKey.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestGetCommittedBlockLengthAndPutKey.java
@@ -28,7 +28,7 @@ import org.apache.hadoop.hdds.scm.XceiverClientManager;
 import org.apache.hadoop.hdds.scm.XceiverClientSpi;
 import org.apache.hadoop.hdds.scm.container.common.helpers.
     ContainerWithPipeline;
-import org.apache.hadoop.hdds.scm.container.common.helpers.Pipeline;
+import org.apache.hadoop.hdds.scm.pipeline.Pipeline;
 import org.apache.hadoop.hdds.scm.container.common.helpers.
     StorageContainerException;
 import org.apache.hadoop.hdds.scm.container.placement.algorithms.

http://git-wip-us.apache.org/repos/asf/hadoop/blob/dce4ebe8/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/ScmBlockLocationTestIngClient.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/ScmBlockLocationTestIngClient.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/ScmBlockLocationTestIngClient.java
index eb533e8..259f842 100644
--- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/ScmBlockLocationTestIngClient.java
+++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/ScmBlockLocationTestIngClient.java
@@ -27,8 +27,8 @@ import org.apache.hadoop.hdds.scm.ScmInfo;
 import org.apache.hadoop.hdds.scm.TestUtils;
 import org.apache.hadoop.hdds.scm.container.common.helpers.AllocatedBlock;
 import org.apache.hadoop.hdds.scm.container.common.helpers.DeleteBlockResult;
-import org.apache.hadoop.hdds.scm.container.common.helpers.Pipeline;
-import org.apache.hadoop.hdds.scm.container.common.helpers.PipelineID;
+import org.apache.hadoop.hdds.scm.pipeline.Pipeline;
+import org.apache.hadoop.hdds.scm.pipeline.PipelineID;
 import org.apache.hadoop.hdds.scm.protocol.ScmBlockLocationProtocol;
 import org.apache.hadoop.ozone.common.BlockGroup;
 import org.apache.hadoop.ozone.common.DeleteBlockGroupResult;
@@ -127,12 +127,15 @@ public class ScmBlockLocationTestIngClient implements ScmBlockLocationProtocol {
   }
 
   private Pipeline createPipeline(DatanodeDetails datanode) {
-    final Pipeline pipeline =
-        new Pipeline(datanode.getUuidString(), HddsProtos.LifeCycleState.OPEN,
-            HddsProtos.ReplicationType.STAND_ALONE,
-            HddsProtos.ReplicationFactor.ONE,
-            PipelineID.randomId());
-    pipeline.addMember(datanode);
+    List<DatanodeDetails> dns = new ArrayList<>();
+    dns.add(datanode);
+    Pipeline pipeline = Pipeline.newBuilder()
+        .setState(Pipeline.PipelineState.OPEN)
+        .setId(PipelineID.randomId())
+        .setType(HddsProtos.ReplicationType.STAND_ALONE)
+        .setFactor(HddsProtos.ReplicationFactor.ONE)
+        .setNodes(dns)
+        .build();
     return pipeline;
   }
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/dce4ebe8/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/genesis/BenchMarkContainerStateMap.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/genesis/BenchMarkContainerStateMap.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/genesis/BenchMarkContainerStateMap.java
index f29a5e6..9c0b541 100644
--- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/genesis/BenchMarkContainerStateMap.java
+++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/genesis/BenchMarkContainerStateMap.java
@@ -19,12 +19,13 @@
 package org.apache.hadoop.ozone.genesis;
 
 import com.google.common.base.Preconditions;
+import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
 import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor;
 import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationType;
 import org.apache.hadoop.hdds.protocol.DatanodeDetails;
 import org.apache.hadoop.hdds.scm.container.ContainerInfo;
-import org.apache.hadoop.hdds.scm.container.common.helpers.Pipeline;
-import org.apache.hadoop.hdds.scm.container.common.helpers.PipelineID;
+import org.apache.hadoop.hdds.scm.pipeline.Pipeline;
+import org.apache.hadoop.hdds.scm.pipeline.PipelineID;
 import org.apache.hadoop.hdds.scm.container.states.ContainerStateMap;
 import org.apache.hadoop.hdds.scm.exceptions.SCMException;
 import org.apache.hadoop.util.Time;
@@ -39,7 +40,6 @@ import java.io.IOException;
 import java.util.UUID;
 import java.util.List;
 import java.util.ArrayList;
-import java.util.Iterator;
 import java.util.Objects;
 import java.util.concurrent.atomic.AtomicInteger;
 
@@ -149,17 +149,16 @@ public class BenchMarkContainerStateMap {
   public static Pipeline createPipeline(String containerName,
       Iterable<DatanodeDetails> ids) throws IOException {
     Objects.requireNonNull(ids, "ids == null");
-    final Iterator<DatanodeDetails> i = ids.iterator();
-    Preconditions.checkArgument(i.hasNext());
-    final DatanodeDetails leader = i.next();
-    final Pipeline pipeline =
-        new Pipeline(leader.getUuidString(), OPEN,
-            ReplicationType.STAND_ALONE, ReplicationFactor.ONE,
-            PipelineID.randomId());
-    pipeline.addMember(leader);
-    for (; i.hasNext();) {
-      pipeline.addMember(i.next());
-    }
+    Preconditions.checkArgument(ids.iterator().hasNext());
+    List<DatanodeDetails> dns = new ArrayList<>();
+    ids.forEach(dns::add);
+    final Pipeline pipeline = Pipeline.newBuilder()
+        .setState(Pipeline.PipelineState.OPEN)
+        .setId(PipelineID.randomId())
+        .setType(HddsProtos.ReplicationType.STAND_ALONE)
+        .setFactor(HddsProtos.ReplicationFactor.ONE)
+        .setNodes(dns)
+        .build();
     return pipeline;
   }
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/dce4ebe8/hadoop-ozone/tools/src/test/java/org/apache/hadoop/ozone/scm/TestContainerSQLCli.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/tools/src/test/java/org/apache/hadoop/ozone/scm/TestContainerSQLCli.java b/hadoop-ozone/tools/src/test/java/org/apache/hadoop/ozone/scm/TestContainerSQLCli.java
index 922856b..9117838 100644
--- a/hadoop-ozone/tools/src/test/java/org/apache/hadoop/ozone/scm/TestContainerSQLCli.java
+++ b/hadoop-ozone/tools/src/test/java/org/apache/hadoop/ozone/scm/TestContainerSQLCli.java
@@ -21,6 +21,7 @@ import org.apache.hadoop.hdds.scm.container.ContainerManager;
 import org.apache.hadoop.hdds.scm.container.SCMContainerManager;
 import org.apache.hadoop.hdds.scm.events.SCMEvents;
 import org.apache.hadoop.hdds.scm.node.NodeManager;
+import org.apache.hadoop.hdds.scm.pipeline.PipelineManager;
 import org.apache.hadoop.hdds.server.events.EventQueue;
 import org.apache.hadoop.ozone.MiniOzoneCluster;
 import org.apache.hadoop.ozone.OzoneConfigKeys;
@@ -120,7 +121,10 @@ public class TestContainerSQLCli {
     cluster.getStorageContainerManager().stop();
     eventQueue = new EventQueue();
     nodeManager = cluster.getStorageContainerManager().getScmNodeManager();
-    containerManager = new SCMContainerManager(conf, nodeManager, eventQueue);
+    PipelineManager pipelineManager =
+        cluster.getStorageContainerManager().getPipelineManager();
+    containerManager = new SCMContainerManager(conf, nodeManager,
+        pipelineManager, eventQueue);
     blockManager = new BlockManagerImpl(
         conf, nodeManager, containerManager, eventQueue);
     eventQueue.addHandler(SCMEvents.CHILL_MODE_STATUS, blockManager);




[30/50] [abbrv] hadoop git commit: YARN-8871. Document ATSv2 integrated LogWebService. Contributed by Suma Shivaprasad.

Posted by su...@apache.org.
YARN-8871. Document ATSv2 integrated LogWebService. Contributed by Suma Shivaprasad.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/a283da21
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/a283da21
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/a283da21

Branch: refs/heads/HDFS-12943
Commit: a283da21670c812a391d4c9ee98ebef22fc93868
Parents: 4ec4ec6
Author: Rohith Sharma K S <ro...@apache.org>
Authored: Tue Oct 30 11:34:20 2018 +0530
Committer: Rohith Sharma K S <ro...@apache.org>
Committed: Tue Oct 30 11:34:20 2018 +0530

----------------------------------------------------------------------
 .../hadoop-yarn-site/src/site/markdown/TimelineServiceV2.md     | 5 +++++
 1 file changed, 5 insertions(+)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/a283da21/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/TimelineServiceV2.md
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/TimelineServiceV2.md b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/TimelineServiceV2.md
index 04948ce..2314f30 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/TimelineServiceV2.md
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/TimelineServiceV2.md
@@ -1570,3 +1570,8 @@ With this API, you can query set of available entity types for a given app id. I
 1. If any problem occurs in parsing request, HTTP 400 (Bad Request) is returned.
 1. If flow context information cannot be retrieved or entity for the given entity id cannot be found, HTTP 404 (Not Found) is returned.
 1. For non-recoverable errors while retrieving data, HTTP 500 (Internal Server Error) is returned.
+
+## <a name="Aggregated Log Serving for Historical Apps"></a>Aggregated Log Serving for Historical Apps
+
+ TimelineService v.2 supports serving aggregated logs of historical apps. To enable this, configure "yarn.log.server.web-service.url" to "${yarn.timeline-service.hostname}:8188/ws/v2/applicationlog"
+ in `yarn-site.xml`.
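
For readers who set YARN configuration programmatically, a minimal sketch of the same setting follows; the reader host below is a placeholder rather than a value from this patch, and deployments would normally put the property in yarn-site.xml as described above:

    import org.apache.hadoop.yarn.conf.YarnConfiguration;

    public class LogWebServiceConfigSketch {
      public static void main(String[] args) {
        YarnConfiguration conf = new YarnConfiguration();
        // Placeholder host: point the log server URL at the ATSv2 reader's
        // aggregated-log endpoint, mirroring the yarn-site.xml value above.
        conf.set("yarn.log.server.web-service.url",
            "timeline-reader-host:8188/ws/v2/applicationlog");
        System.out.println(conf.get("yarn.log.server.web-service.url"));
      }
    }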




[47/50] [abbrv] hadoop git commit: HDDS-659. Implement pagination in GET bucket (object list) endpoint. Contributed by Bharat Viswanadham.

Posted by su...@apache.org.
HDDS-659. Implement pagination in GET bucket (object list) endpoint. Contributed by Bharat Viswanadham.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/b519f3f2
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/b519f3f2
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/b519f3f2

Branch: refs/heads/HDFS-12943
Commit: b519f3f2a0ae960391ce7bff59f1fdd21a22e030
Parents: ecac351
Author: Márton Elek <el...@apache.org>
Authored: Wed Oct 31 12:21:38 2018 +0100
Committer: Márton Elek <el...@apache.org>
Committed: Wed Oct 31 13:29:01 2018 +0100

----------------------------------------------------------------------
 .../ozone/s3/endpoint/BucketEndpoint.java       | 109 ++++++---
 .../ozone/s3/endpoint/ListObjectResponse.java   |  22 ++
 .../apache/hadoop/ozone/s3/util/S3Consts.java   |   1 +
 .../apache/hadoop/ozone/s3/util/S3utils.java    |  73 ++++++
 .../hadoop/ozone/client/OzoneBucketStub.java    |   7 +-
 .../hadoop/ozone/s3/endpoint/TestBucketGet.java | 227 ++++++++++++++++++-
 6 files changed, 400 insertions(+), 39 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/b519f3f2/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/BucketEndpoint.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/BucketEndpoint.java b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/BucketEndpoint.java
index 8f554ed..04e2348 100644
--- a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/BucketEndpoint.java
+++ b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/BucketEndpoint.java
@@ -36,7 +36,6 @@ import java.io.IOException;
 import java.io.InputStream;
 import java.time.Instant;
 import java.util.Iterator;
-import javax.ws.rs.core.Response.ResponseBuilder;
 
 import org.apache.hadoop.ozone.client.OzoneBucket;
 import org.apache.hadoop.ozone.client.OzoneKey;
@@ -48,10 +47,13 @@ import org.apache.hadoop.ozone.s3.exception.OS3Exception;
 import org.apache.hadoop.ozone.s3.exception.S3ErrorTable;
 
 import org.apache.commons.lang3.StringUtils;
+import org.apache.hadoop.ozone.s3.util.S3utils;
 import org.apache.http.HttpStatus;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
+import static org.apache.hadoop.ozone.s3.util.S3Consts.ENCODING_TYPE;
+
 /**
  * Bucket level rest endpoints.
  */
@@ -76,6 +78,8 @@ public class BucketEndpoint extends EndpointBase {
       @DefaultValue("1000") @QueryParam("max-keys") int maxKeys,
       @QueryParam("prefix") String prefix,
       @QueryParam("browser") String browser,
+      @QueryParam("continuation-token") String continueToken,
+      @QueryParam("start-after") String startAfter,
       @Context HttpHeaders hh) throws OS3Exception, IOException {
 
     if (browser != null) {
@@ -87,60 +91,91 @@ public class BucketEndpoint extends EndpointBase {
       }
     }
 
-    if (delimiter == null) {
-      delimiter = "/";
-    }
     if (prefix == null) {
       prefix = "";
     }
 
     OzoneBucket bucket = getBucket(bucketName);
 
-    Iterator<? extends OzoneKey> ozoneKeyIterator = bucket.listKeys(prefix);
+    Iterator<? extends OzoneKey> ozoneKeyIterator;
+
+    String decodedToken = S3utils.decodeContinueToken(continueToken);
+
+    if (startAfter != null && continueToken != null) {
+      // If continuation token and start after both are provided, then we
+      // ignore start After
+      ozoneKeyIterator = bucket.listKeys(prefix, decodedToken);
+    } else if (startAfter != null && continueToken == null) {
+      ozoneKeyIterator = bucket.listKeys(prefix, startAfter);
+    } else if (startAfter == null && continueToken != null){
+      ozoneKeyIterator = bucket.listKeys(prefix, decodedToken);
+    } else {
+      ozoneKeyIterator = bucket.listKeys(prefix);
+    }
+
 
     ListObjectResponse response = new ListObjectResponse();
     response.setDelimiter(delimiter);
     response.setName(bucketName);
     response.setPrefix(prefix);
     response.setMarker("");
-    response.setMaxKeys(1000);
-    response.setEncodingType("url");
+    response.setMaxKeys(maxKeys);
+    response.setEncodingType(ENCODING_TYPE);
     response.setTruncated(false);
+    response.setContinueToken(continueToken);
 
     String prevDir = null;
+    String lastKey = null;
+    int count = 0;
     while (ozoneKeyIterator.hasNext()) {
       OzoneKey next = ozoneKeyIterator.next();
       String relativeKeyName = next.getName().substring(prefix.length());
 
-      int depth =
-          StringUtils.countMatches(relativeKeyName, delimiter);
+      int depth = StringUtils.countMatches(relativeKeyName, delimiter);
+      if (delimiter != null) {
+        if (depth > 0) {
+          // means key has multiple delimiters in its value.
+          // ex: dir/dir1/dir2, where delimiter is "/" and prefix is dir/
+          String dirName = relativeKeyName.substring(0, relativeKeyName
+              .indexOf(delimiter));
+          if (!dirName.equals(prevDir)) {
+            response.addPrefix(prefix + dirName + delimiter);
+            prevDir = dirName;
+            count++;
+          }
+        } else if (relativeKeyName.endsWith(delimiter)) {
+          // means our key is same as prefix with delimiter at end and ends with
+          // delimiter. ex: dir/, where prefix is dir and delimiter is /
+          response.addPrefix(relativeKeyName);
+          count++;
+        } else {
+          // means our key is matched with prefix if prefix is given and it
+          // does not have any common prefix.
+          addKey(response, next);
+          count++;
+        }
+      } else {
+        addKey(response, next);
+        count++;
+      }
 
-      if (prefix.length() > 0 && !prefix.endsWith(delimiter)
-          && relativeKeyName.length() > 0) {
-        response.addPrefix(prefix + "/");
+      if (count == maxKeys) {
+        lastKey = next.getName();
         break;
       }
-      if (depth > 0) {
-        String dirName = relativeKeyName
-            .substring(0, relativeKeyName.indexOf(delimiter));
-        if (!dirName.equals(prevDir)) {
-          response.addPrefix(
-              prefix + dirName + delimiter);
-          prevDir = dirName;
-        }
-      } else if (relativeKeyName.endsWith(delimiter)) {
-        response.addPrefix(relativeKeyName);
-      } else if (relativeKeyName.length() > 0) {
-        KeyMetadata keyMetadata = new KeyMetadata();
-        keyMetadata.setKey(next.getName());
-        keyMetadata.setSize(next.getDataSize());
-        keyMetadata.setETag("" + next.getModificationTime());
-        keyMetadata.setStorageClass("STANDARD");
-        keyMetadata
-            .setLastModified(Instant.ofEpochMilli(next.getModificationTime()));
-        response.addKey(keyMetadata);
-      }
     }
+
+    response.setKeyCount(count);
+
+    if (count < maxKeys) {
+      response.setTruncated(false);
+    } else if(ozoneKeyIterator.hasNext()) {
+      response.setTruncated(true);
+      response.setNextToken(S3utils.generateContinueToken(lastKey));
+    } else {
+      response.setTruncated(false);
+    }
+
     response.setKeyCount(
         response.getCommonPrefixes().size() + response.getContents().size());
     return Response.ok(response).build();
@@ -253,4 +288,14 @@ public class BucketEndpoint extends EndpointBase {
     }
     return result;
   }
+
+  private void addKey(ListObjectResponse response, OzoneKey next) {
+    KeyMetadata keyMetadata = new KeyMetadata();
+    keyMetadata.setKey(next.getName());
+    keyMetadata.setSize(next.getDataSize());
+    keyMetadata.setETag("" + next.getModificationTime());
+    keyMetadata.setStorageClass("STANDARD");
+    keyMetadata.setLastModified(Instant.ofEpochMilli(next.getModificationTime()));
+    response.addKey(keyMetadata);
+  }
 }
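
Taken together, the continuation-token handling above lets a caller page through a bucket without any server-side listing state. A rough usage sketch follows; it assumes it sits in the same package as the endpoint (so ListObjectResponse and KeyMetadata are visible) and that an OzoneClient holding a bucket named "b1" is available, for example the OzoneClientStub used by the tests later in this patch:

    package org.apache.hadoop.ozone.s3.endpoint;

    import java.io.IOException;
    import org.apache.hadoop.ozone.client.OzoneClient;
    import org.apache.hadoop.ozone.s3.exception.OS3Exception;

    public class ListBucketPagesSketch {
      static void printAllKeys(OzoneClient ozoneClient)
          throws IOException, OS3Exception {
        // "b1" is assumed to already exist on the supplied client
        // (see the tests added in this patch).
        BucketEndpoint endpoint = new BucketEndpoint();
        endpoint.setClient(ozoneClient);
        String token = null;
        do {
          // max-keys=2, no delimiter or prefix; the NextContinuationToken from the
          // previous page (null on the first call) is passed as continuation-token.
          ListObjectResponse page = (ListObjectResponse) endpoint
              .list("b1", null, null, null, 2, "", null, token, null, null)
              .getEntity();
          for (KeyMetadata key : page.getContents()) {
            System.out.println(key.getKey());
          }
          token = page.getNextToken();
        } while (token != null);
      }
    }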

http://git-wip-us.apache.org/repos/asf/hadoop/blob/b519f3f2/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/ListObjectResponse.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/ListObjectResponse.java b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/ListObjectResponse.java
index b9ab977..adb5f20 100644
--- a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/ListObjectResponse.java
+++ b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/ListObjectResponse.java
@@ -60,6 +60,12 @@ public class ListObjectResponse {
   @XmlElement(name = "IsTruncated")
   private boolean isTruncated;
 
+  @XmlElement(name = "NextContinuationToken")
+  private String nextToken;
+
+  @XmlElement(name = "continueToken")
+  private String continueToken;
+
   @XmlElement(name = "Contents")
   private List<KeyMetadata> contents = new ArrayList<>();
 
@@ -148,6 +154,22 @@ public class ListObjectResponse {
     commonPrefixes.add(new CommonPrefix(relativeKeyName));
   }
 
+  public String getNextToken() {
+    return nextToken;
+  }
+
+  public void setNextToken(String nextToken) {
+    this.nextToken = nextToken;
+  }
+
+  public String getContinueToken() {
+    return continueToken;
+  }
+
+  public void setContinueToken(String continueToken) {
+    this.continueToken = continueToken;
+  }
+
   public int getKeyCount() {
     return keyCount;
   }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/b519f3f2/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/util/S3Consts.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/util/S3Consts.java b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/util/S3Consts.java
index 2e7b965..70d8a96 100644
--- a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/util/S3Consts.java
+++ b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/util/S3Consts.java
@@ -15,5 +15,6 @@ public final class S3Consts {
 
   public static final String COPY_SOURCE_HEADER = "x-amz-copy-source";
   public static final String STORAGE_CLASS_HEADER = "x-amz-storage-class";
+  public static final String ENCODING_TYPE = "url";
 
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/b519f3f2/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/util/S3utils.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/util/S3utils.java b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/util/S3utils.java
new file mode 100644
index 0000000..8af0927
--- /dev/null
+++ b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/util/S3utils.java
@@ -0,0 +1,73 @@
+package org.apache.hadoop.ozone.s3.util;
+
+import org.apache.commons.codec.DecoderException;
+import org.apache.commons.codec.binary.Hex;
+import org.apache.commons.codec.digest.DigestUtils;
+import org.apache.hadoop.ozone.s3.exception.OS3Exception;
+import org.apache.hadoop.ozone.s3.exception.S3ErrorTable;
+
+import java.nio.charset.StandardCharsets;
+
+/**
+ * Utility class for S3.
+ */
+public final class S3utils {
+
+  private S3utils() {
+
+  }
+  private static final String CONTINUE_TOKEN_SEPERATOR = "-";
+
+  /**
+   * Generate a continuation token which is used in get Bucket.
+   * @param key
+   * @return if key is not null return continuation token, else returns null.
+   */
+  public static String generateContinueToken(String key) {
+    if (key != null) {
+      byte[] byteData = key.getBytes(StandardCharsets.UTF_8);
+      String hex = Hex.encodeHexString(byteData);
+      String digest = DigestUtils.sha256Hex(key);
+      return hex + CONTINUE_TOKEN_SEPERATOR + digest;
+    } else {
+      return null;
+    }
+  }
+
+  /**
+   * Decode a continuation token which is used in get Bucket.
+   * @param key
+   * @return if key is not null return decoded token, otherwise returns null.
+   * @throws OS3Exception
+   */
+  public static String decodeContinueToken(String key) throws OS3Exception {
+    if (key != null) {
+      int indexSeparator = key.indexOf(CONTINUE_TOKEN_SEPERATOR);
+      if (indexSeparator == -1) {
+        throw S3ErrorTable.newError(S3ErrorTable.INVALID_ARGUMENT, key);
+      }
+      String hex = key.substring(0, indexSeparator);
+      String digest = key.substring(indexSeparator + 1);
+      try {
+        byte[] actualKeyBytes = Hex.decodeHex(hex);
+        String digestActualKey = DigestUtils.sha256Hex(actualKeyBytes);
+        if (digest.equals(digestActualKey)) {
+          return new String(actualKeyBytes);
+        } else {
+          OS3Exception ex = S3ErrorTable.newError(S3ErrorTable
+              .INVALID_ARGUMENT, key);
+          ex.setErrorMessage("The continuation token provided is incorrect");
+          throw ex;
+        }
+      } catch (DecoderException ex) {
+        OS3Exception os3Exception = S3ErrorTable.newError(S3ErrorTable
+            .INVALID_ARGUMENT, key);
+        os3Exception.setErrorMessage("The continuation token provided is " +
+            "incorrect");
+        throw os3Exception;
+      }
+    } else {
+      return null;
+    }
+  }
+}
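
The token format above is simply hex(key) + "-" + sha256(key), so it round-trips without any server-side bookkeeping. A small sketch exercising the two helpers, assuming the s3gateway classes from this patch are on the classpath:

    import org.apache.hadoop.ozone.s3.exception.OS3Exception;
    import org.apache.hadoop.ozone.s3.util.S3utils;

    public class ContinueTokenRoundTrip {
      public static void main(String[] args) throws OS3Exception {
        String lastKey = "dir1/file2";
        // hex-encoded key plus its SHA-256 digest, as built by generateContinueToken()
        String token = S3utils.generateContinueToken(lastKey);
        // decodeContinueToken() recomputes the digest, so a tampered token fails with
        // an INVALID_ARGUMENT OS3Exception instead of decoding silently.
        String decoded = S3utils.decodeContinueToken(token);
        System.out.println(lastKey.equals(decoded)); // expected: true
      }
    }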

http://git-wip-us.apache.org/repos/asf/hadoop/blob/b519f3f2/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/client/OzoneBucketStub.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/client/OzoneBucketStub.java b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/client/OzoneBucketStub.java
index 6aff087..918e9d0 100644
--- a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/client/OzoneBucketStub.java
+++ b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/client/OzoneBucketStub.java
@@ -27,6 +27,7 @@ import java.util.HashMap;
 import java.util.Iterator;
 import java.util.List;
 import java.util.Map;
+import java.util.TreeMap;
 import java.util.stream.Collectors;
 
 import org.apache.hadoop.hdds.client.ReplicationFactor;
@@ -116,7 +117,8 @@ public class OzoneBucketStub extends OzoneBucket {
 
   @Override
   public Iterator<? extends OzoneKey> listKeys(String keyPrefix) {
-    return keyDetails.values()
+    Map<String, OzoneKey> sortedKey = new TreeMap<String, OzoneKey>(keyDetails);
+    return sortedKey.values()
         .stream()
         .filter(key -> key.getName().startsWith(keyPrefix))
         .collect(Collectors.toList())
@@ -126,7 +128,8 @@ public class OzoneBucketStub extends OzoneBucket {
   @Override
   public Iterator<? extends OzoneKey> listKeys(String keyPrefix,
       String prevKey) {
-    return keyDetails.values()
+    Map<String, OzoneKey> sortedKey = new TreeMap<String, OzoneKey>(keyDetails);
+    return sortedKey.values()
         .stream()
         .filter(key -> key.getName().compareTo(prevKey) > 0)
         .filter(key -> key.getName().startsWith(keyPrefix))

http://git-wip-us.apache.org/repos/asf/hadoop/blob/b519f3f2/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestBucketGet.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestBucketGet.java b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestBucketGet.java
index 54534ed..8b70d29 100644
--- a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestBucketGet.java
+++ b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestBucketGet.java
@@ -29,6 +29,8 @@ import org.apache.hadoop.ozone.s3.exception.OS3Exception;
 import org.junit.Assert;
 import org.junit.Test;
 
+import static org.junit.Assert.fail;
+
 /**
  * Testing basic object list browsing.
  */
@@ -45,7 +47,7 @@ public class TestBucketGet {
 
     ListObjectResponse getBucketResponse =
         (ListObjectResponse) getBucket
-            .list("b1", "/", null, null, 100, "", null, null)
+            .list("b1", "/", null, null, 100, "", null, null, null, null)
             .getEntity();
 
     Assert.assertEquals(1, getBucketResponse.getCommonPrefixes().size());
@@ -68,8 +70,8 @@ public class TestBucketGet {
     getBucket.setClient(client);
 
     ListObjectResponse getBucketResponse =
-        (ListObjectResponse) getBucket
-            .list("b1", "/", null, null, 100, "dir1", null, null).getEntity();
+        (ListObjectResponse) getBucket.list("b1", "/", null, null, 100,
+            "dir1", null, null, null, null).getEntity();
 
     Assert.assertEquals(1, getBucketResponse.getCommonPrefixes().size());
     Assert.assertEquals("dir1/",
@@ -85,13 +87,16 @@ public class TestBucketGet {
     BucketEndpoint getBucket = new BucketEndpoint();
 
     OzoneClient ozoneClient =
-        createClientWithKeys("dir1/file2", "dir1/dir2/file2");
+        createClientWithKeys("dir1/file2", "dir1/dir2/file2", "dir1bh/file",
+            "dir1bha/file2");
 
     getBucket.setClient(ozoneClient);
 
     ListObjectResponse getBucketResponse =
         (ListObjectResponse) getBucket
-            .list("b1", "/", null, null, 100, "dir1/", null, null).getEntity();
+            .list("b1", "/", null, null, 100, "dir1/", null, null,
+                null, null)
+            .getEntity();
 
     Assert.assertEquals(1, getBucketResponse.getCommonPrefixes().size());
     Assert.assertEquals("dir1/dir2/",
@@ -103,6 +108,218 @@ public class TestBucketGet {
 
   }
 
+
+  @Test
+  public void listWithPrefixAndDelimiter() throws OS3Exception, IOException {
+
+    BucketEndpoint getBucket = new BucketEndpoint();
+
+    OzoneClient ozoneClient =
+        createClientWithKeys("dir1/file2", "dir1/dir2/file2", "dir1bh/file",
+            "dir1bha/file2", "file2");
+
+    getBucket.setClient(ozoneClient);
+
+    ListObjectResponse getBucketResponse =
+        (ListObjectResponse) getBucket.list("b1", "/", null, null, 100,
+            "dir1", null, null, null, null).getEntity();
+
+    Assert.assertEquals(3, getBucketResponse.getCommonPrefixes().size());
+
+  }
+
+  @Test
+  public void listWithPrefixAndDelimiter1() throws OS3Exception, IOException {
+
+    BucketEndpoint getBucket = new BucketEndpoint();
+
+    OzoneClient ozoneClient =
+        createClientWithKeys("dir1/file2", "dir1/dir2/file2", "dir1bh/file",
+            "dir1bha/file2", "file2");
+
+    getBucket.setClient(ozoneClient);
+
+    ListObjectResponse getBucketResponse =
+        (ListObjectResponse) getBucket.list("b1", "/", null, null, 100,
+            "", null, null, null, null).getEntity();
+
+    Assert.assertEquals(3, getBucketResponse.getCommonPrefixes().size());
+    Assert.assertEquals("file2", getBucketResponse.getContents().get(0)
+        .getKey());
+
+  }
+
+  @Test
+  public void listWithPrefixAndDelimiter2() throws OS3Exception, IOException {
+
+    BucketEndpoint getBucket = new BucketEndpoint();
+
+    OzoneClient ozoneClient =
+        createClientWithKeys("dir1/file2", "dir1/dir2/file2", "dir1bh/file",
+            "dir1bha/file2", "file2");
+
+    getBucket.setClient(ozoneClient);
+
+    ListObjectResponse getBucketResponse =
+        (ListObjectResponse) getBucket.list("b1", "/", null, null, 100,
+            "dir1bh", null, null, "dir1/dir2/file2", null).getEntity();
+
+    Assert.assertEquals(2, getBucketResponse.getCommonPrefixes().size());
+
+  }
+
+  @Test
+  public void listWithContinuationToken() throws OS3Exception, IOException {
+
+    BucketEndpoint getBucket = new BucketEndpoint();
+
+    OzoneClient ozoneClient =
+        createClientWithKeys("dir1/file2", "dir1/dir2/file2", "dir1bh/file",
+            "dir1bha/file2", "file2");
+
+    getBucket.setClient(ozoneClient);
+
+    int maxKeys = 2;
+    // As we have 5 keys, with max keys 2 we should call list 3 times.
+
+    // First time
+    ListObjectResponse getBucketResponse =
+        (ListObjectResponse) getBucket.list("b1", null, null, null, maxKeys,
+            "", null, null, null, null).getEntity();
+
+    Assert.assertTrue(getBucketResponse.isTruncated());
+    Assert.assertTrue(getBucketResponse.getContents().size() == 2);
+
+    // 2nd time
+    String continueToken = getBucketResponse.getNextToken();
+    getBucketResponse =
+        (ListObjectResponse) getBucket.list("b1", null, null, null, maxKeys,
+            "", null, continueToken, null, null).getEntity();
+    Assert.assertTrue(getBucketResponse.isTruncated());
+    Assert.assertTrue(getBucketResponse.getContents().size() == 2);
+
+
+    continueToken = getBucketResponse.getNextToken();
+
+    //3rd time
+    getBucketResponse =
+        (ListObjectResponse) getBucket.list("b1", null, null, null, maxKeys,
+            "", null, continueToken, null, null).getEntity();
+
+    Assert.assertFalse(getBucketResponse.isTruncated());
+    Assert.assertTrue(getBucketResponse.getContents().size() == 1);
+
+  }
+
+  @Test
+  /**
+   * This test is with prefix and delimiter and verify continuation-token
+   * behavior.
+   */
+  public void listWithContinuationToken1() throws OS3Exception, IOException {
+
+    BucketEndpoint getBucket = new BucketEndpoint();
+
+    OzoneClient ozoneClient =
+        createClientWithKeys("dir1/file1", "dir1bh/file1",
+            "dir1bha/file1", "dir0/file1", "dir2/file1");
+
+    getBucket.setClient(ozoneClient);
+
+    int maxKeys = 2;
+    // As we have 5 keys, with max keys 2 we should call list 3 times.
+
+    // First time
+    ListObjectResponse getBucketResponse =
+        (ListObjectResponse) getBucket.list("b1", "/", null, null, maxKeys,
+            "dir", null, null, null, null).getEntity();
+
+    Assert.assertTrue(getBucketResponse.isTruncated());
+    Assert.assertTrue(getBucketResponse.getCommonPrefixes().size() == 2);
+
+
+    // 2nd time
+    String continueToken = getBucketResponse.getNextToken();
+    getBucketResponse =
+        (ListObjectResponse) getBucket.list("b1", "/", null, null, maxKeys,
+            "dir", null, continueToken, null, null).getEntity();
+    Assert.assertTrue(getBucketResponse.isTruncated());
+    Assert.assertTrue(getBucketResponse.getCommonPrefixes().size() == 2);
+
+
+    //3rd time
+    continueToken = getBucketResponse.getNextToken();
+    getBucketResponse =
+        (ListObjectResponse) getBucket.list("b1", "/", null, null, maxKeys,
+            "dir", null, continueToken, null, null).getEntity();
+
+    Assert.assertFalse(getBucketResponse.isTruncated());
+    Assert.assertTrue(getBucketResponse.getCommonPrefixes().size() == 1);
+
+  }
+
+  @Test
+  public void listWithContinuationTokenFail() throws OS3Exception, IOException {
+
+    BucketEndpoint getBucket = new BucketEndpoint();
+
+    OzoneClient ozoneClient =
+        createClientWithKeys("dir1/file2", "dir1/dir2/file2", "dir1bh/file",
+            "dir1bha/file2", "dir1", "dir2", "dir3");
+
+    getBucket.setClient(ozoneClient);
+
+    try {
+      ListObjectResponse getBucketResponse =
+          (ListObjectResponse) getBucket.list("b1", "/", null, null, 2,
+              "dir", null, "random", null, null).getEntity();
+      fail("listWithContinuationTokenFail");
+    } catch (OS3Exception ex) {
+      Assert.assertEquals("random", ex.getResource());
+      Assert.assertEquals("Invalid Argument", ex.getErrorMessage());
+    }
+
+  }
+
+
+  @Test
+  public void testStartAfter() throws IOException, OS3Exception {
+    BucketEndpoint getBucket = new BucketEndpoint();
+
+    OzoneClient ozoneClient =
+        createClientWithKeys("dir1/file1", "dir1bh/file1",
+            "dir1bha/file1", "dir0/file1", "dir2/file1");
+
+    getBucket.setClient(ozoneClient);
+
+    ListObjectResponse getBucketResponse =
+        (ListObjectResponse) getBucket.list("b1", null, null, null, 1000,
+            null, null, null, null, null).getEntity();
+
+    Assert.assertFalse(getBucketResponse.isTruncated());
+    Assert.assertTrue(getBucketResponse.getContents().size() == 5);
+
+    //As our list output is sorted, after seeking to startAfter, we shall
+    // have 4 keys.
+    String startAfter = "dir0/file1";
+
+    getBucketResponse =
+        (ListObjectResponse) getBucket.list("b1", null, null, null,
+            1000, null, null, null, startAfter, null).getEntity();
+
+    Assert.assertFalse(getBucketResponse.isTruncated());
+    Assert.assertTrue(getBucketResponse.getContents().size() == 4);
+
+    getBucketResponse =
+        (ListObjectResponse) getBucket.list("b1", null, null, null,
+            1000, null, null, null, "random", null).getEntity();
+
+    Assert.assertFalse(getBucketResponse.isTruncated());
+    Assert.assertTrue(getBucketResponse.getContents().size() == 0);
+
+
+  }
+
   private OzoneClient createClientWithKeys(String... keys) throws IOException {
     OzoneClient client = new OzoneClientStub();
 




[45/50] [abbrv] hadoop git commit: HDFS-14033. [libhdfs++] Disable libhdfs++ build on systems that do not support thread_local. Contributed by Anatoli Shein.

Posted by su...@apache.org.
HDFS-14033. [libhdfs++] Disable libhdfs++ build on systems that do not support thread_local. Contributed by Anatoli Shein.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/9c438abe
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/9c438abe
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/9c438abe

Branch: refs/heads/HDFS-12943
Commit: 9c438abe52d4ee0b25345a4b7ec1697dd66f85e9
Parents: fac9f91
Author: Sunil G <su...@apache.org>
Authored: Wed Oct 31 12:32:49 2018 +0530
Committer: Sunil G <su...@apache.org>
Committed: Wed Oct 31 12:32:49 2018 +0530

----------------------------------------------------------------------
 .../src/CMakeLists.txt                          | 22 +++++++++++++++++++-
 .../src/main/native/libhdfspp/CMakeLists.txt    |  4 ++--
 2 files changed, 23 insertions(+), 3 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/9c438abe/hadoop-hdfs-project/hadoop-hdfs-native-client/src/CMakeLists.txt
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/CMakeLists.txt b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/CMakeLists.txt
index 1813ec1..026be9f 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/CMakeLists.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/CMakeLists.txt
@@ -138,7 +138,27 @@ endif()
 
 add_subdirectory(main/native/libhdfs)
 add_subdirectory(main/native/libhdfs-tests)
-add_subdirectory(main/native/libhdfspp)
+
+# Temporary fix to disable Libhdfs++ build on older systems that do not support thread_local
+include(CheckCXXSourceCompiles)
+unset (THREAD_LOCAL_SUPPORTED CACHE)
+set (CMAKE_REQUIRED_DEFINITIONS "-std=c++11")
+set (CMAKE_REQUIRED_LIBRARIES ${CMAKE_THREAD_LIBS_INIT})
+check_cxx_source_compiles(
+    "#include <thread>
+    int main(void) {
+      thread_local int s;
+      return 0;
+    }"
+    THREAD_LOCAL_SUPPORTED)
+if (THREAD_LOCAL_SUPPORTED)
+    add_subdirectory(main/native/libhdfspp)
+else()
+    message(WARNING
+    "WARNING: Libhdfs++ library was not built because the required feature thread_local storage \
+    is not supported by your compiler. Known compilers that support this feature: GCC 4.8+, Visual Studio 2015+, \
+    Clang (community version 3.3+), Clang (version for Xcode 8+ and iOS 9+).")
+endif (THREAD_LOCAL_SUPPORTED)
 
 if(REQUIRE_LIBWEBHDFS)
     add_subdirectory(contrib/libwebhdfs)

http://git-wip-us.apache.org/repos/asf/hadoop/blob/9c438abe/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/CMakeLists.txt
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/CMakeLists.txt b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/CMakeLists.txt
index 63fa80d..411320a 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/CMakeLists.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/CMakeLists.txt
@@ -63,8 +63,8 @@ check_cxx_source_compiles(
 if (NOT THREAD_LOCAL_SUPPORTED)
   message(FATAL_ERROR
   "FATAL ERROR: The required feature thread_local storage is not supported by your compiler. \
-  Known compilers that support this feature: GCC, Visual Studio, Clang (community version), \
-  Clang (version for iOS 9 and later).")
+  Known compilers that support this feature: GCC 4.8+, Visual Studio 2015+, Clang (community \
+  version 3.3+), Clang (version for Xcode 8+ and iOS 9+).")
 endif (NOT THREAD_LOCAL_SUPPORTED)
 
 # Check if PROTOC library was compiled with the compatible compiler by trying




[06/50] [abbrv] hadoop git commit: HDDS-702: Used fixed/external version from hadoop jars in hdds/ozone projects. Contributed by Elek Marton.

Posted by su...@apache.org.
HDDS-702: Used fixed/external version from hadoop jars in hdds/ozone projects. Contributed by Elek Marton.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/a4b9b7c1
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/a4b9b7c1
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/a4b9b7c1

Branch: refs/heads/HDFS-12943
Commit: a4b9b7c1302b886279960c2817be3d198347d92e
Parents: 9b899f1
Author: Bharat Viswanadham <bh...@apache.org>
Authored: Thu Oct 25 16:26:48 2018 -0700
Committer: Bharat Viswanadham <bh...@apache.org>
Committed: Thu Oct 25 16:26:48 2018 -0700

----------------------------------------------------------------------
 hadoop-hdds/pom.xml    | 161 +++++++++++++++++++++++++++++++++++++++++++-
 hadoop-ozone/pom.xml   | 139 +++++++++++++++++++++++++++++++++++++-
 hadoop-project/pom.xml | 158 -------------------------------------------
 pom.xml                |   2 -
 4 files changed, 295 insertions(+), 165 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/a4b9b7c1/hadoop-hdds/pom.xml
----------------------------------------------------------------------
diff --git a/hadoop-hdds/pom.xml b/hadoop-hdds/pom.xml
index a5fb32d..bedf78d 100644
--- a/hadoop-hdds/pom.xml
+++ b/hadoop-hdds/pom.xml
@@ -19,9 +19,9 @@ http://maven.apache.org/xsd/maven-4.0.0.xsd">
   <modelVersion>4.0.0</modelVersion>
   <parent>
     <groupId>org.apache.hadoop</groupId>
-    <artifactId>hadoop-project-dist</artifactId>
-    <version>3.3.0-SNAPSHOT</version>
-    <relativePath>../hadoop-project-dist</relativePath>
+    <artifactId>hadoop-project</artifactId>
+    <version>3.2.1-SNAPSHOT</version>
+    <relativePath/>
   </parent>
 
   <artifactId>hadoop-hdds</artifactId>
@@ -40,6 +40,143 @@ http://maven.apache.org/xsd/maven-4.0.0.xsd">
 
   </modules>
 
+  <properties>
+    <!-- version for hdds/ozone components -->
+    <hdds.version>0.4.0-SNAPSHOT</hdds.version>
+
+    <!-- Apache Ratis version -->
+    <ratis.version>0.3.0-aa38160-SNAPSHOT</ratis.version>
+
+    <bouncycastle.version>1.60</bouncycastle.version>
+
+    <protobuf-maven-plugin.version>0.5.1</protobuf-maven-plugin.version>
+    <os-maven-plugin.version>1.5.0.Final</os-maven-plugin.version>
+
+
+  </properties>
+  <repositories>
+    <repository>
+      <id>apache.snapshots.https</id>
+      <url>https://repository.apache.org/content/repositories/snapshots</url>
+    </repository>
+  </repositories>
+  <pluginRepositories>
+    <pluginRepository>
+      <id>apache.snapshots.https</id>
+      <url>https://repository.apache.org/content/repositories/snapshots</url>
+    </pluginRepository>
+  </pluginRepositories>
+  <dependencyManagement>
+    <dependencies>
+
+
+      <dependency>
+        <groupId>org.apache.hadoop</groupId>
+        <artifactId>hadoop-hdds-common</artifactId>
+        <version>${hdds.version}</version>
+      </dependency>
+
+      <dependency>
+        <groupId>org.apache.hadoop</groupId>
+        <artifactId>hadoop-hdds-client</artifactId>
+        <version>${hdds.version}</version>
+      </dependency>
+
+      <dependency>
+        <groupId>org.apache.hadoop</groupId>
+        <artifactId>hadoop-hdds-tools</artifactId>
+        <version>${hdds.version}</version>
+      </dependency>
+
+      <dependency>
+        <groupId>org.apache.hadoop</groupId>
+        <artifactId>hadoop-hdds-server-framework</artifactId>
+        <version>${hdds.version}</version>
+      </dependency>
+
+      <dependency>
+        <groupId>org.apache.hadoop</groupId>
+        <artifactId>hadoop-hdds-server-scm</artifactId>
+        <version>${hdds.version}</version>
+      </dependency>
+
+      <dependency>
+        <groupId>org.apache.hadoop</groupId>
+        <artifactId>hadoop-hdds-container-service</artifactId>
+        <version>${hdds.version}</version>
+      </dependency>
+
+      <dependency>
+        <groupId>org.apache.hadoop</groupId>
+        <artifactId>hadoop-hdds-container-service</artifactId>
+        <version>${hdds.version}</version>
+        <type>test-jar</type>
+      </dependency>
+
+      <dependency>
+        <groupId>org.apache.hadoop</groupId>
+        <artifactId>hadoop-hdds-server-scm</artifactId>
+        <type>test-jar</type>
+        <version>${hdds.version}</version>
+      </dependency>
+
+      <dependency>
+        <groupId>org.openjdk.jmh</groupId>
+        <artifactId>jmh-core</artifactId>
+        <version>1.19</version>
+      </dependency>
+
+      <dependency>
+        <groupId>org.openjdk.jmh</groupId>
+        <artifactId>jmh-generator-annprocess</artifactId>
+        <version>1.19</version>
+      </dependency>
+
+      <dependency>
+        <groupId>org.apache.ratis</groupId>
+        <artifactId>ratis-proto-shaded</artifactId>
+        <version>${ratis.version}</version>
+      </dependency>
+      <dependency>
+        <artifactId>ratis-common</artifactId>
+        <groupId>org.apache.ratis</groupId>
+        <version>${ratis.version}</version>
+      </dependency>
+      <dependency>
+        <artifactId>ratis-client</artifactId>
+        <groupId>org.apache.ratis</groupId>
+        <version>${ratis.version}</version>
+      </dependency>
+      <dependency>
+        <artifactId>ratis-server</artifactId>
+        <groupId>org.apache.ratis</groupId>
+        <version>${ratis.version}</version>
+      </dependency>
+      <dependency>
+        <artifactId>ratis-netty</artifactId>
+        <groupId>org.apache.ratis</groupId>
+        <version>${ratis.version}</version>
+      </dependency>
+      <dependency>
+        <artifactId>ratis-grpc</artifactId>
+        <groupId>org.apache.ratis</groupId>
+        <version>${ratis.version}</version>
+      </dependency>
+
+      <dependency>
+        <groupId>org.bouncycastle</groupId>
+        <artifactId>bcprov-jdk15on</artifactId>
+        <version>${bouncycastle.version}</version>
+      </dependency>
+      <dependency>
+        <groupId>org.bouncycastle</groupId>
+        <artifactId>bcpkix-jdk15on</artifactId>
+        <version>${bouncycastle.version}</version>
+      </dependency>
+
+
+    </dependencies>
+  </dependencyManagement>
   <dependencies>
     <dependency>
       <groupId>org.apache.hadoop</groupId>
@@ -88,7 +225,25 @@ http://maven.apache.org/xsd/maven-4.0.0.xsd">
   </dependencies>
   <build>
     <plugins>
+
       <plugin>
+        <groupId>org.apache.maven.plugins</groupId>
+        <artifactId>maven-enforcer-plugin</artifactId>
+        <executions>
+          <execution>
+            <id>depcheck</id>
+            <configuration>
+              <rules>
+                <DependencyConvergence>
+                  <uniqueVersions>false</uniqueVersions>
+                </DependencyConvergence>
+              </rules>
+            </configuration>
+          </execution>
+        </executions>
+      </plugin>
+      <plugin>
+
         <groupId>org.apache.rat</groupId>
         <artifactId>apache-rat-plugin</artifactId>
         <configuration>

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a4b9b7c1/hadoop-ozone/pom.xml
----------------------------------------------------------------------
diff --git a/hadoop-ozone/pom.xml b/hadoop-ozone/pom.xml
index 0e02922..5e53134 100644
--- a/hadoop-ozone/pom.xml
+++ b/hadoop-ozone/pom.xml
@@ -20,8 +20,8 @@ http://maven.apache.org/xsd/maven-4.0.0.xsd">
   <parent>
     <groupId>org.apache.hadoop</groupId>
     <artifactId>hadoop-project</artifactId>
-    <version>3.3.0-SNAPSHOT</version>
-    <relativePath>../hadoop-project</relativePath>
+    <version>3.2.1-SNAPSHOT</version>
+    <relativePath/>
   </parent>
   <artifactId>hadoop-ozone</artifactId>
   <version>0.4.0-SNAPSHOT</version>
@@ -30,9 +30,15 @@ http://maven.apache.org/xsd/maven-4.0.0.xsd">
   <packaging>pom</packaging>
 
   <properties>
+    <hadoop.version>3.2.1-SNAPSHOT</hadoop.version>
+    <hdds.version>0.4.0-SNAPSHOT</hdds.version>
     <ozone.version>0.4.0-SNAPSHOT</ozone.version>
+    <ratis.version>0.3.0-aa38160-SNAPSHOT</ratis.version>
+    <bouncycastle.version>1.60</bouncycastle.version>
     <ozone.release>Badlands</ozone.release>
     <declared.ozone.version>${ozone.version}</declared.ozone.version>
+
+
   </properties>
   <modules>
     <module>common</module>
@@ -48,6 +54,119 @@ http://maven.apache.org/xsd/maven-4.0.0.xsd">
     <module>docs</module>
   </modules>
 
+  <repositories>
+    <repository>
+      <id>apache.snapshots.https</id>
+      <url>https://repository.apache.org/content/repositories/snapshots</url>
+    </repository>
+  </repositories>
+
+  <dependencyManagement>
+
+    <dependencies>
+      <dependency>
+        <groupId>org.apache.hadoop</groupId>
+        <artifactId>hadoop-ozone-common</artifactId>
+        <version>${ozone.version}</version>
+      </dependency>
+      <dependency>
+        <groupId>org.apache.hadoop</groupId>
+        <artifactId>hadoop-ozone-client</artifactId>
+        <version>${ozone.version}</version>
+      </dependency>
+      <dependency>
+        <groupId>org.apache.hadoop</groupId>
+        <artifactId>hadoop-ozone-ozone-manager</artifactId>
+        <version>${ozone.version}</version>
+      </dependency>
+      <dependency>
+        <groupId>org.apache.hadoop</groupId>
+        <artifactId>hadoop-ozone-objectstore-service</artifactId>
+        <version>${ozone.version}</version>
+      </dependency>
+      <dependency>
+        <groupId>org.apache.hadoop</groupId>
+        <artifactId>hadoop-ozone-s3gateway</artifactId>
+        <version>${ozone.version}</version>
+      </dependency>
+      <dependency>
+        <groupId>org.apache.hadoop</groupId>
+        <artifactId>hadoop-ozone-datanode</artifactId>
+        <version>${ozone.version}</version>
+      </dependency>
+      <dependency>
+        <groupId>org.apache.hadoop</groupId>
+        <artifactId>hadoop-ozone-tools</artifactId>
+        <version>${ozone.version}</version>
+      </dependency>
+      <dependency>
+        <groupId>org.apache.hadoop</groupId>
+        <artifactId>hadoop-ozone-filesystem</artifactId>
+        <version>${ozone.version}</version>
+      </dependency>
+      <dependency>
+        <groupId>org.apache.hadoop</groupId>
+        <artifactId>hadoop-ozone-integration-test</artifactId>
+        <version>${ozone.version}</version>
+        <type>test-jar</type>
+      </dependency>
+      <dependency>
+        <groupId>org.apache.hadoop</groupId>
+        <artifactId>hadoop-ozone-ozone-manager</artifactId>
+        <version>${ozone.version}</version>
+        <type>test-jar</type>
+      </dependency>
+
+      <dependency>
+        <groupId>org.apache.hadoop</groupId>
+        <artifactId>hadoop-hdds-common</artifactId>
+        <version>${hdds.version}</version>
+      </dependency>
+      <dependency>
+        <groupId>org.apache.hadoop</groupId>
+        <artifactId>hadoop-hdds-server-framework</artifactId>
+        <version>${hdds.version}</version>
+      </dependency>
+      <dependency>
+        <groupId>org.apache.hadoop</groupId>
+        <artifactId>hadoop-hdds-server-scm</artifactId>
+        <version>${hdds.version}</version>
+      </dependency>
+      <dependency>
+        <groupId>org.apache.hadoop</groupId>
+        <artifactId>hadoop-hdds-container-service</artifactId>
+        <version>${hdds.version}</version>
+      </dependency>
+      <dependency>
+        <groupId>org.apache.hadoop</groupId>
+        <artifactId>hadoop-hdds-client</artifactId>
+        <version>${hdds.version}</version>
+      </dependency>
+      <dependency>
+        <groupId>org.apache.hadoop</groupId>
+        <artifactId>hadoop-hdds-tools</artifactId>
+        <version>${hdds.version}</version>
+      </dependency>
+      <dependency>
+        <groupId>org.apache.hadoop</groupId>
+        <artifactId>hadoop-hdds-container-service</artifactId>
+        <version>${hdds.version}</version>
+        <type>test-jar</type>
+      </dependency>
+      <dependency>
+        <groupId>org.apache.hadoop</groupId>
+        <artifactId>hadoop-hdds-server-scm</artifactId>
+        <type>test-jar</type>
+        <version>${hdds.version}</version>
+      </dependency>
+
+      <dependency>
+        <groupId>org.bouncycastle</groupId>
+        <artifactId>bcprov-jdk15on</artifactId>
+        <version>${bouncycastle.version}</version>
+      </dependency>
+    </dependencies>
+  </dependencyManagement>
   <dependencies>
     <dependency>
       <groupId>org.apache.hadoop</groupId>
@@ -119,6 +238,22 @@ http://maven.apache.org/xsd/maven-4.0.0.xsd">
   <build>
     <plugins>
       <plugin>
+        <groupId>org.apache.maven.plugins</groupId>
+        <artifactId>maven-enforcer-plugin</artifactId>
+        <executions>
+          <execution>
+            <id>depcheck</id>
+            <configuration>
+              <rules>
+                <DependencyConvergence>
+                  <uniqueVersions>false</uniqueVersions>
+                </DependencyConvergence>
+              </rules>
+            </configuration>
+          </execution>
+        </executions>
+      </plugin>
+      <plugin>
         <groupId>org.apache.rat</groupId>
         <artifactId>apache-rat-plugin</artifactId>
         <configuration>

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a4b9b7c1/hadoop-project/pom.xml
----------------------------------------------------------------------
diff --git a/hadoop-project/pom.xml b/hadoop-project/pom.xml
index e9c5c0f..963d24b 100644
--- a/hadoop-project/pom.xml
+++ b/hadoop-project/pom.xml
@@ -102,8 +102,6 @@
     <apacheds.version>2.0.0-M21</apacheds.version>
     <ldap-api.version>1.0.0-M33</ldap-api.version>
 
-    <!-- Apache Ratis version -->
-    <ratis.version>0.3.0-aa38160-SNAPSHOT</ratis.version>
     <jcache.version>1.0-alpha-1</jcache.version>
     <ehcache.version>3.3.1</ehcache.version>
     <hikari.version>2.4.12</hikari.version>
@@ -582,132 +580,6 @@
 
       <dependency>
         <groupId>org.apache.hadoop</groupId>
-        <artifactId>hadoop-ozone</artifactId>
-        <version>${hdds.version}</version>
-      </dependency>
-      <dependency>
-        <groupId>org.apache.hadoop</groupId>
-        <artifactId>hadoop-ozone-objectstore-service</artifactId>
-        <version>${hdds.version}</version>
-      </dependency>
-      <dependency>
-        <groupId>org.apache.hadoop</groupId>
-        <artifactId>hadoop-ozone-docs</artifactId>
-        <version>${hdds.version}</version>
-      </dependency>
-      <dependency>
-        <groupId>org.apache.hadoop</groupId>
-        <artifactId>hadoop-ozone-filesystem</artifactId>
-        <version>${hdds.version}</version>
-      </dependency>
-      <dependency>
-        <groupId>org.apache.hadoop</groupId>
-        <artifactId>hadoop-hdds-common</artifactId>
-        <version>${hdds.version}</version>
-      </dependency>
-
-      <dependency>
-        <groupId>org.apache.hadoop</groupId>
-        <artifactId>hadoop-hdds-client</artifactId>
-        <version>${hdds.version}</version>
-      </dependency>
-      <dependency>
-        <groupId>org.apache.hadoop</groupId>
-        <artifactId>hadoop-hdds-tools</artifactId>
-        <version>${hdds.version}</version>
-      </dependency>
-      <dependency>
-        <groupId>org.apache.hadoop</groupId>
-        <artifactId>hadoop-ozone-tools</artifactId>
-        <version>${hdds.version}</version>
-      </dependency>
-      <dependency>
-        <groupId>org.apache.hadoop</groupId>
-        <artifactId>hadoop-ozone-datanode</artifactId>
-        <version>${hdds.version}</version>
-      </dependency>
-      <dependency>
-        <groupId>org.apache.hadoop</groupId>
-        <artifactId>hadoop-ozone-s3gateway</artifactId>
-        <version>${hdds.version}</version>
-      </dependency>
-      <dependency>
-        <groupId>org.apache.hadoop</groupId>
-        <artifactId>hadoop-ozone-integration-test</artifactId>
-        <version>${hdds.version}</version>
-        <type>test-jar</type>
-      </dependency>
-
-      <dependency>
-        <groupId>org.apache.hadoop</groupId>
-        <artifactId>hadoop-hdds-server-framework</artifactId>
-        <version>${hdds.version}</version>
-      </dependency>
-
-      <dependency>
-        <groupId>org.apache.hadoop</groupId>
-        <artifactId>hadoop-hdds-server-scm</artifactId>
-        <version>${hdds.version}</version>
-      </dependency>
-
-      <dependency>
-        <groupId>org.apache.hadoop</groupId>
-        <artifactId>hadoop-hdds-container-service</artifactId>
-        <version>${hdds.version}</version>
-      </dependency>
-      <dependency>
-        <groupId>org.apache.hadoop</groupId>
-        <artifactId>hadoop-hdds-container-service</artifactId>
-        <version>${hdds.version}</version>
-        <type>test-jar</type>
-      </dependency>
-
-      <dependency>
-        <groupId>org.apache.hadoop</groupId>
-        <artifactId>hadoop-hdds-server-scm</artifactId>
-        <type>test-jar</type>
-        <version>${hdds.version}</version>
-      </dependency>
-
-
-      <dependency>
-        <groupId>org.apache.hadoop</groupId>
-        <artifactId>hadoop-ozone-common</artifactId>
-        <version>${hdds.version}</version>
-      </dependency>
-
-      <dependency>
-        <groupId>org.apache.hadoop</groupId>
-        <artifactId>hadoop-ozone-ozone-manager</artifactId>
-        <version>${hdds.version}</version>
-      </dependency>
-      <dependency>
-        <groupId>org.apache.hadoop</groupId>
-        <artifactId>hadoop-ozone-ozone-manager</artifactId>
-        <version>${hdds.version}</version>
-        <type>test-jar</type>
-      </dependency>
-
-      <dependency>
-        <groupId>org.apache.hadoop</groupId>
-        <artifactId>hadoop-ozone-client</artifactId>
-        <version>${hdds.version}</version>
-      </dependency>
-
-      <dependency>
-        <groupId>org.openjdk.jmh</groupId>
-        <artifactId>jmh-core</artifactId>
-        <version>1.19</version>
-      </dependency>
-      <dependency>
-        <groupId>org.openjdk.jmh</groupId>
-        <artifactId>jmh-generator-annprocess</artifactId>
-        <version>1.19</version>
-      </dependency>
-
-
-      <dependency>
-        <groupId>org.apache.hadoop</groupId>
         <artifactId>hadoop-kms</artifactId>
         <version>${hadoop.version}</version>
       </dependency>
@@ -931,36 +803,6 @@
         </exclusions>
       </dependency>
 
-      <dependency>
-        <groupId>org.apache.ratis</groupId>
-        <artifactId>ratis-proto-shaded</artifactId>
-        <version>${ratis.version}</version>
-      </dependency>
-      <dependency>
-        <artifactId>ratis-common</artifactId>
-        <groupId>org.apache.ratis</groupId>
-        <version>${ratis.version}</version>
-      </dependency>
-      <dependency>
-        <artifactId>ratis-client</artifactId>
-        <groupId>org.apache.ratis</groupId>
-        <version>${ratis.version}</version>
-      </dependency>
-      <dependency>
-        <artifactId>ratis-server</artifactId>
-        <groupId>org.apache.ratis</groupId>
-        <version>${ratis.version}</version>
-      </dependency>
-      <dependency>
-        <artifactId>ratis-netty</artifactId>
-        <groupId>org.apache.ratis</groupId>
-        <version>${ratis.version}</version>
-      </dependency>
-      <dependency>
-        <artifactId>ratis-grpc</artifactId>
-        <groupId>org.apache.ratis</groupId>
-        <version>${ratis.version}</version>
-      </dependency>
 
       <dependency>
         <groupId>io.netty</groupId>

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a4b9b7c1/pom.xml
----------------------------------------------------------------------
diff --git a/pom.xml b/pom.xml
index 5e81573..327fcdd 100644
--- a/pom.xml
+++ b/pom.xml
@@ -116,8 +116,6 @@ xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xs
 
     <shell-executable>bash</shell-executable>
 
-    <!-- version for hdds/ozone components -->
-    <hdds.version>0.4.0-SNAPSHOT</hdds.version>
   </properties>
 
   <modules>




[28/50] [abbrv] hadoop git commit: HDFS-14027. DFSStripedOutputStream should implement both hsync methods.

Posted by su...@apache.org.
HDFS-14027. DFSStripedOutputStream should implement both hsync methods.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/db7e6368
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/db7e6368
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/db7e6368

Branch: refs/heads/HDFS-12943
Commit: db7e636824a36b90ba1c8e9b2fba1162771700fe
Parents: 496f0ff
Author: Xiao Chen <xi...@apache.org>
Authored: Mon Oct 29 19:05:52 2018 -0700
Committer: Xiao Chen <xi...@apache.org>
Committed: Mon Oct 29 19:06:15 2018 -0700

----------------------------------------------------------------------
 .../hadoop/hdfs/DFSStripedOutputStream.java     | 12 +++++++
 .../hadoop/hdfs/TestDFSStripedOutputStream.java | 36 +++++++++++++-------
 2 files changed, 35 insertions(+), 13 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/db7e6368/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSStripedOutputStream.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSStripedOutputStream.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSStripedOutputStream.java
index ed875bb..df9770e 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSStripedOutputStream.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSStripedOutputStream.java
@@ -24,6 +24,7 @@ import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.fs.CreateFlag;
 import org.apache.hadoop.fs.StreamCapabilities;
 import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys;
+import org.apache.hadoop.hdfs.client.HdfsDataOutputStream.SyncFlag;
 import org.apache.hadoop.hdfs.protocol.ClientProtocol;
 import org.apache.hadoop.hdfs.protocol.DatanodeID;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
@@ -956,11 +957,22 @@ public class DFSStripedOutputStream extends DFSOutputStream
   @Override
   public void hflush() {
     // not supported yet
+    LOG.debug("DFSStripedOutputStream does not support hflush. "
+        + "Caller should check StreamCapabilities before calling.");
   }
 
   @Override
   public void hsync() {
     // not supported yet
+    LOG.debug("DFSStripedOutputStream does not support hsync. "
+        + "Caller should check StreamCapabilities before calling.");
+  }
+
+  @Override
+  public void hsync(EnumSet<SyncFlag> syncFlags) {
+    // not supported yet
+    LOG.debug("DFSStripedOutputStream does not support hsync {}. "
+        + "Caller should check StreamCapabilities before calling.", syncFlags);
   }
 
   @Override

http://git-wip-us.apache.org/repos/asf/hadoop/blob/db7e6368/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSStripedOutputStream.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSStripedOutputStream.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSStripedOutputStream.java
index 865a736..092aa0a 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSStripedOutputStream.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSStripedOutputStream.java
@@ -18,12 +18,14 @@
 package org.apache.hadoop.hdfs;
 
 import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertTrue;
 import static org.junit.Assert.fail;
 
 import java.io.ByteArrayInputStream;
 import java.io.IOException;
 import java.io.InputStream;
 import java.util.ArrayList;
+import java.util.EnumSet;
 
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
@@ -31,6 +33,7 @@ import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FSDataOutputStream;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.StreamCapabilities.StreamCapability;
+import org.apache.hadoop.hdfs.client.HdfsDataOutputStream.SyncFlag;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
 import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;
 import org.apache.hadoop.io.IOUtils;
@@ -196,19 +199,26 @@ public class TestDFSStripedOutputStream {
   public void testStreamFlush() throws Exception {
     final byte[] bytes = StripedFileTestUtil.generateBytes(blockSize *
         dataBlocks * 3 + cellSize * dataBlocks + cellSize + 123);
-    FSDataOutputStream os = fs.create(new Path("/ec-file-1"));
-    assertFalse("DFSStripedOutputStream should not have hflush() " +
-            "capability yet!", os.hasCapability(
-                StreamCapability.HFLUSH.getValue()));
-    assertFalse("DFSStripedOutputStream should not have hsync() " +
-            "capability yet!", os.hasCapability(
-                StreamCapability.HSYNC.getValue()));
-    InputStream is = new ByteArrayInputStream(bytes);
-    IOUtils.copyBytes(is, os, bytes.length);
-    os.hflush();
-    IOUtils.copyBytes(is, os, bytes.length);
-    os.hsync();
-    os.close();
+    try (FSDataOutputStream os = fs.create(new Path("/ec-file-1"))) {
+      assertFalse(
+          "DFSStripedOutputStream should not have hflush() capability yet!",
+          os.hasCapability(StreamCapability.HFLUSH.getValue()));
+      assertFalse(
+          "DFSStripedOutputStream should not have hsync() capability yet!",
+          os.hasCapability(StreamCapability.HSYNC.getValue()));
+      try (InputStream is = new ByteArrayInputStream(bytes)) {
+        IOUtils.copyBytes(is, os, bytes.length);
+        os.hflush();
+        IOUtils.copyBytes(is, os, bytes.length);
+        os.hsync();
+        IOUtils.copyBytes(is, os, bytes.length);
+      }
+      assertTrue("stream is not a DFSStripedOutputStream",
+          os.getWrappedStream() instanceof DFSStripedOutputStream);
+      final DFSStripedOutputStream dfssos =
+          (DFSStripedOutputStream) os.getWrappedStream();
+      dfssos.hsync(EnumSet.of(SyncFlag.UPDATE_LENGTH));
+    }
   }
 
   private void testOneFile(String src, int writeBytes) throws Exception {
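
A caller-side note on the change above: the new debug messages point callers at StreamCapabilities instead of throwing. The sketch below is only an illustration of the probe those messages ask for, reusing the same StreamCapability enum the test exercises; the helper class name and the fallback policy are assumptions, not part of the patch.

    import java.io.IOException;

    import org.apache.hadoop.fs.FSDataOutputStream;
    import org.apache.hadoop.fs.StreamCapabilities.StreamCapability;

    public final class FlushUtil {
      private FlushUtil() {
      }

      /** Sync if the stream advertises it, fall back to hflush(), else no-op. */
      public static void syncIfSupported(FSDataOutputStream out) throws IOException {
        if (out.hasCapability(StreamCapability.HSYNC.getValue())) {
          out.hsync();    // durable sync on streams that implement it
        } else if (out.hasCapability(StreamCapability.HFLUSH.getValue())) {
          out.hflush();   // weaker flush fallback
        }
        // DFSStripedOutputStream advertises neither capability yet, so for
        // erasure-coded files both branches are skipped and data only becomes
        // durable when the stream is closed.
      }
    }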




[33/50] [abbrv] hadoop git commit: HADOOP-15855. Review hadoop credential doc, including object store details. Contributed by Steve Loughran.

Posted by su...@apache.org.
HADOOP-15855. Review hadoop credential doc, including object store details.
Contributed by Steve Loughran.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/62d98ca9
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/62d98ca9
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/62d98ca9

Branch: refs/heads/HDFS-12943
Commit: 62d98ca92aee15d1790d169bfdf0043b05b748ce
Parents: 7757331
Author: Steve Loughran <st...@apache.org>
Authored: Tue Oct 30 15:58:04 2018 +0000
Committer: Steve Loughran <st...@apache.org>
Committed: Tue Oct 30 15:58:04 2018 +0000

----------------------------------------------------------------------
 .../src/site/markdown/CredentialProviderAPI.md  | 130 ++++++++++++++-----
 .../hadoop/crypto/key/TestKeyProvider.java      |  32 +++--
 2 files changed, 119 insertions(+), 43 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/62d98ca9/hadoop-common-project/hadoop-common/src/site/markdown/CredentialProviderAPI.md
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/site/markdown/CredentialProviderAPI.md b/hadoop-common-project/hadoop-common/src/site/markdown/CredentialProviderAPI.md
index bd1c2c7..0c5f486 100644
--- a/hadoop-common-project/hadoop-common/src/site/markdown/CredentialProviderAPI.md
+++ b/hadoop-common-project/hadoop-common/src/site/markdown/CredentialProviderAPI.md
@@ -32,10 +32,12 @@ Overview
 
 Usage
 -----
+
 ### Usage Overview
 Let's provide a quick overview of the use of the credential provider framework for protecting passwords or other sensitive tokens in hadoop.
 
 ##### Why is it used?
+
 There are certain deployments that are very sensitive to how sensitive tokens like passwords are stored and managed within the cluster. For instance, there may be security best practices and policies in place that require such things to never be stored in clear text, for example. Enterprise deployments may be required to use a preferred solution for managing credentials and we need a way to plug in integrations for them.
 
 ##### General Usage Pattern
@@ -48,46 +50,46 @@ There are numerous places within the Hadoop project and ecosystem that can lever
 3. Features or components that leverage the new [Configuration.getPassword](../../api/org/apache/hadoop/conf/Configuration.html#getPassword-java.lang.String-) method to resolve their credentials will automatically pick up support for the credential provider API.
     - By using the same property names as are used for existing clear text passwords, this mechanism allows for the migration to credential providers while providing backward compatibility for clear text.
     - The entire credential provider path is interrogated before falling back to clear text passwords in config.
-4. Features or components that do not use the hadoop Configuration class for config or have other internal uses for the credential providers may choose to write to the CredentialProvider API itself. An example of its use will be included in this document but may also be found within [Configuration.getPassword](../../api/org/apache/hadoop/conf/Configuration.html#getPassword-java.lang.String-) and within the unit tests of features that have added support and need to provision credentials for the tests.
+4. Features or components that do not use Hadoop's `org.apache.hadoop.conf.Configuration` class for configuration or have other internal uses for the credential providers may choose to use the `CredentialProvider` API itself. An example of its use can be found within [Configuration.getPassword](../../api/org/apache/hadoop/conf/Configuration.html#getPassword-java.lang.String-) and within its unit tests.
 
 ##### Provision Credentials
-Example: ssl.server.keystore.password
+Example: `ssl.server.keystore.password`
 
-```
-    hadoop credential create ssl.server.keystore.password -value 123
-      -provider localjceks://file/home/lmccay/aws.jceks
+```bash
+hadoop credential create ssl.server.keystore.password -value 123 \
+  -provider localjceks://file/home/lmccay/aws.jceks
 ```
 
-Note that the alias names are the same as the configuration properties that were used to get the
-credentials from the Configuration.get method. Reusing these names allows for intuitive
-migration to the use of credential providers and fall back logic for backward compatibility.
+The alias names are the same as the configuration properties that were used to get the
+credentials from the `Configuration.get()` methods.
 
 ##### Configuring the Provider Path
+
 Now, we need to make sure that this provisioned credential store is known at runtime by the
 [Configuration.getPassword](../../api/org/apache/hadoop/conf/Configuration.html#getPassword-java.lang.String-) method. If there is no credential provider path configuration then
-getPassword will skip the credential provider API interrogation. So, it is important that the
-following be configured within core-site.xml or your component's equivalent.
-
-```
-    <property>
-      <name>hadoop.security.credential.provider.path</name>
-      <value>localjceks://file/home/lmccay/aws.jceks</value>
-      <description>Path to interrogate for protected credentials.</description>
-    </property>
+`Configuration.getPassword()` will skip the credential provider API interrogation. So, it is important that the
+following be configured within `core-site.xml` or your component's equivalent.
+
+```xml
+<property>
+  <name>hadoop.security.credential.provider.path</name>
+  <value>localjceks://file/home/lmccay/aws.jceks</value>
+  <description>Path to interrogate for protected credentials.</description>
+</property>
 ```
 
 A couple additional things to note about the provider path:
 
 1. The scheme is used to indicate the type of provider; in the above case, the
- localjceks provider does not have a dependency on the Hadoop fs abstraction
+ `localjceks` provider does not have a dependency on the Hadoop FileSystem APIs,
  and is needed sometimes to avoid a recursive dependency. Another provider
- represented by jceks, does use the Hadoop fs abstraction and therefore has
- support for keystores provisioned within HDFS. A third provider type is the
- user type. This provider can manage credentials stored within the Credentials
+ represented by `jceks`, does use the Hadoop FileSystem APIs and can
+ support keystores provisioned within HDFS or other compatible filesystems.
+ A third provider type is the `user` type.
+  This provider can manage credentials stored within the Credentials
  file for a process.
 2. The path configuration accepts a comma separated path of providers or
- credential stores. The [Configuration.getPassword](../../api/org/apache/hadoop/conf/Configuration.html#getPassword-java.lang.String-) method will walk through
- all of the providers until it resolves the alias or exhausts the list.
+ credential stores. The [Configuration.getPassword](../../api/org/apache/hadoop/conf/Configuration.html#getPassword-java.lang.String-) method will query each of the providers, in order, until it resolves the alias or exhausts the list.
  Depending on the runtime needs for credentials, we may need to configure
  a chain of providers to check.
 
@@ -98,13 +100,13 @@ In summary, first, provision the credentials into a provider then configure the
 |:---- |:---- |:---|
 |LDAPGroupsMapping    |LDAPGroupsMapping is used to look up the groups for a given user in LDAP. The CredentialProvider API is used to protect the LDAP bind password and those needed for SSL.|[LDAP Groups Mapping](GroupsMapping.html#LDAP_Groups_Mapping)|
 |SSL Passwords        |FileBasedKeyStoresFactory leverages the credential provider API in order to resolve the SSL related passwords.|TODO|
-|HDFS                 |DFSUtil leverages Configuration.getPassword method to use the credential provider API and/or fallback to the clear text value stored in ssl-server.xml. Zookeeper based federation state store and failover controller use Configuration.getPassword to get the Zookeeper authentication info, with fallback provided to clear text auth info.|TODO|
+|HDFS                 |DFSUtil uses `Configuration.getPassword()` to use the credential provider API and/or fall back to the clear text value stored in `ssl-server.xml`. Zookeeper-based federation state store and failover controller use Configuration.getPassword to get the Zookeeper authentication info, with fallback provided to clear text auth info.|TODO|
 |YARN                 |WebAppUtils uptakes the use of the credential provider API through the new method on Configuration called getPassword. This provides an alternative to storing the passwords in clear text within the ssl-server.xml file while maintaining backward compatibility. Zookeeper based resource manager state store uses Configuration.getPassword to get the Zookeeper authentication info, with fallback provided to clear text auth info.|TODO|
 |KMS                  |Uses HttpServer2.loadSSLConfiguration that leverages Configuration.getPassword to read SSL related credentials. They may be resolved through Credential Provider and/or from the clear text in the config when allowed.|[KMS](../../hadoop-kms/index.html)|
 |HttpFS               |Uses HttpServer2.loadSSLConfiguration that leverages Configuration.getPassword to read SSL related credentials. They may be resolved through Credential Provider and/or from the clear text in the  config when allowed.|[HttpFS Server Setup](../../hadoop-hdfs-httpfs/ServerSetup.html)|
-|AWS <br/> S3/S3A     |Uses Configuration.getPassword to get the S3 credentials. They may be resolved through the credential provider API or from the config for backward compatibility.|[AWS S3/S3A Usage](../../hadoop-aws/tools/hadoop-aws/index.html)|
-|Azure <br/> WASB     |Uses Configuration.getPassword to get the WASB credentials. They may be resolved through the credential provider API or from the config for backward compatibility.|[Azure WASB Usage](../../hadoop-azure/index.html)|
-|Azure <br/> ADLS     |Uses Configuration.getPassword to get the ADLS credentials. They may be resolved through the credential provider API or from the config for backward compatibility.|[Azure ADLS Usage](../../hadoop-azure-datalake/index.html)|
+|AWS <br/> S3A     |Uses `Configuration.getPassword` to get the S3 credentials. They may be resolved through the credential provider API or from the config for backward compatibility.|[AWS S3/S3A Usage](../../hadoop-aws/tools/hadoop-aws/index.html)|
+|Azure <br/> WASB     |Uses `Configuration.getPassword` to get the WASB credentials. They may be resolved through the credential provider API or from the config for backward compatibility.|[Azure WASB Usage](../../hadoop-azure/index.html)|
+|Azure <br/> ADLS     |Uses `Configuration.getPassword` to get the ADLS credentials. They may be resolved through the credential provider API or from the config for backward compatibility.|[Azure ADLS Usage](../../hadoop-azure-datalake/index.html)|
 |Apache <br/> Accumulo|The trace.password property is used by the Tracer to authenticate with Accumulo and persist the traces in the trace table. The credential provider API is used to acquire the trace.password from a provider or from configuration for backward compatibility.|TODO|
 |Apache <br/> Slider  |A capability has been added to Slider to prompt the user for needed passwords and store them using CredentialProvider so they can be retrieved by an app later.|TODO|
 |Apache <br/> Hive    |Protection of the metastore password, SSL related passwords and JDO string password has been added through the use of the Credential Provider API|TODO|
@@ -114,13 +116,13 @@ In summary, first, provision the credentials into a provider then configure the
 
 ### Credential Management
 
-#### The hadoop credential Command
+#### The `hadoop credential` Command
 
 Usage: `hadoop credential <subcommand> [options]`
 
 See the command options detail in the [Commands Manual](CommandsManual.html#credential)
 
-Utilizing the credential command will often be for provisioning a password or secret to a particular credential store provider. In order to explicitly indicate which provider store to use the `-provider` option should be used.
+The credential command can be used to provision a password or secret to a particular credential store provider. In order to explicitly indicate which provider store to use, the `-provider` option should be used.
 
 Example: `hadoop credential create ssl.server.keystore.password -provider jceks://file/tmp/test.jceks`
 
@@ -128,13 +130,51 @@ In order to indicate a particular provider type and location, the user must prov
 
 #### Provider Types
 
-1. The `UserProvider`, which is representd by the provider URI `user:///`, is used to retrieve credentials from a user's Credentials file. This file is used to store various tokens, secrets and passwords that are needed by executing jobs and applications.
-2. The `JavaKeyStoreProvider`, which is represented by the provider URI `jceks://file|hdfs/path-to-keystore`, is used to retrieve credentials from a Java keystore. The underlying use of the Hadoop filesystem abstraction allows credentials to be stored on the local filesystem or within HDFS.
+1. The `UserProvider`, which is represented by the provider URI `user:///`, is used to retrieve credentials from a user's Credentials file. This file is used to store various tokens, secrets and passwords that are needed by executing jobs and applications.
+2. The `JavaKeyStoreProvider`, which is represented by the provider URI `jceks://SCHEME/path-to-keystore`, is used to retrieve credentials from a Java keystore file in a filesystem `<SCHEME>`.
+ The underlying use of the Hadoop filesystem API allows credentials to be stored on the local filesystem or within cluster stores.
 3. The `LocalJavaKeyStoreProvider`, which is represented by the provider URI `localjceks://file/path-to-keystore`, is used to access credentials from a Java keystore that must be stored on the local filesystem. This is needed for credentials that would result in a recursive dependency on accessing HDFS. Anytime that your credential is required to gain access to HDFS we can't depend on getting a credential out of HDFS to do so.
 
+When credentials are stored in a filesystem, the following rules apply:
+
+* Credentials stored in local `localjceks://` files are loaded in the process reading in the configuration.
+  For use in a YARN application, this means that they must be visible across the entire cluster, in the local filesystems of the hosts.
+
+* Credentials stored with the `jceks://` provider can be stored in the cluster filesystem,
+and so visible across the cluster, but not in the filesystem which requires the specific
+credentials for their access.
+
+To wrap filesystem URIs with a `jceks` URI follow these steps:
+
+1. Take a filesystem URI such as `hdfs://namenode:9001/users/alice/secrets.jceks`
+1. Place `jceks://` in front of the URL: `jceks://hdfs://namenode:9001/users/alice/secrets.jceks`
+1. Replace the second `://` string with an `@` symbol: `jceks://hdfs@namenode:9001/users/alice/secrets.jceks`
+
+*Examples*
+
+For a local filesystem, a path such as `file:///tmp/secrets.jceks` would become: `jceks://file/tmp/secrets.jceks`
+
+|  Path URI | jceks URI |
+|-----------|-----------|
+| `hdfs://namenode.example.org:9001/user/alice/secret.jceks` | `jceks://hdfs@namenode.example.org:9001/user/alice/secret.jceks` |
+| `file:///tmp/secrets.jceks` | `jceks://file/tmp/secrets.jceks` |
+| `s3a://container1/secrets/secret.jceks` | `jceks://s3a@container1/secrets/secret.jceks` |
+| `wasb://account@container/secret.jceks` | `jceks://wasb@account@container/secret.jceks` |
+| `abfs://account@container/secret.jceks` | `jceks://abfs@account@container/secret.jceks` |
+| `https://user:pass@service/secret.jceks?token=aia` | `jceks://https@user:pass@service/secret.jceks?token=aia` |
+
+
+Note that to avoid infinite recursion, filesystems such as `abfs`, `s3a`, `adls`
+and `wasb` explicitly exclude keystores stored on paths in their own filesystem
+schemes, even if they are stored in a container which uses a different set of
+credentials from those being looked up.
+
+As an example, you cannot use credentials stored in `s3a://shared/secrets/secret.jceks`
+to read the credentials for the container `s3a://private/` .
+
 #### Keystore Passwords
 
-Keystores in Java are generally protected by passwords. The primary method of protection of the keystore-based credential providers are OS level file permissions and any other policy based access protection that may exist for the target filesystem. While the password is not a primary source of protection, it is very important to understand the mechanics required and options available for managing these passwords. It is also very important to understand all the parties that will need access to the password used to protect the keystores in order to consume them at runtime.
+Keystores in Java are generally protected by passwords. The primary method of protection of the keystore-based credential providers is OS-level file permissions and any other policy-based access protection that may exist for the target filesystem. While the password is not a primary source of protection, it is very important to understand the mechanics required and options available for managing these passwords. It is also very important to understand all the parties that will need access to the password used to protect the keystores in order to consume them at runtime.
 
 ##### Options
 | Option | Description | Notes |
@@ -149,14 +189,34 @@ Extremely important to consider that *all* of the runtime consumers of the crede
 |Keystore Password| Description|Sync Required|Clear Text|File Permissions|
 |:---- |:---- |:---|:---|:---|
 |Default Password|Hardcoded password is the default. Essentially, when using the default password for all keystore-based credential stores, we are leveraging the file permissions to protect the credential store and the keystore password is just a formality of persisting the keystore.|No|Yes|No (documented)|
-|Environment Variable|`HADOOP_CREDSTORE_PASSWORD` Environment variable must be set to the custom password for all keystores that may be configured in the provider path of any process that needs to access credentials from a keystore-based credential provider. There is only one env variable for the entire path of comma separated providers. It is difficult to know the passwords required for each keystore and it is suggested that the same be used for all keystore-based credential providers to avoid this issue. Setting the environment variable will likely require it to be set from a script or some other clear text storage mechanism. Environment variables for running processes are available from various unix commands.|Yes|Yes|No|
+|Environment Variable|The `HADOOP_CREDSTORE_PASSWORD` environment variable must be set to the custom password for all keystores that may be configured in the provider path of any process that needs to access credentials from a keystore-based credential provider. There is only one env variable for the entire path of comma-separated providers. It is difficult to know the passwords required for each keystore and it is suggested that the same be used for all keystore-based credential providers to avoid this issue. Setting the environment variable will likely require it to be set from a script or some other clear text storage mechanism. Environment variables for running processes are available from various unix commands.|Yes|Yes|No|
 |Password File|`hadoop.security.credstore.java-keystore-provider.password-file` configuration property must be set to the location of the "side file" that contains the custom password for all keystores that may be configured in the provider path. Any process that needs to access credentials from a keystore-based credential provider will need to have this configuration property set to the appropriate file location. There is only one password-file for the entire path of comma separated providers. It is difficult to know the passwords required for each keystore and it is therefore suggested that the same be used for all keystore-based credential providers to avoid this issue. Password-files are additional files that need to be managed, store the password in clear text and need file permissions to be set such that only those that need access to them have it. If file permissions are set inappropriately the password to access the keystores is available in clear text.|Yes|Yes|Yes|
 
 The use of the default password means that no additional communication/synchronization to runtime consumers needs to be done. The default password is known but file permissions are the primary protection of the keystore.
 
-When file permissions are thwarted, unlike "side files", there are no standard tools that can expose the protected credentials - even with the password known. Keytool requires a password that is six characters or more and doesn't know how to retrieve general secrets from a keystore. It is also limited to PKI keypairs. Editors will not review the secrets stored within the keystore, nor will `cat`, `more` or any other standard tools. This is why the keystore providers are better than "side file" storage of credentials.
+When file permissions are thwarted, unlike "side files", there are no standard tools that can expose the protected credentials - even with the password known. Keytool requires a password that is six characters or more and doesn't know how to retrieve general secrets from a keystore. It is also limited to PKI keypairs. Editors will not reveal the secrets stored within the keystore, nor will `cat`, `more` or any other standard tools. This is why the keystore providers are better than "side file" storage of credentials.
 
-That said, it is trivial for someone to write code to access the credentials stored within a keystore-based credential provider using the API. Again, when using the default password, the password is merely a formality of persisting the keystore. The *only* protection is file  permissions and OS level access policy.
+That said, it is trivial for someone to write code to access the credentials stored within a keystore-based credential provider using the API. Again, when using the default password, the password is merely a formality of persisting the keystore. The *only* protection is file permissions and OS level access policy.
 
 Users may decide to use a password "side file" to store the password for the keystores themselves and this is supported. It is just really important to be aware of the mechanics required for this level of correctness.
 
+#### Disabling fallback to plain text
+
+The `Credentials.getPassword()` operation falls back to using entries in the configuration XML files if there are no credential providers, or if a key cannot be found.
+
+This action can be disabled by changing the configuration option `hadoop.security.credential.clear-text-fallback` from `true` to `false`:
+
+```xml
+<property>
+  <name>hadoop.security.credential.clear-text-fallback</name>
+  <value>false</value>
+  <description>
+    true or false to indicate whether or not to fall back to storing credential
+    password as clear text. The default value is true. This property only works
+    when the password can't be found from credential providers.
+  </description>
+</property>
+```
+
+Once set, *all configuration options looked up via the `getPassword()` API must
+be served via a credential provider*.
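
To complement the doc changes above, here is a minimal consumer-side sketch of `Configuration.getPassword()`; the provider path and alias are the illustrative values used in the examples above, not anything this patch adds.

    import java.io.IOException;

    import org.apache.hadoop.conf.Configuration;

    public final class ResolvePassword {
      public static void main(String[] args) throws IOException {
        Configuration conf = new Configuration();
        // Point the lookup at a provisioned keystore (illustrative location).
        conf.set("hadoop.security.credential.provider.path",
            "jceks://hdfs@namenode.example.org:9001/user/alice/secret.jceks");

        // The provider path is interrogated first; the clear-text value in the
        // config is used only if hadoop.security.credential.clear-text-fallback
        // is left at its default of true.
        char[] password = conf.getPassword("ssl.server.keystore.password");
        System.out.println(password == null
            ? "alias not found"
            : "resolved a password of length " + password.length);
      }
    }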

http://git-wip-us.apache.org/repos/asf/hadoop/blob/62d98ca9/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/crypto/key/TestKeyProvider.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/crypto/key/TestKeyProvider.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/crypto/key/TestKeyProvider.java
index 9c01175..cb6a1fb 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/crypto/key/TestKeyProvider.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/crypto/key/TestKeyProvider.java
@@ -27,6 +27,7 @@ import org.junit.Test;
 
 import java.io.IOException;
 import java.net.URI;
+import java.net.URISyntaxException;
 import java.security.NoSuchAlgorithmException;
 import java.text.DateFormat;
 import java.text.SimpleDateFormat;
@@ -146,14 +147,29 @@ public class TestKeyProvider {
 
   @Test
   public void testUnnestUri() throws Exception {
-    assertEquals(new Path("hdfs://nn.example.com/my/path"),
-        ProviderUtils.unnestUri(new URI("myscheme://hdfs@nn.example.com/my/path")));
-    assertEquals(new Path("hdfs://nn/my/path?foo=bar&baz=bat#yyy"),
-        ProviderUtils.unnestUri(new URI("myscheme://hdfs@nn/my/path?foo=bar&baz=bat#yyy")));
-    assertEquals(new Path("inner://hdfs@nn1.example.com/my/path"),
-        ProviderUtils.unnestUri(new URI("outer://inner@hdfs@nn1.example.com/my/path")));
-    assertEquals(new Path("user:///"),
-        ProviderUtils.unnestUri(new URI("outer://user/")));
+    assertUnwraps("hdfs://nn.example.com/my/path",
+        "myscheme://hdfs@nn.example.com/my/path");
+    assertUnwraps("hdfs://nn/my/path?foo=bar&baz=bat#yyy",
+        "myscheme://hdfs@nn/my/path?foo=bar&baz=bat#yyy");
+    assertUnwraps("inner://hdfs@nn1.example.com/my/path",
+        "outer://inner@hdfs@nn1.example.com/my/path");
+    assertUnwraps("user:///", "outer://user/");
+    assertUnwraps("wasb://account@container/secret.jceks",
+        "jceks://wasb@account@container/secret.jceks");
+    assertUnwraps("abfs://account@container/secret.jceks",
+        "jceks://abfs@account@container/secret.jceks");
+    assertUnwraps("s3a://container/secret.jceks",
+        "jceks://s3a@container/secret.jceks");
+    assertUnwraps("file:///tmp/secret.jceks",
+        "jceks://file/tmp/secret.jceks");
+    assertUnwraps("https://user:pass@service/secret.jceks?token=aia",
+        "jceks://https@user:pass@service/secret.jceks?token=aia");
+  }
+
+  protected void assertUnwraps(final String unwrapped, final String outer)
+      throws URISyntaxException {
+    assertEquals(new Path(unwrapped),
+        ProviderUtils.unnestUri(new URI(outer)));
   }
 
   private static class MyKeyProvider extends KeyProvider {
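
For code that uses the `CredentialProvider` API directly instead of `Configuration.getPassword()`, provisioning can also be done programmatically, much as the unit tests referenced in the doc do. A rough sketch under the same illustrative provider path; error handling and null checks are omitted.

    import java.io.IOException;
    import java.util.List;

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.security.alias.CredentialProvider;
    import org.apache.hadoop.security.alias.CredentialProviderFactory;

    public final class ProvisionSecret {
      public static void main(String[] args) throws IOException {
        Configuration conf = new Configuration();
        conf.set("hadoop.security.credential.provider.path",
            "jceks://file/tmp/test.jceks");

        // The first provider on the path is used for the write in this sketch.
        List<CredentialProvider> providers =
            CredentialProviderFactory.getProviders(conf);
        CredentialProvider provider = providers.get(0);

        provider.createCredentialEntry("ssl.server.keystore.password",
            "123".toCharArray());
        provider.flush();   // persist the keystore to its backing file

        char[] back = provider
            .getCredentialEntry("ssl.server.keystore.password")
            .getCredential();
        System.out.println("round-tripped " + back.length + " characters");
      }
    }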




[24/50] [abbrv] hadoop git commit: HDDS-728. Datanodes should use different ContainerStateMachine for each pipeline. Contributed by Mukul Kumar Singh.

Posted by su...@apache.org.
HDDS-728. Datanodes should use different ContainerStateMachine for each pipeline.
Contributed by Mukul Kumar Singh.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/902345de
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/902345de
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/902345de

Branch: refs/heads/HDFS-12943
Commit: 902345de66b7ee4ceb03ae4a61ea96c4b6b6eaa7
Parents: bfb720e
Author: Nanda kumar <na...@apache.org>
Authored: Mon Oct 29 19:53:52 2018 +0530
Committer: Nanda kumar <na...@apache.org>
Committed: Mon Oct 29 19:53:52 2018 +0530

----------------------------------------------------------------------
 .../statemachine/DatanodeStateMachine.java      |   3 +
 .../transport/server/ratis/CSMMetrics.java      |   5 +-
 .../server/ratis/ContainerStateMachine.java     |  21 ++-
 .../server/ratis/XceiverServerRatis.java        |  26 ++--
 hadoop-hdds/pom.xml                             |   2 +-
 .../hdds/scm/container/SCMContainerManager.java |   8 +-
 .../hdds/scm/pipeline/TestNodeFailure.java      |   2 +-
 .../apache/hadoop/ozone/MiniOzoneCluster.java   |   8 +-
 .../hadoop/ozone/MiniOzoneClusterImpl.java      |  20 +--
 .../hadoop/ozone/client/rpc/TestBCSID.java      |   2 +-
 .../commandhandler/TestBlockDeletion.java       |   4 +-
 .../hadoop/ozone/web/client/TestKeys.java       |   2 +-
 hadoop-ozone/pom.xml                            |   2 +-
 .../freon/TestFreonWithDatanodeFastRestart.java | 130 +++++++++++++++++++
 .../freon/TestFreonWithDatanodeRestart.java     |  53 +++-----
 15 files changed, 208 insertions(+), 80 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/902345de/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/DatanodeStateMachine.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/DatanodeStateMachine.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/DatanodeStateMachine.java
index 85fa304..4768cf8 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/DatanodeStateMachine.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/DatanodeStateMachine.java
@@ -26,6 +26,8 @@ import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hdds.conf.OzoneConfiguration;
 import org.apache.hadoop.hdds.protocol.DatanodeDetails;
 import org.apache.hadoop.hdds.protocol.proto
+    .StorageContainerDatanodeProtocolProtos.PipelineReportsProto;
+import org.apache.hadoop.hdds.protocol.proto
     .StorageContainerDatanodeProtocolProtos.CommandStatusReportsProto;
 import org.apache.hadoop.hdds.protocol.proto
     .StorageContainerDatanodeProtocolProtos.ContainerReportsProto;
@@ -120,6 +122,7 @@ public class DatanodeStateMachine implements Closeable {
         .addPublisherFor(NodeReportProto.class)
         .addPublisherFor(ContainerReportsProto.class)
         .addPublisherFor(CommandStatusReportsProto.class)
+        .addPublisherFor(PipelineReportsProto.class)
         .build();
   }
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/902345de/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/CSMMetrics.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/CSMMetrics.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/CSMMetrics.java
index b6aed60..9ccf88a 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/CSMMetrics.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/CSMMetrics.java
@@ -24,6 +24,7 @@ import org.apache.hadoop.metrics2.annotation.Metric;
 import org.apache.hadoop.metrics2.annotation.Metrics;
 import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem;
 import org.apache.hadoop.metrics2.lib.MutableCounterLong;
+import org.apache.ratis.protocol.RaftGroupId;
 
 /**
  * This class is for maintaining Container State Machine statistics.
@@ -47,9 +48,9 @@ public class CSMMetrics {
   public CSMMetrics() {
   }
 
-  public static CSMMetrics create() {
+  public static CSMMetrics create(RaftGroupId gid) {
     MetricsSystem ms = DefaultMetricsSystem.instance();
-    return ms.register(SOURCE_NAME,
+    return ms.register(SOURCE_NAME + gid.toString(),
         "Container State Machine",
         new CSMMetrics());
   }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/902345de/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/ContainerStateMachine.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/ContainerStateMachine.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/ContainerStateMachine.java
index bcbf93f..ac0833b 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/ContainerStateMachine.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/ContainerStateMachine.java
@@ -66,7 +66,6 @@ import java.util.Map;
 import java.util.concurrent.CompletableFuture;
 import java.util.concurrent.ConcurrentHashMap;
 import java.util.concurrent.ThreadPoolExecutor;
-import java.util.concurrent.Executors;
 import java.util.concurrent.ExecutorService;
 import java.util.stream.Collectors;
 
@@ -112,6 +111,7 @@ public class ContainerStateMachine extends BaseStateMachine {
       LoggerFactory.getLogger(ContainerStateMachine.class);
   private final SimpleStateMachineStorage storage =
       new SimpleStateMachineStorage();
+  private final RaftGroupId gid;
   private final ContainerDispatcher dispatcher;
   private ThreadPoolExecutor chunkExecutor;
   private final XceiverServerRatis ratisServer;
@@ -127,21 +127,19 @@ public class ContainerStateMachine extends BaseStateMachine {
    */
   private final CSMMetrics metrics;
 
-  public ContainerStateMachine(ContainerDispatcher dispatcher,
+  public ContainerStateMachine(RaftGroupId gid, ContainerDispatcher dispatcher,
       ThreadPoolExecutor chunkExecutor, XceiverServerRatis ratisServer,
-      int  numOfExecutors) {
+      List<ExecutorService> executors) {
+    this.gid = gid;
     this.dispatcher = dispatcher;
     this.chunkExecutor = chunkExecutor;
     this.ratisServer = ratisServer;
+    metrics = CSMMetrics.create(gid);
+    this.numExecutors = executors.size();
+    this.executors = executors.toArray(new ExecutorService[numExecutors]);
     this.writeChunkFutureMap = new ConcurrentHashMap<>();
-    metrics = CSMMetrics.create();
     this.createContainerFutureMap = new ConcurrentHashMap<>();
-    this.numExecutors = numOfExecutors;
-    executors = new ExecutorService[numExecutors];
     containerCommandCompletionMap = new ConcurrentHashMap<>();
-    for (int i = 0; i < numExecutors; i++) {
-      executors[i] = Executors.newSingleThreadExecutor();
-    }
   }
 
   @Override
@@ -207,7 +205,7 @@ public class ContainerStateMachine extends BaseStateMachine {
       throws IOException {
     final ContainerCommandRequestProto proto =
         getRequestProto(request.getMessage().getContent());
-
+    Preconditions.checkArgument(request.getRaftGroupId().equals(gid));
     final StateMachineLogEntryProto log;
     if (proto.getCmdType() == Type.WriteChunk) {
       final WriteChunkRequestProto write = proto.getWriteChunk();
@@ -557,8 +555,5 @@ public class ContainerStateMachine extends BaseStateMachine {
 
   @Override
   public void close() throws IOException {
-    for (int i = 0; i < numExecutors; i++) {
-      executors[i].shutdown();
-    }
   }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/902345de/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/XceiverServerRatis.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/XceiverServerRatis.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/XceiverServerRatis.java
index b5092d9..599f821 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/XceiverServerRatis.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/XceiverServerRatis.java
@@ -76,6 +76,8 @@ import java.util.Objects;
 import java.util.UUID;
 import java.util.concurrent.ArrayBlockingQueue;
 import java.util.concurrent.ThreadPoolExecutor;
+import java.util.concurrent.ExecutorService;
+import java.util.concurrent.Executors;
 import java.util.concurrent.TimeUnit;
 import java.util.concurrent.atomic.AtomicLong;
 
@@ -94,11 +96,12 @@ public final class XceiverServerRatis implements XceiverServerSpi {
   private final int port;
   private final RaftServer server;
   private ThreadPoolExecutor chunkExecutor;
+  private final List<ExecutorService> executors;
+  private final ContainerDispatcher dispatcher;
   private ClientId clientId = ClientId.randomId();
   private final StateContext context;
   private final ReplicationLevel replicationLevel;
   private long nodeFailureTimeoutMs;
-  private ContainerStateMachine stateMachine;
 
   private XceiverServerRatis(DatanodeDetails dd, int port,
       ContainerDispatcher dispatcher, Configuration conf, StateContext context)
@@ -121,18 +124,22 @@ public final class XceiverServerRatis implements XceiverServerSpi {
     this.replicationLevel =
         conf.getEnum(OzoneConfigKeys.DFS_CONTAINER_RATIS_REPLICATION_LEVEL_KEY,
             OzoneConfigKeys.DFS_CONTAINER_RATIS_REPLICATION_LEVEL_DEFAULT);
-    stateMachine = new ContainerStateMachine(dispatcher, chunkExecutor, this,
-        numContainerOpExecutors);
+    this.executors = new ArrayList<>();
+    this.dispatcher = dispatcher;
+    for (int i = 0; i < numContainerOpExecutors; i++) {
+      executors.add(Executors.newSingleThreadExecutor());
+    }
+
     this.server = RaftServer.newBuilder()
         .setServerId(RatisHelper.toRaftPeerId(dd))
         .setProperties(serverProperties)
-        .setStateMachine(stateMachine)
+        .setStateMachineRegistry(this::getStateMachine)
         .build();
   }
 
-  @VisibleForTesting
-  public ContainerStateMachine getStateMachine() {
-    return stateMachine;
+  private ContainerStateMachine getStateMachine(RaftGroupId gid) {
+    return new ContainerStateMachine(gid, dispatcher, chunkExecutor,
+            this, Collections.unmodifiableList(executors));
   }
 
   private RaftProperties newRaftProperties(Configuration conf) {
@@ -310,8 +317,11 @@ public final class XceiverServerRatis implements XceiverServerSpi {
   @Override
   public void stop() {
     try {
-      chunkExecutor.shutdown();
+      // shutdown server before the executors as while shutting down,
+      // some of the tasks would be executed using the executors.
       server.close();
+      chunkExecutor.shutdown();
+      executors.forEach(ExecutorService::shutdown);
     } catch (IOException e) {
       throw new RuntimeException(e);
     }
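
The move from `setStateMachine` to `setStateMachineRegistry` above is what gives each pipeline (each Raft group) its own `ContainerStateMachine`. The sketch below only illustrates that registry pattern: the caching map is an assumption for clarity (in the patch, Ratis keeps the instance returned for each group), and only the Ratis types visible in the diff are assumed.

    import java.util.concurrent.ConcurrentHashMap;
    import java.util.concurrent.ConcurrentMap;

    import org.apache.ratis.protocol.RaftGroupId;
    import org.apache.ratis.statemachine.StateMachine;

    /** One state machine per Raft group id, created lazily on first use. */
    public abstract class PerGroupStateMachineRegistry {
      private final ConcurrentMap<RaftGroupId, StateMachine> machines =
          new ConcurrentHashMap<>();

      /** Suitable for RaftServer.newBuilder().setStateMachineRegistry(this::get). */
      public StateMachine get(RaftGroupId gid) {
        // Every pipeline's group gets its own instance, so per-group state such
        // as CSMMetrics and applied-index tracking is not shared across pipelines.
        return machines.computeIfAbsent(gid, this::create);
      }

      /** In XceiverServerRatis this builds new ContainerStateMachine(gid, ...). */
      protected abstract StateMachine create(RaftGroupId gid);
    }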

http://git-wip-us.apache.org/repos/asf/hadoop/blob/902345de/hadoop-hdds/pom.xml
----------------------------------------------------------------------
diff --git a/hadoop-hdds/pom.xml b/hadoop-hdds/pom.xml
index bedf78d..f960e90 100644
--- a/hadoop-hdds/pom.xml
+++ b/hadoop-hdds/pom.xml
@@ -45,7 +45,7 @@ http://maven.apache.org/xsd/maven-4.0.0.xsd">
     <hdds.version>0.4.0-SNAPSHOT</hdds.version>
 
     <!-- Apache Ratis version -->
-    <ratis.version>0.3.0-aa38160-SNAPSHOT</ratis.version>
+    <ratis.version>0.3.0-2272086-SNAPSHOT</ratis.version>
 
     <bouncycastle.version>1.60</bouncycastle.version>
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/902345de/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/SCMContainerManager.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/SCMContainerManager.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/SCMContainerManager.java
index 1666b7c..0f980dc1 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/SCMContainerManager.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/SCMContainerManager.java
@@ -523,9 +523,13 @@ public class SCMContainerManager implements ContainerManager {
       try {
         containerStateManager.updateContainerReplica(id, replica);
         ContainerInfo currentInfo = containerStateManager.getContainer(id);
-        if (newInfo.getState() == LifeCycleState.CLOSING
-            && currentInfo.getState() == LifeCycleState.CLOSED) {
+        if (newInfo.getState() == LifeCycleState.CLOSED
+            && currentInfo.getState() == LifeCycleState.CLOSING) {
           currentInfo = updateContainerStateInternal(id, LifeCycleEvent.CLOSE);
+          if (!currentInfo.isOpen()) {
+            pipelineManager.removeContainerFromPipeline(
+                currentInfo.getPipelineID(), id);
+          }
         }
 
         HddsProtos.SCMContainerInfo newState =

http://git-wip-us.apache.org/repos/asf/hadoop/blob/902345de/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestNodeFailure.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestNodeFailure.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestNodeFailure.java
index 45886c6..9a1c705 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestNodeFailure.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestNodeFailure.java
@@ -118,7 +118,7 @@ public class TestNodeFailure {
         pipelineManager.getPipeline(ratisContainer2.getPipeline().getId())
             .getPipelineState());
     // Now restart the datanode and make sure that a new pipeline is created.
-    cluster.restartHddsDatanode(dnToFail);
+    cluster.restartHddsDatanode(dnToFail, true);
     ContainerWithPipeline ratisContainer3 =
         containerManager.allocateContainer(RATIS, THREE, "testOwner");
     //Assert that new container is not created from the ratis 2 pipeline

http://git-wip-us.apache.org/repos/asf/hadoop/blob/902345de/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/MiniOzoneCluster.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/MiniOzoneCluster.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/MiniOzoneCluster.java
index d13efb4..3aad7f7 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/MiniOzoneCluster.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/MiniOzoneCluster.java
@@ -156,16 +156,16 @@ public interface MiniOzoneCluster {
    *
    * @param i index of HddsDatanode in the MiniOzoneCluster
    */
-  void restartHddsDatanode(int i) throws InterruptedException,
-      TimeoutException;
+  void restartHddsDatanode(int i, boolean waitForDatanode)
+      throws InterruptedException, TimeoutException;
 
   /**
    * Restart a particular HddsDatanode.
    *
    * @param dn HddsDatanode in the MiniOzoneCluster
    */
-  void restartHddsDatanode(DatanodeDetails dn) throws InterruptedException,
-      TimeoutException, IOException;
+  void restartHddsDatanode(DatanodeDetails dn, boolean waitForDatanode)
+      throws InterruptedException, TimeoutException, IOException;
   /**
    * Shutdown a particular HddsDatanode.
    *

http://git-wip-us.apache.org/repos/asf/hadoop/blob/902345de/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/MiniOzoneClusterImpl.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/MiniOzoneClusterImpl.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/MiniOzoneClusterImpl.java
index ae52451..11bc0e0 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/MiniOzoneClusterImpl.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/MiniOzoneClusterImpl.java
@@ -232,8 +232,8 @@ public final class MiniOzoneClusterImpl implements MiniOzoneCluster {
   }
 
   @Override
-  public void restartHddsDatanode(int i) throws InterruptedException,
-      TimeoutException {
+  public void restartHddsDatanode(int i, boolean waitForDatanode)
+      throws InterruptedException, TimeoutException {
     HddsDatanodeService datanodeService = hddsDatanodes.get(i);
     datanodeService.stop();
     datanodeService.join();
@@ -248,20 +248,24 @@ public final class MiniOzoneClusterImpl implements MiniOzoneCluster {
     conf.setInt(DFS_CONTAINER_RATIS_IPC_PORT, ratisPort);
     conf.setBoolean(DFS_CONTAINER_RATIS_IPC_RANDOM_PORT, false);
     hddsDatanodes.remove(i);
-    // wait for node to be removed from SCM healthy node list.
-    waitForClusterToBeReady();
+    if (waitForDatanode) {
+      // wait for node to be removed from SCM healthy node list.
+      waitForClusterToBeReady();
+    }
     HddsDatanodeService service =
         HddsDatanodeService.createHddsDatanodeService(conf);
     hddsDatanodes.add(i, service);
     service.start(null);
-    // wait for the node to be identified as a healthy node again.
-    waitForClusterToBeReady();
+    if (waitForDatanode) {
+      // wait for the node to be identified as a healthy node again.
+      waitForClusterToBeReady();
+    }
   }
 
   @Override
-  public void restartHddsDatanode(DatanodeDetails dn)
+  public void restartHddsDatanode(DatanodeDetails dn, boolean waitForDatanode)
       throws InterruptedException, TimeoutException, IOException {
-    restartHddsDatanode(getHddsDatanodeIndex(dn));
+    restartHddsDatanode(getHddsDatanodeIndex(dn), waitForDatanode);
   }
 
   @Override
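
For context, the new boolean on restartHddsDatanode() controls whether the restart blocks on waitForClusterToBeReady() before and after bringing the datanode back. A minimal, hypothetical JUnit sketch of the two modes (builder settings borrowed from the tests in this patch):

@Test
public void restartModes() throws Exception {
  OzoneConfiguration conf = new OzoneConfiguration();
  MiniOzoneCluster cluster = MiniOzoneCluster.newBuilder(conf)
      .setNumDatanodes(3)
      .build();
  cluster.waitForClusterToBeReady();

  // Waits for SCM to drop the node and then re-admit it as healthy.
  cluster.restartHddsDatanode(0, true);

  // Fast restart: returns as soon as the HddsDatanodeService is back up,
  // without waiting for SCM (see TestFreonWithDatanodeFastRestart below).
  cluster.restartHddsDatanode(0, false);

  cluster.shutdown();
}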

http://git-wip-us.apache.org/repos/asf/hadoop/blob/902345de/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestBCSID.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestBCSID.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestBCSID.java
index ed4629c..98099be 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestBCSID.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestBCSID.java
@@ -137,7 +137,7 @@ public class TestBCSID {
         omKeyLocationInfo.getBlockCommitSequenceId());
 
     // verify that on restarting the datanode, it reloads the BCSID correctly.
-    cluster.restartHddsDatanode(0);
+    cluster.restartHddsDatanode(0, true);
     Assert.assertEquals(blockCommitSequenceId,
         cluster.getHddsDatanodes().get(0).getDatanodeStateMachine()
             .getContainer().getContainerSet()

http://git-wip-us.apache.org/repos/asf/hadoop/blob/902345de/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestBlockDeletion.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestBlockDeletion.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestBlockDeletion.java
index e4cbad5..63346d2 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestBlockDeletion.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestBlockDeletion.java
@@ -177,7 +177,7 @@ public class TestBlockDeletion {
     // Containers in the DN and SCM should have same delete transactionIds
     // after DN restart. The assertion is just to verify that the state of
     // containerInfos in dn and scm is consistent after dn restart.
-    cluster.restartHddsDatanode(0);
+    cluster.restartHddsDatanode(0, true);
     matchContainerTransactionIds();
 
     // verify PENDING_DELETE_STATUS event is fired
@@ -210,7 +210,7 @@ public class TestBlockDeletion {
     GenericTestUtils.waitFor(() -> logCapturer.getOutput()
             .contains("RetriableDatanodeCommand type=deleteBlocksCommand"),
         500, 5000);
-    cluster.restartHddsDatanode(0);
+    cluster.restartHddsDatanode(0, true);
   }
 
   private void verifyTransactionsCommitted() throws IOException {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/902345de/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/web/client/TestKeys.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/web/client/TestKeys.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/web/client/TestKeys.java
index 1ecedcc..08905eb 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/web/client/TestKeys.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/web/client/TestKeys.java
@@ -326,7 +326,7 @@ public class TestKeys {
 
   private static void restartDatanode(MiniOzoneCluster cluster, int datanodeIdx)
       throws Exception {
-    cluster.restartHddsDatanode(datanodeIdx);
+    cluster.restartHddsDatanode(datanodeIdx, true);
   }
 
   @Test

http://git-wip-us.apache.org/repos/asf/hadoop/blob/902345de/hadoop-ozone/pom.xml
----------------------------------------------------------------------
diff --git a/hadoop-ozone/pom.xml b/hadoop-ozone/pom.xml
index 5e53134..2fcffab 100644
--- a/hadoop-ozone/pom.xml
+++ b/hadoop-ozone/pom.xml
@@ -33,7 +33,7 @@ http://maven.apache.org/xsd/maven-4.0.0.xsd">
     <hadoop.version>3.2.1-SNAPSHOT</hadoop.version>
     <hdds.version>0.4.0-SNAPSHOT</hdds.version>
     <ozone.version>0.4.0-SNAPSHOT</ozone.version>
-    <ratis.version>0.3.0-aa38160-SNAPSHOT</ratis.version>
+    <ratis.version>0.3.0-2272086-SNAPSHOT</ratis.version>
     <bouncycastle.version>1.60</bouncycastle.version>
     <ozone.release>Badlands</ozone.release>
     <declared.ozone.version>${ozone.version}</declared.ozone.version>

http://git-wip-us.apache.org/repos/asf/hadoop/blob/902345de/hadoop-ozone/tools/src/test/java/org/apache/hadoop/ozone/freon/TestFreonWithDatanodeFastRestart.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/tools/src/test/java/org/apache/hadoop/ozone/freon/TestFreonWithDatanodeFastRestart.java b/hadoop-ozone/tools/src/test/java/org/apache/hadoop/ozone/freon/TestFreonWithDatanodeFastRestart.java
new file mode 100644
index 0000000..44f6f1d
--- /dev/null
+++ b/hadoop-ozone/tools/src/test/java/org/apache/hadoop/ozone/freon/TestFreonWithDatanodeFastRestart.java
@@ -0,0 +1,130 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.ozone.freon;
+
+import org.apache.hadoop.hdds.client.ReplicationFactor;
+import org.apache.hadoop.hdds.client.ReplicationType;
+import org.apache.hadoop.hdds.conf.OzoneConfiguration;
+import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
+import org.apache.hadoop.ozone.MiniOzoneCluster;
+import org.apache.hadoop.ozone.container.common.transport
+    .server.XceiverServerSpi;
+import org.apache.hadoop.ozone.container.common.transport.server.ratis
+    .XceiverServerRatis;
+import org.apache.ratis.protocol.RaftGroupId;
+import org.apache.ratis.server.impl.RaftServerImpl;
+import org.apache.ratis.server.impl.RaftServerProxy;
+import org.apache.ratis.server.protocol.TermIndex;
+import org.apache.ratis.statemachine.StateMachine;
+import org.apache.ratis.statemachine.impl.SimpleStateMachineStorage;
+import org.apache.ratis.statemachine.impl.SingleFileSnapshotInfo;
+import org.junit.AfterClass;
+import org.junit.Assert;
+import org.junit.BeforeClass;
+import org.junit.Test;
+
+/**
+ * Tests Freon with Datanode restarts without waiting for pipeline to close.
+ */
+public class TestFreonWithDatanodeFastRestart {
+
+  private static MiniOzoneCluster cluster;
+  private static OzoneConfiguration conf;
+
+  /**
+   * Create a MiniOzoneCluster for testing.
+   * <p>
+   * Ozone is made active by setting OZONE_ENABLED = true
+   *
+   */
+  @BeforeClass
+  public static void init() throws Exception {
+    conf = new OzoneConfiguration();
+    cluster = MiniOzoneCluster.newBuilder(conf)
+      .setHbProcessorInterval(1000)
+      .setHbInterval(1000)
+      .setNumDatanodes(3)
+      .build();
+    cluster.waitForClusterToBeReady();
+  }
+
+  /**
+   * Shutdown MiniOzoneCluster.
+   */
+  @AfterClass
+  public static void shutdown() {
+    if (cluster != null) {
+      cluster.shutdown();
+    }
+  }
+
+  @Test
+  public void testRestart() throws Exception {
+    startFreon();
+    StateMachine sm = getStateMachine();
+    TermIndex termIndexBeforeRestart = sm.getLastAppliedTermIndex();
+    cluster.restartHddsDatanode(0, false);
+    sm = getStateMachine();
+    SimpleStateMachineStorage storage =
+        (SimpleStateMachineStorage)sm.getStateMachineStorage();
+    SingleFileSnapshotInfo snapshotInfo = storage.getLatestSnapshot();
+    TermIndex termInSnapshot = snapshotInfo.getTermIndex();
+    String expectedSnapFile =
+        storage.getSnapshotFile(termIndexBeforeRestart.getTerm(),
+            termIndexBeforeRestart.getIndex()).getAbsolutePath();
+    Assert.assertEquals(snapshotInfo.getFile().getPath().toString(),
+        expectedSnapFile);
+    Assert.assertEquals(termInSnapshot, termIndexBeforeRestart);
+
+    // After restart the term index might have progressed to apply pending
+    // transactions.
+    TermIndex termIndexAfterRestart = sm.getLastAppliedTermIndex();
+    Assert.assertTrue(termIndexAfterRestart.getIndex() >=
+        termIndexBeforeRestart.getIndex());
+    startFreon();
+  }
+
+  private void startFreon() throws Exception {
+    RandomKeyGenerator randomKeyGenerator =
+        new RandomKeyGenerator((OzoneConfiguration) cluster.getConf());
+    randomKeyGenerator.setNumOfVolumes(1);
+    randomKeyGenerator.setNumOfBuckets(1);
+    randomKeyGenerator.setNumOfKeys(1);
+    randomKeyGenerator.setType(ReplicationType.RATIS);
+    randomKeyGenerator.setFactor(ReplicationFactor.THREE);
+    randomKeyGenerator.setKeySize(20971520);
+    randomKeyGenerator.setValidateWrites(true);
+    randomKeyGenerator.call();
+    Assert.assertEquals(1, randomKeyGenerator.getNumberOfVolumesCreated());
+    Assert.assertEquals(1, randomKeyGenerator.getNumberOfBucketsCreated());
+    Assert.assertEquals(1, randomKeyGenerator.getNumberOfKeysAdded());
+    Assert.assertEquals(0, randomKeyGenerator.getUnsuccessfulValidationCount());
+  }
+
+  private StateMachine getStateMachine() throws Exception {
+    XceiverServerSpi server =
+        cluster.getHddsDatanodes().get(0).getDatanodeStateMachine().
+            getContainer().getServer(HddsProtos.ReplicationType.RATIS);
+    RaftServerProxy proxy =
+        (RaftServerProxy)(((XceiverServerRatis)server).getServer());
+    RaftGroupId groupId = proxy.getGroupIds().iterator().next();
+    RaftServerImpl impl = proxy.getImpl(groupId);
+    return impl.getStateMachine();
+  }
+}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/902345de/hadoop-ozone/tools/src/test/java/org/apache/hadoop/ozone/freon/TestFreonWithDatanodeRestart.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/tools/src/test/java/org/apache/hadoop/ozone/freon/TestFreonWithDatanodeRestart.java b/hadoop-ozone/tools/src/test/java/org/apache/hadoop/ozone/freon/TestFreonWithDatanodeRestart.java
index a1c50b6..7cb53d3 100644
--- a/hadoop-ozone/tools/src/test/java/org/apache/hadoop/ozone/freon/TestFreonWithDatanodeRestart.java
+++ b/hadoop-ozone/tools/src/test/java/org/apache/hadoop/ozone/freon/TestFreonWithDatanodeRestart.java
@@ -18,17 +18,11 @@
 
 package org.apache.hadoop.ozone.freon;
 
+import org.apache.hadoop.hdds.HddsConfigKeys;
 import org.apache.hadoop.hdds.client.ReplicationFactor;
 import org.apache.hadoop.hdds.client.ReplicationType;
 import org.apache.hadoop.hdds.conf.OzoneConfiguration;
-import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
 import org.apache.hadoop.ozone.MiniOzoneCluster;
-import org.apache.hadoop.ozone.container.common.transport.server.XceiverServerSpi;
-import org.apache.hadoop.ozone.container.common.transport.server.ratis.ContainerStateMachine;
-import org.apache.hadoop.ozone.container.common.transport.server.ratis.XceiverServerRatis;
-import org.apache.ratis.server.protocol.TermIndex;
-import org.apache.ratis.statemachine.impl.SimpleStateMachineStorage;
-import org.apache.ratis.statemachine.impl.SingleFileSnapshotInfo;
 import org.junit.AfterClass;
 import org.junit.Assert;
 import org.junit.BeforeClass;
@@ -36,7 +30,10 @@ import org.junit.Test;
 
 import java.util.concurrent.TimeUnit;
 
-import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_STALENODE_INTERVAL;
+import static org.apache.hadoop.hdds.scm.ScmConfigKeys
+    .OZONE_SCM_CONTAINER_CREATION_LEASE_TIMEOUT;
+import static org.apache.hadoop.hdds.scm.ScmConfigKeys
+    .OZONE_SCM_STALENODE_INTERVAL;
 
 /**
  * Tests Freon with Datanode restarts.
@@ -56,6 +53,12 @@ public class TestFreonWithDatanodeRestart {
   public static void init() throws Exception {
     conf = new OzoneConfiguration();
     conf.setTimeDuration(OZONE_SCM_STALENODE_INTERVAL, 5, TimeUnit.SECONDS);
+    conf.setTimeDuration(HddsConfigKeys.HDDS_CONTAINER_REPORT_INTERVAL, 1,
+        TimeUnit.SECONDS);
+    conf.setTimeDuration(HddsConfigKeys.HDDS_PIPELINE_REPORT_INTERVAL, 1,
+        TimeUnit.SECONDS);
+    conf.setTimeDuration(OZONE_SCM_CONTAINER_CREATION_LEASE_TIMEOUT, 5,
+        TimeUnit.SECONDS);
     cluster = MiniOzoneCluster.newBuilder(conf)
       .setHbProcessorInterval(1000)
       .setHbInterval(1000)
@@ -76,6 +79,12 @@ public class TestFreonWithDatanodeRestart {
 
   @Test
   public void testRestart() throws Exception {
+    startFreon();
+    cluster.restartHddsDatanode(0, true);
+    startFreon();
+  }
+
+  private void startFreon() throws Exception {
     RandomKeyGenerator randomKeyGenerator =
         new RandomKeyGenerator((OzoneConfiguration) cluster.getConf());
     randomKeyGenerator.setNumOfVolumes(1);
@@ -90,33 +99,5 @@ public class TestFreonWithDatanodeRestart {
     Assert.assertEquals(1, randomKeyGenerator.getNumberOfBucketsCreated());
     Assert.assertEquals(1, randomKeyGenerator.getNumberOfKeysAdded());
     Assert.assertEquals(0, randomKeyGenerator.getUnsuccessfulValidationCount());
-
-    ContainerStateMachine sm = getStateMachine();
-    TermIndex termIndexBeforeRestart = sm.getLastAppliedTermIndex();
-    cluster.restartHddsDatanode(0);
-    sm = getStateMachine();
-    SimpleStateMachineStorage storage =
-        (SimpleStateMachineStorage)sm.getStateMachineStorage();
-    SingleFileSnapshotInfo snapshotInfo = storage.getLatestSnapshot();
-    TermIndex termInSnapshot = snapshotInfo.getTermIndex();
-    String expectedSnapFile =
-        storage.getSnapshotFile(termIndexBeforeRestart.getTerm(),
-            termIndexBeforeRestart.getIndex()).getAbsolutePath();
-    Assert.assertEquals(snapshotInfo.getFile().getPath().toString(),
-        expectedSnapFile);
-    Assert.assertEquals(termInSnapshot, termIndexBeforeRestart);
-
-    // After restart the term index might have progressed to apply pending
-    // transactions.
-    TermIndex termIndexAfterRestart = sm.getLastAppliedTermIndex();
-    Assert.assertTrue(termIndexAfterRestart.getIndex() >=
-        termIndexBeforeRestart.getIndex());
-  }
-
-  private ContainerStateMachine getStateMachine() {
-    XceiverServerSpi server =
-        cluster.getHddsDatanodes().get(0).getDatanodeStateMachine().
-            getContainer().getServer(HddsProtos.ReplicationType.RATIS);
-    return ((XceiverServerRatis)server).getStateMachine();
   }
 }


---------------------------------------------------------------------
To unsubscribe, e-mail: common-commits-unsubscribe@hadoop.apache.org
For additional commands, e-mail: common-commits-help@hadoop.apache.org


[40/50] [abbrv] hadoop git commit: HDDS-762. Fix unit test failure for TestContainerSQLCli & TestSCMMetrics. Contributed by Mukul Kumar Singh.

Posted by su...@apache.org.
HDDS-762. Fix unit test failure for TestContainerSQLCli & TestSCMMetrics.
Contributed by Mukul Kumar Singh.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/e33b61f3
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/e33b61f3
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/e33b61f3

Branch: refs/heads/HDFS-12943
Commit: e33b61f3351c09b00717f6eef32ff7d24345d06e
Parents: f747f5b
Author: Anu Engineer <ae...@apache.org>
Authored: Tue Oct 30 19:16:52 2018 -0700
Committer: Anu Engineer <ae...@apache.org>
Committed: Tue Oct 30 19:16:52 2018 -0700

----------------------------------------------------------------------
 hadoop-hdds/pom.xml                                       |  2 +-
 .../common/transport/server/ratis/TestCSMMetrics.java     | 10 +++++++---
 hadoop-ozone/pom.xml                                      |  2 +-
 .../org/apache/hadoop/ozone/scm/TestContainerSQLCli.java  |  3 ++-
 4 files changed, 11 insertions(+), 6 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/e33b61f3/hadoop-hdds/pom.xml
----------------------------------------------------------------------
diff --git a/hadoop-hdds/pom.xml b/hadoop-hdds/pom.xml
index f960e90..090a537 100644
--- a/hadoop-hdds/pom.xml
+++ b/hadoop-hdds/pom.xml
@@ -45,7 +45,7 @@ http://maven.apache.org/xsd/maven-4.0.0.xsd">
     <hdds.version>0.4.0-SNAPSHOT</hdds.version>
 
     <!-- Apache Ratis version -->
-    <ratis.version>0.3.0-2272086-SNAPSHOT</ratis.version>
+    <ratis.version>0.3.0-1d2ebee-SNAPSHOT</ratis.version>
 
     <bouncycastle.version>1.60</bouncycastle.version>
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/e33b61f3/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/TestCSMMetrics.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/TestCSMMetrics.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/TestCSMMetrics.java
index a5a9641..67db7ff 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/TestCSMMetrics.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/TestCSMMetrics.java
@@ -47,6 +47,7 @@ import org.apache.hadoop.test.GenericTestUtils;
 import org.apache.hadoop.hdds.conf.OzoneConfiguration;
 
 import static org.apache.ratis.rpc.SupportedRpcType.GRPC;
+import org.apache.ratis.protocol.RaftGroupId;
 import org.apache.ratis.util.CheckedBiConsumer;
 
 import java.util.function.BiConsumer;
@@ -104,7 +105,8 @@ public class TestCSMMetrics {
       client.connect();
 
       // Before Read Chunk/Write Chunk
-      MetricsRecordBuilder metric = getMetrics(CSMMetrics.SOURCE_NAME);
+      MetricsRecordBuilder metric = getMetrics(CSMMetrics.SOURCE_NAME +
+          RaftGroupId.valueOf(pipeline.getId().getId()).toString());
       assertCounter("NumWriteStateMachineOps", 0L, metric);
       assertCounter("NumReadStateMachineOps", 0L, metric);
       assertCounter("NumApplyTransactionOps", 0L, metric);
@@ -120,7 +122,8 @@ public class TestCSMMetrics {
       Assert.assertEquals(ContainerProtos.Result.SUCCESS,
           response.getResult());
 
-      metric = getMetrics(CSMMetrics.SOURCE_NAME);
+      metric = getMetrics(CSMMetrics.SOURCE_NAME +
+              RaftGroupId.valueOf(pipeline.getId().getId()).toString());
       assertCounter("NumWriteStateMachineOps", 1L, metric);
       assertCounter("NumApplyTransactionOps", 1L, metric);
 
@@ -132,7 +135,8 @@ public class TestCSMMetrics {
       Assert.assertEquals(ContainerProtos.Result.SUCCESS,
           response.getResult());
 
-      metric = getMetrics(CSMMetrics.SOURCE_NAME);
+      metric = getMetrics(CSMMetrics.SOURCE_NAME +
+          RaftGroupId.valueOf(pipeline.getId().getId()).toString());
       assertCounter("NumReadStateMachineOps", 1L, metric);
       assertCounter("NumApplyTransactionOps", 1L, metric);
     } finally {
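
Since CSMMetrics is now registered per Raft group, the metrics source name is the concatenation of CSMMetrics.SOURCE_NAME and the group id derived from the pipeline. A small helper along these lines would avoid repeating the expression; this is a sketch only, assuming the static MetricsAsserts.getMetrics import already used by this test and that pipeline.getId().getId() yields a java.util.UUID:

private static MetricsRecordBuilder csmMetrics(UUID pipelineId) {
  // Compose the per-group source name the same way the assertions above do:
  // SOURCE_NAME + the RaftGroupId derived from the pipeline's UUID.
  return getMetrics(CSMMetrics.SOURCE_NAME
      + RaftGroupId.valueOf(pipelineId).toString());
}

// usage: csmMetrics(pipeline.getId().getId())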

http://git-wip-us.apache.org/repos/asf/hadoop/blob/e33b61f3/hadoop-ozone/pom.xml
----------------------------------------------------------------------
diff --git a/hadoop-ozone/pom.xml b/hadoop-ozone/pom.xml
index 2fcffab..33af31b 100644
--- a/hadoop-ozone/pom.xml
+++ b/hadoop-ozone/pom.xml
@@ -33,7 +33,7 @@ http://maven.apache.org/xsd/maven-4.0.0.xsd">
     <hadoop.version>3.2.1-SNAPSHOT</hadoop.version>
     <hdds.version>0.4.0-SNAPSHOT</hdds.version>
     <ozone.version>0.4.0-SNAPSHOT</ozone.version>
-    <ratis.version>0.3.0-2272086-SNAPSHOT</ratis.version>
+    <ratis.version>0.3.0-1d2ebee-SNAPSHOT</ratis.version>
     <bouncycastle.version>1.60</bouncycastle.version>
     <ozone.release>Badlands</ozone.release>
     <declared.ozone.version>${ozone.version}</declared.ozone.version>

http://git-wip-us.apache.org/repos/asf/hadoop/blob/e33b61f3/hadoop-ozone/tools/src/test/java/org/apache/hadoop/ozone/scm/TestContainerSQLCli.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/tools/src/test/java/org/apache/hadoop/ozone/scm/TestContainerSQLCli.java b/hadoop-ozone/tools/src/test/java/org/apache/hadoop/ozone/scm/TestContainerSQLCli.java
index 9117838..054e668 100644
--- a/hadoop-ozone/tools/src/test/java/org/apache/hadoop/ozone/scm/TestContainerSQLCli.java
+++ b/hadoop-ozone/tools/src/test/java/org/apache/hadoop/ozone/scm/TestContainerSQLCli.java
@@ -22,6 +22,7 @@ import org.apache.hadoop.hdds.scm.container.SCMContainerManager;
 import org.apache.hadoop.hdds.scm.events.SCMEvents;
 import org.apache.hadoop.hdds.scm.node.NodeManager;
 import org.apache.hadoop.hdds.scm.pipeline.PipelineManager;
+import org.apache.hadoop.hdds.scm.pipeline.SCMPipelineManager;
 import org.apache.hadoop.hdds.server.events.EventQueue;
 import org.apache.hadoop.ozone.MiniOzoneCluster;
 import org.apache.hadoop.ozone.OzoneConfigKeys;
@@ -122,7 +123,7 @@ public class TestContainerSQLCli {
     eventQueue = new EventQueue();
     nodeManager = cluster.getStorageContainerManager().getScmNodeManager();
     PipelineManager pipelineManager =
-        cluster.getStorageContainerManager().getPipelineManager();
+        new SCMPipelineManager(conf, nodeManager, eventQueue);
     containerManager = new SCMContainerManager(conf, nodeManager,
         pipelineManager, eventQueue);
     blockManager = new BlockManagerImpl(


---------------------------------------------------------------------
To unsubscribe, e-mail: common-commits-unsubscribe@hadoop.apache.org
For additional commands, e-mail: common-commits-help@hadoop.apache.org


[48/50] [abbrv] hadoop git commit: HDDS-773. Loading ozone s3 bucket browser could be failed. Contributed by Elek Marton.

Posted by su...@apache.org.
HDDS-773. Loading ozone s3 bucket browser could be failed. Contributed by Elek Marton.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/478b2cba
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/478b2cba
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/478b2cba

Branch: refs/heads/HDFS-12943
Commit: 478b2cba0de5aadf655ac0b5a607760d46cc2a1e
Parents: b519f3f
Author: Bharat Viswanadham <bh...@apache.org>
Authored: Wed Oct 31 07:54:23 2018 -0700
Committer: Bharat Viswanadham <bh...@apache.org>
Committed: Wed Oct 31 07:54:23 2018 -0700

----------------------------------------------------------------------
 hadoop-ozone/dist/src/main/smoketest/s3/README.md      |  2 +-
 hadoop-ozone/s3gateway/pom.xml                         |  6 ++++++
 .../hadoop/ozone/s3/endpoint/BucketEndpoint.java       | 13 ++++++++-----
 3 files changed, 15 insertions(+), 6 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/478b2cba/hadoop-ozone/dist/src/main/smoketest/s3/README.md
----------------------------------------------------------------------
diff --git a/hadoop-ozone/dist/src/main/smoketest/s3/README.md b/hadoop-ozone/dist/src/main/smoketest/s3/README.md
index 884ba2e..70ccda7 100644
--- a/hadoop-ozone/dist/src/main/smoketest/s3/README.md
+++ b/hadoop-ozone/dist/src/main/smoketest/s3/README.md
@@ -23,5 +23,5 @@ You need to
   3. Set bucket/endpointurl during the robot test execution
 
 ```
-robot -v bucket:ozonetest -v OZONE_S3_SET_CREDENTIALS:false -v ENDPOINT_URL:https://s3.us-east-2.amazonaws.com smoketest/s3
+robot -v bucket:ozonetest -v OZONE_TEST:false -v OZONE_S3_SET_CREDENTIALS:false -v ENDPOINT_URL:https://s3.us-east-2.amazonaws.com smoketest/s3
 ```

http://git-wip-us.apache.org/repos/asf/hadoop/blob/478b2cba/hadoop-ozone/s3gateway/pom.xml
----------------------------------------------------------------------
diff --git a/hadoop-ozone/s3gateway/pom.xml b/hadoop-ozone/s3gateway/pom.xml
index 06012cf..52eee5d 100644
--- a/hadoop-ozone/s3gateway/pom.xml
+++ b/hadoop-ozone/s3gateway/pom.xml
@@ -174,5 +174,11 @@
       <version>2.15.0</version>
       <scope>test</scope>
     </dependency>
+    <dependency>
+      <groupId>com.google.code.findbugs</groupId>
+      <artifactId>findbugs</artifactId>
+      <version>3.0.1</version>
+      <scope>provided</scope>
+    </dependency>
   </dependencies>
 </project>

http://git-wip-us.apache.org/repos/asf/hadoop/blob/478b2cba/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/BucketEndpoint.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/BucketEndpoint.java b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/BucketEndpoint.java
index 04e2348..bfbbb33 100644
--- a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/BucketEndpoint.java
+++ b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/BucketEndpoint.java
@@ -46,6 +46,7 @@ import org.apache.hadoop.ozone.s3.endpoint.MultiDeleteResponse.Error;
 import org.apache.hadoop.ozone.s3.exception.OS3Exception;
 import org.apache.hadoop.ozone.s3.exception.S3ErrorTable;
 
+import edu.umd.cs.findbugs.annotations.SuppressFBWarnings;
 import org.apache.commons.lang3.StringUtils;
 import org.apache.hadoop.ozone.s3.util.S3utils;
 import org.apache.http.HttpStatus;
@@ -70,6 +71,7 @@ public class BucketEndpoint extends EndpointBase {
    * for more details.
    */
   @GET
+  @SuppressFBWarnings
   public Response list(
       @PathParam("bucket") String bucketName,
       @QueryParam("delimiter") String delimiter,
@@ -83,12 +85,12 @@ public class BucketEndpoint extends EndpointBase {
       @Context HttpHeaders hh) throws OS3Exception, IOException {
 
     if (browser != null) {
-      try (InputStream browserPage = getClass()
-          .getResourceAsStream("/browser.html")) {
-        return Response.ok(browserPage,
+      InputStream browserPage = getClass()
+          .getResourceAsStream("/browser.html");
+      return Response.ok(browserPage,
             MediaType.TEXT_HTML_TYPE)
             .build();
-      }
+
     }
 
     if (prefix == null) {
@@ -295,7 +297,8 @@ public class BucketEndpoint extends EndpointBase {
     keyMetadata.setSize(next.getDataSize());
     keyMetadata.setETag("" + next.getModificationTime());
     keyMetadata.setStorageClass("STANDARD");
-    keyMetadata.setLastModified(Instant.ofEpochMilli(next.getModificationTime()));
+    keyMetadata.setLastModified(Instant.ofEpochMilli(
+        next.getModificationTime()));
     response.addKey(keyMetadata);
   }
 }


---------------------------------------------------------------------
To unsubscribe, e-mail: common-commits-unsubscribe@hadoop.apache.org
For additional commands, e-mail: common-commits-help@hadoop.apache.org


[03/50] [abbrv] hadoop git commit: YARN-8930. CGroup-based strict container memory enforcement does not work with CGroupElasticMemoryController (haibochen via rkanter)

Posted by su...@apache.org.
YARN-8930. CGroup-based strict container memory enforcement does not work with CGroupElasticMemoryController (haibochen via rkanter)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/f76e3c3d
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/f76e3c3d
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/f76e3c3d

Branch: refs/heads/HDFS-12943
Commit: f76e3c3db789dd6866fa0fef8e014cbfe8c8f80d
Parents: fb2b72e
Author: Robert Kanter <rk...@apache.org>
Authored: Thu Oct 25 10:43:36 2018 -0700
Committer: Robert Kanter <rk...@apache.org>
Committed: Thu Oct 25 11:09:47 2018 -0700

----------------------------------------------------------------------
 .../CGroupsMemoryResourceHandlerImpl.java       |  25 ----
 .../linux/resources/MemoryResourceHandler.java  |  10 --
 .../monitor/ContainersMonitorImpl.java          | 116 ++++++++-----------
 .../TestCGroupsMemoryResourceHandlerImpl.java   |  44 -------
 .../site/markdown/NodeManagerCGroupsMemory.md   |  12 +-
 5 files changed, 60 insertions(+), 147 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/f76e3c3d/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/CGroupsMemoryResourceHandlerImpl.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/CGroupsMemoryResourceHandlerImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/CGroupsMemoryResourceHandlerImpl.java
index 053b796..ee5ce2a 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/CGroupsMemoryResourceHandlerImpl.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/CGroupsMemoryResourceHandlerImpl.java
@@ -34,9 +34,6 @@ import org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.privileg
 import java.io.File;
 import java.util.ArrayList;
 import java.util.List;
-import java.util.Optional;
-
-import static org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.resources.CGroupsHandler.CGROUP_PARAM_MEMORY_OOM_CONTROL;
 
 /**
  * Handler class to handle the memory controller. YARN already ships a
@@ -174,26 +171,4 @@ public class CGroupsMemoryResourceHandlerImpl implements MemoryResourceHandler {
   public List<PrivilegedOperation> teardown() throws ResourceHandlerException {
     return null;
   }
-
-  @Override
-  public Optional<Boolean> isUnderOOM(ContainerId containerId) {
-    try {
-      String status = cGroupsHandler.getCGroupParam(
-          CGroupsHandler.CGroupController.MEMORY,
-          containerId.toString(),
-          CGROUP_PARAM_MEMORY_OOM_CONTROL);
-      if (LOG.isDebugEnabled()) {
-        LOG.debug("cgroups OOM status for " + containerId + ": " + status);
-      }
-      if (status.contains(CGroupsHandler.UNDER_OOM)) {
-        LOG.warn("Container " + containerId + " under OOM based on cgroups.");
-        return Optional.of(true);
-      } else {
-        return Optional.of(false);
-      }
-    } catch (ResourceHandlerException e) {
-      LOG.warn("Could not read cgroups" + containerId, e);
-    }
-    return Optional.empty();
-  }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/f76e3c3d/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/MemoryResourceHandler.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/MemoryResourceHandler.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/MemoryResourceHandler.java
index 1729fc1..013a49f 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/MemoryResourceHandler.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/MemoryResourceHandler.java
@@ -20,18 +20,8 @@ package org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.resourc
 
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
-import org.apache.hadoop.yarn.api.records.ContainerId;
-
-import java.util.Optional;
 
 @InterfaceAudience.Private
 @InterfaceStability.Unstable
 public interface MemoryResourceHandler extends ResourceHandler {
-  /**
-   * check whether a container is under OOM.
-   * @param containerId the id of the container
-   * @return empty if the status is unknown, true is the container is under oom,
-   *         false otherwise
-   */
-  Optional<Boolean> isUnderOOM(ContainerId containerId);
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/f76e3c3d/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/monitor/ContainersMonitorImpl.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/monitor/ContainersMonitorImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/monitor/ContainersMonitorImpl.java
index e5726c8..b9e2c68 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/monitor/ContainersMonitorImpl.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/monitor/ContainersMonitorImpl.java
@@ -22,7 +22,6 @@ import com.google.common.annotations.VisibleForTesting;
 import com.google.common.base.Preconditions;
 import org.apache.hadoop.yarn.exceptions.YarnException;
 import org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.resources.CGroupElasticMemoryController;
-import org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.resources.MemoryResourceHandler;
 import org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.resources.ResourceHandlerModule;
 import org.apache.hadoop.yarn.server.nodemanager.metrics.NodeManagerMetrics;
 import org.slf4j.Logger;
@@ -52,7 +51,6 @@ import org.apache.hadoop.yarn.util.ResourceCalculatorProcessTree;
 import java.util.Arrays;
 import java.util.Map;
 import java.util.Map.Entry;
-import java.util.Optional;
 import java.util.concurrent.ConcurrentHashMap;
 
 /**
@@ -699,75 +697,61 @@ public class ContainersMonitorImpl extends AbstractService implements
                             ProcessTreeInfo ptInfo,
                             long currentVmemUsage,
                             long currentPmemUsage) {
-      Optional<Boolean> isMemoryOverLimit = Optional.empty();
+      if (strictMemoryEnforcement && !elasticMemoryEnforcement) {
+        // When cgroup-based strict memory enforcement is used alone without
+        // elastic memory control, the oom-kill would take care of it.
+        // However, when elastic memory control is also enabled, the oom killer
+        // would be disabled at the root yarn container cgroup level (all child
+        // cgroups would inherit that setting). Hence, we fall back to the
+        // polling-based mechanism.
+        return;
+      }
+      boolean isMemoryOverLimit = false;
       String msg = "";
       int containerExitStatus = ContainerExitStatus.INVALID;
 
-      if (strictMemoryEnforcement && elasticMemoryEnforcement) {
-        // Both elastic memory control and strict memory control are enabled
-        // through cgroups. A container will be frozen by the elastic memory
-        // control mechanism if it exceeds its request, so we check for this
-        // here and kill it. Otherwise, the container will not be killed if
-        // the node never exceeds its limit and the procfs-based
-        // memory accounting is different from the cgroup-based accounting.
-
-        MemoryResourceHandler handler =
-            ResourceHandlerModule.getMemoryResourceHandler();
-        if (handler != null) {
-          isMemoryOverLimit = handler.isUnderOOM(containerId);
-          containerExitStatus = ContainerExitStatus.KILLED_EXCEEDED_PMEM;
-          msg = containerId + " is under oom because it exceeded its" +
-              " physical memory limit";
-        }
-      } else if (strictMemoryEnforcement || elasticMemoryEnforcement) {
-        // if cgroup-based memory control is enabled
-        isMemoryOverLimit = Optional.of(false);
-      }
-
-      if (!isMemoryOverLimit.isPresent()) {
-        long vmemLimit = ptInfo.getVmemLimit();
-        long pmemLimit = ptInfo.getPmemLimit();
-        // as processes begin with an age 1, we want to see if there
-        // are processes more than 1 iteration old.
-        long curMemUsageOfAgedProcesses = pTree.getVirtualMemorySize(1);
-        long curRssMemUsageOfAgedProcesses = pTree.getRssMemorySize(1);
-        if (isVmemCheckEnabled()
-            && isProcessTreeOverLimit(containerId.toString(),
-            currentVmemUsage, curMemUsageOfAgedProcesses, vmemLimit)) {
-          // The current usage (age=0) is always higher than the aged usage. We
-          // do not show the aged size in the message, base the delta on the
-          // current usage
-          long delta = currentVmemUsage - vmemLimit;
-          // Container (the root process) is still alive and overflowing
-          // memory.
-          // Dump the process-tree and then clean it up.
-          msg = formatErrorMessage("virtual",
-              formatUsageString(currentVmemUsage, vmemLimit,
-                  currentPmemUsage, pmemLimit),
-              pId, containerId, pTree, delta);
-          isMemoryOverLimit = Optional.of(true);
-          containerExitStatus = ContainerExitStatus.KILLED_EXCEEDED_VMEM;
-        } else if (isPmemCheckEnabled()
-            && isProcessTreeOverLimit(containerId.toString(),
-            currentPmemUsage, curRssMemUsageOfAgedProcesses,
-            pmemLimit)) {
-          // The current usage (age=0) is always higher than the aged usage. We
-          // do not show the aged size in the message, base the delta on the
-          // current usage
-          long delta = currentPmemUsage - pmemLimit;
-          // Container (the root process) is still alive and overflowing
-          // memory.
-          // Dump the process-tree and then clean it up.
-          msg = formatErrorMessage("physical",
-              formatUsageString(currentVmemUsage, vmemLimit,
-                  currentPmemUsage, pmemLimit),
-              pId, containerId, pTree, delta);
-          isMemoryOverLimit = Optional.of(true);
-          containerExitStatus = ContainerExitStatus.KILLED_EXCEEDED_PMEM;
-        }
+      long vmemLimit = ptInfo.getVmemLimit();
+      long pmemLimit = ptInfo.getPmemLimit();
+      // as processes begin with an age 1, we want to see if there
+      // are processes more than 1 iteration old.
+      long curMemUsageOfAgedProcesses = pTree.getVirtualMemorySize(1);
+      long curRssMemUsageOfAgedProcesses = pTree.getRssMemorySize(1);
+      if (isVmemCheckEnabled()
+          && isProcessTreeOverLimit(containerId.toString(),
+          currentVmemUsage, curMemUsageOfAgedProcesses, vmemLimit)) {
+        // The current usage (age=0) is always higher than the aged usage. We
+        // do not show the aged size in the message, base the delta on the
+        // current usage
+        long delta = currentVmemUsage - vmemLimit;
+        // Container (the root process) is still alive and overflowing
+        // memory.
+        // Dump the process-tree and then clean it up.
+        msg = formatErrorMessage("virtual",
+            formatUsageString(currentVmemUsage, vmemLimit,
+                currentPmemUsage, pmemLimit),
+            pId, containerId, pTree, delta);
+        isMemoryOverLimit = true;
+        containerExitStatus = ContainerExitStatus.KILLED_EXCEEDED_VMEM;
+      } else if (isPmemCheckEnabled()
+          && isProcessTreeOverLimit(containerId.toString(),
+          currentPmemUsage, curRssMemUsageOfAgedProcesses,
+          pmemLimit)) {
+        // The current usage (age=0) is always higher than the aged usage. We
+        // do not show the aged size in the message, base the delta on the
+        // current usage
+        long delta = currentPmemUsage - pmemLimit;
+        // Container (the root process) is still alive and overflowing
+        // memory.
+        // Dump the process-tree and then clean it up.
+        msg = formatErrorMessage("physical",
+            formatUsageString(currentVmemUsage, vmemLimit,
+                currentPmemUsage, pmemLimit),
+            pId, containerId, pTree, delta);
+        isMemoryOverLimit = true;
+        containerExitStatus = ContainerExitStatus.KILLED_EXCEEDED_PMEM;
       }
 
-      if (isMemoryOverLimit.isPresent() && isMemoryOverLimit.get()) {
+      if (isMemoryOverLimit) {
         // Virtual or physical memory over limit. Fail the container and
         // remove
         // the corresponding process tree

http://git-wip-us.apache.org/repos/asf/hadoop/blob/f76e3c3d/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/TestCGroupsMemoryResourceHandlerImpl.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/TestCGroupsMemoryResourceHandlerImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/TestCGroupsMemoryResourceHandlerImpl.java
index 4d3e7e6..0d001bc 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/TestCGroupsMemoryResourceHandlerImpl.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/TestCGroupsMemoryResourceHandlerImpl.java
@@ -31,9 +31,6 @@ import org.junit.Test;
 import org.junit.Assert;
 
 import java.util.List;
-import java.util.Optional;
-
-import static org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.resources.CGroupsHandler.CGROUP_PARAM_MEMORY_OOM_CONTROL;
 import static org.mockito.Mockito.*;
 
 /**
@@ -244,45 +241,4 @@ public class TestCGroupsMemoryResourceHandlerImpl {
         .updateCGroupParam(CGroupsHandler.CGroupController.MEMORY, id,
             CGroupsHandler.CGROUP_PARAM_MEMORY_HARD_LIMIT_BYTES, "1024M");
   }
-
-  @Test
-  public void testContainerUnderOom() throws Exception {
-    Configuration conf = new YarnConfiguration();
-    conf.setBoolean(YarnConfiguration.NM_PMEM_CHECK_ENABLED, false);
-    conf.setBoolean(YarnConfiguration.NM_VMEM_CHECK_ENABLED, false);
-
-    cGroupsMemoryResourceHandler.bootstrap(conf);
-
-    ContainerId containerId = mock(ContainerId.class);
-    when(containerId.toString()).thenReturn("container_01_01");
-
-    when(mockCGroupsHandler.getCGroupParam(
-        CGroupsHandler.CGroupController.MEMORY,
-        containerId.toString(),
-        CGROUP_PARAM_MEMORY_OOM_CONTROL)).thenReturn(CGroupsHandler.UNDER_OOM);
-    Optional<Boolean> outOfOom =
-        cGroupsMemoryResourceHandler.isUnderOOM(containerId);
-    Assert.assertTrue("The container should be reported to run under oom",
-        outOfOom.isPresent() && outOfOom.get().equals(true));
-
-    when(mockCGroupsHandler.getCGroupParam(
-        CGroupsHandler.CGroupController.MEMORY,
-        containerId.toString(),
-        CGROUP_PARAM_MEMORY_OOM_CONTROL)).thenReturn("");
-    outOfOom = cGroupsMemoryResourceHandler.isUnderOOM(containerId);
-    Assert.assertTrue(
-        "The container should not be reported to run under oom",
-        outOfOom.isPresent() && outOfOom.get().equals(false));
-
-    when(mockCGroupsHandler.getCGroupParam(
-        CGroupsHandler.CGroupController.MEMORY,
-        containerId.toString(),
-        CGROUP_PARAM_MEMORY_OOM_CONTROL)).
-        thenThrow(new ResourceHandlerException());
-    outOfOom = cGroupsMemoryResourceHandler.isUnderOOM(containerId);
-    Assert.assertFalse(
-        "No report of the oom status should be available.",
-        outOfOom.isPresent());
-
-  }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/f76e3c3d/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/NodeManagerCGroupsMemory.md
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/NodeManagerCGroupsMemory.md b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/NodeManagerCGroupsMemory.md
index ec93234..d1988a5 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/NodeManagerCGroupsMemory.md
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/NodeManagerCGroupsMemory.md
@@ -20,8 +20,6 @@ YARN has multiple features to enforce container memory limits. There are three t
 2. Strict memory control kills each container that has exceeded its limits. It is using the OOM killer capability of the cgroups Linux kernel feature.
 3. Elastic memory control is also based on cgroups. It allows bursting and starts killing containers only, if the overall system memory usage reaches a limit.
 
-If you use 2. or 3. feature 1. is disabled.
-
 Strict Memory Feature
 ---------------------
 
@@ -131,3 +129,13 @@ Configure the cgroups prerequisites mentioned above.
 `yarn.nodemanager.resource.memory.enforced` should be `false`
 
 `yarn.nodemanager.pmem-check-enabled` or `yarn.nodemanager.vmem-check-enabled` should be `true`. If swapping is turned off the former should be set, the latter should be set otherwise.
+
+
+Configuring elastic memory control and strict container memory enforcement through cgroups
+------------------------------------------
+ADVANCED ONLY
+Elastic memory control and strict container memory enforcement can be enabled at the same time to let the NodeManager over-allocate memory on the node.
+However, elastic memory control changes how strict container memory enforcement through cgroups is performed. Elastic memory control
+disables the OOM killer on the root YARN container cgroup, and the individual container cgroups inherit that setting, so individual
+containers are not killed by the OOM killer when they exceed their memory limit. Strict container memory enforcement in this case falls
+back to the polling-based mechanism.
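
For illustration only, a yarn-site.xml fragment that turns on both features together might look like the sketch below. The enforced and pmem-check property names come from this page; the cgroups enable flag and the elastic-memory-control property name are assumptions based on the Hadoop 3.x NodeManager defaults and should be verified against the target release.

<property>
  <!-- assumed enable flag for cgroups-based memory resource handling -->
  <name>yarn.nodemanager.resource.memory.enabled</name>
  <value>true</value>
</property>
<property>
  <!-- strict per-container enforcement through the cgroups OOM killer -->
  <name>yarn.nodemanager.resource.memory.enforced</name>
  <value>true</value>
</property>
<property>
  <!-- assumed property name for node-level elastic memory control -->
  <name>yarn.nodemanager.elastic-memory-control.enabled</name>
  <value>true</value>
</property>
<property>
  <!-- polling-based check that strict enforcement falls back to -->
  <name>yarn.nodemanager.pmem-check-enabled</name>
  <value>true</value>
</property>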


---------------------------------------------------------------------
To unsubscribe, e-mail: common-commits-unsubscribe@hadoop.apache.org
For additional commands, e-mail: common-commits-help@hadoop.apache.org


[20/50] [abbrv] hadoop git commit: Revert "HADOOP-15864. Job submitter / executor fail when SBN domain name can not resolved. Contributed by He Xiaoqiao."

Posted by su...@apache.org.
Revert "HADOOP-15864. Job submitter / executor fail when SBN domain name can not resolved. Contributed by He Xiaoqiao."

This reverts commit fb2b72e6fce019130e10964a644b94cddbab1c06.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/63e7134d
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/63e7134d
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/63e7134d

Branch: refs/heads/HDFS-12943
Commit: 63e7134df6c4521e973707edcf99746e4f72f03d
Parents: 199703f
Author: Wei-Chiu Chuang <we...@apache.org>
Authored: Sun Oct 28 17:04:59 2018 -0700
Committer: Wei-Chiu Chuang <we...@apache.org>
Committed: Sun Oct 28 17:04:59 2018 -0700

----------------------------------------------------------------------
 .../apache/hadoop/security/SecurityUtil.java    |  9 ++++----
 .../namenode/ha/TestDelegationTokensWithHA.java | 23 --------------------
 2 files changed, 4 insertions(+), 28 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/63e7134d/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/SecurityUtil.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/SecurityUtil.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/SecurityUtil.java
index b573234..aa12b93 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/SecurityUtil.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/SecurityUtil.java
@@ -424,7 +424,7 @@ public final class SecurityUtil {
    */
   public static void setTokenService(Token<?> token, InetSocketAddress addr) {
     Text service = buildTokenService(addr);
-    if (token != null && service != null) {
+    if (token != null) {
       token.setService(service);
       if (LOG.isDebugEnabled()) {
         LOG.debug("Acquired token "+token);  // Token#toString() prints service
@@ -444,10 +444,9 @@ public final class SecurityUtil {
     String host = null;
     if (useIpForTokenService) {
       if (addr.isUnresolved()) { // host has no ip address
-        LOG.warn("unable to resolve host name " + addr
-            + ". Failure to construct a correct token service "
-            + "name may result in operation failures");
-        return null;
+        throw new IllegalArgumentException(
+            new UnknownHostException(addr.getHostName())
+        );
       }
       host = addr.getAddress().getHostAddress();
     } else {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/63e7134d/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestDelegationTokensWithHA.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestDelegationTokensWithHA.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestDelegationTokensWithHA.java
index e78cee9..7076ec6 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestDelegationTokensWithHA.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestDelegationTokensWithHA.java
@@ -332,29 +332,6 @@ public class TestDelegationTokensWithHA {
     }    
   }
 
-  @Test(timeout = 300000)
-  public void testHAUtilClonesDTsDomainNameResolvedFail() throws Exception {
-    final Token<DelegationTokenIdentifier> token =
-        getDelegationToken(fs, "JobTracker");
-
-    UserGroupInformation ugi = UserGroupInformation.createRemoteUser("test");
-
-    URI haUri = new URI("hdfs://my-ha-uri/");
-    token.setService(HAUtilClient.buildTokenServiceForLogicalUri(haUri,
-        HdfsConstants.HDFS_URI_SCHEME));
-    ugi.addToken(token);
-
-    Collection<InetSocketAddress> nnAddrs = new HashSet<InetSocketAddress>();
-    nnAddrs.add(new InetSocketAddress("domainname.doesnot.exist",
-        nn0.getNameNodeAddress().getPort()));
-    nnAddrs.add(new InetSocketAddress("localhost",
-        nn1.getNameNodeAddress().getPort()));
-    HAUtilClient.cloneDelegationTokenForLogicalUri(ugi, haUri, nnAddrs);
-
-    Collection<Token<? extends TokenIdentifier>> tokens = ugi.getTokens();
-    assertEquals(3, tokens.size());
-  }
-
   /**
    * HDFS-3062: DistributedFileSystem.getCanonicalServiceName() throws an
    * exception if the URI is a logical URI. This bug fails the combination of
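
For context on the behaviour being restored: when token services are built from IP addresses (hadoop.security.token.service.use_ip, the default), an unresolved address once again fails fast in SecurityUtil instead of logging a warning and leaving the token service unset. A minimal, hypothetical sketch of what a caller observes (class and host names are placeholders, not part of the patch):

import java.net.InetSocketAddress;

import org.apache.hadoop.security.SecurityUtil;
import org.apache.hadoop.security.token.Token;

class UnresolvedTokenServiceSketch {
  // After the revert, an unresolved NameNode address fails fast again.
  static void setService(Token<?> token) {
    InetSocketAddress addr =
        InetSocketAddress.createUnresolved("nn.does.not.exist", 8020);
    try {
      SecurityUtil.setTokenService(token, addr);
    } catch (IllegalArgumentException e) {
      // e.getCause() is an UnknownHostException naming the unresolved host
    }
  }
}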




[15/50] [abbrv] hadoop git commit: HDFS-14028. HDFS OIV temporary dir deletes folder. Contributed by Adam Antal.

Posted by su...@apache.org.
HDFS-14028. HDFS OIV temporary dir deletes folder.
Contributed by Adam Antal.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/4f10d7e2
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/4f10d7e2
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/4f10d7e2

Branch: refs/heads/HDFS-12943
Commit: 4f10d7e23fc0b757a9e94bc448187a6211b90f10
Parents: 1851d06
Author: Anu Engineer <ae...@apache.org>
Authored: Fri Oct 26 16:41:23 2018 -0700
Committer: Anu Engineer <ae...@apache.org>
Committed: Fri Oct 26 16:41:23 2018 -0700

----------------------------------------------------------------------
 .../offlineImageViewer/PBImageTextWriter.java    |  4 ++--
 .../TestOfflineImageViewer.java                  | 19 +++++++++++++++++++
 2 files changed, 21 insertions(+), 2 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/4f10d7e2/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/PBImageTextWriter.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/PBImageTextWriter.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/PBImageTextWriter.java
index 5a1644c..ee78395 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/PBImageTextWriter.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/PBImageTextWriter.java
@@ -20,7 +20,6 @@ package org.apache.hadoop.hdfs.tools.offlineImageViewer;
 import com.google.common.base.Preconditions;
 import com.google.common.collect.ImmutableList;
 import com.google.common.collect.Lists;
-import org.apache.commons.io.FileUtils;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.permission.PermissionStatus;
@@ -298,7 +297,8 @@ abstract class PBImageTextWriter implements Closeable {
     LevelDBMetadataMap(String baseDir) throws IOException {
       File dbDir = new File(baseDir);
       if (dbDir.exists()) {
-        FileUtils.deleteDirectory(dbDir);
+        throw new IOException("Folder " + dbDir + " already exists! Delete " +
+            "manually or provide another (not existing) directory!");
       }
       if (!dbDir.mkdirs()) {
         throw new IOException("Failed to mkdir on " + dbDir);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/4f10d7e2/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/TestOfflineImageViewer.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/TestOfflineImageViewer.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/TestOfflineImageViewer.java
index 5574d7b..8c8d404 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/TestOfflineImageViewer.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/TestOfflineImageViewer.java
@@ -669,6 +669,25 @@ public class TestOfflineImageViewer {
     }
   }
 
+  @Test(expected = IOException.class)
+  public void testDelimitedWithExistingFolder() throws IOException,
+      InterruptedException {
+    File tempDelimitedDir = null;
+    try {
+      String tempDelimitedDirName = "tempDirDelimited";
+      String tempDelimitedDirPath = new FileSystemTestHelper().
+          getTestRootDir() + "/" + tempDelimitedDirName;
+      tempDelimitedDir = new File(tempDelimitedDirPath);
+      Assert.assertTrue("Couldn't create temp directory!",
+          tempDelimitedDir.mkdirs());
+      testPBDelimitedWriter(tempDelimitedDirPath);
+    } finally {
+      if (tempDelimitedDir != null) {
+        FileUtils.deleteDirectory(tempDelimitedDir);
+      }
+    }
+  }
+
   private void testPBDelimitedWriter(String db)
       throws IOException, InterruptedException {
     final String DELIMITER = "\t";
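
Because the delimited processor now refuses a pre-existing temporary directory instead of silently wiping it, callers should pass a path that does not exist yet and let the writer create it. A small, hypothetical helper sketch (names are placeholders, not part of the patch):

import java.io.File;
import java.io.IOException;
import java.util.UUID;

class FreshOivTempDir {
  // Returns a directory path that does not exist yet; PBImageTextWriter
  // will mkdir it itself and now throws if it already exists.
  static File freshTempDir(File parent) throws IOException {
    File dir = new File(parent, "oiv-delimited-" + UUID.randomUUID());
    if (dir.exists()) {
      throw new IOException(dir + " unexpectedly exists, pick another path");
    }
    return dir;
  }
}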




[09/50] [abbrv] hadoop git commit: HADOOP-15815. Upgrade Eclipse Jetty version to 9.3.24. Contributed by Boris Vulikh.

Posted by su...@apache.org.
HADOOP-15815. Upgrade Eclipse Jetty version to 9.3.24. Contributed by Boris Vulikh.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/e28c00c2
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/e28c00c2
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/e28c00c2

Branch: refs/heads/HDFS-12943
Commit: e28c00c290a06813d156a845b93e806f71413dbf
Parents: 7574d18
Author: Sunil G <su...@apache.org>
Authored: Fri Oct 26 13:45:48 2018 +0530
Committer: Sunil G <su...@apache.org>
Committed: Fri Oct 26 13:46:15 2018 +0530

----------------------------------------------------------------------
 hadoop-project/pom.xml | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/e28c00c2/hadoop-project/pom.xml
----------------------------------------------------------------------
diff --git a/hadoop-project/pom.xml b/hadoop-project/pom.xml
index 963d24b..2247109 100644
--- a/hadoop-project/pom.xml
+++ b/hadoop-project/pom.xml
@@ -35,7 +35,7 @@
 
     <failIfNoTests>false</failIfNoTests>
     <maven.test.redirectTestOutputToFile>true</maven.test.redirectTestOutputToFile>
-    <jetty.version>9.3.19.v20170502</jetty.version>
+    <jetty.version>9.3.24.v20180605</jetty.version>
     <test.exclude>_</test.exclude>
     <test.exclude.pattern>_</test.exclude.pattern>
 




[16/50] [abbrv] hadoop git commit: YARN-8569. Create an interface to provide cluster information to application. Contributed by Eric Yang

Posted by su...@apache.org.
YARN-8569. Create an interface to provide cluster information to application. Contributed by Eric Yang


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/d07e873b
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/d07e873b
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/d07e873b

Branch: refs/heads/HDFS-12943
Commit: d07e873b7db6cb317eccb4768607c1afb505c99b
Parents: 4f10d7e
Author: Billie Rinaldi <bi...@apache.org>
Authored: Thu Oct 25 09:55:05 2018 -0700
Committer: Billie Rinaldi <bi...@apache.org>
Committed: Fri Oct 26 17:57:05 2018 -0700

----------------------------------------------------------------------
 .../hadoop/yarn/api/ApplicationConstants.java   |   9 +-
 .../hadoop/yarn/service/ServiceMaster.java      |   7 +
 .../hadoop/yarn/service/ServiceScheduler.java   |  67 +++++++++
 .../yarn/service/client/ServiceClient.java      | 141 ++++++++++++++++++-
 .../hadoop/yarn/service/utils/HttpUtil.java     | 123 ++++++++++++++++
 .../yarn/service/utils/ServiceApiUtil.java      |   2 -
 .../hadoop/yarn/service/TestServiceAM.java      |  32 ++++-
 .../server/nodemanager/ContainerExecutor.java   |  12 ++
 .../nodemanager/DefaultContainerExecutor.java   |   7 +
 .../nodemanager/LinuxContainerExecutor.java     |  43 ++++++
 .../linux/privileged/PrivilegedOperation.java   |   6 +-
 .../runtime/DockerLinuxContainerRuntime.java    |  18 +++
 .../linux/runtime/docker/DockerRunCommand.java  |   6 +
 .../nodemanager/webapp/NMWebServices.java       |  27 ++++
 .../impl/container-executor.c                   |  97 +++++++++++++
 .../impl/container-executor.h                   |  22 ++-
 .../main/native/container-executor/impl/main.c  |  30 +++-
 .../impl/utils/string-utils.c                   |   9 ++
 .../impl/utils/string-utils.h                   |   6 +
 .../test/test-container-executor.c              |  61 ++++++++
 .../nodemanager/TestLinuxContainerExecutor.java |  11 ++
 .../TestContainersMonitorResourceChange.java    |   4 +
 .../src/site/markdown/DockerContainers.md       |  16 +++
 23 files changed, 745 insertions(+), 11 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/d07e873b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/ApplicationConstants.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/ApplicationConstants.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/ApplicationConstants.java
index 490e95e..eb03fb2 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/ApplicationConstants.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/ApplicationConstants.java
@@ -273,7 +273,14 @@ public interface ApplicationConstants {
      * Final, Docker run support ENTRY_POINT.
      */
     YARN_CONTAINER_RUNTIME_DOCKER_RUN_OVERRIDE_DISABLE(
-        "YARN_CONTAINER_RUNTIME_DOCKER_RUN_OVERRIDE_DISABLE");
+        "YARN_CONTAINER_RUNTIME_DOCKER_RUN_OVERRIDE_DISABLE"),
+
+    /**
+     * $YARN_CONTAINER_RUNTIME_YARN_SYSFS_ENABLE
+     * Final, expose cluster information to container.
+     */
+    YARN_CONTAINER_RUNTIME_YARN_SYSFS_ENABLE(
+        "YARN_CONTAINER_RUNTIME_YARN_SYSFS_ENABLE");
 
     private final String variable;
     private Environment(String variable) {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d07e873b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/ServiceMaster.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/ServiceMaster.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/ServiceMaster.java
index 0caa119..9ac1753 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/ServiceMaster.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/ServiceMaster.java
@@ -45,6 +45,7 @@ import org.apache.hadoop.yarn.conf.YarnConfiguration;
 import org.apache.hadoop.yarn.exceptions.YarnException;
 import org.apache.hadoop.yarn.security.AMRMTokenIdentifier;
 import org.apache.hadoop.yarn.security.client.ClientToAMTokenSecretManager;
+import org.apache.hadoop.yarn.service.api.records.Service;
 import org.apache.hadoop.yarn.service.api.records.ServiceState;
 import org.apache.hadoop.yarn.service.exceptions.BadClusterStateException;
 import org.apache.hadoop.yarn.service.monitor.ServiceMonitor;
@@ -302,6 +303,12 @@ public class ServiceMaster extends CompositeService {
       LOG.info("Service state changed from {} -> {}", curState,
           scheduler.getApp().getState());
     }
+    populateYarnSysFS(scheduler);
+  }
+
+  private static void populateYarnSysFS(ServiceScheduler scheduler) {
+    Service service = scheduler.getApp();
+    scheduler.syncSysFs(service);
   }
 
   private void printSystemEnv() {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d07e873b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/ServiceScheduler.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/ServiceScheduler.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/ServiceScheduler.java
index 9b9305c..249767f 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/ServiceScheduler.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/ServiceScheduler.java
@@ -21,6 +21,9 @@ package org.apache.hadoop.yarn.service;
 import com.google.common.cache.CacheBuilder;
 import com.google.common.cache.CacheLoader;
 import com.google.common.cache.LoadingCache;
+import com.sun.jersey.api.client.ClientResponse;
+import com.sun.jersey.api.client.WebResource.Builder;
+
 import org.apache.commons.io.IOUtils;
 import org.apache.commons.lang3.StringUtils;
 import org.apache.hadoop.conf.Configuration;
@@ -38,6 +41,7 @@ import org.apache.hadoop.registry.client.types.yarn.YarnRegistryAttributes;
 import org.apache.hadoop.security.HadoopKerberosName;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.service.CompositeService;
+import org.apache.hadoop.yarn.api.ApplicationConstants;
 import org.apache.hadoop.yarn.api.protocolrecords.RegisterApplicationMasterResponse;
 import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
 import org.apache.hadoop.yarn.api.records.ApplicationId;
@@ -77,6 +81,7 @@ import org.apache.hadoop.yarn.service.provider.ProviderUtils;
 import org.apache.hadoop.yarn.service.registry.YarnRegistryViewForProviders;
 import org.apache.hadoop.yarn.service.timelineservice.ServiceMetricsSink;
 import org.apache.hadoop.yarn.service.timelineservice.ServiceTimelinePublisher;
+import org.apache.hadoop.yarn.service.utils.HttpUtil;
 import org.apache.hadoop.yarn.service.utils.ServiceApiUtil;
 import org.apache.hadoop.yarn.service.utils.ServiceRegistryUtils;
 import org.apache.hadoop.yarn.service.utils.ServiceUtils;
@@ -90,6 +95,7 @@ import org.slf4j.LoggerFactory;
 import java.io.IOException;
 import java.net.InetSocketAddress;
 import java.net.URI;
+import java.net.URISyntaxException;
 import java.nio.ByteBuffer;
 import java.text.MessageFormat;
 import java.util.Collection;
@@ -1027,4 +1033,65 @@ public class ServiceScheduler extends CompositeService {
   public ServiceUtils.ProcessTerminationHandler getTerminationHandler() {
     return terminationHandler;
   }
+
+  public void syncSysFs(Service yarnApp) {
+    boolean success = true;
+    Configuration conf = getConfig();
+    String spec;
+    boolean useKerberos = UserGroupInformation.isSecurityEnabled();
+    boolean printSyncResult = false;
+    try {
+      String port = conf.get("yarn.nodemanager.webapp.address").split(":")[1];
+      spec = ServiceApiUtil.jsonSerDeser.toJson(yarnApp);
+      for (org.apache.hadoop.yarn.service.api.records.Component c :
+          yarnApp.getComponents()) {
+        Set<String> nodes = new HashSet<String>();
+        boolean update = Boolean.parseBoolean(c.getConfiguration()
+            .getEnv(ApplicationConstants.Environment
+                .YARN_CONTAINER_RUNTIME_YARN_SYSFS_ENABLE.name()));
+        if (!update) {
+          continue;
+        }
+        printSyncResult = true;
+        for (org.apache.hadoop.yarn.service.api.records.Container container :
+            c.getContainers()) {
+          String bareHost = container.getBareHost();
+          nodes.add(bareHost);
+        }
+        for (String bareHost : nodes) {
+          StringBuilder requestPath = new StringBuilder();
+          if (YarnConfiguration.useHttps(conf)) {
+            requestPath.append("https://");
+          } else {
+            requestPath.append("http://");
+          }
+          requestPath.append(bareHost);
+          requestPath.append(":");
+          requestPath.append(port);
+          requestPath.append("/ws/v1/node/yarn/sysfs/");
+          requestPath.append(UserGroupInformation.getCurrentUser()
+              .getShortUserName());
+          requestPath.append("/");
+          requestPath.append(yarnApp.getId());
+          if (!useKerberos) {
+            requestPath.append("?user.name=");
+            requestPath.append(UserGroupInformation.getCurrentUser()
+                .getShortUserName());
+          }
+          Builder builder = HttpUtil.connect(requestPath.toString());
+          ClientResponse response = builder.put(ClientResponse.class, spec);
+          if (response.getStatus()!=ClientResponse.Status.OK.getStatusCode()) {
+            LOG.warn("Error synchronize YARN sysfs: " +
+                response.getEntity(String.class));
+            success = false;
+          }
+        }
+      }
+      if (printSyncResult && success) {
+        LOG.info("YARN sysfs synchronized.");
+      }
+    } catch (IOException | URISyntaxException | InterruptedException e) {
+      LOG.error("Fail to sync service spec: {}", e);
+    }
+  }
 }
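
The syncSysFs method above boils down to one REST call per node: the AM PUTs the serialized Service spec to the NodeManager's new yarn sysfs endpoint. A condensed, hypothetical sketch of a single call (host, port and application id are placeholders; the real code derives them from the configuration and the running containers):

import com.sun.jersey.api.client.ClientResponse;
import com.sun.jersey.api.client.WebResource.Builder;

import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.yarn.service.utils.HttpUtil;

class SysFsPutSketch {
  // PUT the service spec JSON to one NodeManager's yarn sysfs endpoint.
  static boolean putSpec(String nmHost, String nmHttpPort, String appId,
      String specJson) throws Exception {
    String user = UserGroupInformation.getCurrentUser().getShortUserName();
    StringBuilder url = new StringBuilder("http://")  // "https://" when the NM web app runs over HTTPS
        .append(nmHost).append(':').append(nmHttpPort)
        .append("/ws/v1/node/yarn/sysfs/").append(user).append('/').append(appId);
    if (!UserGroupInformation.isSecurityEnabled()) {
      url.append("?user.name=").append(user);  // simple auth only
    }
    Builder builder = HttpUtil.connect(url.toString());
    ClientResponse response = builder.put(ClientResponse.class, specJson);
    return response.getStatus() == ClientResponse.Status.OK.getStatusCode();
  }
}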

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d07e873b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/client/ServiceClient.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/client/ServiceClient.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/client/ServiceClient.java
index 42f04da..91d6367 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/client/ServiceClient.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/client/ServiceClient.java
@@ -20,10 +20,14 @@ package org.apache.hadoop.yarn.service.client;
 
 import com.google.common.annotations.VisibleForTesting;
 
+import org.apache.commons.compress.archivers.tar.TarArchiveEntry;
+import org.apache.commons.compress.archivers.tar.TarArchiveOutputStream;
+import org.apache.commons.compress.utils.IOUtils;
 import org.apache.commons.lang3.StringUtils;
 import org.apache.curator.framework.CuratorFramework;
 import org.apache.curator.framework.CuratorFrameworkFactory;
 import org.apache.curator.retry.RetryNTimes;
+import org.apache.curator.shaded.com.google.common.io.Files;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.conf.Configuration;
@@ -73,6 +77,8 @@ import org.apache.hadoop.yarn.service.api.records.ComponentContainers;
 import org.apache.hadoop.yarn.service.api.records.Container;
 import org.apache.hadoop.yarn.service.api.records.ContainerState;
 import org.apache.hadoop.yarn.service.api.records.Component;
+import org.apache.hadoop.yarn.service.api.records.ConfigFile;
+import org.apache.hadoop.yarn.service.api.records.ConfigFile.TypeEnum;
 import org.apache.hadoop.yarn.service.api.records.Service;
 import org.apache.hadoop.yarn.service.api.records.ServiceState;
 import org.apache.hadoop.yarn.service.conf.SliderExitCodes;
@@ -97,12 +103,18 @@ import org.apache.hadoop.yarn.util.Times;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
+import java.io.BufferedOutputStream;
 import java.io.File;
+import java.io.FileInputStream;
+import java.io.FileOutputStream;
 import java.io.IOException;
+import java.io.OutputStreamWriter;
+import java.io.Writer;
 import java.net.InetSocketAddress;
 import java.net.URI;
 import java.net.URISyntaxException;
 import java.nio.ByteBuffer;
+import java.nio.charset.StandardCharsets;
 import java.text.MessageFormat;
 import java.util.*;
 import java.util.concurrent.ConcurrentHashMap;
@@ -929,6 +941,8 @@ public class ServiceClient extends AppAdminClient implements SliderExitCodes,
     addJarResource(serviceName, localResources);
     // add keytab if in secure env
     addKeytabResourceIfSecure(fs, localResources, app);
+    // add yarn sysfs to localResources
+    addYarnSysFs(appRootDir, localResources, app);
     if (LOG.isDebugEnabled()) {
       printLocalResources(localResources);
     }
@@ -938,8 +952,8 @@ public class ServiceClient extends AppAdminClient implements SliderExitCodes,
     String cmdStr = buildCommandLine(app, conf, appRootDir, hasAMLog4j);
     submissionContext.setResource(Resource.newInstance(YarnServiceConf
         .getLong(YarnServiceConf.AM_RESOURCE_MEM,
-            YarnServiceConf.DEFAULT_KEY_AM_RESOURCE_MEM, app.getConfiguration(),
-            conf), 1));
+            YarnServiceConf.DEFAULT_KEY_AM_RESOURCE_MEM,
+            app.getConfiguration(), conf), 1));
     String queue = app.getQueue();
     if (StringUtils.isEmpty(queue)) {
       queue = conf.get(YARN_QUEUE, DEFAULT_YARN_QUEUE);
@@ -963,6 +977,128 @@ public class ServiceClient extends AppAdminClient implements SliderExitCodes,
     return submissionContext.getApplicationId();
   }
 
+  /**
+   * Compress (tar) the input files to the output file.
+   *
+   * @param files The files to compress
+   * @param output The resulting output file (should end in .tar.gz)
+   * @param bundleRoot
+   * @throws IOException
+   */
+  public static File compressFiles(Collection<File> files, File output,
+      String bundleRoot) throws IOException {
+    try (FileOutputStream fos = new FileOutputStream(output);
+        TarArchiveOutputStream taos = new TarArchiveOutputStream(
+            new BufferedOutputStream(fos))) {
+      taos.setLongFileMode(TarArchiveOutputStream.LONGFILE_GNU);
+      for (File f : files) {
+        addFilesToCompression(taos, f, "sysfs", bundleRoot);
+      }
+    }
+    return output;
+  }
+
+  /**
+   * Compile file list for compression and going recursive for
+   * nested directories.
+   *
+   * @param taos The archive
+   * @param file The file to add to the archive
+   * @param dir The directory that should serve as
+   *            the parent directory in the archive
+   * @throws IOException
+   */
+  private static void addFilesToCompression(TarArchiveOutputStream taos,
+      File file, String dir, String bundleRoot) throws IOException {
+    if (!file.isHidden()) {
+      // Create an entry for the file
+      if (!dir.equals(".")) {
+        if (File.separator.equals("\\")) {
+          dir = dir.replaceAll("\\\\", "/");
+        }
+      }
+      taos.putArchiveEntry(
+          new TarArchiveEntry(file, dir + "/" + file.getName()));
+      if (file.isFile()) {
+        // Add the file to the archive
+        try (FileInputStream input = new FileInputStream(file)) {
+          IOUtils.copy(input, taos);
+          taos.closeArchiveEntry();
+        }
+      } else if (file.isDirectory()) {
+        // close the archive entry
+        if (!dir.equals(".")) {
+          taos.closeArchiveEntry();
+        }
+        // go through all the files in the directory and using recursion, add
+        // them to the archive
+        File[] allFiles = file.listFiles();
+        if (allFiles != null) {
+          for (File childFile : allFiles) {
+            addFilesToCompression(taos, childFile,
+                file.getPath().substring(bundleRoot.length()), bundleRoot);
+          }
+        }
+      }
+    }
+  }
+
+  private void addYarnSysFs(Path path,
+      Map<String, LocalResource> localResources, Service app)
+          throws IOException {
+    List<Component> componentsWithYarnSysFS = new ArrayList<Component>();
+    for(Component c : app.getComponents()) {
+      boolean enabled = Boolean.parseBoolean(c.getConfiguration()
+          .getEnv(ApplicationConstants.Environment
+              .YARN_CONTAINER_RUNTIME_YARN_SYSFS_ENABLE.name()));
+      if (enabled) {
+        componentsWithYarnSysFS.add(c);
+      }
+    }
+    if(componentsWithYarnSysFS.size() == 0) {
+      return;
+    }
+    String buffer = ServiceApiUtil.jsonSerDeser.toJson(app);
+    File tmpDir = Files.createTempDir();
+    if (tmpDir.exists()) {
+      String serviceJsonPath = tmpDir.getAbsolutePath() + "/app.json";
+      File localFile = new File(serviceJsonPath);
+      if (localFile.createNewFile()) {
+        try (Writer writer = new OutputStreamWriter(
+            new FileOutputStream(localFile), StandardCharsets.UTF_8)) {
+          writer.write(buffer);
+        }
+      } else {
+        throw new IOException("Fail to write app.json to temp directory");
+      }
+      File destinationFile = new File(tmpDir.getAbsolutePath() + "/sysfs.tar");
+      if (!destinationFile.createNewFile()) {
+        throw new IOException("Fail to localize sysfs.tar.");
+      }
+      List<File> files = new ArrayList<File>();
+      files.add(localFile);
+      compressFiles(files, destinationFile, "sysfs");
+      LocalResource localResource =
+          fs.submitFile(destinationFile, path, ".", "sysfs.tar");
+      Path serviceJson = new Path(path, "sysfs.tar");
+      for (Component c  : componentsWithYarnSysFS) {
+        ConfigFile e = new ConfigFile();
+        e.type(TypeEnum.ARCHIVE);
+        e.srcFile(serviceJson.toString());
+        e.destFile("/hadoop/yarn");
+        if (!c.getConfiguration().getFiles().contains(e)) {
+          c.getConfiguration().getFiles().add(e);
+        }
+      }
+      localResources.put("sysfs", localResource);
+      if (!tmpDir.delete()) {
+        LOG.warn("Failed to delete temp file: " + tmpDir.getAbsolutePath());
+      }
+    } else {
+      throw new IOException("Fail to localize sysfs resource.");
+    }
+  }
+
   private void setLogAggregationContext(Service app, Configuration conf,
       ApplicationSubmissionContext submissionContext) {
     LogAggregationContext context = Records.newRecord(LogAggregationContext
@@ -1565,4 +1701,5 @@ public class ServiceClient extends AppAdminClient implements SliderExitCodes,
       this.principalName = principalName;
     }
   }
+
 }
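
The tarball produced by addYarnSysFs/compressFiles is expected to hold a single sysfs/app.json entry, which the ARCHIVE-type ConfigFile then unpacks under /hadoop/yarn in the container. A small, hypothetical sketch for inspecting such an archive with commons-compress (verification only, not part of the patch):

import java.io.File;
import java.io.FileInputStream;
import java.io.IOException;

import org.apache.commons.compress.archivers.tar.TarArchiveEntry;
import org.apache.commons.compress.archivers.tar.TarArchiveInputStream;

class SysFsTarInspector {
  // Lists the entries of a sysfs.tar produced by the client, e.g. "sysfs/app.json".
  static void listEntries(File tar) throws IOException {
    try (TarArchiveInputStream in =
             new TarArchiveInputStream(new FileInputStream(tar))) {
      TarArchiveEntry entry;
      while ((entry = in.getNextTarEntry()) != null) {
        System.out.println(entry.getName() + " (" + entry.getSize() + " bytes)");
      }
    }
  }
}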

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d07e873b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/utils/HttpUtil.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/utils/HttpUtil.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/utils/HttpUtil.java
new file mode 100644
index 0000000..ac5c079
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/utils/HttpUtil.java
@@ -0,0 +1,123 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.service.utils;
+
+import java.io.IOException;
+import java.net.URI;
+import java.net.URISyntaxException;
+import java.nio.charset.StandardCharsets;
+import java.security.PrivilegedExceptionAction;
+
+import javax.ws.rs.core.HttpHeaders;
+import javax.ws.rs.core.MediaType;
+
+import org.apache.commons.codec.binary.Base64;
+import org.apache.hadoop.security.UserGroupInformation;
+import org.apache.hadoop.security.authentication.client.AuthenticationException;
+import org.apache.hadoop.security.authentication.util.KerberosUtil;
+import org.ietf.jgss.GSSContext;
+import org.ietf.jgss.GSSException;
+import org.ietf.jgss.GSSManager;
+import org.ietf.jgss.GSSName;
+import org.ietf.jgss.Oid;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import com.sun.jersey.api.client.Client;
+import com.sun.jersey.api.client.WebResource.Builder;
+
+/**
+ * Http connection utilities.
+ *
+ */
+public class HttpUtil {
+  private static final Logger LOG =
+      LoggerFactory.getLogger(HttpUtil.class);
+  private static final Base64 BASE_64_CODEC = new Base64(0);
+
+  protected HttpUtil() {
+    // prevents calls from subclass
+    throw new UnsupportedOperationException();
+  }
+
+  /**
+   * Generate SPNEGO challenge request token.
+   *
+   * @param server - hostname to contact
+   * @throws IOException
+   * @throws InterruptedException
+   */
+  public static String generateToken(String server) throws
+      IOException, InterruptedException {
+    UserGroupInformation currentUser = UserGroupInformation.getCurrentUser();
+    LOG.debug("The user credential is {}", currentUser);
+    String challenge = currentUser
+        .doAs(new PrivilegedExceptionAction<String>() {
+          @Override
+          public String run() throws Exception {
+            try {
+              // This Oid for Kerberos GSS-API mechanism.
+              Oid mechOid = KerberosUtil.getOidInstance("GSS_KRB5_MECH_OID");
+              GSSManager manager = GSSManager.getInstance();
+              // GSS name for server
+              GSSName serverName = manager.createName("HTTP@" + server,
+                  GSSName.NT_HOSTBASED_SERVICE);
+              // Create a GSSContext for authentication with the service.
+              // We're passing client credentials as null since we want them to
+              // be read from the Subject.
+              GSSContext gssContext = manager.createContext(
+                  serverName.canonicalize(mechOid), mechOid, null,
+                  GSSContext.DEFAULT_LIFETIME);
+              gssContext.requestMutualAuth(true);
+              gssContext.requestCredDeleg(true);
+              // Establish context
+              byte[] inToken = new byte[0];
+              byte[] outToken = gssContext.initSecContext(inToken, 0,
+                  inToken.length);
+              gssContext.dispose();
+              // Base64 encoded and stringified token for server
+              LOG.debug("Got valid challenge for host {}", serverName);
+              return new String(BASE_64_CODEC.encode(outToken),
+                  StandardCharsets.US_ASCII);
+            } catch (GSSException | IllegalAccessException
+                | NoSuchFieldException | ClassNotFoundException e) {
+              LOG.error("Error: {}", e);
+              throw new AuthenticationException(e);
+            }
+          }
+        });
+    return challenge;
+  }
+
+  public static Builder connect(String url) throws URISyntaxException,
+      IOException, InterruptedException {
+    boolean useKerberos = UserGroupInformation.isSecurityEnabled();
+    URI resource = new URI(url);
+    Client client = Client.create();
+    Builder builder = client
+        .resource(url).type(MediaType.APPLICATION_JSON);
+    if (useKerberos) {
+      String challenge = generateToken(resource.getHost());
+      builder.header(HttpHeaders.AUTHORIZATION, "Negotiate " +
+          challenge);
+      LOG.debug("Authorization: Negotiate {}", challenge);
+    }
+    return builder;
+  }
+}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d07e873b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/utils/ServiceApiUtil.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/utils/ServiceApiUtil.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/utils/ServiceApiUtil.java
index cab7ddc..b57e632 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/utils/ServiceApiUtil.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/utils/ServiceApiUtil.java
@@ -95,8 +95,6 @@ public class ServiceApiUtil {
   private static final PatternValidator userNamePattern
       = new PatternValidator("[a-z][a-z0-9-.]*");
 
-
-
   @VisibleForTesting
   public static void setJsonSerDeser(JsonSerDeser jsd) {
     jsonSerDeser = jsd;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d07e873b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/test/java/org/apache/hadoop/yarn/service/TestServiceAM.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/test/java/org/apache/hadoop/yarn/service/TestServiceAM.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/test/java/org/apache/hadoop/yarn/service/TestServiceAM.java
index 21e93fa..80f4910 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/test/java/org/apache/hadoop/yarn/service/TestServiceAM.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/test/java/org/apache/hadoop/yarn/service/TestServiceAM.java
@@ -18,7 +18,6 @@
 
 package org.apache.hadoop.yarn.service;
 
-import com.google.common.base.Supplier;
 import com.google.common.collect.ImmutableMap;
 import org.apache.commons.io.FileUtils;
 import org.apache.curator.test.TestingCluster;
@@ -426,4 +425,35 @@ public class TestServiceAM extends ServiceTestUtils{
         am.getComponent("compa").getPendingInstances().size());
     am.stop();
   }
+
+  @Test(timeout = 30000)
+  public void testSyncSysFS() {
+    ApplicationId applicationId = ApplicationId.newInstance(
+        System.currentTimeMillis(), 1);
+    Service exampleApp = new Service();
+    exampleApp.setId(applicationId.toString());
+    exampleApp.setVersion("v1");
+    exampleApp.setName("tensorflow");
+
+    Component compA = createComponent("compa", 1, "pwd");
+    compA.getConfiguration().getEnv().put(
+        "YARN_CONTAINER_RUNTIME_YARN_SYSFS_ENABLE", "true");
+    Artifact artifact = new Artifact();
+    artifact.setType(Artifact.TypeEnum.TARBALL);
+    compA.artifact(artifact);
+    exampleApp.addComponent(compA);
+    try {
+      MockServiceAM am = new MockServiceAM(exampleApp);
+      am.init(conf);
+      am.start();
+      ServiceScheduler scheduler = am.context.scheduler;
+      scheduler.syncSysFs(exampleApp);
+      scheduler.close();
+      am.stop();
+      am.close();
+    } catch (Exception e) {
+      LOG.error("Fail to sync sysfs: {}", e);
+      Assert.fail("Fail to sync sysfs.");
+    }
+  }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d07e873b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/ContainerExecutor.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/ContainerExecutor.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/ContainerExecutor.java
index b3a6df1..6024dbf 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/ContainerExecutor.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/ContainerExecutor.java
@@ -259,6 +259,18 @@ public abstract class ContainerExecutor implements Configurable {
       throws IOException;
 
   /**
+   * Update cluster information inside container.
+   *
+   * @param ctx ContainerRuntimeContext
+   * @param user Owner of application
+   * @param appId YARN application ID
+   * @param spec Service Specification
+   * @throws IOException if there is a failure while writing spec to disk
+   */
+  public abstract void updateYarnSysFS(Context ctx, String user,
+      String appId, String spec) throws IOException;
+
+  /**
    * Recover an already existing container. This is a blocking call and returns
    * only when the container exits.  Note that the container must have been
    * activated prior to this call.

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d07e873b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/DefaultContainerExecutor.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/DefaultContainerExecutor.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/DefaultContainerExecutor.java
index b552c1f..a500c02 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/DefaultContainerExecutor.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/DefaultContainerExecutor.java
@@ -46,6 +46,7 @@ import org.apache.hadoop.fs.FileUtil;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.UnsupportedFileSystemException;
 import org.apache.hadoop.fs.permission.FsPermission;
+import org.apache.hadoop.service.ServiceStateException;
 import org.apache.hadoop.util.Shell;
 import org.apache.hadoop.util.Shell.CommandExecutor;
 import org.apache.hadoop.util.Shell.ExitCodeException;
@@ -1038,4 +1039,10 @@ public class DefaultContainerExecutor extends ContainerExecutor {
     }
     return paths;
   }
+
+  @Override
+  public void updateYarnSysFS(Context ctx, String user,
+      String appId, String spec) throws IOException {
+    throw new ServiceStateException("Implementation unavailable");
+  }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d07e873b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/LinuxContainerExecutor.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/LinuxContainerExecutor.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/LinuxContainerExecutor.java
index 3946eed..0282f58 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/LinuxContainerExecutor.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/LinuxContainerExecutor.java
@@ -67,6 +67,7 @@ import java.io.ByteArrayOutputStream;
 import java.io.InputStream;
 import java.io.OutputStream;
 import java.io.File;
+import java.io.FileOutputStream;
 import java.io.IOException;
 import java.net.InetSocketAddress;
 import java.nio.charset.Charset;
@@ -996,4 +997,46 @@ public class LinuxContainerExecutor extends ContainerExecutor {
           "containerId: {}. Exception: ", containerId, e);
     }
   }
+
+  @Override
+  public synchronized void updateYarnSysFS(Context ctx, String user,
+      String appId, String spec) throws IOException {
+    LocalDirsHandlerService dirsHandler = nmContext.getLocalDirsHandler();
+    Path sysFSPath = dirsHandler.getLocalPathForWrite(
+        "nmPrivate/" + appId + "/sysfs/app.json");
+    File file = new File(sysFSPath.toString());
+    List<String> localDirs = dirsHandler.getLocalDirs();
+    if (file.exists()) {
+      if (!file.delete()) {
+        LOG.warn("Unable to delete " + sysFSPath.toString());
+      }
+    }
+    if (file.createNewFile()) {
+      FileOutputStream output = new FileOutputStream(file);
+      try {
+        output.write(spec.getBytes("UTF-8"));
+      } finally {
+        output.close();
+      }
+    }
+    PrivilegedOperation privOp = new PrivilegedOperation(
+        PrivilegedOperation.OperationType.SYNC_YARN_SYSFS);
+    String runAsUser = getRunAsUser(user);
+    privOp.appendArgs(runAsUser,
+        user,
+        Integer.toString(PrivilegedOperation.RunAsUserCommand
+        .SYNC_YARN_SYSFS.getValue()),
+        appId, StringUtils.join(PrivilegedOperation
+            .LINUX_FILE_PATH_SEPARATOR, localDirs));
+    privOp.disableFailureLogging();
+    PrivilegedOperationExecutor privilegedOperationExecutor =
+        PrivilegedOperationExecutor.getInstance(nmContext.getConf());
+    try {
+      privilegedOperationExecutor.executePrivilegedOperation(null,
+            privOp, null, null, false, false);
+    } catch (PrivilegedOperationException e) {
+      throw new IOException(e);
+    }
+  }
+
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d07e873b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/privileged/PrivilegedOperation.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/privileged/PrivilegedOperation.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/privileged/PrivilegedOperation.java
index 92a82e8..f199662 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/privileged/PrivilegedOperation.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/privileged/PrivilegedOperation.java
@@ -56,7 +56,8 @@ public class PrivilegedOperation {
     LIST_AS_USER(""), // no CLI switch supported yet.
     ADD_NUMA_PARAMS(""), // no CLI switch supported yet.
     REMOVE_DOCKER_CONTAINER("--remove-docker-container"),
-    INSPECT_DOCKER_CONTAINER("--inspect-docker-container");
+    INSPECT_DOCKER_CONTAINER("--inspect-docker-container"),
+    SYNC_YARN_SYSFS("");
 
     private final String option;
 
@@ -153,7 +154,8 @@ public class PrivilegedOperation {
     SIGNAL_CONTAINER(2),
     DELETE_AS_USER(3),
     LAUNCH_DOCKER_CONTAINER(4),
-    LIST_AS_USER(5);
+    LIST_AS_USER(5),
+    SYNC_YARN_SYSFS(6);
 
     private int value;
     RunAsUserCommand(int value) {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d07e873b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/DockerLinuxContainerRuntime.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/DockerLinuxContainerRuntime.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/DockerLinuxContainerRuntime.java
index 436c0ad..2cfa9c5 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/DockerLinuxContainerRuntime.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/DockerLinuxContainerRuntime.java
@@ -179,6 +179,12 @@ import static org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.r
  *     This feature is disabled by default. When this feature is disabled or set
  *     to false, the container will be removed as soon as it exits.
  *   </li>
+ *   <li>
+ *     {@code YARN_CONTAINER_RUNTIME_YARN_SYSFS_ENABLE} allows export yarn
+ *     service json to docker container.  This feature is disabled by default.
+ *     when this feature is set, app.json will be available in
+ *     /hadoop/yarn/sysfs/app.json.
+ *   </li>
  * </ul>
  */
 @InterfaceAudience.Private
@@ -231,6 +237,11 @@ public class DockerLinuxContainerRuntime implements LinuxContainerRuntime {
   @InterfaceAudience.Private
   public static final String ENV_DOCKER_CONTAINER_DELAYED_REMOVAL =
       "YARN_CONTAINER_RUNTIME_DOCKER_DELAYED_REMOVAL";
+  @InterfaceAudience.Private
+  public static final String ENV_DOCKER_CONTAINER_YARN_SYSFS =
+      "YARN_CONTAINER_RUNTIME_YARN_SYSFS_ENABLE";
+  public static final String YARN_SYSFS_PATH =
+      "/hadoop/yarn/sysfs";
   private Configuration conf;
   private Context nmContext;
   private DockerClient dockerClient;
@@ -964,6 +975,12 @@ public class DockerLinuxContainerRuntime implements LinuxContainerRuntime {
 
     addCGroupParentIfRequired(resourcesOpts, containerIdStr, runCommand);
 
+    if(environment.containsKey(ENV_DOCKER_CONTAINER_YARN_SYSFS) &&
+        Boolean.parseBoolean(environment
+            .get(ENV_DOCKER_CONTAINER_YARN_SYSFS))) {
+      runCommand.setYarnSysFS(true);
+    }
+
     if (useEntryPoint) {
       runCommand.setOverrideDisabled(true);
       runCommand.addEnv(environment);
@@ -1438,4 +1455,5 @@ public class DockerLinuxContainerRuntime implements LinuxContainerRuntime {
       }
     }
   }
+
 }
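
As the javadoc above notes, the feature is opt-in per component via YARN_CONTAINER_RUNTIME_YARN_SYSFS_ENABLE; once enabled, the current spec appears inside the container as /hadoop/yarn/sysfs/app.json. A short, hypothetical sketch of flipping the switch through the YARN service API records, mirroring what the new TestServiceAM test does:

import org.apache.hadoop.yarn.service.api.records.Component;
import org.apache.hadoop.yarn.service.api.records.Service;

class EnableYarnSysFsSketch {
  // Opt a component into the YARN sysfs sync; the service client then packages
  // the spec as sysfs.tar and the AM keeps it in sync via the NM web service.
  static void enable(Service service, String componentName) {
    for (Component c : service.getComponents()) {
      if (componentName.equals(c.getName())) {
        c.getConfiguration().getEnv().put(
            "YARN_CONTAINER_RUNTIME_YARN_SYSFS_ENABLE", "true");
      }
    }
  }
}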

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d07e873b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/docker/DockerRunCommand.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/docker/DockerRunCommand.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/docker/DockerRunCommand.java
index 395c1e1..aac8224 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/docker/DockerRunCommand.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/docker/DockerRunCommand.java
@@ -217,4 +217,10 @@ public class DockerRunCommand extends DockerCommand {
   public final void addEnv(Map<String, String> environment) {
     userEnv.putAll(environment);
   }
+
+  public DockerRunCommand setYarnSysFS(boolean toggle) {
+    String value = Boolean.toString(toggle);
+    super.addCommandArguments("use-yarn-sysfs", value);
+    return this;
+  }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d07e873b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/webapp/NMWebServices.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/webapp/NMWebServices.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/webapp/NMWebServices.java
index bb0881b..ca08897 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/webapp/NMWebServices.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/webapp/NMWebServices.java
@@ -38,6 +38,7 @@ import org.slf4j.LoggerFactory;
 import javax.servlet.http.HttpServletRequest;
 import javax.servlet.http.HttpServletResponse;
 import javax.ws.rs.GET;
+import javax.ws.rs.PUT;
 import javax.ws.rs.Path;
 import javax.ws.rs.PathParam;
 import javax.ws.rs.Produces;
@@ -56,6 +57,7 @@ import org.apache.hadoop.classification.InterfaceAudience.Public;
 import org.apache.hadoop.classification.InterfaceStability.Unstable;
 import org.apache.hadoop.http.JettyUtils;
 import org.apache.hadoop.security.UserGroupInformation;
+import org.apache.hadoop.service.ServiceStateException;
 import org.apache.hadoop.yarn.api.records.ApplicationAccessType;
 import org.apache.hadoop.yarn.api.records.ApplicationId;
 import org.apache.hadoop.yarn.api.records.ContainerId;
@@ -554,6 +556,31 @@ public class NMWebServices {
     return new NMResourceInfo();
   }
 
+  @PUT
+  @Path("/yarn/sysfs/{user}/{appId}")
+  @Produces({ MediaType.APPLICATION_JSON + "; " + JettyUtils.UTF_8,
+                MediaType.APPLICATION_XML + "; " + JettyUtils.UTF_8 })
+  public Response syncYarnSysFS(@javax.ws.rs.core.Context
+      HttpServletRequest req,
+      @PathParam("user") String user,
+      @PathParam("appId") String appId,
+      String spec) {
+    if (UserGroupInformation.isSecurityEnabled()) {
+      if (!req.getRemoteUser().equals(user)) {
+        return Response.status(Status.FORBIDDEN).build();
+      }
+    }
+    try {
+      nmContext.getContainerExecutor().updateYarnSysFS(nmContext, user, appId,
+          spec);
+    } catch (IOException | ServiceStateException e) {
+      LOG.error("Fail to sync yarn sysfs for application ID: {}, reason: ",
+          appId, e);
+      return Response.status(Status.INTERNAL_SERVER_ERROR).entity(e).build();
+    }
+    return Response.ok().build();
+  }
+
   private long parseLongParam(String bytes) {
     if (bytes == null || bytes.isEmpty()) {
       return Long.MAX_VALUE;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d07e873b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/container-executor.c
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/container-executor.c b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/container-executor.c
index 1ca94fe..e2130ab 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/container-executor.c
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/container-executor.c
@@ -76,6 +76,7 @@ static const char* DEFAULT_BANNED_USERS[] = {"yarn", "mapred", "hdfs", "bin", 0}
 static const int DEFAULT_DOCKER_SUPPORT_ENABLED = 0;
 static const int DEFAULT_TC_SUPPORT_ENABLED = 0;
 static const int DEFAULT_MOUNT_CGROUP_SUPPORT_ENABLED = 0;
+static const int DEFAULT_YARN_SYSFS_SUPPORT_ENABLED = 0;
 
 static const char* PROC_PATH = "/proc";
 
@@ -506,6 +507,11 @@ int is_mount_cgroups_support_enabled() {
                               &executor_cfg);
 }
 
+int is_yarn_sysfs_support_enabled() {
+  return is_feature_enabled(YARN_SYSFS_SUPPORT_ENABLED_KEY,
+                            DEFAULT_YARN_SYSFS_SUPPORT_ENABLED, &executor_cfg);
+}
+
 /**
  * Utility function to concatenate argB to argA using the concat_pattern.
  */
@@ -1778,6 +1784,27 @@ int create_user_filecache_dirs(const char * user, char* const* local_dirs) {
   return rc;
 }
 
+int create_yarn_sysfs(const char* user, const char *app_id,
+    const char *container_id, const char *work_dir, char* const* local_dirs) {
+  int result = OUT_OF_MEMORY;
+  const mode_t perms = S_IRWXU | S_IXGRP;
+  char* const* local_dir_ptr;
+  for(local_dir_ptr = local_dirs; *local_dir_ptr != NULL; ++local_dir_ptr) {
+    char *container_dir = get_container_work_directory(*local_dir_ptr, user, app_id,
+                                                container_id);
+    if (container_dir == NULL) {
+      return OUT_OF_MEMORY;
+    }
+    char *yarn_sysfs_dir = make_string("%s/%s", container_dir, "sysfs");
+    if (mkdir(yarn_sysfs_dir, perms) == 0) {
+      result = 0;
+    }
+    free(yarn_sysfs_dir);
+    free(container_dir);
+  }
+  return result;
+}
+
 int launch_docker_container_as_user(const char * user, const char *app_id,
                               const char *container_id, const char *work_dir,
                               const char *script_name, const char *cred_file,
@@ -1834,6 +1861,14 @@ int launch_docker_container_as_user(const char * user, const char *app_id,
     goto cleanup;
   }
 
+  exit_code = create_yarn_sysfs(user, app_id, container_id, work_dir, local_dirs);
+  if (exit_code != 0) {
+    fprintf(ERRORFILE, "Could not create user yarn sysfs directory\n");
+    fflush(ERRORFILE);
+    // fall through to cleanup so already-created resources are released
+    goto cleanup;
+  }
+
   docker_command = construct_docker_command(command_file);
   docker_binary = get_docker_binary(&CFG);
 
@@ -2799,6 +2834,68 @@ struct configuration* get_cfg() {
   return &CFG;
 }
 
+char *locate_sysfs_path(const char *src) {
+  char *result = NULL;
+  DIR *dir;
+  struct dirent *entry;
+  if (!(dir = opendir(src))) {
+    return NULL;
+  }
+  while ((entry = readdir(dir)) != NULL) {
+    if (strcmp(entry->d_name, ".") == 0 || strcmp(entry->d_name, "..") == 0) {
+      continue;
+    }
+    char *new_src = make_string("%s/%s", src, entry->d_name);
+    if (str_ends_with(new_src, "/sysfs.tar/sysfs")) {
+      result = new_src;
+      goto cleanup;
+    }
+    result = locate_sysfs_path(new_src);
+    free(new_src);
+    if (result != NULL) {
+      goto cleanup;
+    }
+  }
+cleanup:
+  closedir(dir);
+  return result;
+}
+
+int sync_yarn_sysfs(char* const* local_dir, const char *running_user, const char *end_user, const char *app_id) {
+  int result = OUT_OF_MEMORY;
+  char *src = NULL;
+  char *dest = NULL;
+  char* const* local_dir_ptr;
+
+  for(local_dir_ptr = local_dir; *local_dir_ptr != NULL; ++local_dir_ptr) {
+    char *appcache_dir = make_string("%s/usercache/%s/appcache/%s", *local_dir_ptr, end_user, app_id);
+    char *sysfs_dir = locate_sysfs_path(appcache_dir);
+    char *nm_private_app_dir = make_string("%s/nmPrivate/%s/sysfs", *local_dir_ptr, app_id);
+    free(appcache_dir);
+    if (sysfs_dir == NULL) {
+      // sysfs has not been localized under this local dir, try the next one
+      free(nm_private_app_dir);
+      continue;
+    }
+    src = make_string("%s/%s", nm_private_app_dir, "app.json");
+    dest = make_string("%s/%s", sysfs_dir, "app.json");
+    free(nm_private_app_dir);
+    // open up the spec file as the node manager user
+    int spec_file = open_file_as_nm(src);
+    if (spec_file == -1) {
+      free(sysfs_dir);
+      free(src);
+      free(dest);
+      continue;
+    }
+
+    delete_path(dest, 0);
+    if (copy_file(spec_file, src, dest, S_IRWXU | S_IRGRP | S_IXGRP) == 0) {
+      result = 0;
+    }
+    free(sysfs_dir);
+    free(src);
+    free(dest);
+    if (result == 0) {
+      break;
+    }
+  }
+  return result;
+}
+
 /**
  * Flatten docker launch command
  */

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d07e873b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/container-executor.h
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/container-executor.h b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/container-executor.h
index 3eb931a..1415830 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/container-executor.h
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/container-executor.h
@@ -32,7 +32,8 @@ enum command {
   SIGNAL_CONTAINER = 2,
   DELETE_AS_USER = 3,
   LAUNCH_DOCKER_CONTAINER = 4,
-  LIST_AS_USER = 5
+  LIST_AS_USER = 5,
+  SYNC_YARN_SYSFS = 6
 };
 
 enum operations {
@@ -49,7 +50,8 @@ enum operations {
   RUN_DOCKER = 11,
   RUN_AS_USER_LIST = 12,
   REMOVE_DOCKER_CONTAINER = 13,
-  INSPECT_DOCKER_CONTAINER = 14
+  INSPECT_DOCKER_CONTAINER = 14,
+  RUN_AS_USER_SYNC_YARN_SYSFS = 15
 };
 
 #define NM_GROUP_KEY "yarn.nodemanager.linux-container-executor.group"
@@ -67,6 +69,7 @@ enum operations {
 #define DOCKER_SUPPORT_ENABLED_KEY "feature.docker.enabled"
 #define TC_SUPPORT_ENABLED_KEY "feature.tc.enabled"
 #define MOUNT_CGROUP_SUPPORT_ENABLED_KEY "feature.mount-cgroup.enabled"
+#define YARN_SYSFS_SUPPORT_ENABLED_KEY "feature.yarn.sysfs.enabled"
 #define TMP_DIR "tmp"
 
 extern struct passwd *user_detail;
@@ -293,6 +296,21 @@ int run_docker_with_pty(const char *command_file);
  */
 int exec_docker_command(char *docker_command, char **argv, int argc);
 
+/** Check if yarn sysfs is enabled in configuration. */
+int is_yarn_sysfs_support_enabled();
+
+/**
+ * Create YARN SysFS
+ */
+int create_yarn_sysfs(const char* user, const char *app_id,
+    const char *container_id, const char *work_dir, char* const* local_dirs);
+
+/**
+ * Sync YARN SysFS
+ */
+int sync_yarn_sysfs(char* const* local_dirs, const char *running_user,
+    const char *end_user, const char *app_id);
+
 /*
  * Compile the regex_str and determine if the input string matches.
  * Return 0 on match, 1 of non-match.

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d07e873b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/main.c
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/main.c b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/main.c
index c269fa4..7b13e7c 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/main.c
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/main.c
@@ -99,11 +99,21 @@ static void display_usage(FILE *stream) {
     fprintf(stream, "\n");
   }
 
-   fprintf(stream,
+  fprintf(stream,
       "            signal container:      %2d container-pid signal\n"
       "            delete as user:        %2d relative-path\n"
       "            list as user:          %2d relative-path\n",
       SIGNAL_CONTAINER, DELETE_AS_USER, LIST_AS_USER);
+
+  if(is_yarn_sysfs_support_enabled()) {
+    fprintf(stream,
+        "            sync yarn sysfs:       %2d app-id nm-local-dirs\n",
+        SYNC_YARN_SYSFS);
+  } else {
+    fprintf(stream,
+        "[DISABLED]  sync yarn sysfs:       %2d app-id nm-local-dirs\n",
+        SYNC_YARN_SYSFS);
+  }
 }
 
 /* Sets up log files for normal/error logging */
@@ -566,6 +576,11 @@ static int validate_run_as_user_commands(int argc, char **argv, int *operation)
     cmd_input.target_dir = argv[optind++];
     *operation = RUN_AS_USER_LIST;
     return 0;
+  case SYNC_YARN_SYSFS:
+    cmd_input.app_id = argv[optind++];
+    cmd_input.local_dirs = argv[optind++];
+    *operation = RUN_AS_USER_SYNC_YARN_SYSFS;
+    return 0;
   default:
     fprintf(ERRORFILE, "Invalid command %d not supported.",command);
     fflush(ERRORFILE);
@@ -723,6 +738,19 @@ int main(int argc, char **argv) {
 
     exit_code = list_as_user(cmd_input.target_dir);
     break;
+  case RUN_AS_USER_SYNC_YARN_SYSFS:
+    exit_code = set_user(cmd_input.run_as_user_name);
+    if (exit_code != 0) {
+      break;
+    }
+    if (is_yarn_sysfs_support_enabled()) {
+      exit_code = sync_yarn_sysfs(split(cmd_input.local_dirs),
+          cmd_input.run_as_user_name, cmd_input.yarn_user_name,
+          cmd_input.app_id);
+    } else {
+      exit_code = FEATURE_DISABLED;
+    }
+    break;
   }
 
   flush_and_close_log_files();

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d07e873b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/utils/string-utils.c
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/utils/string-utils.c b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/utils/string-utils.c
index 80511e5..68857a9 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/utils/string-utils.c
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/utils/string-utils.c
@@ -17,6 +17,9 @@
  */
 #include "util.h"
 
+#include <unistd.h>
+#include <sys/types.h>
+#include <dirent.h>
 #include <limits.h>
 #include <errno.h>
 #include <strings.h>
@@ -180,3 +183,9 @@ char *make_string(const char *fmt, ...) {
   }
   return buf;
 }
+
+int str_ends_with(const char *s, const char *suffix) {
+    size_t slen = strlen(s);
+    size_t suffix_len = strlen(suffix);
+    return suffix_len <= slen && !strcmp(s + slen - suffix_len, suffix);
+}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d07e873b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/utils/string-utils.h
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/utils/string-utils.h b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/utils/string-utils.h
index affb3c3..995cdf3 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/utils/string-utils.h
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/utils/string-utils.h
@@ -38,4 +38,10 @@ int get_numbers_split_by_comma(const char* input, int** numbers, size_t* n_numbe
  * String format utility
  */
 char *make_string(const char *fmt, ...);
+
+/*
+ * Check whether string s ends with the given suffix.
+ * Returns 1 if it does, 0 otherwise.
+ */
+int str_ends_with(const char *s, const char *suffix);
 #endif

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d07e873b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/test/test-container-executor.c
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/test/test-container-executor.c b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/test/test-container-executor.c
index 327e441..437850d 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/test/test-container-executor.c
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/test/test-container-executor.c
@@ -105,6 +105,7 @@ int write_config_file(char *file_name, int banned) {
     fprintf(file, "min.user.id=0\n");
   }
   fprintf(file, "allowed.system.users=allowedUser,daemon\n");
+  fprintf(file, "feature.yarn.sysfs.enabled=1\n");
   fclose(file);
   return 0;
 }
@@ -524,6 +525,63 @@ void test_is_feature_enabled() {
   free_configuration(&exec_cfg);
 }
 
+void test_yarn_sysfs() {
+  char *app_id = "app-1";
+  char *container_id = "container-1";
+  // Test create sysfs without container.
+  int result = create_yarn_sysfs(username, app_id, container_id, "work", local_dirs);
+  if (result == 0) {
+    printf("Should not be able to create yarn sysfs without container directories.\n");
+    exit(1);
+  }
+
+  result = sync_yarn_sysfs(local_dirs, username, username, app_id);
+  if (result == 0) {
+    printf("sync_yarn_sysfs failed.\n");
+    exit(1);
+  }
+
+  // Create container directories and init app.json
+  char* const* local_dir_ptr;
+  for (local_dir_ptr = local_dirs; *local_dir_ptr != 0; ++local_dir_ptr) {
+    char *user_dir = make_string("%s/usercache/%s", *local_dir_ptr, username);
+    if (mkdirs(user_dir, 0750) != 0) {
+      printf("Can not make user directories: %s\n", user_dir);
+      exit(1);
+    }
+    free(user_dir);
+    char *app_dir = make_string("%s/usercache/%s/appcache/%s/%s", *local_dir_ptr, username, app_id);
+    if (mkdirs(app_dir, 0750) != 0) {
+      printf("Can not make app directories: %s\n", app_dir);
+      exit(1);
+    }
+    free(app_dir);
+    // Simulate distributed cache created directory structures.
+    char *cache_dir = make_string("%s/usercache/%s/appcache/%s/filecache/%s/sysfs.tar/sysfs", *local_dir_ptr, username, app_id, container_id);
+    if (mkdirs(cache_dir, 0750) != 0) {
+      printf("Can not make container directories: %s\n", cache_dir);
+      exit(1);
+    }
+    free(cache_dir);
+    char *nm_dir = make_string("%s/nmPrivate/%s/sysfs", *local_dir_ptr, app_id);
+    if (mkdirs(nm_dir, 0750) != 0) {
+      printf("Can not make nmPrivate directories: %s\n", nm_dir);
+      exit(1);
+    }
+    char *sysfs_path = make_string("%s/%s", nm_dir, "app.json");
+    FILE *file = fopen(sysfs_path, "w");
+    if (file == NULL) {
+      printf("Can not create file: %s\n", sysfs_path);
+      exit(1);
+    }
+    fprintf(file, "{}\n");
+    fclose(file);
+    free(sysfs_path);
+    free(nm_dir);
+  }
+
+  result = sync_yarn_sysfs(local_dirs, username, username, app_id);
+  if (result != 0) {
+    printf("sync_yarn_sysfs failed.\n");
+    exit(1);
+  }
+}
+
 void test_delete_user() {
   printf("\nTesting delete_user\n");
   char* app_dir = get_app_directory(TEST_ROOT "/local-1", yarn_username, "app_3");
@@ -1551,6 +1609,9 @@ int main(int argc, char **argv) {
   printf("\nTesting is_feature_enabled()\n");
   test_is_feature_enabled();
 
+  printf("\nTesting yarn sysfs\n");
+  test_yarn_sysfs();
+
   test_check_user(0);
 
   test_cleaning_docker_cgroups();

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d07e873b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestLinuxContainerExecutor.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestLinuxContainerExecutor.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestLinuxContainerExecutor.java
index 856d5ff..c34fb20 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestLinuxContainerExecutor.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestLinuxContainerExecutor.java
@@ -706,6 +706,17 @@ public class TestLinuxContainerExecutor {
     verify(lce, times(1)).execContainer(ctx);
   }
 
+  @Test
+  public void testUpdateYarnSysFS() throws Exception {
+    String user = System.getProperty("user.name");
+    String appId = "app-1";
+    String spec = "";
+    Context ctx = mock(Context.class);
+    LinuxContainerExecutor lce = mock(LinuxContainerExecutor.class);
+    lce.updateYarnSysFS(ctx, user, appId, spec);
+    verify(lce, times(1)).updateYarnSysFS(ctx, user, appId, spec);
+  }
+
   private static class TestResourceHandler implements LCEResourcesHandler {
     static Set<ContainerId> postExecContainers = new HashSet<ContainerId>();
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d07e873b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/monitor/TestContainersMonitorResourceChange.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/monitor/TestContainersMonitorResourceChange.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/monitor/TestContainersMonitorResourceChange.java
index 3d535e9..d00c93b 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/monitor/TestContainersMonitorResourceChange.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/monitor/TestContainersMonitorResourceChange.java
@@ -128,6 +128,10 @@ public class TestContainersMonitorResourceChange {
         throws IOException {
       return true;
     }
+    @Override
+    public void updateYarnSysFS(Context ctx, String user, String appId,
+        String spec) throws IOException {
+    }
   }
 
   private static class MockContainerEventHandler implements

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d07e873b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/DockerContainers.md
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/DockerContainers.md b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/DockerContainers.md
index 17a335e..2d6f867 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/DockerContainers.md
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/DockerContainers.md
@@ -287,6 +287,7 @@ The following properties are optional:
 | `banned.users` | A comma-separated list of usernames who should not be allowed to launch applications. The default setting is: yarn, mapred, hdfs, and bin. |
 | `allowed.system.users` | A comma-separated list of usernames who should be allowed to launch applications even if their UIDs are below the configured minimum. If a user appears in allowed.system.users and banned.users, the user will be considered banned. |
 | `feature.tc.enabled` | Must be "true" or "false". "false" means traffic control commands are disabled. "true" means traffic control commands are allowed. |
+| `feature.yarn.sysfs.enabled` | Must be "true" or "false". "false" means YARN sysfs support is disabled, and is the default. See the Docker Container YARN SysFS Support section below for details. |
 
 Part of a container-executor.cfg which allows Docker containers to be launched is below:
 
@@ -369,6 +370,7 @@ environment variables in the application's environment:
 | `YARN_CONTAINER_RUNTIME_DOCKER_MOUNTS` | Adds additional volume mounts to the Docker container. The value of the environment variable should be a comma-separated list of mounts. All such mounts must be given as `source:dest[:mode]` and the mode must be "ro" (read-only) or "rw" (read-write) to specify the type of access being requested. If neither is specified, read-write will be  assumed. The mode may include a bind propagation option. In that case, the mode should either be of the form `[option]`, `rw+[option]`, or `ro+[option]`. Valid bind propagation options are shared, rshared, slave, rslave, private, and rprivate. The requested mounts will be validated by container-executor based on the values set in container-executor.cfg for `docker.allowed.ro-mounts` and `docker.allowed.rw-mounts`. |
 | `YARN_CONTAINER_RUNTIME_DOCKER_TMPFS_MOUNTS` | Adds additional tmpfs mounts to the Docker container. The value of the environment variable should be a comma-separated list of absolute mount points within the container. |
 | `YARN_CONTAINER_RUNTIME_DOCKER_DELAYED_REMOVAL` | Allows a user to request delayed deletion of the Docker container on a per container basis. If true, Docker containers will not be removed until the duration defined by yarn.nodemanager.delete.debug-delay-sec has elapsed. Administrators can disable this feature through the yarn-site property yarn.nodemanager.runtime.linux.docker.delayed-removal.allowed. This feature is disabled by default. When this feature is disabled or set to false, the container will be removed as soon as it exits. |
+| `YARN_CONTAINER_RUNTIME_YARN_SYSFS_ENABLE` | Enables mounting of the sysfs sub-directory of the container working directory into the Docker container at /hadoop/yarn/sysfs. This is useful for populating cluster information into the container. |
 
 The first two are required. The remainder can be set as needed. While
 controlling the container type through environment variables is somewhat less
@@ -767,3 +769,17 @@ In yarn-env.sh, define:
 ```
 export YARN_CONTAINER_RUNTIME_DOCKER_RUN_OVERRIDE_DISABLE=true
 ```
+
+Docker Container YARN SysFS Support
+-----------------------------------
+
+YARN SysFS is a pseudo file system provided by the YARN framework that
+exports cluster information to Docker containers. The information is
+exported under the /hadoop/yarn/sysfs path. This API allows application
+developers to obtain cluster information without depending on external
+services. A custom application master can populate the cluster
+information by calling the node manager REST API. The YARN service
+framework automatically populates cluster information to
+/hadoop/yarn/sysfs/app.json. For more information about YARN service,
+see: [YARN Service](./yarn-service/Overview.html).
+


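As a quick illustration of the consumer side, here is a minimal sketch of application code running inside a Docker container that reads the published cluster information. It relies only on the /hadoop/yarn/sysfs/app.json path documented above and assumes the container was launched with YARN_CONTAINER_RUNTIME_YARN_SYSFS_ENABLE=true.

```
import java.nio.charset.StandardCharsets;
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.Paths;

public class ReadYarnSysFs {
  public static void main(String[] args) throws Exception {
    // Published by YARN when the container is launched with
    // YARN_CONTAINER_RUNTIME_YARN_SYSFS_ENABLE=true.
    Path appJson = Paths.get("/hadoop/yarn/sysfs/app.json");
    if (!Files.exists(appJson)) {
      System.err.println("YARN sysfs is not mounted or not yet populated");
      return;
    }
    String clusterInfo = new String(Files.readAllBytes(appJson),
        StandardCharsets.UTF_8);
    System.out.println(clusterInfo);
  }
}
```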


[08/50] [abbrv] hadoop git commit: HADOOP-15868. AliyunOSS: update document for properties of multiple part download, multiple part upload and directory copy. Contributed by Jinhu Wu.

Posted by su...@apache.org.
HADOOP-15868. AliyunOSS: update document for properties of multiple part download, multiple part upload and directory copy. Contributed by Jinhu Wu.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/7574d185
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/7574d185
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/7574d185

Branch: refs/heads/HDFS-12943
Commit: 7574d18538e838f40581519080d7c8621c65e53b
Parents: 38a65e3
Author: Sammi Chen <sa...@intel.com>
Authored: Fri Oct 26 15:19:56 2018 +0800
Committer: Sammi Chen <sa...@intel.com>
Committed: Fri Oct 26 15:19:56 2018 +0800

----------------------------------------------------------------------
 .../site/markdown/tools/hadoop-aliyun/index.md  | 36 ++++++++++++++++++++
 1 file changed, 36 insertions(+)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/7574d185/hadoop-tools/hadoop-aliyun/src/site/markdown/tools/hadoop-aliyun/index.md
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-aliyun/src/site/markdown/tools/hadoop-aliyun/index.md b/hadoop-tools/hadoop-aliyun/src/site/markdown/tools/hadoop-aliyun/index.md
index 0703790..0c3131d 100644
--- a/hadoop-tools/hadoop-aliyun/src/site/markdown/tools/hadoop-aliyun/index.md
+++ b/hadoop-tools/hadoop-aliyun/src/site/markdown/tools/hadoop-aliyun/index.md
@@ -229,6 +229,42 @@ please raise your issues with them.
     </property>
 
     <property>
+      <name>fs.oss.upload.active.blocks</name>
+      <value>4</value>
+      <description>The number of active (concurrent) upload blocks when uploading a file.</description>
+    </property>
+
+    <property>
+      <name>fs.oss.multipart.download.threads</name>
+      <value>10</value>
+      <description>The maximum number of threads allowed in the pool for multipart download and upload.</description>
+    </property>
+
+    <property>
+      <name>fs.oss.multipart.download.ahead.part.max.number</name>
+      <value>4</value>
+      <description>The maximum number of read-ahead parts when reading a file.</description>
+    </property>
+
+    <property>
+      <name>fs.oss.max.total.tasks</name>
+      <value>128</value>
+      <description>The maximum number of tasks that can be queued for multipart download and upload.</description>
+    </property>
+
+    <property>
+      <name>fs.oss.max.copy.threads</name>
+      <value>25</value>
+      <description>The maximum number of threads allowed in the pool for copy operations.</description>
+    </property>
+
+    <property>
+      <name>fs.oss.max.copy.tasks.per.dir</name>
+      <value>5</value>
+      <description>The maximum number of concurrent tasks allowed when copying a directory.</description>
+    </property>
+
+    <property>
       <name>fs.oss.multipart.upload.threshold</name>
       <value>20971520</value>
       <description>Minimum size in bytes before we start a multipart uploads or copy.</description>

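The same tuning can also be applied programmatically. The minimal sketch below simply sets the properties documented above on a Hadoop Configuration before obtaining the OSS file system; the bucket URI is a placeholder.

```
import java.net.URI;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;

public class OssTuningSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    // Property names are the ones documented above; the values shown are the defaults.
    conf.setInt("fs.oss.upload.active.blocks", 4);
    conf.setInt("fs.oss.multipart.download.threads", 10);
    conf.setInt("fs.oss.multipart.download.ahead.part.max.number", 4);
    conf.setInt("fs.oss.max.total.tasks", 128);
    conf.setInt("fs.oss.max.copy.threads", 25);
    conf.setInt("fs.oss.max.copy.tasks.per.dir", 5);

    // Placeholder bucket URI; endpoint and credential settings are configured as usual.
    FileSystem fs = FileSystem.get(URI.create("oss://my-bucket/"), conf);
    System.out.println("Working directory: " + fs.getWorkingDirectory());
    fs.close();
  }
}
```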



[37/50] [abbrv] hadoop git commit: YARN-8854. Upgrade jquery datatable version references to v1.10.19. Contributed by Akhil PB.

Posted by su...@apache.org.
YARN-8854. Upgrade jquery datatable version references to v1.10.19. Contributed by Akhil PB.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/d36012b6
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/d36012b6
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/d36012b6

Branch: refs/heads/HDFS-12943
Commit: d36012b69f01c9ddfd2e95545d1f5e1fbc1c3236
Parents: 62d98ca
Author: Sunil G <su...@apache.org>
Authored: Tue Oct 30 22:56:13 2018 +0530
Committer: Sunil G <su...@apache.org>
Committed: Tue Oct 30 22:56:46 2018 +0530

----------------------------------------------------------------------
 LICENSE.txt                                     |   2 +-
 .../hadoop-yarn/hadoop-yarn-common/pom.xml      |  10 +-
 .../hadoop/yarn/webapp/view/JQueryUI.java       |   6 +-
 .../static/dt-1.10.18/css/custom_datatable.css  |  68 +++
 .../webapps/static/dt-1.10.18/css/demo_page.css | 108 ++++
 .../static/dt-1.10.18/css/demo_table.css        | 544 +++++++++++++++++++
 .../static/dt-1.10.18/css/jquery.dataTables.css | 466 ++++++++++++++++
 .../webapps/static/dt-1.10.18/css/jui-dt.css    | 352 ++++++++++++
 .../static/dt-1.10.18/images/Sorting icons.psd  | Bin 0 -> 27490 bytes
 .../static/dt-1.10.18/images/back_disabled.jpg  | Bin 0 -> 612 bytes
 .../static/dt-1.10.18/images/back_enabled.jpg   | Bin 0 -> 807 bytes
 .../static/dt-1.10.18/images/favicon.ico        | Bin 0 -> 894 bytes
 .../dt-1.10.18/images/forward_disabled.jpg      | Bin 0 -> 635 bytes
 .../dt-1.10.18/images/forward_enabled.jpg       | Bin 0 -> 852 bytes
 .../static/dt-1.10.18/images/sort_asc.png       | Bin 0 -> 263 bytes
 .../dt-1.10.18/images/sort_asc_disabled.png     | Bin 0 -> 252 bytes
 .../static/dt-1.10.18/images/sort_both.png      | Bin 0 -> 282 bytes
 .../static/dt-1.10.18/images/sort_desc.png      | Bin 0 -> 260 bytes
 .../dt-1.10.18/images/sort_desc_disabled.png    | Bin 0 -> 251 bytes
 .../dt-1.10.18/js/jquery.dataTables.min.js      | 184 +++++++
 .../webapps/static/dt-1.10.7/css/demo_page.css  | 110 ----
 .../webapps/static/dt-1.10.7/css/demo_table.css | 538 ------------------
 .../webapps/static/dt-1.10.7/css/jui-dt.css     | 322 -----------
 .../static/dt-1.10.7/images/Sorting icons.psd   | Bin 27490 -> 0 bytes
 .../static/dt-1.10.7/images/back_disabled.jpg   | Bin 612 -> 0 bytes
 .../static/dt-1.10.7/images/back_enabled.jpg    | Bin 807 -> 0 bytes
 .../webapps/static/dt-1.10.7/images/favicon.ico | Bin 894 -> 0 bytes
 .../dt-1.10.7/images/forward_disabled.jpg       | Bin 635 -> 0 bytes
 .../static/dt-1.10.7/images/forward_enabled.jpg | Bin 852 -> 0 bytes
 .../static/dt-1.10.7/images/sort_asc.png        | Bin 263 -> 0 bytes
 .../dt-1.10.7/images/sort_asc_disabled.png      | Bin 252 -> 0 bytes
 .../static/dt-1.10.7/images/sort_both.png       | Bin 282 -> 0 bytes
 .../static/dt-1.10.7/images/sort_desc.png       | Bin 260 -> 0 bytes
 .../dt-1.10.7/images/sort_desc_disabled.png     | Bin 251 -> 0 bytes
 .../dt-1.10.7/js/jquery.dataTables.min.js       | 160 ------
 35 files changed, 1733 insertions(+), 1137 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/d36012b6/LICENSE.txt
----------------------------------------------------------------------
diff --git a/LICENSE.txt b/LICENSE.txt
index 94c9065..1a97528 100644
--- a/LICENSE.txt
+++ b/LICENSE.txt
@@ -553,7 +553,7 @@ For:
 hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/static/dataTables.bootstrap.js
 hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/static/dataTables.bootstrap.css
 hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/static/jquery.dataTables.min.js
-hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/webapps/static/dt-1.10.7/
+hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/webapps/static/dt-1.10.18/
 --------------------------------------------------------------------------------
 Copyright (C) 2008-2016, SpryMedia Ltd.
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d36012b6/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/pom.xml
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/pom.xml b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/pom.xml
index 133003a..641a5f0 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/pom.xml
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/pom.xml
@@ -237,10 +237,12 @@
             <exclude>src/main/resources/webapps/test/.keep</exclude>
             <exclude>src/main/resources/webapps/proxy/.keep</exclude>
             <exclude>src/main/resources/webapps/node/.keep</exclude>
-            <exclude>src/main/resources/webapps/static/dt-1.10.7/css/jui-dt.css</exclude>
-            <exclude>src/main/resources/webapps/static/dt-1.10.7/css/demo_table.css</exclude>
-            <exclude>src/main/resources/webapps/static/dt-1.10.7/images/Sorting icons.psd</exclude>
-            <exclude>src/main/resources/webapps/static/dt-1.10.7/js/jquery.dataTables.min.js</exclude>
+            <exclude>src/main/resources/webapps/static/dt-1.10.18/css/jquery.dataTables.css</exclude>
+            <exclude>src/main/resources/webapps/static/dt-1.10.18/css/custom_datatable.css</exclude>
+            <exclude>src/main/resources/webapps/static/dt-1.10.18/css/jui-dt.css</exclude>
+            <exclude>src/main/resources/webapps/static/dt-1.10.18/css/demo_table.css</exclude>
+            <exclude>src/main/resources/webapps/static/dt-1.10.18/images/Sorting icons.psd</exclude>
+            <exclude>src/main/resources/webapps/static/dt-1.10.18/js/jquery.dataTables.min.js</exclude>
             <exclude>src/main/resources/webapps/static/jt/jquery.jstree.js</exclude>
             <exclude>src/main/resources/webapps/static/jquery/jquery-ui-1.12.1.custom.min.js</exclude>
             <exclude>src/main/resources/webapps/static/jquery/jquery-3.3.1.min.js</exclude>

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d36012b6/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/view/JQueryUI.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/view/JQueryUI.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/view/JQueryUI.java
index eef33eb..0754c36 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/view/JQueryUI.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/view/JQueryUI.java
@@ -66,10 +66,12 @@ public class JQueryUI extends HtmlBlock {
   @Override
   protected void render(Block html) {
     html.link(root_url("static/jquery/themes-1.9.1/base/jquery-ui.css"))
-        .link(root_url("static/dt-1.10.7/css/jui-dt.css"))
+        .link(root_url("static/dt-1.10.18/css/jquery.dataTables.css"))
+        .link(root_url("static/dt-1.10.18/css/jui-dt.css"))
+        .link(root_url("static/dt-1.10.18/css/custom_datatable.css"))
         .script(root_url("static/jquery/jquery-3.3.1.min.js"))
         .script(root_url("static/jquery/jquery-ui-1.12.1.custom.min.js"))
-        .script(root_url("static/dt-1.10.7/js/jquery.dataTables.min.js"))
+        .script(root_url("static/dt-1.10.18/js/jquery.dataTables.min.js"))
         .script(root_url("static/yarn.dt.plugins.js"))
         .script(root_url("static/dt-sorting/natural.js"))
         .style("#jsnotice { padding: 0.2em; text-align: center; }",

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d36012b6/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/webapps/static/dt-1.10.18/css/custom_datatable.css
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/webapps/static/dt-1.10.18/css/custom_datatable.css b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/webapps/static/dt-1.10.18/css/custom_datatable.css
new file mode 100644
index 0000000..1339d67
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/webapps/static/dt-1.10.18/css/custom_datatable.css
@@ -0,0 +1,68 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/*
+ * Fixing broken datatable styles after
+ * upgrading jquery.dataTables.min.js to v1.10.18
+ */
+
+.dataTables_wrapper {
+  border: 1px solid #aaaaaa;
+}
+
+.dataTables_wrapper .dataTables_length,
+.dataTables_wrapper .dataTables_filter,
+.dataTables_wrapper .dataTables_info,
+.dataTables_wrapper .dataTables_paginate {
+  padding: 5px;
+}
+
+.dataTables_wrapper .dataTables_length,
+.dataTables_wrapper .dataTables_filter {
+  width: 50%;
+  height: 27px;
+  box-sizing: border-box;
+  background-color: #cccccc;
+}
+
+.dataTables_wrapper .dataTables_info {
+  color: #222222;
+  padding: 13px 5px;
+}
+
+table.dataTable thead th,
+table.dataTable thead td {
+  color: #555555;
+  border-top: 1px solid #aaaaaa;
+  border-bottom: 1px solid #aaaaaa;
+  border-right: 1px solid #aaaaaa;
+  background-color: #e6e6e6;
+}
+
+table.dataTable thead th:last-of-type,
+table.dataTable thead td:last-of-type {
+  border-right: none;
+}
+
+table.dataTable.no-footer {
+  border-bottom: 1px solid #aaaaaa;
+}
+
+table.dataTable tbody tr.even {
+  background: rgba(0, 0, 255, 0.05);
+}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d36012b6/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/webapps/static/dt-1.10.18/css/demo_page.css
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/webapps/static/dt-1.10.18/css/demo_page.css b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/webapps/static/dt-1.10.18/css/demo_page.css
new file mode 100644
index 0000000..38e4aee
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/webapps/static/dt-1.10.18/css/demo_page.css
@@ -0,0 +1,108 @@
+/**
+* Licensed to the Apache Software Foundation (ASF) under one
+* or more contributor license agreements.  See the NOTICE file
+* distributed with this work for additional information
+* regarding copyright ownership.  The ASF licenses this file
+* to you under the Apache License, Version 2.0 (the
+* "License"); you may not use this file except in compliance
+* with the License.  You may obtain a copy of the License at
+*
+*     http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+*/
+
+/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
+ * General page setup
+ */
+#dt_example {
+  font: 80%/1.45em "Lucida Grande", Verdana, Arial, Helvetica, sans-serif;
+  margin: 0;
+  padding: 0;
+  color: #333;
+  background-color: #fff;
+}
+
+#dt_example #container {
+  width: 800px;
+  margin: 30px auto;
+  padding: 0;
+}
+
+#dt_example #footer {
+  margin: 50px auto 0 auto;
+  padding: 0;
+}
+
+#dt_example #demo {
+  margin: 30px auto 0 auto;
+}
+
+#dt_example .demo_jui {
+  margin: 30px auto 0 auto;
+}
+
+#dt_example .big {
+  font-size: 1.3em;
+  font-weight: bold;
+  line-height: 1.6em;
+  color: #4e6ca3;
+}
+
+#dt_example .spacer {
+  height: 20px;
+  clear: both;
+}
+
+#dt_example .clear {
+  clear: both;
+}
+
+#dt_example pre {
+  padding: 15px;
+  background-color: #f5f5f5;
+  border: 1px solid #cccccc;
+}
+
+#dt_example h1 {
+  margin-top: 2em;
+  font-size: 1.3em;
+  font-weight: normal;
+  line-height: 1.6em;
+  color: #4e6ca3;
+  border-bottom: 1px solid #b0bed9;
+  clear: both;
+}
+
+#dt_example h2 {
+  font-size: 1.2em;
+  font-weight: normal;
+  line-height: 1.6em;
+  color: #4e6ca3;
+  clear: both;
+}
+
+#dt_example a {
+  color: #0063dc;
+  text-decoration: none;
+}
+
+#dt_example a:hover {
+  text-decoration: underline;
+}
+
+#dt_example ul {
+  color: #4e6ca3;
+}
+
+.css_right {
+  float: right;
+}
+
+.css_left {
+  float: left;
+}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d36012b6/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/webapps/static/dt-1.10.18/css/demo_table.css
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/webapps/static/dt-1.10.18/css/demo_table.css b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/webapps/static/dt-1.10.18/css/demo_table.css
new file mode 100644
index 0000000..cb4f021
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/webapps/static/dt-1.10.18/css/demo_table.css
@@ -0,0 +1,544 @@
+/**
+* Licensed to the Apache Software Foundation (ASF) under one
+* or more contributor license agreements.  See the NOTICE file
+* distributed with this work for additional information
+* regarding copyright ownership.  The ASF licenses this file
+* to you under the Apache License, Version 2.0 (the
+* "License"); you may not use this file except in compliance
+* with the License.  You may obtain a copy of the License at
+*
+*     http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+*/
+
+/*
+ *  File:         demo_table.css
+ *  CVS:          $Id$
+ *  Description:  CSS descriptions for DataTables demo pages
+ *  Author:       Allan Jardine
+ *  Created:      Tue May 12 06:47:22 BST 2009
+ *  Modified:     $Date$ by $Author$
+ *  Language:     CSS
+ *  Project:      DataTables
+ *
+ *  Copyright 2009 Allan Jardine. All Rights Reserved.
+ *
+ * ***************************************************************************
+ * DESCRIPTION
+ *
+ * The styles given here are suitable for the demos that are used with the standard DataTables
+ * distribution (see www.datatables.net). You will most likely wish to modify these styles to
+ * meet the layout requirements of your site.
+ *
+ * Common issues:
+ *   'full_numbers' pagination - I use an extra selector on the body tag to ensure that there is
+ *     no conflict between the two pagination types. If you want to use full_numbers pagination
+ *     ensure that you either have "example_alt_pagination" as a body class name, or better yet,
+ *     modify that selector.
+ *   Note that the path used for Images is relative. All images are by default located in
+ *     ../images/ - relative to this CSS file.
+ */
+
+/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
+ * DataTables features
+ */
+
+.dataTables_wrapper {
+  position: relative;
+  min-height: 302px;
+  clear: both;
+  _height: 302px;
+  zoom: 1; /* Feeling sorry for IE */
+}
+
+.dataTables_processing {
+  position: absolute;
+  top: 50%;
+  left: 50%;
+  width: 250px;
+  height: 30px;
+  margin-left: -125px;
+  margin-top: -15px;
+  padding: 14px 0 2px 0;
+  border: 1px solid #ddd;
+  text-align: center;
+  color: #999;
+  font-size: 14px;
+  background-color: white;
+}
+
+.dataTables_length {
+  width: 40%;
+  float: left;
+}
+
+.dataTables_filter {
+  width: 50%;
+  float: right;
+  text-align: right;
+}
+
+.dataTables_info {
+  width: 60%;
+  float: left;
+}
+
+.dataTables_paginate {
+  width: 44px;
+  *width: 50px;
+  float: right;
+  text-align: right;
+}
+
+/* Pagination nested */
+.paginate_disabled_previous,
+.paginate_enabled_previous,
+.paginate_disabled_next,
+.paginate_enabled_next {
+  height: 19px;
+  width: 19px;
+  margin-left: 3px;
+  float: left;
+}
+
+.paginate_disabled_previous {
+  background-image: url("../images/back_disabled.jpg");
+}
+
+.paginate_enabled_previous {
+  background-image: url("../images/back_enabled.jpg");
+}
+
+.paginate_disabled_next {
+  background-image: url("../images/forward_disabled.jpg");
+}
+
+.paginate_enabled_next {
+  background-image: url("../images/forward_enabled.jpg");
+}
+
+/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
+ * DataTables display
+ */
+table.display {
+  margin: 0 auto;
+  clear: both;
+  width: 100%;
+
+  /* Note Firefox 3.5 and before have a bug with border-collapse
+  * ( https://bugzilla.mozilla.org/show%5Fbug.cgi?id=155955 )
+  * border-spacing: 0; is one possible option. Conditional-css.com is
+  * useful for this kind of thing
+  *
+  * Further note IE 6/7 has problems when calculating widths with border width.
+  * It subtracts one px relative to the other browsers from the first column, and
+  * adds one to the end...
+  *
+  * If you want that effect I'd suggest setting a border-top/left on th/td's and
+  * then filling in the gaps with other borders.
+  */
+}
+
+table.display thead th {
+  padding: 3px 18px 3px 10px;
+  border-bottom: 1px solid black;
+  font-weight: bold;
+  cursor: pointer;
+  *cursor: hand;
+}
+
+table.display tfoot th {
+  padding: 3px 18px 3px 10px;
+  border-top: 1px solid black;
+  font-weight: bold;
+}
+
+table.display tr.heading2 td {
+  border-bottom: 1px solid #aaa;
+}
+
+table.display td {
+  padding: 3px 10px;
+}
+
+table.display td.center {
+  text-align: center;
+}
+
+/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
+ * DataTables sorting
+ */
+
+.sorting_asc {
+  background: url("../images/sort_asc.png") no-repeat center right;
+}
+
+.sorting_desc {
+  background: url("../images/sort_desc.png") no-repeat center right;
+}
+
+.sorting {
+  background: url("../images/sort_both.png") no-repeat center right;
+}
+
+.sorting_asc_disabled {
+  background: url("../images/sort_asc_disabled.png") no-repeat center right;
+}
+
+.sorting_desc_disabled {
+  background: url("../images/sort_desc_disabled.png") no-repeat center right;
+}
+
+/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
+ * DataTables row classes
+ */
+table.display tr.odd.gradeA {
+  background-color: #ddffdd;
+}
+
+table.display tr.even.gradeA {
+  background-color: #eeffee;
+}
+
+table.display tr.odd.gradeC {
+  background-color: #ddddff;
+}
+
+table.display tr.even.gradeC {
+  background-color: #eeeeff;
+}
+
+table.display tr.odd.gradeX {
+  background-color: #ffdddd;
+}
+
+table.display tr.even.gradeX {
+  background-color: #ffeeee;
+}
+
+table.display tr.odd.gradeU {
+  background-color: #ddd;
+}
+
+table.display tr.even.gradeU {
+  background-color: #eee;
+}
+
+tr.odd {
+  background-color: #e2e4ff;
+}
+
+tr.even {
+  background-color: white;
+}
+
+/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
+ * Misc
+ */
+.dataTables_scroll {
+  clear: both;
+}
+
+.dataTables_scrollBody {
+  *margin-top: -1px;
+}
+
+.top,
+.bottom {
+  padding: 15px;
+  background-color: #f5f5f5;
+  border: 1px solid #cccccc;
+}
+
+.top .dataTables_info {
+  float: none;
+}
+
+.clear {
+  clear: both;
+}
+
+.dataTables_empty {
+  text-align: center;
+}
+
+tfoot input {
+  margin: 0.5em 0;
+  width: 100%;
+  color: #444;
+}
+
+tfoot input.search_init {
+  color: #999;
+}
+
+td.group {
+  background-color: #d1cfd0;
+  border-bottom: 2px solid #a19b9e;
+  border-top: 2px solid #a19b9e;
+}
+
+td.details {
+  background-color: #d1cfd0;
+  border: 2px solid #a19b9e;
+}
+
+.example_alt_pagination div.dataTables_info {
+  width: 40%;
+}
+
+.paging_full_numbers {
+  width: 400px;
+  height: 22px;
+  line-height: 22px;
+}
+
+.paging_full_numbers span.paginate_button,
+.paging_full_numbers span.paginate_active {
+  border: 1px solid #aaa;
+  -webkit-border-radius: 5px;
+  -moz-border-radius: 5px;
+  padding: 2px 5px;
+  margin: 0 3px;
+  cursor: pointer;
+  *cursor: hand;
+}
+
+.paging_full_numbers span.paginate_button {
+  background-color: #ddd;
+}
+
+.paging_full_numbers span.paginate_button:hover {
+  background-color: #ccc;
+}
+
+.paging_full_numbers span.paginate_active {
+  background-color: #99b3ff;
+}
+
+table.display tr.even.row_selected td {
+  background-color: #b0bed9;
+}
+
+table.display tr.odd.row_selected td {
+  background-color: #9fafd1;
+}
+
+/*
+ * Sorting classes for columns
+ */
+/* For the standard odd/even */
+tr.odd td.sorting_1 {
+  background-color: #d3d6ff;
+}
+
+tr.odd td.sorting_2 {
+  background-color: #dadcff;
+}
+
+tr.odd td.sorting_3 {
+  background-color: #e0e2ff;
+}
+
+tr.even td.sorting_1 {
+  background-color: #eaebff;
+}
+
+tr.even td.sorting_2 {
+  background-color: #f2f3ff;
+}
+
+tr.even td.sorting_3 {
+  background-color: #f9f9ff;
+}
+
+/* For the Conditional-CSS grading rows */
+/*
+  Colour calculations (based off the main row colours)
+  Level 1:
+    dd > c4
+    ee > d5
+  Level 2:
+    dd > d1
+    ee > e2
+ */
+tr.odd.gradeA td.sorting_1 {
+  background-color: #c4ffc4;
+}
+
+tr.odd.gradeA td.sorting_2 {
+  background-color: #d1ffd1;
+}
+
+tr.odd.gradeA td.sorting_3 {
+  background-color: #d1ffd1;
+}
+
+tr.even.gradeA td.sorting_1 {
+  background-color: #d5ffd5;
+}
+
+tr.even.gradeA td.sorting_2 {
+  background-color: #e2ffe2;
+}
+
+tr.even.gradeA td.sorting_3 {
+  background-color: #e2ffe2;
+}
+
+tr.odd.gradeC td.sorting_1 {
+  background-color: #c4c4ff;
+}
+
+tr.odd.gradeC td.sorting_2 {
+  background-color: #d1d1ff;
+}
+
+tr.odd.gradeC td.sorting_3 {
+  background-color: #d1d1ff;
+}
+
+tr.even.gradeC td.sorting_1 {
+  background-color: #d5d5ff;
+}
+
+tr.even.gradeC td.sorting_2 {
+  background-color: #e2e2ff;
+}
+
+tr.even.gradeC td.sorting_3 {
+  background-color: #e2e2ff;
+}
+
+tr.odd.gradeX td.sorting_1 {
+  background-color: #ffc4c4;
+}
+
+tr.odd.gradeX td.sorting_2 {
+  background-color: #ffd1d1;
+}
+
+tr.odd.gradeX td.sorting_3 {
+  background-color: #ffd1d1;
+}
+
+tr.even.gradeX td.sorting_1 {
+  background-color: #ffd5d5;
+}
+
+tr.even.gradeX td.sorting_2 {
+  background-color: #ffe2e2;
+}
+
+tr.even.gradeX td.sorting_3 {
+  background-color: #ffe2e2;
+}
+
+tr.odd.gradeU td.sorting_1 {
+  background-color: #c4c4c4;
+}
+
+tr.odd.gradeU td.sorting_2 {
+  background-color: #d1d1d1;
+}
+
+tr.odd.gradeU td.sorting_3 {
+  background-color: #d1d1d1;
+}
+
+tr.even.gradeU td.sorting_1 {
+  background-color: #d5d5d5;
+}
+
+tr.even.gradeU td.sorting_2 {
+  background-color: #e2e2e2;
+}
+
+tr.even.gradeU td.sorting_3 {
+  background-color: #e2e2e2;
+}
+
+/*
+ * Row highlighting example
+ */
+.ex_highlight #example tbody tr.even:hover,
+#example tbody tr.even td.highlighted {
+  background-color: #ecffb3;
+}
+
+.ex_highlight #example tbody tr.odd:hover,
+#example tbody tr.odd td.highlighted {
+  background-color: #e6ff99;
+}
+
+.ex_highlight_row #example tr.even:hover {
+  background-color: #ecffb3;
+}
+
+.ex_highlight_row #example tr.even:hover td.sorting_1 {
+  background-color: #ddff75;
+}
+
+.ex_highlight_row #example tr.even:hover td.sorting_2 {
+  background-color: #e7ff9e;
+}
+
+.ex_highlight_row #example tr.even:hover td.sorting_3 {
+  background-color: #e2ff89;
+}
+
+.ex_highlight_row #example tr.odd:hover {
+  background-color: #e6ff99;
+}
+
+.ex_highlight_row #example tr.odd:hover td.sorting_1 {
+  background-color: #d6ff5c;
+}
+
+.ex_highlight_row #example tr.odd:hover td.sorting_2 {
+  background-color: #e0ff84;
+}
+
+.ex_highlight_row #example tr.odd:hover td.sorting_3 {
+  background-color: #dbff70;
+}
+
+/*
+ * KeyTable
+ */
+table.KeyTable td {
+  border: 3px solid transparent;
+}
+
+table.KeyTable td.focus {
+  border: 3px solid #3366ff;
+}
+
+table.display tr.gradeA {
+  background-color: #eeffee;
+}
+
+table.display tr.gradeC {
+  background-color: #ddddff;
+}
+
+table.display tr.gradeX {
+  background-color: #ffdddd;
+}
+
+table.display tr.gradeU {
+  background-color: #ddd;
+}
+
+div.box {
+  height: 100px;
+  padding: 10px;
+  overflow: auto;
+  border: 1px solid #8080ff;
+  background-color: #e5e5ff;
+}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d36012b6/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/webapps/static/dt-1.10.18/css/jquery.dataTables.css
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/webapps/static/dt-1.10.18/css/jquery.dataTables.css b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/webapps/static/dt-1.10.18/css/jquery.dataTables.css
new file mode 100644
index 0000000..88bf2f14
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/webapps/static/dt-1.10.18/css/jquery.dataTables.css
@@ -0,0 +1,466 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/*
+ * Table styles
+ */
+table.dataTable {
+  width: 100%;
+  margin: 0 auto;
+  clear: both;
+  border-collapse: separate;
+  border-spacing: 0;
+  /*
+   * Header and footer styles
+   */
+  /*
+   * Body styles
+   */
+}
+table.dataTable thead th,
+table.dataTable tfoot th {
+  font-weight: bold;
+}
+table.dataTable thead th,
+table.dataTable thead td {
+  padding: 10px 18px;
+  border-bottom: 1px solid #111;
+}
+table.dataTable thead th:active,
+table.dataTable thead td:active {
+  outline: none;
+}
+table.dataTable tfoot th,
+table.dataTable tfoot td {
+  padding: 10px 18px 6px 18px;
+  border-top: 1px solid #111;
+}
+table.dataTable thead .sorting,
+table.dataTable thead .sorting_asc,
+table.dataTable thead .sorting_desc,
+table.dataTable thead .sorting_asc_disabled,
+table.dataTable thead .sorting_desc_disabled {
+  cursor: pointer;
+  *cursor: hand;
+  background-repeat: no-repeat;
+  background-position: center right;
+}
+table.dataTable thead .sorting {
+  background-image: url("../images/sort_both.png");
+}
+table.dataTable thead .sorting_asc {
+  background-image: url("../images/sort_asc.png");
+}
+table.dataTable thead .sorting_desc {
+  background-image: url("../images/sort_desc.png");
+}
+table.dataTable thead .sorting_asc_disabled {
+  background-image: url("../images/sort_asc_disabled.png");
+}
+table.dataTable thead .sorting_desc_disabled {
+  background-image: url("../images/sort_desc_disabled.png");
+}
+table.dataTable tbody tr {
+  background-color: #ffffff;
+}
+table.dataTable tbody tr.selected {
+  background-color: #B0BED9;
+}
+table.dataTable tbody th,
+table.dataTable tbody td {
+  padding: 8px 10px;
+}
+table.dataTable.row-border tbody th, table.dataTable.row-border tbody td, table.dataTable.display tbody th, table.dataTable.display tbody td {
+  border-top: 1px solid #ddd;
+}
+table.dataTable.row-border tbody tr:first-child th,
+table.dataTable.row-border tbody tr:first-child td, table.dataTable.display tbody tr:first-child th,
+table.dataTable.display tbody tr:first-child td {
+  border-top: none;
+}
+table.dataTable.cell-border tbody th, table.dataTable.cell-border tbody td {
+  border-top: 1px solid #ddd;
+  border-right: 1px solid #ddd;
+}
+table.dataTable.cell-border tbody tr th:first-child,
+table.dataTable.cell-border tbody tr td:first-child {
+  border-left: 1px solid #ddd;
+}
+table.dataTable.cell-border tbody tr:first-child th,
+table.dataTable.cell-border tbody tr:first-child td {
+  border-top: none;
+}
+table.dataTable.stripe tbody tr.odd, table.dataTable.display tbody tr.odd {
+  background-color: #f9f9f9;
+}
+table.dataTable.stripe tbody tr.odd.selected, table.dataTable.display tbody tr.odd.selected {
+  background-color: #acbad4;
+}
+table.dataTable.hover tbody tr:hover, table.dataTable.display tbody tr:hover {
+  background-color: #f6f6f6;
+}
+table.dataTable.hover tbody tr:hover.selected, table.dataTable.display tbody tr:hover.selected {
+  background-color: #aab7d1;
+}
+table.dataTable.order-column tbody tr > .sorting_1,
+table.dataTable.order-column tbody tr > .sorting_2,
+table.dataTable.order-column tbody tr > .sorting_3, table.dataTable.display tbody tr > .sorting_1,
+table.dataTable.display tbody tr > .sorting_2,
+table.dataTable.display tbody tr > .sorting_3 {
+  background-color: #fafafa;
+}
+table.dataTable.order-column tbody tr.selected > .sorting_1,
+table.dataTable.order-column tbody tr.selected > .sorting_2,
+table.dataTable.order-column tbody tr.selected > .sorting_3, table.dataTable.display tbody tr.selected > .sorting_1,
+table.dataTable.display tbody tr.selected > .sorting_2,
+table.dataTable.display tbody tr.selected > .sorting_3 {
+  background-color: #acbad5;
+}
+table.dataTable.display tbody tr.odd > .sorting_1, table.dataTable.order-column.stripe tbody tr.odd > .sorting_1 {
+  background-color: #f1f1f1;
+}
+table.dataTable.display tbody tr.odd > .sorting_2, table.dataTable.order-column.stripe tbody tr.odd > .sorting_2 {
+  background-color: #f3f3f3;
+}
+table.dataTable.display tbody tr.odd > .sorting_3, table.dataTable.order-column.stripe tbody tr.odd > .sorting_3 {
+  background-color: whitesmoke;
+}
+table.dataTable.display tbody tr.odd.selected > .sorting_1, table.dataTable.order-column.stripe tbody tr.odd.selected > .sorting_1 {
+  background-color: #a6b4cd;
+}
+table.dataTable.display tbody tr.odd.selected > .sorting_2, table.dataTable.order-column.stripe tbody tr.odd.selected > .sorting_2 {
+  background-color: #a8b5cf;
+}
+table.dataTable.display tbody tr.odd.selected > .sorting_3, table.dataTable.order-column.stripe tbody tr.odd.selected > .sorting_3 {
+  background-color: #a9b7d1;
+}
+table.dataTable.display tbody tr.even > .sorting_1, table.dataTable.order-column.stripe tbody tr.even > .sorting_1 {
+  background-color: #fafafa;
+}
+table.dataTable.display tbody tr.even > .sorting_2, table.dataTable.order-column.stripe tbody tr.even > .sorting_2 {
+  background-color: #fcfcfc;
+}
+table.dataTable.display tbody tr.even > .sorting_3, table.dataTable.order-column.stripe tbody tr.even > .sorting_3 {
+  background-color: #fefefe;
+}
+table.dataTable.display tbody tr.even.selected > .sorting_1, table.dataTable.order-column.stripe tbody tr.even.selected > .sorting_1 {
+  background-color: #acbad5;
+}
+table.dataTable.display tbody tr.even.selected > .sorting_2, table.dataTable.order-column.stripe tbody tr.even.selected > .sorting_2 {
+  background-color: #aebcd6;
+}
+table.dataTable.display tbody tr.even.selected > .sorting_3, table.dataTable.order-column.stripe tbody tr.even.selected > .sorting_3 {
+  background-color: #afbdd8;
+}
+table.dataTable.display tbody tr:hover > .sorting_1, table.dataTable.order-column.hover tbody tr:hover > .sorting_1 {
+  background-color: #eaeaea;
+}
+table.dataTable.display tbody tr:hover > .sorting_2, table.dataTable.order-column.hover tbody tr:hover > .sorting_2 {
+  background-color: #ececec;
+}
+table.dataTable.display tbody tr:hover > .sorting_3, table.dataTable.order-column.hover tbody tr:hover > .sorting_3 {
+  background-color: #efefef;
+}
+table.dataTable.display tbody tr:hover.selected > .sorting_1, table.dataTable.order-column.hover tbody tr:hover.selected > .sorting_1 {
+  background-color: #a2aec7;
+}
+table.dataTable.display tbody tr:hover.selected > .sorting_2, table.dataTable.order-column.hover tbody tr:hover.selected > .sorting_2 {
+  background-color: #a3b0c9;
+}
+table.dataTable.display tbody tr:hover.selected > .sorting_3, table.dataTable.order-column.hover tbody tr:hover.selected > .sorting_3 {
+  background-color: #a5b2cb;
+}
+table.dataTable.no-footer {
+  border-bottom: 1px solid #111;
+}
+table.dataTable.nowrap th, table.dataTable.nowrap td {
+  white-space: nowrap;
+}
+table.dataTable.compact thead th,
+table.dataTable.compact thead td {
+  padding: 4px 17px 4px 4px;
+}
+table.dataTable.compact tfoot th,
+table.dataTable.compact tfoot td {
+  padding: 4px;
+}
+table.dataTable.compact tbody th,
+table.dataTable.compact tbody td {
+  padding: 4px;
+}
+table.dataTable th.dt-left,
+table.dataTable td.dt-left {
+  text-align: left;
+}
+table.dataTable th.dt-center,
+table.dataTable td.dt-center,
+table.dataTable td.dataTables_empty {
+  text-align: center;
+}
+table.dataTable th.dt-right,
+table.dataTable td.dt-right {
+  text-align: right;
+}
+table.dataTable th.dt-justify,
+table.dataTable td.dt-justify {
+  text-align: justify;
+}
+table.dataTable th.dt-nowrap,
+table.dataTable td.dt-nowrap {
+  white-space: nowrap;
+}
+table.dataTable thead th.dt-head-left,
+table.dataTable thead td.dt-head-left,
+table.dataTable tfoot th.dt-head-left,
+table.dataTable tfoot td.dt-head-left {
+  text-align: left;
+}
+table.dataTable thead th.dt-head-center,
+table.dataTable thead td.dt-head-center,
+table.dataTable tfoot th.dt-head-center,
+table.dataTable tfoot td.dt-head-center {
+  text-align: center;
+}
+table.dataTable thead th.dt-head-right,
+table.dataTable thead td.dt-head-right,
+table.dataTable tfoot th.dt-head-right,
+table.dataTable tfoot td.dt-head-right {
+  text-align: right;
+}
+table.dataTable thead th.dt-head-justify,
+table.dataTable thead td.dt-head-justify,
+table.dataTable tfoot th.dt-head-justify,
+table.dataTable tfoot td.dt-head-justify {
+  text-align: justify;
+}
+table.dataTable thead th.dt-head-nowrap,
+table.dataTable thead td.dt-head-nowrap,
+table.dataTable tfoot th.dt-head-nowrap,
+table.dataTable tfoot td.dt-head-nowrap {
+  white-space: nowrap;
+}
+table.dataTable tbody th.dt-body-left,
+table.dataTable tbody td.dt-body-left {
+  text-align: left;
+}
+table.dataTable tbody th.dt-body-center,
+table.dataTable tbody td.dt-body-center {
+  text-align: center;
+}
+table.dataTable tbody th.dt-body-right,
+table.dataTable tbody td.dt-body-right {
+  text-align: right;
+}
+table.dataTable tbody th.dt-body-justify,
+table.dataTable tbody td.dt-body-justify {
+  text-align: justify;
+}
+table.dataTable tbody th.dt-body-nowrap,
+table.dataTable tbody td.dt-body-nowrap {
+  white-space: nowrap;
+}
+
+table.dataTable,
+table.dataTable th,
+table.dataTable td {
+  box-sizing: content-box;
+}
+
+/*
+ * Control feature layout
+ */
+.dataTables_wrapper {
+  position: relative;
+  clear: both;
+  *zoom: 1;
+  zoom: 1;
+}
+.dataTables_wrapper .dataTables_length {
+  float: left;
+}
+.dataTables_wrapper .dataTables_filter {
+  float: right;
+  text-align: right;
+}
+.dataTables_wrapper .dataTables_filter input {
+  margin-left: 0.5em;
+}
+.dataTables_wrapper .dataTables_info {
+  clear: both;
+  float: left;
+  padding-top: 0.755em;
+}
+.dataTables_wrapper .dataTables_paginate {
+  float: right;
+  text-align: right;
+  padding-top: 0.25em;
+}
+.dataTables_wrapper .dataTables_paginate .paginate_button {
+  box-sizing: border-box;
+  display: inline-block;
+  min-width: 1.5em;
+  padding: 0.5em 1em;
+  margin-left: 2px;
+  text-align: center;
+  text-decoration: none !important;
+  cursor: pointer;
+  *cursor: hand;
+  color: #333 !important;
+  border: 1px solid transparent;
+  border-radius: 2px;
+}
+.dataTables_wrapper .dataTables_paginate .paginate_button.current, .dataTables_wrapper .dataTables_paginate .paginate_button.current:hover {
+  color: #333 !important;
+  border: 1px solid #979797;
+  background-color: white;
+  background: -webkit-gradient(linear, left top, left bottom, color-stop(0%, white), color-stop(100%, #dcdcdc));
+  /* Chrome,Safari4+ */
+  background: -webkit-linear-gradient(top, white 0%, #dcdcdc 100%);
+  /* Chrome10+,Safari5.1+ */
+  background: -moz-linear-gradient(top, white 0%, #dcdcdc 100%);
+  /* FF3.6+ */
+  background: -ms-linear-gradient(top, white 0%, #dcdcdc 100%);
+  /* IE10+ */
+  background: -o-linear-gradient(top, white 0%, #dcdcdc 100%);
+  /* Opera 11.10+ */
+  background: linear-gradient(to bottom, white 0%, #dcdcdc 100%);
+  /* W3C */
+}
+.dataTables_wrapper .dataTables_paginate .paginate_button.disabled, .dataTables_wrapper .dataTables_paginate .paginate_button.disabled:hover, .dataTables_wrapper .dataTables_paginate .paginate_button.disabled:active {
+  cursor: default;
+  color: #666 !important;
+  border: 1px solid transparent;
+  background: transparent;
+  box-shadow: none;
+}
+.dataTables_wrapper .dataTables_paginate .paginate_button:hover {
+  color: white !important;
+  border: 1px solid #111;
+  background-color: #585858;
+  background: -webkit-gradient(linear, left top, left bottom, color-stop(0%, #585858), color-stop(100%, #111));
+  /* Chrome,Safari4+ */
+  background: -webkit-linear-gradient(top, #585858 0%, #111 100%);
+  /* Chrome10+,Safari5.1+ */
+  background: -moz-linear-gradient(top, #585858 0%, #111 100%);
+  /* FF3.6+ */
+  background: -ms-linear-gradient(top, #585858 0%, #111 100%);
+  /* IE10+ */
+  background: -o-linear-gradient(top, #585858 0%, #111 100%);
+  /* Opera 11.10+ */
+  background: linear-gradient(to bottom, #585858 0%, #111 100%);
+  /* W3C */
+}
+.dataTables_wrapper .dataTables_paginate .paginate_button:active {
+  outline: none;
+  background-color: #2b2b2b;
+  background: -webkit-gradient(linear, left top, left bottom, color-stop(0%, #2b2b2b), color-stop(100%, #0c0c0c));
+  /* Chrome,Safari4+ */
+  background: -webkit-linear-gradient(top, #2b2b2b 0%, #0c0c0c 100%);
+  /* Chrome10+,Safari5.1+ */
+  background: -moz-linear-gradient(top, #2b2b2b 0%, #0c0c0c 100%);
+  /* FF3.6+ */
+  background: -ms-linear-gradient(top, #2b2b2b 0%, #0c0c0c 100%);
+  /* IE10+ */
+  background: -o-linear-gradient(top, #2b2b2b 0%, #0c0c0c 100%);
+  /* Opera 11.10+ */
+  background: linear-gradient(to bottom, #2b2b2b 0%, #0c0c0c 100%);
+  /* W3C */
+  box-shadow: inset 0 0 3px #111;
+}
+.dataTables_wrapper .dataTables_paginate .ellipsis {
+  padding: 0 1em;
+}
+.dataTables_wrapper .dataTables_processing {
+  position: absolute;
+  top: 50%;
+  left: 50%;
+  width: 100%;
+  height: 40px;
+  margin-left: -50%;
+  margin-top: -25px;
+  padding-top: 20px;
+  text-align: center;
+  font-size: 1.2em;
+  background-color: white;
+  background: -webkit-gradient(linear, left top, right top, color-stop(0%, rgba(255, 255, 255, 0)), color-stop(25%, rgba(255, 255, 255, 0.9)), color-stop(75%, rgba(255, 255, 255, 0.9)), color-stop(100%, rgba(255, 255, 255, 0)));
+  background: -webkit-linear-gradient(left, rgba(255, 255, 255, 0) 0%, rgba(255, 255, 255, 0.9) 25%, rgba(255, 255, 255, 0.9) 75%, rgba(255, 255, 255, 0) 100%);
+  background: -moz-linear-gradient(left, rgba(255, 255, 255, 0) 0%, rgba(255, 255, 255, 0.9) 25%, rgba(255, 255, 255, 0.9) 75%, rgba(255, 255, 255, 0) 100%);
+  background: -ms-linear-gradient(left, rgba(255, 255, 255, 0) 0%, rgba(255, 255, 255, 0.9) 25%, rgba(255, 255, 255, 0.9) 75%, rgba(255, 255, 255, 0) 100%);
+  background: -o-linear-gradient(left, rgba(255, 255, 255, 0) 0%, rgba(255, 255, 255, 0.9) 25%, rgba(255, 255, 255, 0.9) 75%, rgba(255, 255, 255, 0) 100%);
+  background: linear-gradient(to right, rgba(255, 255, 255, 0) 0%, rgba(255, 255, 255, 0.9) 25%, rgba(255, 255, 255, 0.9) 75%, rgba(255, 255, 255, 0) 100%);
+}
+.dataTables_wrapper .dataTables_length,
+.dataTables_wrapper .dataTables_filter,
+.dataTables_wrapper .dataTables_info,
+.dataTables_wrapper .dataTables_processing,
+.dataTables_wrapper .dataTables_paginate {
+  color: #333;
+}
+.dataTables_wrapper .dataTables_scroll {
+  clear: both;
+}
+.dataTables_wrapper .dataTables_scroll div.dataTables_scrollBody {
+  *margin-top: -1px;
+  -webkit-overflow-scrolling: touch;
+}
+.dataTables_wrapper .dataTables_scroll div.dataTables_scrollBody > table > thead > tr > th, .dataTables_wrapper .dataTables_scroll div.dataTables_scrollBody > table > thead > tr > td, .dataTables_wrapper .dataTables_scroll div.dataTables_scrollBody > table > tbody > tr > th, .dataTables_wrapper .dataTables_scroll div.dataTables_scrollBody > table > tbody > tr > td {
+  vertical-align: middle;
+}
+.dataTables_wrapper .dataTables_scroll div.dataTables_scrollBody > table > thead > tr > th > div.dataTables_sizing,
+.dataTables_wrapper .dataTables_scroll div.dataTables_scrollBody > table > thead > tr > td > div.dataTables_sizing, .dataTables_wrapper .dataTables_scroll div.dataTables_scrollBody > table > tbody > tr > th > div.dataTables_sizing,
+.dataTables_wrapper .dataTables_scroll div.dataTables_scrollBody > table > tbody > tr > td > div.dataTables_sizing {
+  height: 0;
+  overflow: hidden;
+  margin: 0 !important;
+  padding: 0 !important;
+}
+.dataTables_wrapper.no-footer .dataTables_scrollBody {
+  border-bottom: 1px solid #111;
+}
+.dataTables_wrapper.no-footer div.dataTables_scrollHead table.dataTable,
+.dataTables_wrapper.no-footer div.dataTables_scrollBody > table {
+  border-bottom: none;
+}
+.dataTables_wrapper:after {
+  visibility: hidden;
+  display: block;
+  content: "";
+  clear: both;
+  height: 0;
+}
+
+@media screen and (max-width: 767px) {
+  .dataTables_wrapper .dataTables_info,
+  .dataTables_wrapper .dataTables_paginate {
+    float: none;
+    text-align: center;
+  }
+  .dataTables_wrapper .dataTables_paginate {
+    margin-top: 0.5em;
+  }
+}
+@media screen and (max-width: 640px) {
+  .dataTables_wrapper .dataTables_length,
+  .dataTables_wrapper .dataTables_filter {
+    float: none;
+    text-align: center;
+  }
+  .dataTables_wrapper .dataTables_filter {
+    margin-top: 0.5em;
+  }
+}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d36012b6/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/webapps/static/dt-1.10.18/css/jui-dt.css
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/webapps/static/dt-1.10.18/css/jui-dt.css b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/webapps/static/dt-1.10.18/css/jui-dt.css
new file mode 100644
index 0000000..a81a1ac
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/webapps/static/dt-1.10.18/css/jui-dt.css
@@ -0,0 +1,352 @@
+/**
+* Licensed to the Apache Software Foundation (ASF) under one
+* or more contributor license agreements.  See the NOTICE file
+* distributed with this work for additional information
+* regarding copyright ownership.  The ASF licenses this file
+* to you under the Apache License, Version 2.0 (the
+* "License"); you may not use this file except in compliance
+* with the License.  You may obtain a copy of the License at
+*
+*     http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+*/
+
+/*
+ *  File:         demo_table_jui.css
+ *  CVS:          $Id$
+ *  Description:  CSS descriptions for DataTables demo pages
+ *  Author:       Allan Jardine
+ *  Created:      Tue May 12 06:47:22 BST 2009
+ *  Modified:     $Date$ by $Author$
+ *  Language:     CSS
+ *  Project:      DataTables
+ *
+ *  Copyright 2009 Allan Jardine. All Rights Reserved.
+ *
+ * ***************************************************************************
+ * DESCRIPTION
+ *
+ * The styles given here are suitable for the demos that are used with the standard DataTables
+ * distribution (see www.datatables.net). You will most likely wish to modify these styles to
+ * meet the layout requirements of your site.
+ *
+ * Common issues:
+ *   'full_numbers' pagination - I use an extra selector on the body tag to ensure that there is
+ *     no conflict between the two pagination types. If you want to use full_numbers pagination
+ *     ensure that you either have "example_alt_pagination" as a body class name, or better yet,
+ *     modify that selector.
+ *   Note that the path used for Images is relative. All images are by default located in
+ *     ../images/ - relative to this CSS file.
+ */
+
+/*
+ * jQuery UI specific styling
+ */
+
+.paging_two_button .ui-button {
+  float: left;
+  cursor: pointer;
+  *cursor: hand;
+}
+
+.paging_full_numbers .ui-button {
+  padding: 2px 6px;
+  margin: 0;
+  cursor: pointer;
+  *cursor: hand;
+}
+
+.ui-buttonset .ui-button {
+  margin-right: -0.1em !important;
+}
+
+.paging_full_numbers {
+  width: 350px !important;
+}
+
+.ui-toolbar {
+  padding: 5px;
+}
+
+.dataTables_paginate {
+  width: auto;
+}
+
+.dataTables_info {
+  padding-top: 3px;
+}
+
+table.display thead th {
+  padding: 3px 0px 3px 10px;
+  cursor: pointer;
+  *cursor: hand;
+}
+
+div.dataTables_wrapper .ui-widget-header {
+  font-weight: normal;
+}
+
+/*
+ * Sort arrow icon positioning
+ */
+table.display thead th div.DataTables_sort_wrapper {
+  position: relative;
+  padding-right: 20px;
+  padding-right: 20px;
+}
+
+table.display thead th div.DataTables_sort_wrapper span {
+  position: absolute;
+  top: 50%;
+  margin-top: -8px;
+  right: 0;
+}
+
+/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
+ *
+ * Everything below this line is the same as demo_table.css. This file is
+ * required for 'cleanliness' of the markup
+ *
+ * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
+
+/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
+ * DataTables features
+ */
+
+.dataTables_wrapper {
+  position: relative;
+  min-height: 35px;
+  _height: 35px;
+  clear: both;
+}
+
+.dataTables_processing {
+  position: absolute;
+  top: 0px;
+  left: 50%;
+  width: 250px;
+  margin-left: -125px;
+  border: 1px solid #ddd;
+  text-align: center;
+  color: #999;
+  font-size: 11px;
+  padding: 2px 0;
+}
+
+.dataTables_length {
+  width: 40%;
+  float: left;
+}
+
+.dataTables_filter {
+  width: 50%;
+  float: right;
+  text-align: right;
+}
+
+.dataTables_info {
+  width: 50%;
+  float: left;
+}
+
+.dataTables_paginate {
+  float: right;
+  text-align: right;
+}
+
+/* Pagination nested */
+.paginate_disabled_previous,
+.paginate_enabled_previous,
+.paginate_disabled_next,
+.paginate_enabled_next {
+  height: 19px;
+  width: 19px;
+  margin-left: 3px;
+  float: left;
+}
+
+.paginate_disabled_previous {
+  background-image: url("../images/back_disabled.jpg");
+}
+
+.paginate_enabled_previous {
+  background-image: url("../images/back_enabled.jpg");
+}
+
+.paginate_disabled_next {
+  background-image: url("../images/forward_disabled.jpg");
+}
+
+.paginate_enabled_next {
+  background-image: url("../images/forward_enabled.jpg");
+}
+
+/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
+ * DataTables display
+ */
+table.display {
+  margin: 0 auto;
+  width: 100%;
+  clear: both;
+  border-collapse: collapse;
+}
+
+table.display tfoot th {
+  padding: 3px 0px 3px 10px;
+  font-weight: bold;
+  font-weight: normal;
+}
+
+table.display tr.heading2 td {
+  border-bottom: 1px solid #aaa;
+}
+
+table.display td {
+  padding: 3px 10px;
+}
+
+table.display td.center {
+  text-align: center;
+}
+
+/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
+ * DataTables sorting
+ */
+
+.sorting_asc {
+  background: url("../images/sort_asc.png") no-repeat center right;
+}
+
+.sorting_desc {
+  background: url("../images/sort_desc.png") no-repeat center right;
+}
+
+.sorting {
+  background: url("../images/sort_both.png") no-repeat center right;
+}
+
+/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
+ * Misc
+ */
+.dataTables_scroll {
+  clear: both;
+}
+
+.top,
+.bottom {
+  padding: 15px;
+  background-color: #f5f5f5;
+  border: 1px solid #cccccc;
+}
+
+.top .dataTables_info {
+  float: none;
+}
+
+.clear {
+  clear: both;
+}
+
+.dataTables_empty {
+  text-align: center;
+}
+
+tfoot input {
+  margin: 0.5em 0;
+  width: 100%;
+  color: #444;
+}
+
+tfoot input.search_init {
+  color: #999;
+}
+
+td.group {
+  background-color: #d1cfd0;
+  border-bottom: 2px solid #a19b9e;
+  border-top: 2px solid #a19b9e;
+}
+
+td.details {
+  background-color: #d1cfd0;
+  border: 2px solid #a19b9e;
+}
+
+.example_alt_pagination div.dataTables_info {
+  width: 40%;
+}
+
+.paging_full_numbers span.paginate_button,
+.paging_full_numbers span.paginate_active {
+  border: 1px solid #aaa;
+  -webkit-border-radius: 5px;
+  -moz-border-radius: 5px;
+  padding: 2px 5px;
+  margin: 0 3px;
+  cursor: pointer;
+  *cursor: hand;
+}
+
+.paging_full_numbers span.paginate_button {
+  background-color: #ddd;
+}
+
+.paging_full_numbers span.paginate_button:hover {
+  background-color: #ccc;
+}
+
+.paging_full_numbers span.paginate_active {
+  background-color: #99b3ff;
+}
+
+table.display tr.even.row_selected td {
+  background-color: #b0bed9;
+}
+
+table.display tr.odd.row_selected td {
+  background-color: #9fafd1;
+}
+
+/* Striping */
+tr.odd {
+  background: rgba(255, 255, 255, 0.1);
+}
+tr.even {
+  background: rgba(0, 0, 255, 0.05);
+}
+
+/*
+ * Sorting classes for columns
+ */
+tr.odd td.sorting_1 {
+  background: rgba(0, 0, 0, 0.03);
+}
+tr.odd td.sorting_2 {
+  background: rgba(0, 0, 0, 0.02);
+}
+tr.odd td.sorting_3 {
+  background: rgba(0, 0, 0, 0.02);
+}
+tr.even td.sorting_1 {
+  background: rgba(0, 0, 0, 0.08);
+}
+tr.even td.sorting_2 {
+  background: rgba(0, 0, 0, 0.06);
+}
+tr.even td.sorting_3 {
+  background: rgba(0, 0, 0, 0.06);
+}
+
+.css_left {
+  position: relative;
+  float: left;
+}
+.css_right {
+  position: relative;
+  float: right;
+}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d36012b6/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/webapps/static/dt-1.10.18/images/Sorting icons.psd
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/webapps/static/dt-1.10.18/images/Sorting icons.psd b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/webapps/static/dt-1.10.18/images/Sorting icons.psd
new file mode 100644
index 0000000..53b2e06
Binary files /dev/null and b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/webapps/static/dt-1.10.18/images/Sorting icons.psd differ

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d36012b6/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/webapps/static/dt-1.10.18/images/back_disabled.jpg
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/webapps/static/dt-1.10.18/images/back_disabled.jpg b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/webapps/static/dt-1.10.18/images/back_disabled.jpg
new file mode 100644
index 0000000..1e73a54
Binary files /dev/null and b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/webapps/static/dt-1.10.18/images/back_disabled.jpg differ

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d36012b6/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/webapps/static/dt-1.10.18/images/back_enabled.jpg
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/webapps/static/dt-1.10.18/images/back_enabled.jpg b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/webapps/static/dt-1.10.18/images/back_enabled.jpg
new file mode 100644
index 0000000..a6d764c
Binary files /dev/null and b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/webapps/static/dt-1.10.18/images/back_enabled.jpg differ

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d36012b6/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/webapps/static/dt-1.10.18/images/favicon.ico
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/webapps/static/dt-1.10.18/images/favicon.ico b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/webapps/static/dt-1.10.18/images/favicon.ico
new file mode 100644
index 0000000..6eeaa2a
Binary files /dev/null and b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/webapps/static/dt-1.10.18/images/favicon.ico differ

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d36012b6/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/webapps/static/dt-1.10.18/images/forward_disabled.jpg
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/webapps/static/dt-1.10.18/images/forward_disabled.jpg b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/webapps/static/dt-1.10.18/images/forward_disabled.jpg
new file mode 100644
index 0000000..28a9dc5
Binary files /dev/null and b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/webapps/static/dt-1.10.18/images/forward_disabled.jpg differ

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d36012b6/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/webapps/static/dt-1.10.18/images/forward_enabled.jpg
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/webapps/static/dt-1.10.18/images/forward_enabled.jpg b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/webapps/static/dt-1.10.18/images/forward_enabled.jpg
new file mode 100644
index 0000000..598c075
Binary files /dev/null and b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/webapps/static/dt-1.10.18/images/forward_enabled.jpg differ

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d36012b6/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/webapps/static/dt-1.10.18/images/sort_asc.png
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/webapps/static/dt-1.10.18/images/sort_asc.png b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/webapps/static/dt-1.10.18/images/sort_asc.png
new file mode 100644
index 0000000..a56d0e2
Binary files /dev/null and b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/webapps/static/dt-1.10.18/images/sort_asc.png differ

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d36012b6/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/webapps/static/dt-1.10.18/images/sort_asc_disabled.png
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/webapps/static/dt-1.10.18/images/sort_asc_disabled.png b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/webapps/static/dt-1.10.18/images/sort_asc_disabled.png
new file mode 100644
index 0000000..b7e621e
Binary files /dev/null and b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/webapps/static/dt-1.10.18/images/sort_asc_disabled.png differ

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d36012b6/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/webapps/static/dt-1.10.18/images/sort_both.png
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/webapps/static/dt-1.10.18/images/sort_both.png b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/webapps/static/dt-1.10.18/images/sort_both.png
new file mode 100644
index 0000000..839ac4b
Binary files /dev/null and b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/webapps/static/dt-1.10.18/images/sort_both.png differ

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d36012b6/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/webapps/static/dt-1.10.18/images/sort_desc.png
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/webapps/static/dt-1.10.18/images/sort_desc.png b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/webapps/static/dt-1.10.18/images/sort_desc.png
new file mode 100644
index 0000000..90b2951
Binary files /dev/null and b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/webapps/static/dt-1.10.18/images/sort_desc.png differ

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d36012b6/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/webapps/static/dt-1.10.18/images/sort_desc_disabled.png
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/webapps/static/dt-1.10.18/images/sort_desc_disabled.png b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/webapps/static/dt-1.10.18/images/sort_desc_disabled.png
new file mode 100644
index 0000000..2409653
Binary files /dev/null and b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/webapps/static/dt-1.10.18/images/sort_desc_disabled.png differ




[29/50] [abbrv] hadoop git commit: YARN-8950. Fix compilation issue due to dependency convergence error for hbase.profile=2.0.

Posted by su...@apache.org.
YARN-8950. Fix compilation issue due to dependency convergence error for hbase.profile=2.0.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/4ec4ec69
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/4ec4ec69
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/4ec4ec69

Branch: refs/heads/HDFS-12943
Commit: 4ec4ec69711180d642c5b56cd3d3dbdf44d3c61f
Parents: db7e636
Author: Rohith Sharma K S <ro...@apache.org>
Authored: Tue Oct 30 11:29:58 2018 +0530
Committer: Rohith Sharma K S <ro...@apache.org>
Committed: Tue Oct 30 11:30:08 2018 +0530

----------------------------------------------------------------------
 .../hadoop-yarn-server-timelineservice-hbase-client/pom.xml  | 8 ++++++++
 .../pom.xml                                                  | 8 ++++++++
 2 files changed, 16 insertions(+)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/4ec4ec69/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-client/pom.xml
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-client/pom.xml b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-client/pom.xml
index 86b2158..4225519 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-client/pom.xml
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-client/pom.xml
@@ -160,6 +160,14 @@
           <groupId>org.mortbay.jetty</groupId>
           <artifactId>jetty-sslengine</artifactId>
         </exclusion>
+        <exclusion>
+          <groupId>org.eclipse.jetty</groupId>
+          <artifactId>jetty-security</artifactId>
+        </exclusion>
+        <exclusion>
+          <groupId>org.eclipse.jetty</groupId>
+          <artifactId>jetty-http</artifactId>
+        </exclusion>
       </exclusions>
     </dependency>
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/4ec4ec69/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-server/hadoop-yarn-server-timelineservice-hbase-server-2/pom.xml
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-server/hadoop-yarn-server-timelineservice-hbase-server-2/pom.xml b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-server/hadoop-yarn-server-timelineservice-hbase-server-2/pom.xml
index 4fde40c..984cac9 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-server/hadoop-yarn-server-timelineservice-hbase-server-2/pom.xml
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-server/hadoop-yarn-server-timelineservice-hbase-server-2/pom.xml
@@ -147,6 +147,14 @@
               <groupId>org.mortbay.jetty</groupId>
               <artifactId>jetty-sslengine</artifactId>
             </exclusion>
+            <exclusion>
+              <groupId>org.eclipse.jetty</groupId>
+              <artifactId>jetty-security</artifactId>
+            </exclusion>
+            <exclusion>
+              <groupId>org.eclipse.jetty</groupId>
+              <artifactId>jetty-http</artifactId>
+            </exclusion>
           </exclusions>
         </dependency>
       </dependencies>
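The two new exclusions drop jetty-http and jetty-security, which the HBase 2.0 artifacts appear to pull in transitively alongside the old org.mortbay.jetty jars, so the dependency convergence check no longer sees competing Jetty versions; running mvn dependency:tree -Dverbose on the affected modules is one way to inspect what still converges. The following is a minimal, hypothetical sketch (the class name and argument handling are made up, not part of this patch) for confirming at runtime which jar actually serves a given Jetty class once the build passes:

import java.net.URL;
import java.security.CodeSource;

// Hypothetical helper, not part of the patch: prints which jar a Jetty class
// is loaded from, to verify that only one Jetty version remains on the
// runtime classpath after the exclusions above.
public final class JettyProvenanceCheck {
  public static void main(String[] args) throws ClassNotFoundException {
    String className = args.length > 0 ? args[0] : "org.eclipse.jetty.http.HttpField";
    CodeSource source = Class.forName(className).getProtectionDomain().getCodeSource();
    URL location = (source == null) ? null : source.getLocation();
    System.out.println(className + " loaded from: "
        + (location == null ? "<bootstrap or unknown>" : location));
  }
}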




[25/50] [abbrv] hadoop git commit: HDDS-727. ozone.log is not getting created in logs directory. Contributed by Nilotpal Nandi.

Posted by su...@apache.org.
HDDS-727. ozone.log is not getting created in logs directory. Contributed by Nilotpal Nandi.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/a58048e8
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/a58048e8
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/a58048e8

Branch: refs/heads/HDFS-12943
Commit: a58048e8d545534e5b7f32e747df5a5f598889e2
Parents: 902345d
Author: Arpit Agarwal <ar...@apache.org>
Authored: Mon Oct 29 09:35:18 2018 -0700
Committer: Arpit Agarwal <ar...@apache.org>
Committed: Mon Oct 29 09:35:18 2018 -0700

----------------------------------------------------------------------
 .../common/src/main/conf/log4j.properties       | 157 -------------------
 .../src/main/conf/om-audit-log4j2.properties    |  90 -----------
 .../common/src/main/conf/ozone-site.xml         |  24 ---
 .../dist/dev-support/bin/dist-layout-stitching  |   4 +-
 .../dist/src/main/conf/log4j.properties         | 157 +++++++++++++++++++
 .../src/main/conf/om-audit-log4j2.properties    |  90 +++++++++++
 hadoop-ozone/dist/src/main/conf/ozone-site.xml  |  24 +++
 7 files changed, 274 insertions(+), 272 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/a58048e8/hadoop-hdds/common/src/main/conf/log4j.properties
----------------------------------------------------------------------
diff --git a/hadoop-hdds/common/src/main/conf/log4j.properties b/hadoop-hdds/common/src/main/conf/log4j.properties
deleted file mode 100644
index 663e254..0000000
--- a/hadoop-hdds/common/src/main/conf/log4j.properties
+++ /dev/null
@@ -1,157 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-# Define some default values that can be overridden by system properties
-hadoop.root.logger=INFO,console
-hadoop.log.dir=.
-hadoop.log.file=hadoop.log
-
-# Define the root logger to the system property "hadoop.root.logger".
-log4j.rootLogger=${hadoop.root.logger}, EventCounter
-
-# Logging Threshold
-log4j.threshold=ALL
-
-# Null Appender
-log4j.appender.NullAppender=org.apache.log4j.varia.NullAppender
-
-#
-# Rolling File Appender - cap space usage at 5gb.
-#
-hadoop.log.maxfilesize=256MB
-hadoop.log.maxbackupindex=20
-log4j.appender.RFA=org.apache.log4j.RollingFileAppender
-log4j.appender.RFA.File=${hadoop.log.dir}/${hadoop.log.file}
-
-log4j.appender.RFA.MaxFileSize=${hadoop.log.maxfilesize}
-log4j.appender.RFA.MaxBackupIndex=${hadoop.log.maxbackupindex}
-
-log4j.appender.RFA.layout=org.apache.log4j.PatternLayout
-
-# Pattern format: Date LogLevel LoggerName LogMessage
-log4j.appender.RFA.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n
-# Debugging Pattern format
-#log4j.appender.RFA.layout.ConversionPattern=%d{ISO8601} %-5p %c{2} (%F:%M(%L)) - %m%n
-
-
-#
-# Daily Rolling File Appender
-#
-
-log4j.appender.DRFA=org.apache.log4j.DailyRollingFileAppender
-log4j.appender.DRFA.File=${hadoop.log.dir}/${hadoop.log.file}
-
-# Rollover at midnight
-log4j.appender.DRFA.DatePattern=.yyyy-MM-dd
-
-log4j.appender.DRFA.layout=org.apache.log4j.PatternLayout
-
-# Pattern format: Date LogLevel LoggerName LogMessage
-log4j.appender.DRFA.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n
-# Debugging Pattern format
-#log4j.appender.DRFA.layout.ConversionPattern=%d{ISO8601} %-5p %c{2} (%F:%M(%L)) - %m%n
-
-
-#
-# console
-# Add "console" to rootlogger above if you want to use this
-#
-
-log4j.appender.console=org.apache.log4j.ConsoleAppender
-log4j.appender.console.target=System.err
-log4j.appender.console.layout=org.apache.log4j.PatternLayout
-log4j.appender.console.layout.ConversionPattern=%d{ISO8601} %p %c{2}: %m%n
-
-#
-# TaskLog Appender
-#
-log4j.appender.TLA=org.apache.hadoop.mapred.TaskLogAppender
-
-log4j.appender.TLA.layout=org.apache.log4j.PatternLayout
-log4j.appender.TLA.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n
-
-#
-# HDFS block state change log from block manager
-#
-# Uncomment the following to log normal block state change
-# messages from BlockManager in NameNode.
-#log4j.logger.BlockStateChange=DEBUG
-
-#
-#Security appender
-#
-hadoop.security.logger=INFO,NullAppender
-hadoop.security.log.maxfilesize=256MB
-hadoop.security.log.maxbackupindex=20
-log4j.category.SecurityLogger=${hadoop.security.logger}
-hadoop.security.log.file=SecurityAuth-${user.name}.audit
-log4j.appender.RFAS=org.apache.log4j.RollingFileAppender
-log4j.appender.RFAS.File=${hadoop.log.dir}/${hadoop.security.log.file}
-log4j.appender.RFAS.layout=org.apache.log4j.PatternLayout
-log4j.appender.RFAS.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n
-log4j.appender.RFAS.MaxFileSize=${hadoop.security.log.maxfilesize}
-log4j.appender.RFAS.MaxBackupIndex=${hadoop.security.log.maxbackupindex}
-
-#
-# Daily Rolling Security appender
-#
-log4j.appender.DRFAS=org.apache.log4j.DailyRollingFileAppender
-log4j.appender.DRFAS.File=${hadoop.log.dir}/${hadoop.security.log.file}
-log4j.appender.DRFAS.layout=org.apache.log4j.PatternLayout
-log4j.appender.DRFAS.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n
-log4j.appender.DRFAS.DatePattern=.yyyy-MM-dd
-
-
-# Custom Logging levels
-# AWS SDK & S3A FileSystem
-#log4j.logger.com.amazonaws=ERROR
-log4j.logger.com.amazonaws.http.AmazonHttpClient=ERROR
-#log4j.logger.org.apache.hadoop.fs.s3a.S3AFileSystem=WARN
-
-#
-# Event Counter Appender
-# Sends counts of logging messages at different severity levels to Hadoop Metrics.
-#
-log4j.appender.EventCounter=org.apache.hadoop.log.metrics.EventCounter
-
-
-log4j.logger.org.apache.hadoop.ozone=DEBUG,OZONE,FILE
-
-# Do not log into datanode logs. Remove this line to have single log.
-log4j.additivity.org.apache.hadoop.ozone=false
-
-# For development purposes, log both to console and log file.
-log4j.appender.OZONE=org.apache.log4j.ConsoleAppender
-log4j.appender.OZONE.Threshold=info
-log4j.appender.OZONE.layout=org.apache.log4j.PatternLayout
-log4j.appender.OZONE.layout.ConversionPattern=%d{ISO8601} [%t] %-5p \
- %X{component} %X{function} %X{resource} %X{user} %X{request} - %m%n
-
-# Real ozone logger that writes to ozone.log
-log4j.appender.FILE=org.apache.log4j.DailyRollingFileAppender
-log4j.appender.FILE.File=${hadoop.log.dir}/ozone.log
-log4j.appender.FILE.Threshold=debug
-log4j.appender.FILE.layout=org.apache.log4j.PatternLayout
-log4j.appender.FILE.layout.ConversionPattern=%d{ISO8601} [%t] %-5p \
-(%F:%L) %X{function} %X{resource} %X{user} %X{request} - \
-%m%n
-
-# Log levels of third-party libraries
-log4j.logger.org.apache.commons.beanutils=WARN
-
-log4j.logger.org.apache.hadoop.util.NativeCodeLoader=ERROR
-log4j.logger.org.apache.ratis.conf.ConfUtils=WARN
-log4j.logger.org.apache.hadoop.security.ShellBasedUnixGroupsMapping=ERROR

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a58048e8/hadoop-ozone/common/src/main/conf/om-audit-log4j2.properties
----------------------------------------------------------------------
diff --git a/hadoop-ozone/common/src/main/conf/om-audit-log4j2.properties b/hadoop-ozone/common/src/main/conf/om-audit-log4j2.properties
deleted file mode 100644
index 7be51ac..0000000
--- a/hadoop-ozone/common/src/main/conf/om-audit-log4j2.properties
+++ /dev/null
@@ -1,90 +0,0 @@
-#
-# Licensed to the Apache Software Foundation (ASF) under one or more
-# contributor license agreements.  See the NOTICE file distributed with this
-# work for additional information regarding copyright ownership.  The ASF
-# licenses this file to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-# <p>
-# http://www.apache.org/licenses/LICENSE-2.0
-# <p>
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations under
-# the License.
-#
-name=PropertiesConfig
-
-# Checks for config change periodically and reloads
-monitorInterval=30
-
-filter=read,write
-# filter.read.onMatch=DENY avoids logging all READ events
-# filter.read.onMatch=ACCEPT permits logging all READ events
-# The above two settings ignore the log levels in configuration
-# filter.read.onMatch=NEUTRAL permits logging of only those READ events
-# which are attempted at log level equal or greater than log level specified
-# in the configuration
-filter.read.type=MarkerFilter
-filter.read.marker=READ
-filter.read.onMatch=DENY
-filter.read.onMismatch=NEUTRAL
-
-# filter.write.onMatch=DENY avoids logging all WRITE events
-# filter.write.onMatch=ACCEPT permits logging all WRITE events
-# The above two settings ignore the log levels in configuration
-# filter.write.onMatch=NEUTRAL permits logging of only those WRITE events
-# which are attempted at log level equal or greater than log level specified
-# in the configuration
-filter.write.type=MarkerFilter
-filter.write.marker=WRITE
-filter.write.onMatch=NEUTRAL
-filter.write.onMismatch=NEUTRAL
-
-# Log Levels are organized from most specific to least:
-# OFF (most specific, no logging)
-# FATAL (most specific, little data)
-# ERROR
-# WARN
-# INFO
-# DEBUG
-# TRACE (least specific, a lot of data)
-# ALL (least specific, all data)
-
-# Uncomment following section to enable logging to console appender also
-#appenders=console, rolling
-#appender.console.type=Console
-#appender.console.name=STDOUT
-#appender.console.layout.type=PatternLayout
-#appender.console.layout.pattern=%d{DEFAULT} | %-5level | %c{1} | %msg | %throwable{3} %n
-
-# Comment this line when using both console and rolling appenders
-appenders=rolling
-
-#Rolling File Appender with size & time thresholds.
-#Rolling is triggered when either threshold is breached.
-#The rolled over file is compressed by default
-#Time interval is specified in seconds 86400s=1 day
-appender.rolling.type=RollingFile
-appender.rolling.name=RollingFile
-appender.rolling.fileName =${sys:hadoop.log.dir}/om-audit-${hostName}.log
-appender.rolling.filePattern=${sys:hadoop.log.dir}/om-audit-${hostName}-%d{yyyy-MM-dd-HH-mm-ss}-%i.log.gz
-appender.rolling.layout.type=PatternLayout
-appender.rolling.layout.pattern=%d{DEFAULT} | %-5level | %c{1} | %msg | %throwable{3} %n
-appender.rolling.policies.type=Policies
-appender.rolling.policies.time.type=TimeBasedTriggeringPolicy
-appender.rolling.policies.time.interval=86400
-appender.rolling.policies.size.type=SizeBasedTriggeringPolicy
-appender.rolling.policies.size.size=64MB
-
-loggers=audit
-logger.audit.type=AsyncLogger
-logger.audit.name=OMAudit
-logger.audit.level=INFO
-logger.audit.appenderRefs=rolling
-logger.audit.appenderRef.file.ref=RollingFile
-
-rootLogger.level=INFO
-rootLogger.appenderRefs=stdout
-rootLogger.appenderRef.stdout.ref=STDOUT

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a58048e8/hadoop-ozone/common/src/main/conf/ozone-site.xml
----------------------------------------------------------------------
diff --git a/hadoop-ozone/common/src/main/conf/ozone-site.xml b/hadoop-ozone/common/src/main/conf/ozone-site.xml
deleted file mode 100644
index 77dd7ef..0000000
--- a/hadoop-ozone/common/src/main/conf/ozone-site.xml
+++ /dev/null
@@ -1,24 +0,0 @@
-<?xml version="1.0"?>
-<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
-<!--
-   Licensed to the Apache Software Foundation (ASF) under one or more
-   contributor license agreements.  See the NOTICE file distributed with
-   this work for additional information regarding copyright ownership.
-   The ASF licenses this file to You under the Apache License, Version 2.0
-   (the "License"); you may not use this file except in compliance with
-   the License.  You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
--->
-
-<!-- Put site-specific property overrides in this file. -->
-
-<configuration>
-
-</configuration>

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a58048e8/hadoop-ozone/dist/dev-support/bin/dist-layout-stitching
----------------------------------------------------------------------
diff --git a/hadoop-ozone/dist/dev-support/bin/dist-layout-stitching b/hadoop-ozone/dist/dev-support/bin/dist-layout-stitching
index cb6312d..612ee54 100755
--- a/hadoop-ozone/dist/dev-support/bin/dist-layout-stitching
+++ b/hadoop-ozone/dist/dev-support/bin/dist-layout-stitching
@@ -78,7 +78,9 @@ run mkdir -p ./etc
 run mkdir -p ./libexec
 
 run cp -r "${ROOT}/hadoop-common-project/hadoop-common/src/main/conf" "etc/hadoop"
-run cp "${ROOT}/hadoop-ozone/common/src/main/conf/om-audit-log4j2.properties" "etc/hadoop"
+run cp "${ROOT}/hadoop-ozone/dist/src/main/conf/om-audit-log4j2.properties" "etc/hadoop"
+run cp "${ROOT}/hadoop-ozone/dist/src/main/conf/ozone-site.xml" "etc/hadoop"
+run cp -f "${ROOT}/hadoop-ozone/dist/src/main/conf/log4j.properties" "etc/hadoop"
 run cp "${ROOT}/hadoop-common-project/hadoop-common/src/main/bin/hadoop" "bin/"
 run cp "${ROOT}/hadoop-common-project/hadoop-common/src/main/bin/hadoop.cmd" "bin/"
 run cp "${ROOT}/hadoop-ozone/common/src/main/bin/ozone" "bin/"

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a58048e8/hadoop-ozone/dist/src/main/conf/log4j.properties
----------------------------------------------------------------------
diff --git a/hadoop-ozone/dist/src/main/conf/log4j.properties b/hadoop-ozone/dist/src/main/conf/log4j.properties
new file mode 100644
index 0000000..663e254
--- /dev/null
+++ b/hadoop-ozone/dist/src/main/conf/log4j.properties
@@ -0,0 +1,157 @@
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# Define some default values that can be overridden by system properties
+hadoop.root.logger=INFO,console
+hadoop.log.dir=.
+hadoop.log.file=hadoop.log
+
+# Define the root logger to the system property "hadoop.root.logger".
+log4j.rootLogger=${hadoop.root.logger}, EventCounter
+
+# Logging Threshold
+log4j.threshold=ALL
+
+# Null Appender
+log4j.appender.NullAppender=org.apache.log4j.varia.NullAppender
+
+#
+# Rolling File Appender - cap space usage at 5gb.
+#
+hadoop.log.maxfilesize=256MB
+hadoop.log.maxbackupindex=20
+log4j.appender.RFA=org.apache.log4j.RollingFileAppender
+log4j.appender.RFA.File=${hadoop.log.dir}/${hadoop.log.file}
+
+log4j.appender.RFA.MaxFileSize=${hadoop.log.maxfilesize}
+log4j.appender.RFA.MaxBackupIndex=${hadoop.log.maxbackupindex}
+
+log4j.appender.RFA.layout=org.apache.log4j.PatternLayout
+
+# Pattern format: Date LogLevel LoggerName LogMessage
+log4j.appender.RFA.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n
+# Debugging Pattern format
+#log4j.appender.RFA.layout.ConversionPattern=%d{ISO8601} %-5p %c{2} (%F:%M(%L)) - %m%n
+
+
+#
+# Daily Rolling File Appender
+#
+
+log4j.appender.DRFA=org.apache.log4j.DailyRollingFileAppender
+log4j.appender.DRFA.File=${hadoop.log.dir}/${hadoop.log.file}
+
+# Rollover at midnight
+log4j.appender.DRFA.DatePattern=.yyyy-MM-dd
+
+log4j.appender.DRFA.layout=org.apache.log4j.PatternLayout
+
+# Pattern format: Date LogLevel LoggerName LogMessage
+log4j.appender.DRFA.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n
+# Debugging Pattern format
+#log4j.appender.DRFA.layout.ConversionPattern=%d{ISO8601} %-5p %c{2} (%F:%M(%L)) - %m%n
+
+
+#
+# console
+# Add "console" to rootlogger above if you want to use this
+#
+
+log4j.appender.console=org.apache.log4j.ConsoleAppender
+log4j.appender.console.target=System.err
+log4j.appender.console.layout=org.apache.log4j.PatternLayout
+log4j.appender.console.layout.ConversionPattern=%d{ISO8601} %p %c{2}: %m%n
+
+#
+# TaskLog Appender
+#
+log4j.appender.TLA=org.apache.hadoop.mapred.TaskLogAppender
+
+log4j.appender.TLA.layout=org.apache.log4j.PatternLayout
+log4j.appender.TLA.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n
+
+#
+# HDFS block state change log from block manager
+#
+# Uncomment the following to log normal block state change
+# messages from BlockManager in NameNode.
+#log4j.logger.BlockStateChange=DEBUG
+
+#
+#Security appender
+#
+hadoop.security.logger=INFO,NullAppender
+hadoop.security.log.maxfilesize=256MB
+hadoop.security.log.maxbackupindex=20
+log4j.category.SecurityLogger=${hadoop.security.logger}
+hadoop.security.log.file=SecurityAuth-${user.name}.audit
+log4j.appender.RFAS=org.apache.log4j.RollingFileAppender
+log4j.appender.RFAS.File=${hadoop.log.dir}/${hadoop.security.log.file}
+log4j.appender.RFAS.layout=org.apache.log4j.PatternLayout
+log4j.appender.RFAS.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n
+log4j.appender.RFAS.MaxFileSize=${hadoop.security.log.maxfilesize}
+log4j.appender.RFAS.MaxBackupIndex=${hadoop.security.log.maxbackupindex}
+
+#
+# Daily Rolling Security appender
+#
+log4j.appender.DRFAS=org.apache.log4j.DailyRollingFileAppender
+log4j.appender.DRFAS.File=${hadoop.log.dir}/${hadoop.security.log.file}
+log4j.appender.DRFAS.layout=org.apache.log4j.PatternLayout
+log4j.appender.DRFAS.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n
+log4j.appender.DRFAS.DatePattern=.yyyy-MM-dd
+
+
+# Custom Logging levels
+# AWS SDK & S3A FileSystem
+#log4j.logger.com.amazonaws=ERROR
+log4j.logger.com.amazonaws.http.AmazonHttpClient=ERROR
+#log4j.logger.org.apache.hadoop.fs.s3a.S3AFileSystem=WARN
+
+#
+# Event Counter Appender
+# Sends counts of logging messages at different severity levels to Hadoop Metrics.
+#
+log4j.appender.EventCounter=org.apache.hadoop.log.metrics.EventCounter
+
+
+log4j.logger.org.apache.hadoop.ozone=DEBUG,OZONE,FILE
+
+# Do not log into datanode logs. Remove this line to have single log.
+log4j.additivity.org.apache.hadoop.ozone=false
+
+# For development purposes, log both to console and log file.
+log4j.appender.OZONE=org.apache.log4j.ConsoleAppender
+log4j.appender.OZONE.Threshold=info
+log4j.appender.OZONE.layout=org.apache.log4j.PatternLayout
+log4j.appender.OZONE.layout.ConversionPattern=%d{ISO8601} [%t] %-5p \
+ %X{component} %X{function} %X{resource} %X{user} %X{request} - %m%n
+
+# Real ozone logger that writes to ozone.log
+log4j.appender.FILE=org.apache.log4j.DailyRollingFileAppender
+log4j.appender.FILE.File=${hadoop.log.dir}/ozone.log
+log4j.appender.FILE.Threshold=debug
+log4j.appender.FILE.layout=org.apache.log4j.PatternLayout
+log4j.appender.FILE.layout.ConversionPattern=%d{ISO8601} [%t] %-5p \
+(%F:%L) %X{function} %X{resource} %X{user} %X{request} - \
+%m%n
+
+# Log levels of third-party libraries
+log4j.logger.org.apache.commons.beanutils=WARN
+
+log4j.logger.org.apache.hadoop.util.NativeCodeLoader=ERROR
+log4j.logger.org.apache.ratis.conf.ConfUtils=WARN
+log4j.logger.org.apache.hadoop.security.ShellBasedUnixGroupsMapping=ERROR
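With log4j.properties shipped from the dist module again, every logger under org.apache.hadoop.ozone is configured as DEBUG,OZONE,FILE: the OZONE console appender has an info threshold, while the FILE appender writes ${hadoop.log.dir}/ozone.log at debug. A minimal sketch of the resulting routing, assuming log4j 1.x and this properties file are on the classpath and hadoop.log.dir points at the logs directory (the class name below is made up):

import org.apache.log4j.Logger;
import org.apache.log4j.MDC;

// Hypothetical smoke test: any logger named under org.apache.hadoop.ozone
// inherits the DEBUG,OZONE,FILE configuration shown above.
public final class OzoneLogSmokeTest {
  private static final Logger LOG =
      Logger.getLogger("org.apache.hadoop.ozone.OzoneLogSmokeTest");

  public static void main(String[] args) {
    // The OZONE and FILE conversion patterns reference %X{...} keys,
    // which are read from the MDC.
    MDC.put("component", "smoke-test");
    MDC.put("function", "main");

    // DEBUG clears only the FILE threshold, so this line should show up in
    // ${hadoop.log.dir}/ozone.log but not on the console appender.
    LOG.debug("debug message expected only in ozone.log");

    // INFO clears both thresholds, so this line should show up in both.
    LOG.info("info message expected on the console and in ozone.log");
  }
}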

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a58048e8/hadoop-ozone/dist/src/main/conf/om-audit-log4j2.properties
----------------------------------------------------------------------
diff --git a/hadoop-ozone/dist/src/main/conf/om-audit-log4j2.properties b/hadoop-ozone/dist/src/main/conf/om-audit-log4j2.properties
new file mode 100644
index 0000000..7be51ac
--- /dev/null
+++ b/hadoop-ozone/dist/src/main/conf/om-audit-log4j2.properties
@@ -0,0 +1,90 @@
+#
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements.  See the NOTICE file distributed with this
+# work for additional information regarding copyright ownership.  The ASF
+# licenses this file to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+# <p>
+# http://www.apache.org/licenses/LICENSE-2.0
+# <p>
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations under
+# the License.
+#
+name=PropertiesConfig
+
+# Checks for config change periodically and reloads
+monitorInterval=30
+
+filter=read,write
+# filter.read.onMatch=DENY avoids logging all READ events
+# filter.read.onMatch=ACCEPT permits logging all READ events
+# The above two settings ignore the log levels in configuration
+# filter.read.onMatch=NEUTRAL permits logging of only those READ events
+# which are attempted at log level equal or greater than log level specified
+# in the configuration
+filter.read.type=MarkerFilter
+filter.read.marker=READ
+filter.read.onMatch=DENY
+filter.read.onMismatch=NEUTRAL
+
+# filter.write.onMatch=DENY avoids logging all WRITE events
+# filter.write.onMatch=ACCEPT permits logging all WRITE events
+# The above two settings ignore the log levels in configuration
+# filter.write.onMatch=NEUTRAL permits logging of only those WRITE events
+# which are attempted at log level equal or greater than log level specified
+# in the configuration
+filter.write.type=MarkerFilter
+filter.write.marker=WRITE
+filter.write.onMatch=NEUTRAL
+filter.write.onMismatch=NEUTRAL
+
+# Log Levels are organized from most specific to least:
+# OFF (most specific, no logging)
+# FATAL (most specific, little data)
+# ERROR
+# WARN
+# INFO
+# DEBUG
+# TRACE (least specific, a lot of data)
+# ALL (least specific, all data)
+
+# Uncomment following section to enable logging to console appender also
+#appenders=console, rolling
+#appender.console.type=Console
+#appender.console.name=STDOUT
+#appender.console.layout.type=PatternLayout
+#appender.console.layout.pattern=%d{DEFAULT} | %-5level | %c{1} | %msg | %throwable{3} %n
+
+# Comment this line when using both console and rolling appenders
+appenders=rolling
+
+#Rolling File Appender with size & time thresholds.
+#Rolling is triggered when either threshold is breached.
+#The rolled over file is compressed by default
+#Time interval is specified in seconds 86400s=1 day
+appender.rolling.type=RollingFile
+appender.rolling.name=RollingFile
+appender.rolling.fileName =${sys:hadoop.log.dir}/om-audit-${hostName}.log
+appender.rolling.filePattern=${sys:hadoop.log.dir}/om-audit-${hostName}-%d{yyyy-MM-dd-HH-mm-ss}-%i.log.gz
+appender.rolling.layout.type=PatternLayout
+appender.rolling.layout.pattern=%d{DEFAULT} | %-5level | %c{1} | %msg | %throwable{3} %n
+appender.rolling.policies.type=Policies
+appender.rolling.policies.time.type=TimeBasedTriggeringPolicy
+appender.rolling.policies.time.interval=86400
+appender.rolling.policies.size.type=SizeBasedTriggeringPolicy
+appender.rolling.policies.size.size=64MB
+
+loggers=audit
+logger.audit.type=AsyncLogger
+logger.audit.name=OMAudit
+logger.audit.level=INFO
+logger.audit.appenderRefs=rolling
+logger.audit.appenderRef.file.ref=RollingFile
+
+rootLogger.level=INFO
+rootLogger.appenderRefs=stdout
+rootLogger.appenderRef.stdout.ref=STDOUT
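Per the MarkerFilter settings above, READ-marked audit events are denied outright, while WRITE-marked (and unmarked) events at INFO or above flow through the async OMAudit logger into om-audit-<hostname>.log. A small illustrative sketch with the plain log4j2 API, assuming this file is supplied as the log4j2 configuration (for example via -Dlog4j.configurationFile) and that the LMAX disruptor needed by AsyncLogger is on the classpath; the Ozone Manager has its own audit plumbing, so this only demonstrates how the filters behave:

import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.Logger;
import org.apache.logging.log4j.Marker;
import org.apache.logging.log4j.MarkerManager;

// Illustrative use of the OMAudit logger and the READ/WRITE markers that the
// properties above filter on; class name and message format are made up.
public final class AuditMarkerExample {
  private static final Logger AUDIT = LogManager.getLogger("OMAudit");
  private static final Marker READ = MarkerManager.getMarker("READ");
  private static final Marker WRITE = MarkerManager.getMarker("WRITE");

  public static void main(String[] args) {
    // filter.read.onMatch=DENY: this event is dropped before the level check.
    AUDIT.info(READ, "user=scott | op=READ_BUCKET | ret=SUCCESS");

    // WRITE events at INFO or above pass through to om-audit-<hostname>.log.
    AUDIT.info(WRITE, "user=scott | op=CREATE_VOLUME | ret=SUCCESS");
  }
}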

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a58048e8/hadoop-ozone/dist/src/main/conf/ozone-site.xml
----------------------------------------------------------------------
diff --git a/hadoop-ozone/dist/src/main/conf/ozone-site.xml b/hadoop-ozone/dist/src/main/conf/ozone-site.xml
new file mode 100644
index 0000000..77dd7ef
--- /dev/null
+++ b/hadoop-ozone/dist/src/main/conf/ozone-site.xml
@@ -0,0 +1,24 @@
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+<!--
+   Licensed to the Apache Software Foundation (ASF) under one or more
+   contributor license agreements.  See the NOTICE file distributed with
+   this work for additional information regarding copyright ownership.
+   The ASF licenses this file to You under the Apache License, Version 2.0
+   (the "License"); you may not use this file except in compliance with
+   the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+-->
+
+<!-- Put site-specific property overrides in this file. -->
+
+<configuration>
+
+</configuration>
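The new ozone-site.xml is an empty template; site-specific property overrides dropped into it take effect once etc/hadoop is on the classpath of the Ozone processes. A hedged sketch of reading such an override back through the generic Hadoop Configuration API (the key below is only an example and will be unset in a fresh template):

import org.apache.hadoop.conf.Configuration;

// Illustrative only: loads ozone-site.xml from the classpath with the plain
// Hadoop Configuration API and prints one example key.
public final class OzoneSiteProbe {
  public static void main(String[] args) {
    Configuration conf = new Configuration(false); // skip core-default/core-site
    conf.addResource("ozone-site.xml");
    System.out.println("ozone.om.address = " + conf.get("ozone.om.address", "<unset>"));
  }
}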

