Posted to common-commits@hadoop.apache.org by xy...@apache.org on 2018/09/20 22:21:23 UTC

[01/50] [abbrv] hadoop git commit: HDDS-495. Ozone docs and ozonefs packages have undefined hadoop component. Contributed by Dinesh Chitlangia. [Forced Update!]

Repository: hadoop
Updated Branches:
  refs/heads/HDDS-4 7ca014424 -> 5641534e4 (forced update)


HDDS-495. Ozone docs and ozonefs packages have undefined hadoop component. Contributed by Dinesh Chitlangia.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/58963727
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/58963727
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/58963727

Branch: refs/heads/HDDS-4
Commit: 589637276105b0f9b9d5b7f6207f6ad0892f0b28
Parents: 2df0a8d
Author: Bharat Viswanadham <bh...@apache.org>
Authored: Tue Sep 18 13:50:28 2018 -0700
Committer: Bharat Viswanadham <bh...@apache.org>
Committed: Tue Sep 18 13:50:39 2018 -0700

----------------------------------------------------------------------
 hadoop-ozone/docs/pom.xml    | 5 +++++
 hadoop-ozone/ozonefs/pom.xml | 2 ++
 2 files changed, 7 insertions(+)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/58963727/hadoop-ozone/docs/pom.xml
----------------------------------------------------------------------
diff --git a/hadoop-ozone/docs/pom.xml b/hadoop-ozone/docs/pom.xml
index d8edd15..64d0ec8 100644
--- a/hadoop-ozone/docs/pom.xml
+++ b/hadoop-ozone/docs/pom.xml
@@ -28,6 +28,11 @@ http://maven.apache.org/xsd/maven-4.0.0.xsd">
   <name>Apache Hadoop Ozone Documentation</name>
   <packaging>jar</packaging>
 
+  <properties>
+    <hadoop.component>ozone</hadoop.component>
+    <is.hadoop.component>true</is.hadoop.component>
+  </properties>
+
   <dependencies>
 
   </dependencies>

http://git-wip-us.apache.org/repos/asf/hadoop/blob/58963727/hadoop-ozone/ozonefs/pom.xml
----------------------------------------------------------------------
diff --git a/hadoop-ozone/ozonefs/pom.xml b/hadoop-ozone/ozonefs/pom.xml
index 8174b74..c45aacd 100644
--- a/hadoop-ozone/ozonefs/pom.xml
+++ b/hadoop-ozone/ozonefs/pom.xml
@@ -28,6 +28,8 @@
   <properties>
     <file.encoding>UTF-8</file.encoding>
     <downloadSources>true</downloadSources>
+    <hadoop.component>ozone</hadoop.component>
+    <is.hadoop.component>true</is.hadoop.component>
   </properties>
 
   <build>




[24/50] [abbrv] hadoop git commit: YARN-8791. Trim docker inspect output for line feed for STOPSIGNAL parsing. Contributed by Chandni Singh

YARN-8791. Trim docker inspect output for line feed for STOPSIGNAL parsing.
           Contributed by Chandni Singh


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/efdea85a
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/efdea85a
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/efdea85a

Branch: refs/heads/HDDS-4
Commit: efdea85ad1cd4cc5a2a306898dbdb2c14b952d02
Parents: 1824d5d
Author: Eric Yang <ey...@apache.org>
Authored: Wed Sep 19 13:16:11 2018 -0400
Committer: Eric Yang <ey...@apache.org>
Committed: Wed Sep 19 13:16:11 2018 -0400

----------------------------------------------------------------------
 .../linux/runtime/DockerLinuxContainerRuntime.java                 | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/efdea85a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/DockerLinuxContainerRuntime.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/DockerLinuxContainerRuntime.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/DockerLinuxContainerRuntime.java
index 2b53f13..55dfdc3 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/DockerLinuxContainerRuntime.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/DockerLinuxContainerRuntime.java
@@ -1282,7 +1282,7 @@ public class DockerLinuxContainerRuntime implements LinuxContainerRuntime {
             DockerInspectCommand.STATUS_TEMPLATE,
             DockerInspectCommand.STOPSIGNAL_TEMPLATE}, delimiter);
     try {
-      String output = executeDockerInspect(containerId, inspectCommand);
+      String output = executeDockerInspect(containerId, inspectCommand).trim();
 
       if (!output.isEmpty()) {
         String[] statusAndSignal = StringUtils.split(output, delimiter);
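
A one-character fix with a concrete failure behind it: docker inspect terminates its output with a line feed, which would otherwise be carried into the last parsed field and corrupt the STOPSIGNAL value. A self-contained sketch of the problem (the comma delimiter and literal values are illustrative; the actual code splits on a generated delimiter between the STATUS and STOPSIGNAL templates):

public class TrimDemo {
  public static void main(String[] args) {
    // Illustrative values: docker inspect ends its output with '\n'.
    String delimiter = ",";
    String output = "running,SIGTERM\n";

    String[] untrimmed = output.split(delimiter);
    // untrimmed[1] is "SIGTERM\n" -- the line feed corrupts the signal name
    System.out.println(untrimmed[1].equals("SIGTERM"));   // false

    String[] trimmed = output.trim().split(delimiter);
    System.out.println(trimmed[1].equals("SIGTERM"));     // true
  }
}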




[30/50] [abbrv] hadoop git commit: HADOOP-15772. Remove the 'Path ... should be specified as a URI' warnings on startup. Contributed by Ayush Saxena.

HADOOP-15772. Remove the 'Path ... should be specified as a URI' warnings on startup. Contributed by Ayush Saxena.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/2eb597b1
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/2eb597b1
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/2eb597b1

Branch: refs/heads/HDDS-4
Commit: 2eb597b1511f8f46866abe4eeec820f4191cc295
Parents: a30b4f9
Author: Arpit Agarwal <ar...@apache.org>
Authored: Wed Sep 19 13:48:27 2018 -0700
Committer: Arpit Agarwal <ar...@apache.org>
Committed: Wed Sep 19 13:48:27 2018 -0700

----------------------------------------------------------------------
 .../src/main/java/org/apache/hadoop/hdfs/server/common/Util.java  | 3 +--
 1 file changed, 1 insertion(+), 2 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/2eb597b1/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/Util.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/Util.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/Util.java
index 4e30e50..e9f9bfb 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/Util.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/Util.java
@@ -97,8 +97,7 @@ public final class Util {
 
     // if URI is null or scheme is undefined, then assume it's file://
     if(u == null || u.getScheme() == null){
-      LOG.warn("Path " + s + " should be specified as a URI "
-          + "in configuration files. Please update hdfs configuration.");
+      LOG.info("Assuming 'file' scheme for path " + s + " in configuration.");
       u = fileAsURI(new File(s));
     }
     return u;
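
The demoted warning fires for any configured storage directory given as a bare path rather than a URI. A quick sketch of the scheme check the fallback guards (the directory value is hypothetical):

import java.io.File;
import java.net.URI;

public class SchemeFallback {
  public static void main(String[] args) throws Exception {
    String s = "/data/dfs/name";              // hypothetical bare-path config value
    URI u = new URI(s);
    System.out.println(u.getScheme());        // null -> Util assumes the file scheme
    System.out.println(new File(s).toURI());  // e.g. file:/data/dfs/name
  }
}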




[29/50] [abbrv] hadoop git commit: HADOOP-15726. Create utility to limit frequency of log statements. Contributed by Erik Krogen.

HADOOP-15726. Create utility to limit frequency of log statements. Contributed by Erik Krogen.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/a30b4f9e
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/a30b4f9e
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/a30b4f9e

Branch: refs/heads/HDDS-4
Commit: a30b4f9e71cf53f79c38878d6cbe5bbe79bcb277
Parents: 98c9bc4
Author: Chen Liang <cl...@apache.org>
Authored: Wed Sep 19 13:22:37 2018 -0700
Committer: Chen Liang <cl...@apache.org>
Committed: Wed Sep 19 13:22:37 2018 -0700

----------------------------------------------------------------------
 .../apache/hadoop/log/LogThrottlingHelper.java  | 358 +++++++++++++++++++
 .../hadoop/log/TestLogThrottlingHelper.java     | 172 +++++++++
 .../hdfs/server/namenode/FSNamesystemLock.java  |  46 +--
 3 files changed, 547 insertions(+), 29 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/a30b4f9e/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/log/LogThrottlingHelper.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/log/LogThrottlingHelper.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/log/LogThrottlingHelper.java
new file mode 100644
index 0000000..aa4e61c
--- /dev/null
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/log/LogThrottlingHelper.java
@@ -0,0 +1,358 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.log;
+
+import com.google.common.annotations.VisibleForTesting;
+import java.util.HashMap;
+import java.util.Map;
+import org.apache.commons.math3.stat.descriptive.SummaryStatistics;
+import org.apache.hadoop.util.Timer;
+
+/**
+ * This is a class to help easily throttle log statements, so that they will
+ * not be emitted more frequently than a certain rate. It is useful to help
+ * prevent flooding the application logs with redundant messages.
+ *
+ * The instantiator specifies a minimum period at which statements should be
+ * logged. When {@link #record(double...)} is called, if enough time has elapsed
+ * since the last time it was called, the return value will indicate to the
+ * caller that it should write to its actual log. Note that this class does not
+ * write to any actual log; it only records information about how many times
+ * {@code record} has been called and with what arguments, and indicates to the
+ * caller whether or not it should write to its log. If not enough time has yet
+ * elapsed, this class records the arguments and updates its summary
+ * information, and indicates to the caller that it should not log.
+ *
+ * For example, say that you want to log whenever an overly large request is
+ * received, but want to avoid flooding the logs if many such requests arrive.
+ * <pre>{@code
+ *   // Helper with a minimum period of 5 seconds
+ *   private LogThrottlingHelper helper = new LogThrottlingHelper(5000);
+ *
+ *   public void receiveRequest(int requestedObjects) {
+ *     if (requestedObjects > MAXIMUM_REQUEST_SIZE) {
+ *       LogAction logAction = helper.record(requestedObjects);
+ *       if (logAction.shouldLog()) {
+ *         LOG.warn("Received {} large request(s) with a total of {} objects " +
+ *             "requested; maximum objects requested was {}",
+ *             logAction.getCount(), logAction.getStats(0).getSum(),
+ *             logAction.getStats(0).getMax());
+ *       }
+ *     }
+ *   }
+ * }</pre>
+ * The above snippet records every such event but, once they become frequent,
+ * limits their presence in the log to one statement every 5 seconds, while
+ * still maintaining overall information about how many large requests were
+ * received.
+ *
+ * <p/>This class can also be used to coordinate multiple logging points; see
+ * {@link #record(String, long, double...)} for more details.
+ *
+ * <p/>This class is not thread-safe.
+ */
+public class LogThrottlingHelper {
+
+  /**
+   * An indication of what action the caller should take. If
+   * {@link #shouldLog()} is false, no other action should be taken, and it is
+   * an error to try to access any of the summary information. If
+   * {@link #shouldLog()} is true, then the caller should write to its log, and
+   * can use the {@link #getCount()} and {@link #getStats(int)} methods to
+   * determine summary information about what has been recorded into this
+   * helper.
+   *
+   * All summary information in this action only represents
+   * {@link #record(double...)} statements which were called <i>after</i> the
+   * last time the caller logged something; that is, since the last time a log
+   * action was returned with a true value for {@link #shouldLog()}. Information
+   * about the {@link #record(double...)} statement which created this log
+   * action is included.
+   */
+  public interface LogAction {
+
+    /**
+     * Return the number of records encapsulated in this action; that is, the
+     * number of times {@code record} was called to produce this action,
+     * including the current one.
+     */
+    int getCount();
+
+    /**
+     * Return summary information for the value that was recorded at index
+     * {@code idx}. Corresponds to the ordering of values passed to
+     * {@link #record(double...)}.
+     */
+    SummaryStatistics getStats(int idx);
+
+    /**
+     * If this is true, the caller should write to its log. Otherwise, the
+     * caller should take no action, and it is an error to call other methods
+     * on this object.
+     */
+    boolean shouldLog();
+
+  }
+
+  /**
+   * A {@link LogAction} representing a state that should not yet be logged.
+   * If any attempt is made to extract information from this, it will throw
+   * an {@link IllegalStateException}.
+   */
+  public static final LogAction DO_NOT_LOG = new NoLogAction();
+  private static final String DEFAULT_RECORDER_NAME =
+      "__DEFAULT_RECORDER_NAME__";
+
+  /**
+   * This throttler will not trigger log statements more frequently than this
+   * period.
+   */
+  private final long minLogPeriodMs;
+  /**
+   * The name of the recorder treated as the primary; this is the only one which
+   * will trigger logging. Other recorders are dependent on the state of this
+   * recorder. This may be null, in which case a primary has not yet been set.
+   */
+  private String primaryRecorderName;
+  private final Timer timer;
+  private final Map<String, LoggingAction> currentLogs;
+
+  private long lastLogTimestampMs = Long.MIN_VALUE;
+
+  /**
+   * Create a log helper without any primary recorder.
+   *
+   * @see #LogThrottlingHelper(long, String)
+   */
+  public LogThrottlingHelper(long minLogPeriodMs) {
+    this(minLogPeriodMs, null);
+  }
+
+  /**
+   * Create a log helper with a specified primary recorder name; this can be
+   * used in conjunction with {@link #record(String, long, double...)} to set up
+   * primary and dependent recorders. See
+   * {@link #record(String, long, double...)} for more details.
+   *
+   * @param minLogPeriodMs The minimum period with which to log; do not log
+   *                       more frequently than this.
+   * @param primaryRecorderName The name of the primary recorder.
+   */
+  public LogThrottlingHelper(long minLogPeriodMs, String primaryRecorderName) {
+    this(minLogPeriodMs, primaryRecorderName, new Timer());
+  }
+
+  @VisibleForTesting
+  LogThrottlingHelper(long minLogPeriodMs, String primaryRecorderName,
+      Timer timer) {
+    this.minLogPeriodMs = minLogPeriodMs;
+    this.primaryRecorderName = primaryRecorderName;
+    this.timer = timer;
+    this.currentLogs = new HashMap<>();
+  }
+
+  /**
+   * Record some set of values at the current time into this helper. Note that
+   * this does <i>not</i> actually write information to any log. Instead, this
+   * will return a LogAction indicating whether or not the caller should write
+   * to its own log. The LogAction will additionally contain summary information
+   * about the values specified since the last time the caller was expected to
+   * write to its log.
+   *
+   * <p/>Specifying multiple values will maintain separate summary statistics
+   * about each value. For example:
+   * <pre>{@code
+   *   helper.record(1, 0);
+   *   LogAction action = helper.record(3, 100);
+   *   action.getStats(0).getMean(); // == 2
+   *   action.getStats(1).getMean(); // == 50
+   * }</pre>
+   *
+   * @param values The values about which to maintain summary information. Every
+   *               time this method is called, the same number of values must
+   *               be specified.
+   * @return A LogAction indicating whether or not the caller should write to
+   *         its log.
+   */
+  public LogAction record(double... values) {
+    return record(DEFAULT_RECORDER_NAME, timer.monotonicNow(), values);
+  }
+
+  /**
+   * Record some set of values at the specified time into this helper. This can
+   * be useful to avoid fetching the current time twice if the caller has
+   * already done so for other purposes. This additionally allows the caller to
+   * specify a name for this recorder. When multiple names are used, one is
+   * denoted as the primary recorder. Only the recorder named as the primary
+   * can trigger logging; recorders with other names log <i>only</i> when the
+   * primary triggers. This is used to coordinate multiple
+   * logging points. A primary can be set via the
+   * {@link #LogThrottlingHelper(long, String)} constructor. If no primary
+   * is set in the constructor, then the first recorder name used becomes the
+   * primary.
+   *
+   * If multiple names are used, they maintain entirely different sets of values
+   * and summary information. For example:
+   * <pre>{@code
+   *   // Initialize "pre" as the primary recorder name
+   *   LogThrottlingHelper helper = new LogThrottlingHelper(1000, "pre");
+   *   LogAction preLog = helper.record("pre", Time.monotonicNow());
+   *   if (preLog.shouldLog()) {
+   *     // ...
+   *   }
+   *   double eventsProcessed = ... // perform some action
+   *   LogAction postLog =
+   *       helper.record("post", Time.monotonicNow(), eventsProcessed);
+   *   if (postLog.shouldLog()) {
+   *     // ...
+   *     // Can use postLog.getStats(0) to access eventsProcessed information
+   *   }
+   * }</pre>
+   * Since "pre" is the primary recorder name, logging to "pre" will trigger a
+   * log action if enough time has elapsed. This will indicate that "post"
+   * should log as well. This ensures that "post" is always logged in the same
+   * iteration as "pre", yet each one is able to maintain its own summary
+   * information.
+   *
+   * <p/>Other behavior is the same as {@link #record(double...)}.
+   *
+   * @param recorderName The name of the recorder. This is used to check if the
+   *                     current recorder is the primary. Other names are
+   *                     arbitrary and are only used to differentiate between
+   *                     distinct recorders.
+   * @param currentTimeMs The current time.
+   * @param values The values to log.
+   * @return The LogAction for the specified recorder.
+   *
+   * @see #record(double...)
+   */
+  public LogAction record(String recorderName, long currentTimeMs,
+      double... values) {
+    if (primaryRecorderName == null) {
+      primaryRecorderName = recorderName;
+    }
+    LoggingAction currentLog = currentLogs.get(recorderName);
+    if (currentLog == null || currentLog.hasLogged()) {
+      currentLog = new LoggingAction(values.length);
+      if (!currentLogs.containsKey(recorderName)) {
+        // Always log newly created loggers
+        currentLog.setShouldLog();
+      }
+      currentLogs.put(recorderName, currentLog);
+    }
+    currentLog.recordValues(values);
+    if (primaryRecorderName.equals(recorderName) &&
+        currentTimeMs - minLogPeriodMs >= lastLogTimestampMs) {
+      lastLogTimestampMs = currentTimeMs;
+      for (LoggingAction log : currentLogs.values()) {
+        log.setShouldLog();
+      }
+    }
+    if (currentLog.shouldLog()) {
+      currentLog.setHasLogged();
+      return currentLog;
+    } else {
+      return DO_NOT_LOG;
+    }
+  }
+
+  /**
+   * A standard log action which keeps track of all of the values which have
+   * been logged. This is also used for internal bookkeeping via its private
+   * fields and methods; it will maintain whether or not it is ready to be
+   * logged ({@link #shouldLog()}) as well as whether or not it has been
+   * returned for logging yet ({@link #hasLogged()}).
+   */
+  private static class LoggingAction implements LogAction {
+
+    private int count = 0;
+    private final SummaryStatistics[] stats;
+    private boolean shouldLog = false;
+    private boolean hasLogged = false;
+
+    LoggingAction(int valueCount) {
+      stats = new SummaryStatistics[valueCount];
+      for (int i = 0; i < stats.length; i++) {
+        stats[i] = new SummaryStatistics();
+      }
+    }
+
+    public int getCount() {
+      return count;
+    }
+
+    public SummaryStatistics getStats(int idx) {
+      if (idx < 0 || idx >= stats.length) {
+        throw new IllegalArgumentException("Requested stats at idx " + idx +
+            " but this log only maintains " + stats.length + " stats");
+      }
+      return stats[idx];
+    }
+
+    public boolean shouldLog() {
+      return shouldLog;
+    }
+
+    private void setShouldLog() {
+      shouldLog = true;
+    }
+
+    private boolean hasLogged() {
+      return hasLogged;
+    }
+
+    private void setHasLogged() {
+      hasLogged = true;
+    }
+
+    private void recordValues(double... values) {
+      if (values.length != stats.length) {
+        throw new IllegalArgumentException("received " + values.length +
+            " values but expected " + stats.length);
+      }
+      count++;
+      for (int i = 0; i < values.length; i++) {
+        stats[i].addValue(values[i]);
+      }
+    }
+
+  }
+
+  /**
+   * A non-logging action.
+   *
+   * @see #DO_NOT_LOG
+   */
+  private static class NoLogAction implements LogAction {
+
+    public int getCount() {
+      throw new IllegalStateException("Cannot be logged yet!");
+    }
+
+    public SummaryStatistics getStats(int idx) {
+      throw new IllegalStateException("Cannot be logged yet!");
+    }
+
+    public boolean shouldLog() {
+      return false;
+    }
+
+  }
+
+}
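
A minimal, compilable usage sketch of the new helper, using only the API shown in the diff above (the 5-second period, class name, and message are arbitrary, not project code):

import org.apache.hadoop.log.LogThrottlingHelper;
import org.apache.hadoop.log.LogThrottlingHelper.LogAction;

public class ThrottledReporter {
  // Emit at most one statement per 5 seconds.
  private final LogThrottlingHelper helper = new LogThrottlingHelper(5000);

  public void onSlowOperation(long elapsedMs) {
    LogAction action = helper.record(elapsedMs);
    if (action.shouldLog()) {
      // The summary covers every record() since the last emitted statement,
      // including this one.
      System.out.printf("slow operation: %d ms (%d in window, max %.0f ms)%n",
          elapsedMs, action.getCount(), action.getStats(0).getMax());
    }
  }
}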

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a30b4f9e/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/log/TestLogThrottlingHelper.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/log/TestLogThrottlingHelper.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/log/TestLogThrottlingHelper.java
new file mode 100644
index 0000000..a675d0a
--- /dev/null
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/log/TestLogThrottlingHelper.java
@@ -0,0 +1,172 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.log;
+
+import org.apache.hadoop.log.LogThrottlingHelper.LogAction;
+import org.apache.hadoop.util.FakeTimer;
+import org.junit.Before;
+import org.junit.Test;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertTrue;
+
+/**
+ * Tests for {@link LogThrottlingHelper}.
+ */
+public class TestLogThrottlingHelper {
+
+  private static final int LOG_PERIOD = 100;
+
+  private LogThrottlingHelper helper;
+  private FakeTimer timer;
+
+  @Before
+  public void setup() {
+    timer = new FakeTimer();
+    helper = new LogThrottlingHelper(LOG_PERIOD, null, timer);
+  }
+
+  @Test
+  public void testBasicLogging() {
+    assertTrue(helper.record().shouldLog());
+
+    for (int i = 0; i < 5; i++) {
+      timer.advance(LOG_PERIOD / 10);
+      assertFalse(helper.record().shouldLog());
+    }
+    timer.advance(LOG_PERIOD);
+    assertTrue(helper.record().shouldLog());
+  }
+
+  @Test
+  public void testLoggingWithValue() {
+    assertTrue(helper.record(1).shouldLog());
+
+    for (int i = 0; i < 4; i++) {
+      timer.advance(LOG_PERIOD / 5);
+      assertFalse(helper.record(i % 2 == 0 ? 0 : 1).shouldLog());
+    }
+
+    timer.advance(LOG_PERIOD);
+    LogAction action = helper.record(0.5);
+    assertTrue(action.shouldLog());
+    assertEquals(5, action.getCount());
+    assertEquals(0.5, action.getStats(0).getMean(), 0.01);
+    assertEquals(1.0, action.getStats(0).getMax(), 0.01);
+    assertEquals(0.0, action.getStats(0).getMin(), 0.01);
+  }
+
+  @Test
+  public void testLoggingWithMultipleValues() {
+    assertTrue(helper.record(1).shouldLog());
+
+    for (int i = 0; i < 4; i++) {
+      timer.advance(LOG_PERIOD / 5);
+      int base = i % 2 == 0 ? 0 : 1;
+      assertFalse(helper.record(base, base * 2).shouldLog());
+    }
+
+    timer.advance(LOG_PERIOD);
+    LogAction action = helper.record(0.5, 1.0);
+    assertTrue(action.shouldLog());
+    assertEquals(5, action.getCount());
+    for (int i = 1; i <= 2; i++) {
+      assertEquals(0.5 * i, action.getStats(i - 1).getMean(), 0.01);
+      assertEquals(1.0 * i, action.getStats(i - 1).getMax(), 0.01);
+      assertEquals(0.0, action.getStats(i - 1).getMin(), 0.01);
+    }
+  }
+
+  @Test(expected = IllegalArgumentException.class)
+  public void testLoggingWithInconsistentValues() {
+    assertTrue(helper.record(1, 2).shouldLog());
+    helper.record(1, 2);
+    helper.record(1, 2, 3);
+  }
+
+  @Test
+  public void testNamedLoggersWithoutSpecifiedPrimary() {
+    assertTrue(helper.record("foo", 0).shouldLog());
+    assertTrue(helper.record("bar", 0).shouldLog());
+
+    assertFalse(helper.record("foo", LOG_PERIOD / 2).shouldLog());
+    assertFalse(helper.record("bar", LOG_PERIOD / 2).shouldLog());
+
+    assertTrue(helper.record("foo", LOG_PERIOD).shouldLog());
+    assertTrue(helper.record("bar", LOG_PERIOD).shouldLog());
+
+    assertFalse(helper.record("foo", (LOG_PERIOD * 3) / 2).shouldLog());
+    assertFalse(helper.record("bar", (LOG_PERIOD * 3) / 2).shouldLog());
+
+    assertFalse(helper.record("bar", LOG_PERIOD * 2).shouldLog());
+    assertTrue(helper.record("foo", LOG_PERIOD * 2).shouldLog());
+    assertTrue(helper.record("bar", LOG_PERIOD * 2).shouldLog());
+  }
+
+  @Test
+  public void testPrimaryAndDependentLoggers() {
+    helper = new LogThrottlingHelper(LOG_PERIOD, "foo", timer);
+
+    assertTrue(helper.record("foo", 0).shouldLog());
+    assertTrue(helper.record("bar", 0).shouldLog());
+    assertFalse(helper.record("bar", 0).shouldLog());
+    assertFalse(helper.record("foo", 0).shouldLog());
+
+    assertFalse(helper.record("foo", LOG_PERIOD / 2).shouldLog());
+    assertFalse(helper.record("bar", LOG_PERIOD / 2).shouldLog());
+
+    // Both should log once the period has elapsed
+    assertTrue(helper.record("foo", LOG_PERIOD).shouldLog());
+    assertTrue(helper.record("bar", LOG_PERIOD).shouldLog());
+
+    // "bar" should not log yet because "foo" hasn't been triggered
+    assertFalse(helper.record("bar", LOG_PERIOD * 2).shouldLog());
+    assertTrue(helper.record("foo", LOG_PERIOD * 2).shouldLog());
+    // The timing of "bar" shouldn't matter as it is dependent on "foo"
+    assertTrue(helper.record("bar", 0).shouldLog());
+  }
+
+  @Test
+  public void testMultipleLoggersWithValues() {
+    helper = new LogThrottlingHelper(LOG_PERIOD, "foo", timer);
+
+    assertTrue(helper.record("foo", 0).shouldLog());
+    assertTrue(helper.record("bar", 0, 2).shouldLog());
+    assertTrue(helper.record("baz", 0, 3, 3).shouldLog());
+
+    // "bar"/"baz" should not log yet because "foo" hasn't been triggered
+    assertFalse(helper.record("bar", LOG_PERIOD, 2).shouldLog());
+    assertFalse(helper.record("baz", LOG_PERIOD, 3, 3).shouldLog());
+
+    // All should log once the period has elapsed
+    LogAction foo = helper.record("foo", LOG_PERIOD);
+    LogAction bar = helper.record("bar", LOG_PERIOD, 2);
+    LogAction baz = helper.record("baz", LOG_PERIOD, 3, 3);
+    assertTrue(foo.shouldLog());
+    assertTrue(bar.shouldLog());
+    assertTrue(baz.shouldLog());
+    assertEquals(1, foo.getCount());
+    assertEquals(2, bar.getCount());
+    assertEquals(2, baz.getCount());
+    assertEquals(2.0, bar.getStats(0).getMean(), 0.01);
+    assertEquals(3.0, baz.getStats(0).getMean(), 0.01);
+    assertEquals(3.0, baz.getStats(1).getMean(), 0.01);
+  }
+
+}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a30b4f9e/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystemLock.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystemLock.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystemLock.java
index 5992e54..7c28465 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystemLock.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystemLock.java
@@ -26,6 +26,7 @@ import java.util.concurrent.locks.ReentrantReadWriteLock;
 
 import com.google.common.annotations.VisibleForTesting;
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.log.LogThrottlingHelper;
 import org.apache.hadoop.metrics2.lib.MutableRatesWithAggregation;
 import org.apache.hadoop.util.StringUtils;
 import org.apache.hadoop.util.Timer;
@@ -40,6 +41,7 @@ import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_READ_LOCK_REPORT
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_READ_LOCK_REPORTING_THRESHOLD_MS_KEY;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_WRITE_LOCK_REPORTING_THRESHOLD_MS_DEFAULT;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_WRITE_LOCK_REPORTING_THRESHOLD_MS_KEY;
+import static org.apache.hadoop.log.LogThrottlingHelper.LogAction;
 
 /**
  * Mimics a ReentrantReadWriteLock but does not directly implement the interface
@@ -74,11 +76,8 @@ class FSNamesystemLock {
   private final long writeLockReportingThresholdMs;
   /** Last time stamp for write lock. Keep the longest one for multi-entrance.*/
   private long writeLockHeldTimeStampNanos;
-  private int numWriteLockWarningsSuppressed = 0;
-  /** Time stamp (ms) of the last time a write lock report was written. */
-  private long timeStampOfLastWriteLockReportMs = 0;
-  /** Longest time (ms) a write lock was held since the last report. */
-  private long longestWriteLockHeldIntervalMs = 0;
+  /** Frequency limiter used for reporting long write lock hold times. */
+  private final LogThrottlingHelper writeLockReportLogger;
 
   /** Threshold (ms) for long holding read lock report. */
   private final long readLockReportingThresholdMs;
@@ -132,6 +131,8 @@ class FSNamesystemLock {
     this.lockSuppressWarningIntervalMs = conf.getTimeDuration(
         DFS_LOCK_SUPPRESS_WARNING_INTERVAL_KEY,
         DFS_LOCK_SUPPRESS_WARNING_INTERVAL_DEFAULT, TimeUnit.MILLISECONDS);
+    this.writeLockReportLogger =
+        new LogThrottlingHelper(lockSuppressWarningIntervalMs);
     this.metricsEnabled = conf.getBoolean(
         DFS_NAMENODE_LOCK_DETAILED_METRICS_KEY,
         DFS_NAMENODE_LOCK_DETAILED_METRICS_DEFAULT);
@@ -251,25 +252,11 @@ class FSNamesystemLock {
     final long writeLockIntervalMs =
         TimeUnit.NANOSECONDS.toMillis(writeLockIntervalNanos);
 
-    boolean logReport = false;
-    int numSuppressedWarnings = 0;
-    long longestLockHeldIntervalMs = 0;
+    LogAction logAction = LogThrottlingHelper.DO_NOT_LOG;
     if (needReport &&
         writeLockIntervalMs >= this.writeLockReportingThresholdMs) {
-      if (writeLockIntervalMs > longestWriteLockHeldIntervalMs) {
-        longestWriteLockHeldIntervalMs = writeLockIntervalMs;
-      }
-      if (currentTimeMs - timeStampOfLastWriteLockReportMs >
-          this.lockSuppressWarningIntervalMs) {
-        logReport = true;
-        numSuppressedWarnings = numWriteLockWarningsSuppressed;
-        numWriteLockWarningsSuppressed = 0;
-        longestLockHeldIntervalMs = longestWriteLockHeldIntervalMs;
-        longestWriteLockHeldIntervalMs = 0;
-        timeStampOfLastWriteLockReportMs = currentTimeMs;
-      } else {
-        numWriteLockWarningsSuppressed++;
-      }
+      logAction = writeLockReportLogger
+          .record("write", currentTimeMs, writeLockIntervalMs);
     }
 
     coarseLock.writeLock().unlock();
@@ -278,13 +265,14 @@ class FSNamesystemLock {
       addMetric(opName, writeLockIntervalNanos, true);
     }
 
-    if (logReport) {
-      FSNamesystem.LOG.info("FSNamesystem write lock held for " +
-          writeLockIntervalMs + " ms via\n" +
-          StringUtils.getStackTrace(Thread.currentThread()) +
-          "\tNumber of suppressed write-lock reports: " +
-          numSuppressedWarnings + "\n\tLongest write-lock held interval: " +
-          longestLockHeldIntervalMs);
+    if (logAction.shouldLog()) {
+      FSNamesystem.LOG.info("FSNamesystem write lock held for {} ms via {}\t" +
+          "Number of suppressed write-lock reports: {}\n\tLongest write-lock " +
+          "held interval: {} \n\tTotal suppressed write-lock held time: {}",
+          writeLockIntervalMs,
+          StringUtils.getStackTrace(Thread.currentThread()),
+          logAction.getCount() - 1, logAction.getStats(0).getMax(),
+          logAction.getStats(0).getSum() - writeLockIntervalMs);
     }
   }
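
For orientation, the arithmetic in the new log statement: getCount() includes the record that triggered the report, so the suppressed-report count is count - 1, and subtracting the triggering interval from the running sum gives the total suppressed hold time. With hypothetical numbers:

public class SuppressionMath {
  public static void main(String[] args) {
    // Four over-threshold write-lock holds in one suppression window;
    // the fourth (120 ms) triggers the report.
    int count = 4;            // logAction.getCount(): includes the trigger
    double sumMs = 450.0;     // logAction.getStats(0).getSum()
    double currentMs = 120.0; // writeLockIntervalMs of the triggering hold
    System.out.println("suppressed reports: " + (count - 1));         // 3
    System.out.println("suppressed held ms: " + (sumMs - currentMs)); // 330.0
  }
}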
 




[31/50] [abbrv] hadoop git commit: HDDS-513. Check if the EventQueue is not closed before executing handlers. Contributed by Nanda Kumar.

HDDS-513. Check if the EventQueue is not closed before executing handlers. Contributed by Nanda Kumar.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/f6bb1ca3
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/f6bb1ca3
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/f6bb1ca3

Branch: refs/heads/HDDS-4
Commit: f6bb1ca3c16eb5825188b51d45f32be111266d88
Parents: 2eb597b
Author: Bharat Viswanadham <bh...@apache.org>
Authored: Wed Sep 19 14:35:29 2018 -0700
Committer: Bharat Viswanadham <bh...@apache.org>
Committed: Wed Sep 19 14:35:29 2018 -0700

----------------------------------------------------------------------
 .../hadoop/hdds/server/events/EventQueue.java     | 18 ++++++++++++++++++
 1 file changed, 18 insertions(+)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/f6bb1ca3/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/server/events/EventQueue.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/server/events/EventQueue.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/server/events/EventQueue.java
index b2b0df2..9aeab7b 100644
--- a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/server/events/EventQueue.java
+++ b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/server/events/EventQueue.java
@@ -55,6 +55,8 @@ public class EventQueue implements EventPublisher, AutoCloseable {
 
   private final AtomicLong eventCount = new AtomicLong(0);
 
+  private boolean isRunning = true;
+
   public <PAYLOAD, EVENT_TYPE extends Event<PAYLOAD>> void addHandler(
       EVENT_TYPE event, EventHandler<PAYLOAD> handler) {
     this.addHandler(event, handler, generateHandlerName(handler));
@@ -116,6 +118,10 @@ public class EventQueue implements EventPublisher, AutoCloseable {
   public <PAYLOAD, EVENT_TYPE extends Event<PAYLOAD>> void addHandler(
       EVENT_TYPE event, EventExecutor<PAYLOAD> executor,
       EventHandler<PAYLOAD> handler) {
+    if (!isRunning) {
+      LOG.warn("Not adding handler for {}, EventQueue is not running", event);
+      return;
+    }
     validateEvent(event);
     executors.putIfAbsent(event, new HashMap<>());
     executors.get(event).putIfAbsent(executor, new ArrayList<>());
@@ -136,6 +142,11 @@ public class EventQueue implements EventPublisher, AutoCloseable {
   public <PAYLOAD, EVENT_TYPE extends Event<PAYLOAD>> void fireEvent(
       EVENT_TYPE event, PAYLOAD payload) {
 
+    if (!isRunning) {
+      LOG.warn("Processing of {} is skipped, EventQueue is not running", event);
+      return;
+    }
+
     Map<EventExecutor, List<EventHandler>> eventExecutorListMap =
         this.executors.get(event);
 
@@ -187,6 +198,11 @@ public class EventQueue implements EventPublisher, AutoCloseable {
     long currentTime = Time.now();
     while (true) {
 
+      if (!isRunning) {
+        LOG.warn("Processing of event skipped. EventQueue is not running");
+        return;
+      }
+
       long processed = 0;
 
       Stream<EventExecutor> allExecutor = this.executors.values().stream()
@@ -216,6 +232,8 @@ public class EventQueue implements EventPublisher, AutoCloseable {
 
   public void close() {
 
+    isRunning = false;
+
     Set<EventExecutor> allExecutors = this.executors.values().stream()
         .flatMap(handlerMap -> handlerMap.keySet().stream())
         .collect(Collectors.toSet());
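
Condensed, the pattern this patch introduces is a running flag consulted at every entry point and cleared in close(). A sketch under the assumption of an orderly, effectively single-threaded shutdown (the real field is a plain boolean, so the guard is best-effort against concurrent callers):

public class GuardedQueue implements AutoCloseable {
  private boolean isRunning = true;   // plain boolean, as in the patch

  public void fireEvent(String event) {
    if (!isRunning) {
      System.out.println("Processing of " + event
          + " is skipped, queue is not running");
      return;                         // drop the event instead of dispatching
    }
    // ... dispatch to registered handlers ...
  }

  @Override
  public void close() {
    isRunning = false;                // later addHandler/fireEvent calls no-op
    // ... shut down executors ...
  }
}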




[22/50] [abbrv] hadoop git commit: HDFS-13908. TestDataNodeMultipleRegistrations is flaky. Contributed by Ayush Saxena.

HDFS-13908. TestDataNodeMultipleRegistrations is flaky. Contributed by Ayush Saxena.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/56e0d635
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/56e0d635
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/56e0d635

Branch: refs/heads/HDDS-4
Commit: 56e0d635e0f49772d001c1fdb385110c705a622a
Parents: 61a4b07
Author: Inigo Goiri <in...@apache.org>
Authored: Wed Sep 19 09:30:25 2018 -0700
Committer: Inigo Goiri <in...@apache.org>
Committed: Wed Sep 19 09:30:25 2018 -0700

----------------------------------------------------------------------
 .../hdfs/server/datanode/TestDataNodeMultipleRegistrations.java  | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/56e0d635/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeMultipleRegistrations.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeMultipleRegistrations.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeMultipleRegistrations.java
index 4a49477..bd28fde 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeMultipleRegistrations.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeMultipleRegistrations.java
@@ -250,7 +250,7 @@ public class TestDataNodeMultipleRegistrations {
     try {
       cluster.startDataNodes(conf, 1, true, null, null);
       // let the initialization be complete
-      Thread.sleep(10000);
+      cluster.waitActive();
       DataNode dn = cluster.getDataNodes().get(0);
       assertTrue("Datanode should be running", dn.isDatanodeUp());
       assertEquals("Only one BPOfferService should be running", 1,
@@ -274,7 +274,7 @@ public class TestDataNodeMultipleRegistrations {
     try {
       cluster.startDataNodes(conf, 1, true, null, null);
       // let the initialization be complete
-      Thread.sleep(10000);
+      cluster.waitActive();
       DataNode dn = cluster.getDataNodes().get(0);
       assertTrue("Datanode should be running", dn.isDatanodeUp());
       assertEquals("BPOfferService should be running", 1,




[17/50] [abbrv] hadoop git commit: HDDS-476. Add Pipeline reports to make pipeline active on SCM restart. Contributed by Mukul Kumar Singh.

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c0956ee2/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipelines/PipelineSelector.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipelines/PipelineSelector.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipelines/PipelineSelector.java
index 82946bd..59d937e 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipelines/PipelineSelector.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipelines/PipelineSelector.java
@@ -16,9 +16,12 @@
  */
 package org.apache.hadoop.hdds.scm.pipelines;
 
+import com.google.common.annotations.VisibleForTesting;
 import com.google.common.base.Preconditions;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.conf.StorageUnit;
+import org.apache.hadoop.hdds.protocol.proto
+        .StorageContainerDatanodeProtocolProtos.PipelineReportsProto;
 import org.apache.hadoop.hdds.scm.ScmConfigKeys;
 import org.apache.hadoop.hdds.scm.container.ContainerID;
 import org.apache.hadoop.hdds.scm.container.common.helpers.Pipeline;
@@ -30,6 +33,7 @@ import org.apache.hadoop.hdds.scm.container.placement.algorithms
 import org.apache.hadoop.hdds.scm.events.SCMEvents;
 import org.apache.hadoop.hdds.scm.exceptions.SCMException;
 import org.apache.hadoop.hdds.scm.node.NodeManager;
+import org.apache.hadoop.hdds.scm.node.states.Node2PipelineMap;
 import org.apache.hadoop.hdds.scm.pipelines.ratis.RatisManagerImpl;
 import org.apache.hadoop.hdds.scm.pipelines.standalone.StandaloneManagerImpl;
 import org.apache.hadoop.hdds.protocol.DatanodeDetails;
@@ -75,11 +79,9 @@ public class PipelineSelector {
   private static final Logger LOG =
       LoggerFactory.getLogger(PipelineSelector.class);
   private final ContainerPlacementPolicy placementPolicy;
-  private final NodeManager nodeManager;
+  private final Map<ReplicationType, PipelineManager> pipelineManagerMap;
   private final Configuration conf;
   private final EventPublisher eventPublisher;
-  private final RatisManagerImpl ratisManager;
-  private final StandaloneManagerImpl standaloneManager;
   private final long containerSize;
   private final MetadataStore pipelineStore;
   private final PipelineStateManager stateManager;
@@ -96,7 +98,6 @@ public class PipelineSelector {
    */
   public PipelineSelector(NodeManager nodeManager, Configuration conf,
       EventPublisher eventPublisher, int cacheSizeMB) throws IOException {
-    this.nodeManager = nodeManager;
     this.conf = conf;
     this.eventPublisher = eventPublisher;
     this.placementPolicy = createContainerPlacementPolicy(nodeManager, conf);
@@ -106,12 +107,14 @@ public class PipelineSelector {
         StorageUnit.BYTES);
     node2PipelineMap = new Node2PipelineMap();
     pipelineMap = new ConcurrentHashMap<>();
-    this.standaloneManager =
-        new StandaloneManagerImpl(this.nodeManager, placementPolicy,
-            containerSize);
-    this.ratisManager =
-        new RatisManagerImpl(this.nodeManager, placementPolicy, containerSize,
-            conf);
+    pipelineManagerMap = new HashMap<>();
+
+    pipelineManagerMap.put(ReplicationType.STAND_ALONE,
+            new StandaloneManagerImpl(nodeManager, placementPolicy,
+            containerSize));
+    pipelineManagerMap.put(ReplicationType.RATIS,
+            new RatisManagerImpl(nodeManager, placementPolicy,
+                    containerSize, conf));
     long pipelineCreationLeaseTimeout = conf.getTimeDuration(
         ScmConfigKeys.OZONE_SCM_PIPELINE_CREATION_LEASE_TIMEOUT,
         ScmConfigKeys.OZONE_SCM_PIPELINE_CREATION_LEASE_TIMEOUT_DEFAULT,
@@ -154,6 +157,7 @@ public class PipelineSelector {
     }
   }
 
+  @VisibleForTesting
   public Set<ContainerID> getOpenContainerIDsByPipeline(PipelineID pipelineID) {
     return pipeline2ContainerMap.get(pipelineID);
   }
@@ -227,30 +231,6 @@ public class PipelineSelector {
   }
 
   /**
-   * Return the pipeline manager from the replication type.
-   *
-   * @param replicationType - Replication Type Enum.
-   * @return pipeline Manager.
-   * @throws IllegalArgumentException If an pipeline type gets added
-   * and this function is not modified we will throw.
-   */
-  private PipelineManager getPipelineManager(ReplicationType replicationType)
-      throws IllegalArgumentException {
-    switch (replicationType) {
-    case RATIS:
-      return this.ratisManager;
-    case STAND_ALONE:
-      return this.standaloneManager;
-    case CHAINED:
-      throw new IllegalArgumentException("Not implemented yet");
-    default:
-      throw new IllegalArgumentException("Unexpected enum found. Does not" +
-          " know how to handle " + replicationType.toString());
-    }
-
-  }
-
-  /**
    * This function is called by the Container Manager while allocating a new
    * container. The client specifies what kind of replication pipeline is needed
    * and based on the replication type in the request appropriate Interface is
@@ -260,7 +240,7 @@ public class PipelineSelector {
   public Pipeline getReplicationPipeline(ReplicationType replicationType,
       HddsProtos.ReplicationFactor replicationFactor)
       throws IOException {
-    PipelineManager manager = getPipelineManager(replicationType);
+    PipelineManager manager = pipelineManagerMap.get(replicationType);
     Preconditions.checkNotNull(manager, "Found invalid pipeline manager");
     LOG.debug("Getting replication pipeline forReplicationType {} :" +
             " ReplicationFactor {}", replicationType.toString(),
@@ -316,7 +296,7 @@ public class PipelineSelector {
    * Finalize a given pipeline.
    */
   public void finalizePipeline(Pipeline pipeline) throws IOException {
-    PipelineManager manager = getPipelineManager(pipeline.getType());
+    PipelineManager manager = pipelineManagerMap.get(pipeline.getType());
     Preconditions.checkNotNull(manager, "Found invalid pipeline manager");
     if (pipeline.getLifeCycleState() == LifeCycleState.CLOSING ||
         pipeline.getLifeCycleState() == LifeCycleState.CLOSED) {
@@ -327,17 +307,17 @@ public class PipelineSelector {
     }
 
     // Remove the pipeline from active allocation
-    manager.finalizePipeline(pipeline);
-
-    LOG.info("Finalizing pipeline. pipelineID: {}", pipeline.getId());
-    updatePipelineState(pipeline, HddsProtos.LifeCycleEvent.FINALIZE);
-    closePipelineIfNoOpenContainers(pipeline);
+    if (manager.finalizePipeline(pipeline)) {
+      LOG.info("Finalizing pipeline. pipelineID: {}", pipeline.getId());
+      updatePipelineState(pipeline, HddsProtos.LifeCycleEvent.FINALIZE);
+      closePipelineIfNoOpenContainers(pipeline);
+    }
   }
 
   /**
    * Close a given pipeline.
    */
-  public void closePipelineIfNoOpenContainers(Pipeline pipeline)
+  private void closePipelineIfNoOpenContainers(Pipeline pipeline)
       throws IOException {
     if (pipeline.getLifeCycleState() != LifeCycleState.CLOSING) {
       return;
@@ -354,7 +334,7 @@ public class PipelineSelector {
    * Close a given pipeline.
    */
   private void closePipeline(Pipeline pipeline) throws IOException {
-    PipelineManager manager = getPipelineManager(pipeline.getType());
+    PipelineManager manager = pipelineManagerMap.get(pipeline.getType());
     Preconditions.checkNotNull(manager, "Found invalid pipeline manager");
     LOG.debug("Closing pipeline. pipelineID: {}", pipeline.getId());
     HashSet<ContainerID> containers =
@@ -367,7 +347,7 @@ public class PipelineSelector {
    * Add to a given pipeline.
    */
   private void addOpenPipeline(Pipeline pipeline) {
-    PipelineManager manager = getPipelineManager(pipeline.getType());
+    PipelineManager manager = pipelineManagerMap.get(pipeline.getType());
     Preconditions.checkNotNull(manager, "Found invalid pipeline manager");
     LOG.debug("Adding Open pipeline. pipelineID: {}", pipeline.getId());
     manager.addOpenPipeline(pipeline);
@@ -381,7 +361,7 @@ public class PipelineSelector {
     }
   }
 
-  public Set<PipelineID> getPipelineId(UUID dnId) {
+  public Set<PipelineID> getPipelineByDnID(UUID dnId) {
     return node2PipelineMap.getPipelines(dnId);
   }
 
@@ -400,6 +380,9 @@ public class PipelineSelector {
       pipelineMap.put(pipeline.getId(), pipeline);
       pipeline2ContainerMap.put(pipeline.getId(), new HashSet<>());
       node2PipelineMap.addPipeline(pipeline);
+      // reset the datanodes in the pipeline; they will be repopulated
+      // as pipeline reports arrive from the datanodes
+      pipeline.resetPipeline();
       break;
     case CLOSED:
       // if the pipeline is in closed state, nothing to do.
@@ -409,6 +392,36 @@ public class PipelineSelector {
     }
   }
 
+  public void handleStaleNode(DatanodeDetails dn) {
+    Set<PipelineID> pipelineIDs = getPipelineByDnID(dn.getUuid());
+    for (PipelineID id : pipelineIDs) {
+      LOG.info("closing pipeline {}.", id);
+      eventPublisher.fireEvent(SCMEvents.PIPELINE_CLOSE, id);
+    }
+  }
+
+  void processPipelineReport(DatanodeDetails dn,
+                                    PipelineReportsProto pipelineReport) {
+    Set<PipelineID> reportedPipelines = new HashSet<>();
+    pipelineReport.getPipelineReportList().
+            forEach(p ->
+                    reportedPipelines.add(
+                            processPipelineReport(p.getPipelineID(), dn)));
+
+    //TODO: handle missing pipelines and new pipelines later
+  }
+
+  private PipelineID processPipelineReport(
+          HddsProtos.PipelineID id, DatanodeDetails dn) {
+    PipelineID pipelineID = PipelineID.getFromProtobuf(id);
+    Pipeline pipeline = pipelineMap.get(pipelineID);
+    if (pipeline != null) {
+      pipelineManagerMap.get(pipeline.getType())
+              .processPipelineReport(pipeline, dn);
+    }
+    return pipelineID;
+  }
+
   /**
    * Update the Pipeline State to the next state.
    *

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c0956ee2/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipelines/ratis/RatisManagerImpl.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipelines/ratis/RatisManagerImpl.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipelines/ratis/RatisManagerImpl.java
index d3cec88..905a5b5 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipelines/ratis/RatisManagerImpl.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipelines/ratis/RatisManagerImpl.java
@@ -73,20 +73,19 @@ public class RatisManagerImpl extends PipelineManager {
   public Pipeline allocatePipeline(ReplicationFactor factor) {
     List<DatanodeDetails> newNodesList = new LinkedList<>();
     List<DatanodeDetails> datanodes = nodeManager.getNodes(NodeState.HEALTHY);
-    int count = getReplicationCount(factor);
     //TODO: Add Raft State to the Nodes, so we can query and skip nodes from
     // data from datanode instead of maintaining a set.
     for (DatanodeDetails datanode : datanodes) {
       Preconditions.checkNotNull(datanode);
       if (!ratisMembers.contains(datanode)) {
         newNodesList.add(datanode);
-        if (newNodesList.size() == count) {
+        if (newNodesList.size() == factor.getNumber()) {
           // once a datanode has been added to a pipeline, exclude it from
           // further allocations
           ratisMembers.addAll(newNodesList);
           PipelineID pipelineID = PipelineID.randomId();
           LOG.info("Allocating a new ratis pipeline of size: {} id: {}",
-              count, pipelineID);
+                  factor.getNumber(), pipelineID);
           return PipelineSelector.newPipelineFromNodes(newNodesList,
               ReplicationType.RATIS, factor, pipelineID);
         }
@@ -103,6 +102,17 @@ public class RatisManagerImpl extends PipelineManager {
     }
   }
 
+  public void processPipelineReport(Pipeline pipeline, DatanodeDetails dn) {
+    super.processPipelineReport(pipeline, dn);
+    ratisMembers.add(dn);
+  }
+
+  public synchronized boolean finalizePipeline(Pipeline pipeline) {
+    activePipelines.get(pipeline.getFactor().ordinal())
+            .removePipeline(pipeline.getId());
+    return true;
+  }
+
   /**
    * Close the pipeline.
    */
@@ -116,29 +126,4 @@ public class RatisManagerImpl extends PipelineManager {
       Preconditions.checkArgument(ratisMembers.remove(node));
     }
   }
-
-  /**
-   * list members in the pipeline .
-   *
-   * @param pipelineID
-   * @return the datanode
-   */
-  @Override
-  public List<DatanodeDetails> getMembers(PipelineID pipelineID)
-      throws IOException {
-    return null;
-  }
-
-  /**
-   * Update the datanode list of the pipeline.
-   *
-   * @param pipelineID
-   * @param newDatanodes
-   */
-  @Override
-  public void updatePipeline(PipelineID pipelineID,
-                             List<DatanodeDetails> newDatanodes)
-      throws IOException {
-
-  }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c0956ee2/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipelines/standalone/StandaloneManagerImpl.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipelines/standalone/StandaloneManagerImpl.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipelines/standalone/StandaloneManagerImpl.java
index ed2fc2f..045afb6 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipelines/standalone/StandaloneManagerImpl.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipelines/standalone/StandaloneManagerImpl.java
@@ -74,18 +74,19 @@ public class StandaloneManagerImpl extends PipelineManager {
   public Pipeline allocatePipeline(ReplicationFactor factor) {
     List<DatanodeDetails> newNodesList = new LinkedList<>();
     List<DatanodeDetails> datanodes = nodeManager.getNodes(NodeState.HEALTHY);
-    int count = getReplicationCount(factor);
     for (DatanodeDetails datanode : datanodes) {
       Preconditions.checkNotNull(datanode);
       if (!standAloneMembers.contains(datanode)) {
         newNodesList.add(datanode);
-        if (newNodesList.size() == count) {
+        if (newNodesList.size() == factor.getNumber()) {
           // once a datanode has been added to a pipeline, exclude it from
           // further allocations
           standAloneMembers.addAll(newNodesList);
-          PipelineID pipelineID = PipelineID.randomId();
+          // Standalone pipelines use the node id as the pipeline id
+          PipelineID pipelineID =
+                  PipelineID.valueOf(newNodesList.get(0).getUuid());
           LOG.info("Allocating a new standalone pipeline of size: {} id: {}",
-              count, pipelineID);
+              factor.getNumber(), pipelineID);
           return PipelineSelector.newPipelineFromNodes(newNodesList,
               ReplicationType.STAND_ALONE, ReplicationFactor.ONE, pipelineID);
         }
@@ -98,6 +99,17 @@ public class StandaloneManagerImpl extends PipelineManager {
     // Nothing to be done for standalone pipeline
   }
 
+  public void processPipelineReport(Pipeline pipeline, DatanodeDetails dn) {
+    super.processPipelineReport(pipeline, dn);
+    standAloneMembers.add(dn);
+  }
+
+  public synchronized boolean finalizePipeline(Pipeline pipeline) {
+    activePipelines.get(pipeline.getFactor().ordinal())
+            .removePipeline(pipeline.getId());
+    return false;
+  }
+
   /**
    * Close the pipeline.
    */
@@ -107,28 +119,4 @@ public class StandaloneManagerImpl extends PipelineManager {
       Preconditions.checkArgument(standAloneMembers.remove(node));
     }
   }
-
-  /**
-   * list members in the pipeline .
-   *
-   * @param pipelineID
-   * @return the datanode
-   */
-  @Override
-  public List<DatanodeDetails> getMembers(PipelineID pipelineID)
-      throws IOException {
-    return null;
-  }
-
-  /**
-   * Update the datanode list of the pipeline.
-   *
-   * @param pipelineID
-   * @param newDatanodes
-   */
-  @Override
-  public void updatePipeline(PipelineID pipelineID, List<DatanodeDetails>
-      newDatanodes) throws IOException {
-
-  }
 }
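
The notable change in this hunk is that a standalone pipeline's id is now derived from the first datanode's UUID instead of being random, so the same node always maps back to the same standalone pipeline. A small illustration, with a raw UUID standing in for PipelineID (PipelineID.valueOf(uuid) is assumed to wrap the UUID unchanged):

import java.util.UUID;

public class StandalonePipelineIdSketch {

  // Analogue of PipelineID.valueOf(firstNode.getUuid()).
  static UUID standalonePipelineId(UUID firstDatanodeUuid) {
    return firstDatanodeUuid;
  }

  public static void main(String[] args) {
    UUID dn = UUID.randomUUID();
    System.out.println("allocation 1: " + standalonePipelineId(dn));
    System.out.println("allocation 2: " + standalonePipelineId(dn)); // same id
    System.out.println("ratis id:     " + UUID.randomUUID());        // always fresh
  }
}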

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c0956ee2/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMDatanodeHeartbeatDispatcher.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMDatanodeHeartbeatDispatcher.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMDatanodeHeartbeatDispatcher.java
index a651f62..e65de8b 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMDatanodeHeartbeatDispatcher.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMDatanodeHeartbeatDispatcher.java
@@ -20,6 +20,8 @@ package org.apache.hadoop.hdds.scm.server;
 import com.google.common.base.Preconditions;
 import org.apache.hadoop.hdds.protocol.DatanodeDetails;
 import org.apache.hadoop.hdds.protocol.proto
+        .StorageContainerDatanodeProtocolProtos.PipelineReportsProto;
+import org.apache.hadoop.hdds.protocol.proto
     .StorageContainerDatanodeProtocolProtos.PipelineActionsProto;
 import org.apache.hadoop.hdds.protocol.proto
     .StorageContainerDatanodeProtocolProtos.ContainerActionsProto;
@@ -46,6 +48,7 @@ import static org.apache.hadoop.hdds.scm.events.SCMEvents.CONTAINER_REPORT;
 import static org.apache.hadoop.hdds.scm.events.SCMEvents.NODE_REPORT;
 import static org.apache.hadoop.hdds.scm.events.SCMEvents.CMD_STATUS_REPORT;
 import static org.apache.hadoop.hdds.scm.events.SCMEvents.PIPELINE_ACTIONS;
+import static org.apache.hadoop.hdds.scm.events.SCMEvents.PIPELINE_REPORT;
 
 /**
  * This class is responsible for dispatching heartbeat from datanode to
@@ -103,6 +106,14 @@ public final class SCMDatanodeHeartbeatDispatcher {
               heartbeat.getContainerActions()));
     }
 
+    if (heartbeat.hasPipelineReports()) {
+      LOG.debug("Dispatching Pipeline Report.");
+      eventPublisher.fireEvent(PIPELINE_REPORT,
+              new PipelineReportFromDatanode(datanodeDetails,
+                      heartbeat.getPipelineReports()));
+
+    }
+
     if (heartbeat.hasPipelineActions()) {
       LOG.debug("Dispatching Pipeline Actions.");
       eventPublisher.fireEvent(PIPELINE_ACTIONS,
@@ -179,6 +190,18 @@ public final class SCMDatanodeHeartbeatDispatcher {
   }
 
   /**
+   * Pipeline report event payload with origin.
+   */
+  public static class PipelineReportFromDatanode
+          extends ReportFromDatanode<PipelineReportsProto> {
+
+    public PipelineReportFromDatanode(DatanodeDetails datanodeDetails,
+                                      PipelineReportsProto report) {
+      super(datanodeDetails, report);
+    }
+  }
+
+  /**
    * Pipeline action event payload with origin.
    */
   public static class PipelineActionsFromDatanode
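
The dispatcher pattern above — check which optional report sections a heartbeat carries, wrap each in a *FromDatanode payload, and fire one typed event per section — can be sketched with a toy event queue. Event names are plain strings and payloads plain objects here, standing in for the SCMEvents constants and the protobuf report types:

import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.function.Consumer;

public class HeartbeatDispatchSketch {

  static class EventQueue {
    private final Map<String, List<Consumer<Object>>> handlers = new HashMap<>();
    void addHandler(String event, Consumer<Object> handler) {
      handlers.computeIfAbsent(event, e -> new ArrayList<>()).add(handler);
    }
    void fireEvent(String event, Object payload) {
      handlers.getOrDefault(event, List.of()).forEach(h -> h.accept(payload));
    }
  }

  static class Heartbeat {
    String pipelineReport;   // null when the section is absent
    String containerReport;
  }

  public static void main(String[] args) {
    EventQueue queue = new EventQueue();
    queue.addHandler("PIPELINE_REPORT",
        r -> System.out.println("pipeline handler got: " + r));

    Heartbeat hb = new Heartbeat();
    hb.pipelineReport = "pipeline-report-from-dn1";

    // Mirrors dispatch(): fire only for the sections that are present.
    if (hb.pipelineReport != null) {
      queue.fireEvent("PIPELINE_REPORT", hb.pipelineReport);
    }
    if (hb.containerReport != null) {
      queue.fireEvent("CONTAINER_REPORT", hb.containerReport);
    }
  }
}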

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c0956ee2/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMDatanodeProtocolServer.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMDatanodeProtocolServer.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMDatanodeProtocolServer.java
index 8a09dc8..4a0d3e5 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMDatanodeProtocolServer.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMDatanodeProtocolServer.java
@@ -34,6 +34,8 @@ import org.apache.hadoop.hdds.protocol.proto
 import org.apache.hadoop.hdds.protocol.proto
     .StorageContainerDatanodeProtocolProtos.ContainerReportsProto;
 import org.apache.hadoop.hdds.protocol.proto
+        .StorageContainerDatanodeProtocolProtos.PipelineReportsProto;
+import org.apache.hadoop.hdds.protocol.proto
     .StorageContainerDatanodeProtocolProtos.SCMHeartbeatResponseProto;
 import org.apache.hadoop.hdds.protocol.proto
     .StorageContainerDatanodeProtocolProtos.SCMVersionRequestProto;
@@ -74,7 +76,10 @@ import static org.apache.hadoop.hdds.protocol.proto
 
 import org.apache.hadoop.hdds.scm.HddsServerUtil;
 import org.apache.hadoop.hdds.scm.events.SCMEvents;
-import org.apache.hadoop.hdds.scm.server.SCMDatanodeHeartbeatDispatcher.ReportFromDatanode;
+import org.apache.hadoop.hdds.scm.server.SCMDatanodeHeartbeatDispatcher
+    .ReportFromDatanode;
+import org.apache.hadoop.hdds.scm.server.SCMDatanodeHeartbeatDispatcher
+        .PipelineReportFromDatanode;
 import org.apache.hadoop.hdds.server.events.EventPublisher;
 import org.apache.hadoop.io.IOUtils;
 import org.apache.hadoop.ipc.ProtobufRpcEngine;
@@ -102,6 +107,7 @@ import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_DATANODE_ADDRES
 import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_HANDLER_COUNT_DEFAULT;
 import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_HANDLER_COUNT_KEY;
 
+import static org.apache.hadoop.hdds.scm.events.SCMEvents.PIPELINE_REPORT;
 import static org.apache.hadoop.hdds.scm.server.StorageContainerManager.startRpcServer;
 import static org.apache.hadoop.hdds.server.ServerUtils.updateRPCListenAddress;
 
@@ -190,13 +196,14 @@ public class SCMDatanodeProtocolServer implements
   public SCMRegisteredResponseProto register(
       HddsProtos.DatanodeDetailsProto datanodeDetailsProto,
       NodeReportProto nodeReport,
-      ContainerReportsProto containerReportsProto)
+      ContainerReportsProto containerReportsProto,
+          PipelineReportsProto pipelineReportsProto)
       throws IOException {
     DatanodeDetails datanodeDetails = DatanodeDetails
         .getFromProtoBuf(datanodeDetailsProto);
     // TODO : Return the list of Nodes that forms the SCM HA.
     RegisteredCommand registeredCommand = scm.getScmNodeManager()
-        .register(datanodeDetails, nodeReport);
+        .register(datanodeDetails, nodeReport, pipelineReportsProto);
     if (registeredCommand.getError()
         == SCMRegisteredResponseProto.ErrorCode.success) {
       scm.getScmContainerManager().processContainerReports(datanodeDetails,
@@ -204,6 +211,9 @@ public class SCMDatanodeProtocolServer implements
       eventPublisher.fireEvent(SCMEvents.NODE_REGISTRATION_CONT_REPORT,
           new NodeRegistrationContainerReport(datanodeDetails,
               containerReportsProto));
+      eventPublisher.fireEvent(PIPELINE_REPORT,
+              new PipelineReportFromDatanode(datanodeDetails,
+                      pipelineReportsProto));
     }
     return getRegisteredResponse(registeredCommand);
   }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c0956ee2/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/StorageContainerManager.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/StorageContainerManager.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/StorageContainerManager.java
index 8e76606..2169149 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/StorageContainerManager.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/StorageContainerManager.java
@@ -64,6 +64,7 @@ import org.apache.hadoop.hdds.scm.node.StaleNodeHandler;
 import org.apache.hadoop.hdds.scm.node.states.Node2ContainerMap;
 import org.apache.hadoop.hdds.scm.pipelines.PipelineCloseHandler;
 import org.apache.hadoop.hdds.scm.pipelines.PipelineActionEventHandler;
+import org.apache.hadoop.hdds.scm.pipelines.PipelineReportHandler;
 import org.apache.hadoop.hdds.server.ServiceRuntimeInfoImpl;
 import org.apache.hadoop.hdds.server.events.EventPublisher;
 import org.apache.hadoop.hdds.server.events.EventQueue;
@@ -217,13 +218,16 @@ public final class StorageContainerManager extends ServiceRuntimeInfoImpl
         new CloseContainerEventHandler(scmContainerManager);
     NodeReportHandler nodeReportHandler =
         new NodeReportHandler(scmNodeManager);
-
+    PipelineReportHandler pipelineReportHandler =
+            new PipelineReportHandler(
+                    scmContainerManager.getPipelineSelector());
     CommandStatusReportHandler cmdStatusReportHandler =
         new CommandStatusReportHandler();
 
     NewNodeHandler newNodeHandler = new NewNodeHandler(node2ContainerMap);
     StaleNodeHandler staleNodeHandler =
-        new StaleNodeHandler(node2ContainerMap, scmContainerManager);
+        new StaleNodeHandler(node2ContainerMap,
+                scmContainerManager.getPipelineSelector());
     DeadNodeHandler deadNodeHandler = new DeadNodeHandler(node2ContainerMap,
         getScmContainerManager().getStateManager());
     ContainerActionsHandler actionsHandler = new ContainerActionsHandler();
@@ -240,7 +244,7 @@ public final class StorageContainerManager extends ServiceRuntimeInfoImpl
         new PipelineActionEventHandler();
 
     PipelineCloseHandler pipelineCloseHandler =
-        new PipelineCloseHandler(scmContainerManager);
+        new PipelineCloseHandler(scmContainerManager.getPipelineSelector());
 
     long watcherTimeout =
         conf.getTimeDuration(ScmConfigKeys.HDDS_SCM_WATCHER_TIMEOUT,
@@ -300,6 +304,7 @@ public final class StorageContainerManager extends ServiceRuntimeInfoImpl
     eventQueue.addHandler(SCMEvents.CHILL_MODE_STATUS,
         (BlockManagerImpl) scmBlockManager);
     eventQueue.addHandler(SCMEvents.CHILL_MODE_STATUS, clientProtocolServer);
+    eventQueue.addHandler(SCMEvents.PIPELINE_REPORT, pipelineReportHandler);
 
     registerMXBean();
   }
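
As a follow-on to the dispatcher sketch earlier, the startup wiring above is just handler registration: each handler subscribes to the event it consumes, and a single event type (CHILL_MODE_STATUS has two subscribers above) may fan out to several handlers. This sketch reuses the toy EventQueue and assumes it is compiled alongside HeartbeatDispatchSketch; the event names are illustrative strings, not the real SCMEvents constants:

public class HandlerWiringSketch {
  public static void main(String[] args) {
    HeartbeatDispatchSketch.EventQueue queue =
        new HeartbeatDispatchSketch.EventQueue();
    queue.addHandler("PIPELINE_REPORT",
        r -> System.out.println("PipelineReportHandler: " + r));
    queue.addHandler("CHILL_MODE_STATUS",
        s -> System.out.println("BlockManager sees chill mode = " + s));
    queue.addHandler("CHILL_MODE_STATUS",
        s -> System.out.println("ClientProtocolServer sees chill mode = " + s));
    queue.fireEvent("CHILL_MODE_STATUS", Boolean.FALSE);  // both handlers run
  }
}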

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c0956ee2/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/TestUtils.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/TestUtils.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/TestUtils.java
index 7af9dda..24a16c7 100644
--- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/TestUtils.java
+++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/TestUtils.java
@@ -17,6 +17,8 @@
 package org.apache.hadoop.hdds.scm;
 
 import com.google.common.base.Preconditions;
+import org.apache.hadoop.hdds.protocol.proto
+        .StorageContainerDatanodeProtocolProtos.PipelineReportsProto;
 import org.mockito.Mockito;
 import static org.mockito.Mockito.when;
 
@@ -139,7 +141,8 @@ public final class TestUtils {
   public static DatanodeDetails createRandomDatanodeAndRegister(
       SCMNodeManager nodeManager) {
     return getDatanodeDetails(
-        nodeManager.register(randomDatanodeDetails(), null));
+        nodeManager.register(randomDatanodeDetails(), null,
+                getRandomPipelineReports()));
   }
 
   /**
@@ -299,6 +302,11 @@ public final class TestUtils {
     return getContainerReports(containerInfos);
   }
 
+
+  public static PipelineReportsProto getRandomPipelineReports() {
+    return PipelineReportsProto.newBuilder().build();
+  }
+
   /**
    * Creates container report with the given ContainerInfo(s).
    *

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c0956ee2/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/MockNodeManager.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/MockNodeManager.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/MockNodeManager.java
index 088b700..21e44a3 100644
--- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/MockNodeManager.java
+++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/MockNodeManager.java
@@ -16,6 +16,8 @@
  */
 package org.apache.hadoop.hdds.scm.container;
 
+import org.apache.hadoop.hdds.protocol.proto
+        .StorageContainerDatanodeProtocolProtos.PipelineReportsProto;
 import org.apache.hadoop.hdds.scm.TestUtils;
 import org.apache.hadoop.hdds.scm.container.placement.metrics.SCMNodeMetric;
 import org.apache.hadoop.hdds.scm.container.placement.metrics.SCMNodeStat;
@@ -356,7 +358,7 @@ public class MockNodeManager implements NodeManager {
    */
   @Override
   public RegisteredCommand register(DatanodeDetails datanodeDetails,
-      NodeReportProto nodeReport) {
+      NodeReportProto nodeReport, PipelineReportsProto pipelineReportsProto) {
     return null;
   }
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c0956ee2/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestNodeManager.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestNodeManager.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestNodeManager.java
index f438c8b..cbe96ee 100644
--- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestNodeManager.java
+++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestNodeManager.java
@@ -286,7 +286,8 @@ public class TestNodeManager {
         TestUtils.createStorageReport(dnId, storagePath, 100, 10, 90, null);
     try (SCMNodeManager nodemanager = createNodeManager(conf)) {
       nodemanager.register(datanodeDetails,
-          TestUtils.createNodeReport(report));
+          TestUtils.createNodeReport(report),
+          TestUtils.getRandomPipelineReports());
       List<SCMCommand> command = nodemanager.processHeartbeat(datanodeDetails);
       Assert.assertTrue(nodemanager.getAllNodes().contains(datanodeDetails));
       Assert.assertTrue("On regular HB calls, SCM responses a "
@@ -1122,7 +1123,8 @@ public class TestNodeManager {
       eq.addHandler(DATANODE_COMMAND, nodemanager);
 
       nodemanager
-          .register(datanodeDetails, TestUtils.createNodeReport(report));
+          .register(datanodeDetails, TestUtils.createNodeReport(report),
+                  TestUtils.getRandomPipelineReports());
       eq.fireEvent(DATANODE_COMMAND,
           new CommandForDatanode<>(datanodeDetails.getUuid(),
               new CloseContainerCommand(1L, ReplicationType.STAND_ALONE,

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c0956ee2/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/states/TestNode2ContainerMap.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/states/TestNode2ContainerMap.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/states/TestNode2ContainerMap.java
index 14a74e9..ec1d527 100644
--- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/states/TestNode2ContainerMap.java
+++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/states/TestNode2ContainerMap.java
@@ -116,7 +116,7 @@ public class TestNode2ContainerMap {
     Assert.assertTrue(map.isKnownDatanode(key));
     ReportResult result = map.processReport(key, values);
     Assert.assertEquals(result.getStatus(),
-        Node2ContainerMap.ReportStatus.ALL_IS_WELL);
+        ReportResult.ReportStatus.ALL_IS_WELL);
   }
 
   @Test
@@ -181,9 +181,9 @@ public class TestNode2ContainerMap {
     UUID key = getFirstKey();
     TreeSet<ContainerID> values = testData.get(key);
     ReportResult result = map.processReport(key, values);
-    Assert.assertEquals(Node2ContainerMap.ReportStatus.NEW_DATANODE_FOUND,
+    Assert.assertEquals(ReportResult.ReportStatus.NEW_DATANODE_FOUND,
         result.getStatus());
-    Assert.assertEquals(result.getNewContainers().size(), values.size());
+    Assert.assertEquals(result.getNewEntries().size(), values.size());
   }
 
   /**
@@ -216,15 +216,15 @@ public class TestNode2ContainerMap {
     ReportResult result = map.processReport(key, newContainersSet);
 
     //Assert that expected size of missing container is same as addedContainers
-    Assert.assertEquals(Node2ContainerMap.ReportStatus.NEW_CONTAINERS_FOUND,
+    Assert.assertEquals(ReportResult.ReportStatus.NEW_ENTRIES_FOUND,
         result.getStatus());
 
     Assert.assertEquals(addedContainers.size(),
-        result.getNewContainers().size());
+        result.getNewEntries().size());
 
     // Assert that the Container IDs are the same as we added new.
     Assert.assertTrue("All objects are not removed.",
-        result.getNewContainers().removeAll(addedContainers));
+        result.getNewEntries().removeAll(addedContainers));
   }
 
   /**
@@ -261,14 +261,14 @@ public class TestNode2ContainerMap {
 
 
     //Assert that expected size of missing container is same as addedContainers
-    Assert.assertEquals(Node2ContainerMap.ReportStatus.MISSING_CONTAINERS,
+    Assert.assertEquals(ReportResult.ReportStatus.MISSING_ENTRIES,
         result.getStatus());
     Assert.assertEquals(removedContainers.size(),
-        result.getMissingContainers().size());
+        result.getMissingEntries().size());
 
     // Assert that the Container IDs are the same as we added new.
     Assert.assertTrue("All missing containers not found.",
-        result.getMissingContainers().removeAll(removedContainers));
+        result.getMissingEntries().removeAll(removedContainers));
   }
 
   @Test
@@ -307,21 +307,21 @@ public class TestNode2ContainerMap {
 
 
     Assert.assertEquals(
-        Node2ContainerMap.ReportStatus.MISSING_AND_NEW_CONTAINERS_FOUND,
+            ReportResult.ReportStatus.MISSING_AND_NEW_ENTRIES_FOUND,
         result.getStatus());
     Assert.assertEquals(removedContainers.size(),
-        result.getMissingContainers().size());
+        result.getMissingEntries().size());
 
 
     // Assert that the Container IDs are the same as we added new.
     Assert.assertTrue("All missing containers not found.",
-        result.getMissingContainers().removeAll(removedContainers));
+        result.getMissingEntries().removeAll(removedContainers));
 
     Assert.assertEquals(insertedSet.size(),
-        result.getNewContainers().size());
+        result.getNewEntries().size());
 
     // Assert that the Container IDs are the same as we added new.
     Assert.assertTrue("All inserted containers are not found.",
-        result.getNewContainers().removeAll(insertedSet));
+        result.getNewEntries().removeAll(insertedSet));
   }
 }
\ No newline at end of file
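
The renames in this test (getNewContainers -> getNewEntries, MISSING_CONTAINERS -> MISSING_ENTRIES, with ReportStatus moving into ReportResult) reflect that report reconciliation is really two set differences between what SCM already knows and what the datanode reports. A generic sketch of that classification, independent of the real ReportResult class:

import java.util.HashSet;
import java.util.Set;

public class ReportDiffSketch {

  enum ReportStatus {
    ALL_IS_WELL, MISSING_ENTRIES, NEW_ENTRIES_FOUND,
    MISSING_AND_NEW_ENTRIES_FOUND
  }

  static <T> ReportStatus classify(Set<T> known, Set<T> reported) {
    Set<T> missing = new HashSet<>(known);
    missing.removeAll(reported);          // known but no longer reported
    Set<T> fresh = new HashSet<>(reported);
    fresh.removeAll(known);               // reported but not yet known
    if (missing.isEmpty() && fresh.isEmpty()) {
      return ReportStatus.ALL_IS_WELL;
    } else if (missing.isEmpty()) {
      return ReportStatus.NEW_ENTRIES_FOUND;
    } else if (fresh.isEmpty()) {
      return ReportStatus.MISSING_ENTRIES;
    }
    return ReportStatus.MISSING_AND_NEW_ENTRIES_FOUND;
  }

  public static void main(String[] args) {
    Set<Integer> known = Set.of(1, 2, 3);
    System.out.println(classify(known, Set.of(1, 2, 3)));    // ALL_IS_WELL
    System.out.println(classify(known, Set.of(1, 2, 3, 4))); // NEW_ENTRIES_FOUND
    System.out.println(classify(known, Set.of(1, 2)));       // MISSING_ENTRIES
  }
}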

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c0956ee2/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/ozone/container/common/TestEndPoint.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/ozone/container/common/TestEndPoint.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/ozone/container/common/TestEndPoint.java
index a513f6c..390746f 100644
--- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/ozone/container/common/TestEndPoint.java
+++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/ozone/container/common/TestEndPoint.java
@@ -280,7 +280,8 @@ public class TestEndPoint {
           .register(nodeToRegister.getProtoBufMessage(), TestUtils
                   .createNodeReport(
                       getStorageReports(nodeToRegister.getUuid())),
-              TestUtils.getRandomContainerReports(10));
+              TestUtils.getRandomContainerReports(10),
+                  TestUtils.getRandomPipelineReports());
       Assert.assertNotNull(responseProto);
       Assert.assertEquals(nodeToRegister.getUuidString(),
           responseProto.getDatanodeUUID());
@@ -308,6 +309,8 @@ public class TestEndPoint {
         .createNodeReport(getStorageReports(UUID.randomUUID())));
     when(ozoneContainer.getContainerReport()).thenReturn(
         TestUtils.getRandomContainerReports(10));
+    when(ozoneContainer.getPipelineReport()).thenReturn(
+            TestUtils.getRandomPipelineReports());
     RegisterEndpointTask endpointTask =
         new RegisterEndpointTask(rpcEndPoint, conf, ozoneContainer,
             mock(StateContext.class));

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c0956ee2/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/ozone/container/testutils/ReplicationNodeManagerMock.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/ozone/container/testutils/ReplicationNodeManagerMock.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/ozone/container/testutils/ReplicationNodeManagerMock.java
index a0249aa..e8a6892 100644
--- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/ozone/container/testutils/ReplicationNodeManagerMock.java
+++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/ozone/container/testutils/ReplicationNodeManagerMock.java
@@ -17,6 +17,8 @@
 package org.apache.hadoop.ozone.container.testutils;
 
 import com.google.common.base.Preconditions;
+import org.apache.hadoop.hdds.protocol.proto
+        .StorageContainerDatanodeProtocolProtos.PipelineReportsProto;
 import org.apache.hadoop.hdds.scm.container.placement.metrics.SCMNodeMetric;
 import org.apache.hadoop.hdds.scm.container.placement.metrics.SCMNodeStat;
 import org.apache.hadoop.hdds.scm.node.CommandQueue;
@@ -252,7 +254,8 @@ public class ReplicationNodeManagerMock implements NodeManager {
    */
   @Override
   public RegisteredCommand register(DatanodeDetails dd,
-                                    NodeReportProto nodeReport) {
+                                    NodeReportProto nodeReport,
+                                    PipelineReportsProto pipelineReportsProto) {
     return null;
   }
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c0956ee2/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestNode2PipelineMap.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestNode2PipelineMap.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestNode2PipelineMap.java
index aefa6b0..ad3798e 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestNode2PipelineMap.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestNode2PipelineMap.java
@@ -98,7 +98,7 @@ public class TestNode2PipelineMap {
 
     // get pipeline details by dnid
     Set<PipelineID> pipelines = mapping.getPipelineSelector()
-        .getPipelineId(dns.get(0).getUuid());
+        .getPipelineByDnID(dns.get(0).getUuid());
     Assert.assertEquals(1, pipelines.size());
     pipelines.forEach(p -> Assert.assertEquals(p,
         ratisContainer.getPipeline().getId()));

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c0956ee2/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestPipelineClose.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestPipelineClose.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestPipelineClose.java
index a5828e1..5eabfb9 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestPipelineClose.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestPipelineClose.java
@@ -119,7 +119,7 @@ public class TestPipelineClose {
         HddsProtos.LifeCycleState.CLOSED);
     for (DatanodeDetails dn : ratisContainer1.getPipeline().getMachines()) {
       // Assert that the pipeline has been removed from Node2PipelineMap as well
-      Assert.assertEquals(pipelineSelector.getPipelineId(
+      Assert.assertEquals(pipelineSelector.getPipelineByDnID(
           dn.getUuid()).size(), 0);
     }
   }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c0956ee2/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestSCMRestart.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestSCMRestart.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestSCMRestart.java
index 3999d76..fb94b3c 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestSCMRestart.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestSCMRestart.java
@@ -19,6 +19,7 @@
 package org.apache.hadoop.hdds.scm.pipeline;
 
 import org.apache.hadoop.hdds.conf.OzoneConfiguration;
+import org.apache.hadoop.hdds.protocol.DatanodeDetails;
 import org.apache.hadoop.hdds.scm.container.ContainerMapping;
 import org.apache.hadoop.hdds.scm.container.common.helpers.Pipeline;
 import org.apache.hadoop.hdds.scm.server.StorageContainerManager;
@@ -87,7 +88,7 @@ public class TestSCMRestart {
   }
 
   @Test
-  public void testPipelineWithScmRestart() {
+  public void testPipelineWithScmRestart() throws IOException {
     // After restart make sure that the pipeline are still present
     Pipeline ratisPipeline1AfterRestart = newMapping.getPipelineSelector()
             .getPipeline(ratisPipeline1.getId());
@@ -97,5 +98,22 @@ public class TestSCMRestart {
     Assert.assertNotSame(ratisPipeline2AfterRestart, ratisPipeline2);
     Assert.assertEquals(ratisPipeline1AfterRestart, ratisPipeline1);
     Assert.assertEquals(ratisPipeline2AfterRestart, ratisPipeline2);
+
+    for (DatanodeDetails dn : ratisPipeline1.getMachines()) {
+      Assert.assertEquals(dn, ratisPipeline1AfterRestart.getDatanodes()
+              .get(dn.getUuidString()));
+    }
+
+    for (DatanodeDetails dn : ratisPipeline2.getMachines()) {
+      Assert.assertEquals(dn, ratisPipeline2AfterRestart.getDatanodes()
+              .get(dn.getUuidString()));
+    }
+
+    // Try creating a new ratis pipeline; it should come from the same
+    // pipeline that existed before the restart
+    Pipeline newRatisPipeline =
+            newMapping.allocateContainer(RATIS, THREE, "Owner1")
+                    .getPipeline();
+    Assert.assertEquals(newRatisPipeline.getId(), ratisPipeline1.getId());
   }
 }
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c0956ee2/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/RatisTestHelper.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/RatisTestHelper.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/RatisTestHelper.java
index 1cb2cda..a83c16e 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/RatisTestHelper.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/RatisTestHelper.java
@@ -36,8 +36,12 @@ import org.slf4j.LoggerFactory;
 import java.io.Closeable;
 import java.io.IOException;
 import java.net.URISyntaxException;
+import java.util.concurrent.TimeUnit;
 import java.util.concurrent.TimeoutException;
 
+import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_CONTAINER_REPORT_INTERVAL;
+import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_STALENODE_INTERVAL;
+
 /**
  * Helpers for Ratis tests.
  */
@@ -60,6 +64,7 @@ public interface RatisTestHelper {
     public RatisTestSuite()
         throws IOException, TimeoutException, InterruptedException {
       conf = newOzoneConfiguration(RPC);
+
       cluster = newMiniOzoneCluster(NUM_DATANODES, conf);
     }
 
@@ -96,6 +101,8 @@ public interface RatisTestHelper {
   static void initRatisConf(RpcType rpc, Configuration conf) {
     conf.setBoolean(OzoneConfigKeys.DFS_CONTAINER_RATIS_ENABLED_KEY, true);
     conf.set(OzoneConfigKeys.DFS_CONTAINER_RATIS_RPC_TYPE_KEY, rpc.name());
+    conf.setTimeDuration(HDDS_CONTAINER_REPORT_INTERVAL, 1, TimeUnit.SECONDS);
+    conf.setTimeDuration(OZONE_SCM_STALENODE_INTERVAL, 30, TimeUnit.SECONDS);
     LOG.info(OzoneConfigKeys.DFS_CONTAINER_RATIS_RPC_TYPE_KEY
         + " = " + rpc.name());
   }
@@ -104,6 +111,8 @@ public interface RatisTestHelper {
       int numDatanodes, OzoneConfiguration conf)
       throws IOException, TimeoutException, InterruptedException {
     final MiniOzoneCluster cluster = MiniOzoneCluster.newBuilder(conf)
+        .setHbInterval(1000)
+        .setHbProcessorInterval(1000)
         .setNumDatanodes(numDatanodes).build();
     cluster.waitForClusterToBeReady();
     return cluster;
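
The helper now shrinks the container-report and stale-node intervals so pipeline reports surface quickly in mini-cluster tests. Configuration.setTimeDuration/getTimeDuration are the Hadoop APIs doing the work; the sketch below assumes HDDS_CONTAINER_REPORT_INTERVAL resolves to "hdds.container.report.interval", and the read-back default of 60 seconds is purely illustrative:

import java.util.concurrent.TimeUnit;
import org.apache.hadoop.conf.Configuration;

public class TestTimingSketch {
  public static void main(String[] args) {
    Configuration conf = new Configuration();
    // Store the interval with an explicit unit, as the test helper does.
    conf.setTimeDuration(
        "hdds.container.report.interval", 1, TimeUnit.SECONDS);
    // Read it back in whatever unit the caller needs.
    long millis = conf.getTimeDuration(
        "hdds.container.report.interval", 60_000, TimeUnit.MILLISECONDS);
    System.out.println("report interval = " + millis + " ms"); // 1000 ms
  }
}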

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c0956ee2/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/web/client/TestKeys.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/web/client/TestKeys.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/web/client/TestKeys.java
index 6377f11..02cd985 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/web/client/TestKeys.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/web/client/TestKeys.java
@@ -136,6 +136,7 @@ public class TestKeys {
     ozoneCluster = MiniOzoneCluster.newBuilder(conf)
         .setNumDatanodes(1)
         .setHbInterval(1000)
+        .setHbProcessorInterval(1000)
         .build();
     ozoneCluster.waitForClusterToBeReady();
     client = new RpcClient(conf);
@@ -328,7 +329,6 @@ public class TestKeys {
     cluster.restartHddsDatanode(datanodeIdx);
   }
 
-  @Ignore("Causes a JVm exit")
   @Test
   public void testPutAndGetKeyWithDnRestart() throws Exception {
     runTestPutAndGetKeyWithDnRestart(

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c0956ee2/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/web/client/TestKeysRatis.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/web/client/TestKeysRatis.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/web/client/TestKeysRatis.java
index 915d0f6..2e8f539 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/web/client/TestKeysRatis.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/web/client/TestKeysRatis.java
@@ -26,7 +26,6 @@ import org.junit.AfterClass;
 import org.junit.BeforeClass;
 import org.junit.Rule;
 import org.junit.Test;
-import org.junit.Ignore;
 import org.junit.rules.Timeout;
 
 import static org.apache.hadoop.ozone.web.client
@@ -83,7 +82,6 @@ public class TestKeysRatis {
         getMultiPartKey(delimiter)));
   }
 
-  @Ignore("disabling for now, datanodes restart with ratis is buggy")
   @Test
   public void testPutAndGetKeyWithDnRestart() throws Exception {
     runTestPutAndGetKeyWithDnRestart(

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c0956ee2/hadoop-project/pom.xml
----------------------------------------------------------------------
diff --git a/hadoop-project/pom.xml b/hadoop-project/pom.xml
index 275ae6e..caf6d4f 100644
--- a/hadoop-project/pom.xml
+++ b/hadoop-project/pom.xml
@@ -101,7 +101,7 @@
     <ldap-api.version>1.0.0-M33</ldap-api.version>
 
     <!-- Apache Ratis version -->
-    <ratis.version>0.3.0-50588bd-SNAPSHOT</ratis.version>
+    <ratis.version>0.3.0-eca3531-SNAPSHOT</ratis.version>
     <jcache.version>1.0-alpha-1</jcache.version>
     <ehcache.version>3.3.1</ehcache.version>
     <hikari.version>2.4.12</hikari.version>


[26/50] [abbrv] hadoop git commit: HDDS-507. EventQueue should be shut down on SCM shutdown. Contributed by Xiaoyu Yao.

Posted by xy...@apache.org.
HDDS-507. EventQueue should be shut down on SCM shutdown. Contributed by Xiaoyu Yao.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/236d16e3
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/236d16e3
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/236d16e3

Branch: refs/heads/HDDS-4
Commit: 236d16e3a5611b8ca4f80cb7e8ffd63f167db8e0
Parents: 042bf74
Author: Nanda kumar <na...@apache.org>
Authored: Thu Sep 20 01:22:18 2018 +0530
Committer: Nanda kumar <na...@apache.org>
Committed: Thu Sep 20 01:22:18 2018 +0530

----------------------------------------------------------------------
 .../hadoop/hdds/scm/server/StorageContainerManager.java       | 7 +++++++
 1 file changed, 7 insertions(+)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/236d16e3/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/StorageContainerManager.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/StorageContainerManager.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/StorageContainerManager.java
index 86c061b..b2cbc93 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/StorageContainerManager.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/StorageContainerManager.java
@@ -759,6 +759,13 @@ public final class StorageContainerManager extends ServiceRuntimeInfoImpl
     }
 
     unregisterMXBean();
+    // Event queue must be stopped before the DB store is closed at the end.
+    try {
+      LOG.info("Stopping SCM Event Queue.");
+      eventQueue.close();
+    } catch (Exception ex) {
+      LOG.error("SCM Event Queue stop failed", ex);
+    }
     IOUtils.cleanupWithLogger(LOG, scmContainerManager);
   }
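
The ordering constraint in this patch — stop the event queue before the DB store closes, and never let a queue failure abort the rest of shutdown — generalizes to any handler/store pair. A minimal sketch with hypothetical stand-in classes (EventQueue and DbStore here are not the real SCM types):

public class ShutdownOrderSketch {

  static class EventQueue implements AutoCloseable {
    @Override public void close() { System.out.println("event queue closed"); }
  }

  static class DbStore implements AutoCloseable {
    @Override public void close() { System.out.println("db store closed"); }
  }

  public static void main(String[] args) {
    EventQueue eventQueue = new EventQueue();
    DbStore dbStore = new DbStore();

    // 1. Event queue first: handlers may still be delivering events that
    //    touch the store, and a failure must not skip the remaining cleanup.
    try {
      eventQueue.close();
    } catch (Exception ex) {
      System.err.println("event queue stop failed: " + ex);
    }
    // 2. Only then close the store the handlers were writing to.
    try {
      dbStore.close();
    } catch (Exception ex) {
      System.err.println("db store close failed: " + ex);
    }
  }
}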
 


[43/50] [abbrv] hadoop git commit: HDDS-5. Enable OzoneManager kerberos auth. Contributed by Ajay Kumar.

Posted by xy...@apache.org.
HDDS-5. Enable OzoneManager kerberos auth. Contributed by Ajay Kumar.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/47511e7e
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/47511e7e
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/47511e7e

Branch: refs/heads/HDDS-4
Commit: 47511e7e275ef7db805efdafdca90b7cb4dfa14c
Parents: 9dc72d1
Author: Xiaoyu Yao <xy...@apache.org>
Authored: Mon May 14 09:36:57 2018 -0700
Committer: Xiaoyu Yao <xy...@apache.org>
Committed: Thu Sep 20 15:13:06 2018 -0700

----------------------------------------------------------------------
 .../common/src/main/resources/ozone-default.xml |  34 +++-
 .../apache/hadoop/ozone/om/OMConfigKeys.java    |   9 +
 .../ozone/om/protocol/OzoneManagerProtocol.java |   6 +
 .../om/protocolPB/OzoneManagerProtocolPB.java   |   4 +
 .../hadoop/ozone/MiniOzoneClusterImpl.java      |   3 +-
 .../hadoop/ozone/TestSecureOzoneCluster.java    | 168 +++++++++++++++----
 .../apache/hadoop/ozone/om/OzoneManager.java    |  69 +++++++-
 .../hadoop/ozone/om/OzoneManagerHttpServer.java |   5 +-
 8 files changed, 247 insertions(+), 51 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/47511e7e/hadoop-hdds/common/src/main/resources/ozone-default.xml
----------------------------------------------------------------------
diff --git a/hadoop-hdds/common/src/main/resources/ozone-default.xml b/hadoop-hdds/common/src/main/resources/ozone-default.xml
index 65b4dc4..efb5aa6 100644
--- a/hadoop-hdds/common/src/main/resources/ozone-default.xml
+++ b/hadoop-hdds/common/src/main/resources/ozone-default.xml
@@ -1238,7 +1238,23 @@
     <name>ozone.scm.kerberos.principal</name>
     <value></value>
     <tag> OZONE, SECURITY</tag>
-    <description>The SCM service principal. Ex scm/_HOST@REALM.TLD.</description>
+    <description>The SCM service principal. Ex scm/_HOST@REALM.COM</description>
+  </property>
+
+  <property>
+    <name>ozone.om.kerberos.keytab.file</name>
+    <value></value>
+    <tag> OZONE, SECURITY</tag>
+    <description>The keytab file used by the OM daemon to log in as its
+      service principal. The principal name is configured with
+      ozone.om.kerberos.principal.
+    </description>
+  </property>
+  <property>
+    <name>ozone.om.kerberos.principal</name>
+    <value></value>
+    <tag> OZONE, SECURITY</tag>
+    <description>The OM service principal. Ex om/_HOST@REALM.COM</description>
   </property>
 
   <property>
@@ -1249,4 +1265,18 @@
     <name>ozone.scm.web.authentication.kerberos.keytab</name>
     <value>/etc/security/keytabs/HTTP.keytab</value>
   </property>
-</configuration>
+  <property>
+    <name>hdds.ksm.web.authentication.kerberos.principal</name>
+    <value>HTTP/_HOST@EXAMPLE.COM</value>
+    <description>
+      KSM http server kerberos principal.
+    </description>
+  </property>
+  <property>
+    <name>hdds.ksm.web.authentication.kerberos.keytab</name>
+    <value>/etc/security/keytabs/HTTP.keytab</value>
+    <description>
+      KSM http server kerberos keytab.
+    </description>
+  </property>
+</configuration>
\ No newline at end of file
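
Keys like the ones added above are typically consumed through SecurityUtil.login(), which reads the keytab path and principal pattern from the Configuration, substitutes _HOST, and logs the daemon in. A sketch of the call shape — it only succeeds at runtime against a real KDC and keytab, and the hostname below is illustrative:

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.security.SecurityUtil;
import org.apache.hadoop.security.UserGroupInformation;

public class OmLoginSketch {
  public static void main(String[] args) throws IOException {
    Configuration conf = new Configuration();
    conf.set("hadoop.security.authentication", "kerberos");
    UserGroupInformation.setConfiguration(conf);
    // Reads the keytab path and principal pattern from the two config keys
    // added in this patch, then performs the Kerberos login.
    SecurityUtil.login(conf,
        "ozone.om.kerberos.keytab.file",   // keytab path key
        "ozone.om.kerberos.principal",     // principal pattern key
        "om-host.example.com");            // replaces _HOST in the principal
    System.out.println("logged in as "
        + UserGroupInformation.getLoginUser());
  }
}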

http://git-wip-us.apache.org/repos/asf/hadoop/blob/47511e7e/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/OMConfigKeys.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/OMConfigKeys.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/OMConfigKeys.java
index b9ca296..6a828ca 100644
--- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/OMConfigKeys.java
+++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/OMConfigKeys.java
@@ -78,4 +78,13 @@ public final class OMConfigKeys {
   public static final String OZONE_KEY_DELETING_LIMIT_PER_TASK =
       "ozone.key.deleting.limit.per.task";
   public static final int OZONE_KEY_DELETING_LIMIT_PER_TASK_DEFAULT = 1000;
+
+  public static final String OZONE_OM_KERBEROS_KEYTAB_FILE_KEY = "ozone.om."
+      + "kerberos.keytab.file";
+  public static final String OZONE_OM_KERBEROS_PRINCIPAL_KEY = "ozone.om"
+      + ".kerberos.principal";
+  public static final String OZONE_OM_WEB_AUTHENTICATION_KERBEROS_KEYTAB_FILE =
+      "ozone.om.http.kerberos.keytab.file";
+  public static final String OZONE_OM_WEB_AUTHENTICATION_KERBEROS_PRINCIPAL_KEY
+      = "ozone.om.http.kerberos.principal";
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/47511e7e/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/protocol/OzoneManagerProtocol.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/protocol/OzoneManagerProtocol.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/protocol/OzoneManagerProtocol.java
index edb260a..6f11e08 100644
--- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/protocol/OzoneManagerProtocol.java
+++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/protocol/OzoneManagerProtocol.java
@@ -17,6 +17,7 @@
  */
 package org.apache.hadoop.ozone.om.protocol;
 
+import org.apache.hadoop.ozone.om.OMConfigKeys;
 import org.apache.hadoop.ozone.om.helpers.OmBucketArgs;
 import org.apache.hadoop.ozone.om.helpers.OmBucketInfo;
 import org.apache.hadoop.ozone.om.helpers.OmKeyArgs;
@@ -25,14 +26,19 @@ import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfo;
 import org.apache.hadoop.ozone.om.helpers.OmVolumeArgs;
 import org.apache.hadoop.ozone.om.helpers.OpenKeySession;
 import org.apache.hadoop.ozone.om.helpers.ServiceInfo;
+
+
 import org.apache.hadoop.ozone.protocol.proto
     .OzoneManagerProtocolProtos.OzoneAclInfo;
 import java.io.IOException;
 import java.util.List;
+import org.apache.hadoop.security.KerberosInfo;
 
 /**
  * Protocol to talk to OM.
  */
+@KerberosInfo(
+    serverPrincipal = OMConfigKeys.OZONE_OM_KERBEROS_PRINCIPAL_KEY)
 public interface OzoneManagerProtocol {
 
   /**

http://git-wip-us.apache.org/repos/asf/hadoop/blob/47511e7e/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/protocolPB/OzoneManagerProtocolPB.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/protocolPB/OzoneManagerProtocolPB.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/protocolPB/OzoneManagerProtocolPB.java
index e0879d6..27e8f22 100644
--- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/protocolPB/OzoneManagerProtocolPB.java
+++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/protocolPB/OzoneManagerProtocolPB.java
@@ -18,9 +18,11 @@
 package org.apache.hadoop.ozone.om.protocolPB;
 
 import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.ozone.om.OMConfigKeys;
 import org.apache.hadoop.ipc.ProtocolInfo;
 import org.apache.hadoop.ozone.protocol.proto
     .OzoneManagerProtocolProtos.OzoneManagerService;
+import org.apache.hadoop.security.KerberosInfo;
 
 /**
  * Protocol used to communicate with OM.
@@ -28,6 +30,8 @@ import org.apache.hadoop.ozone.protocol.proto
 @ProtocolInfo(protocolName =
     "org.apache.hadoop.ozone.protocol.OzoneManagerProtocol",
     protocolVersion = 1)
+@KerberosInfo(
+    serverPrincipal = OMConfigKeys.OZONE_OM_KERBEROS_PRINCIPAL_KEY)
 @InterfaceAudience.Private
 public interface OzoneManagerProtocolPB
     extends OzoneManagerService.BlockingInterface {
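
The @KerberosInfo annotation used in both protocol interfaces above is how Hadoop RPC learns which configuration key names the server's principal, so a client can verify it is talking to the right service identity before exchanging data. A minimal compile-only sketch on a hypothetical protocol; "my.service.kerberos.principal" is a made-up key, not a real Ozone config name:

import org.apache.hadoop.security.KerberosInfo;

// Tagging the RPC interface ties authentication to the principal named by
// the given configuration key, rather than hard-coding a principal string.
@KerberosInfo(serverPrincipal = "my.service.kerberos.principal")
public interface MySecureProtocol {
  long versionID = 1L;
  String ping(String message);
}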

http://git-wip-us.apache.org/repos/asf/hadoop/blob/47511e7e/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/MiniOzoneClusterImpl.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/MiniOzoneClusterImpl.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/MiniOzoneClusterImpl.java
index 6b341d3..113fea8 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/MiniOzoneClusterImpl.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/MiniOzoneClusterImpl.java
@@ -425,7 +425,8 @@ public final class MiniOzoneClusterImpl implements MiniOzoneCluster {
      *
      * @throws IOException
      */
-    private OzoneManager createOM() throws IOException {
+    private OzoneManager createOM()
+        throws IOException, AuthenticationException {
       configureOM();
       OMStorage omStore = new OMStorage(conf);
       initializeOmStorage(omStore);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/47511e7e/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestSecureOzoneCluster.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestSecureOzoneCluster.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestSecureOzoneCluster.java
index 9c430ad..5c96067 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestSecureOzoneCluster.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestSecureOzoneCluster.java
@@ -26,6 +26,7 @@ import java.nio.file.Path;
 import java.nio.file.Paths;
 import java.util.Properties;
 import java.util.UUID;
+import java.util.concurrent.Callable;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
@@ -35,15 +36,22 @@ import org.apache.hadoop.hdds.scm.ScmInfo;
 import org.apache.hadoop.hdds.scm.server.SCMStorage;
 import org.apache.hadoop.hdds.scm.server.StorageContainerManager;
 import org.apache.hadoop.minikdc.MiniKdc;
+import org.apache.hadoop.ozone.om.OMConfigKeys;
+import org.apache.hadoop.ozone.om.OMStorage;
+import org.apache.hadoop.ozone.om.OzoneManager;
 import org.apache.hadoop.security.KerberosAuthException;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.security.authentication.client.AuthenticationException;
 import org.apache.hadoop.security.authentication.util.KerberosUtil;
 import org.apache.hadoop.test.GenericTestUtils;
+import org.apache.hadoop.test.GenericTestUtils.LogCapturer;
 import org.apache.hadoop.test.LambdaTestUtils;
+import org.junit.After;
 import org.junit.Assert;
 import org.junit.Before;
+import org.junit.Rule;
 import org.junit.Test;
+import org.junit.rules.Timeout;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
@@ -56,13 +64,23 @@ public final class TestSecureOzoneCluster {
   private Logger LOGGER = LoggerFactory
       .getLogger(TestSecureOzoneCluster.class);
 
+  @Rule
+  public Timeout timeout = new Timeout(80000);
+
   private MiniKdc miniKdc;
   private OzoneConfiguration conf;
   private File workDir;
   private static Properties securityProperties;
   private File scmKeytab;
   private File spnegoKeytab;
+  private File omKeyTab;
   private String curUser;
+  private StorageContainerManager scm;
+  private OzoneManager om;
+
+  private static String clusterId;
+  private static String scmId;
+  private static String omId;
 
   @Before
   public void init() {
@@ -71,6 +89,10 @@ public final class TestSecureOzoneCluster {
       startMiniKdc();
       setSecureConfig(conf);
       createCredentialsInKDC(conf, miniKdc);
+
+      clusterId = UUID.randomUUID().toString();
+      scmId = UUID.randomUUID().toString();
+      omId = UUID.randomUUID().toString();
     } catch (IOException e) {
       LOGGER.error("Failed to initialize TestSecureOzoneCluster", e);
     } catch (Exception e) {
@@ -78,12 +100,31 @@ public final class TestSecureOzoneCluster {
     }
   }
 
+  @After
+  public void stop() {
+    try {
+      stopMiniKdc();
+      if (scm != null) {
+        scm.stop();
+      }
+      if (om != null) {
+        om.stop();
+      }
+    } catch (Exception e) {
+      LOGGER.error("Failed to stop TestSecureOzoneCluster", e);
+    }
+  }
+
   private void createCredentialsInKDC(Configuration conf, MiniKdc miniKdc)
       throws Exception {
     createPrincipal(scmKeytab,
         conf.get(ScmConfigKeys.OZONE_SCM_KERBEROS_PRINCIPAL_KEY));
     createPrincipal(spnegoKeytab,
-        conf.get(ScmConfigKeys.SCM_WEB_AUTHENTICATION_KERBEROS_PRINCIPAL_KEY));
+        conf.get(ScmConfigKeys.SCM_WEB_AUTHENTICATION_KERBEROS_PRINCIPAL_KEY),
+        conf.get(OMConfigKeys.OZONE_OM_WEB_AUTHENTICATION_KERBEROS_PRINCIPAL_KEY));
+    createPrincipal(omKeyTab,
+        conf.get(OMConfigKeys
+            .OZONE_OM_WEB_AUTHENTICATION_KERBEROS_PRINCIPAL_KEY));
   }
 
   private void createPrincipal(File keytab, String... principal)
@@ -99,6 +140,10 @@ public final class TestSecureOzoneCluster {
     miniKdc.start();
   }
 
+  private void stopMiniKdc() throws Exception {
+    miniKdc.stop();
+  }
+
   private void setSecureConfig(Configuration conf) throws IOException {
     conf.setBoolean(OZONE_SECURITY_ENABLED_KEY, true);
     String host = KerberosUtil.getLocalHostName();
@@ -114,59 +159,56 @@ public final class TestSecureOzoneCluster {
     conf.set(ScmConfigKeys.SCM_WEB_AUTHENTICATION_KERBEROS_PRINCIPAL_KEY,
         "HTTP_SCM/" + host + "@" + realm);
 
+    conf.set(OMConfigKeys.OZONE_OM_KERBEROS_PRINCIPAL_KEY,
+        "om/" + host + "@" + realm);
+    conf.set(OMConfigKeys.OZONE_OM_WEB_AUTHENTICATION_KERBEROS_PRINCIPAL_KEY,
+        "HTTP_KSM/" + host + "@" + realm);
+
     scmKeytab = new File(workDir, "scm.keytab");
     spnegoKeytab = new File(workDir, "http.keytab");
+    omKeyTab = new File(workDir, "om.keytab");
 
     conf.set(ScmConfigKeys.OZONE_SCM_KERBEROS_KEYTAB_FILE_KEY,
         scmKeytab.getAbsolutePath());
     conf.set(ScmConfigKeys.SCM_WEB_AUTHENTICATION_KERBEROS_KEYTAB_FILE_KEY,
         spnegoKeytab.getAbsolutePath());
+    conf.set(OMConfigKeys.OZONE_OM_KERBEROS_KEYTAB_FILE_KEY,
+        omKeyTab.getAbsolutePath());
 
   }
 
   @Test
   public void testSecureScmStartupSuccess() throws Exception {
+
+    initSCM();
+    scm = StorageContainerManager.createSCM(null, conf);
+    //Reads the SCM Info from SCM instance
+    ScmInfo scmInfo = scm.getClientProtocolServer().getScmInfo();
+    Assert.assertEquals(clusterId, scmInfo.getClusterId());
+    Assert.assertEquals(scmId, scmInfo.getScmId());
+  }
+
+  private void initSCM()
+      throws IOException, AuthenticationException {
     final String path = GenericTestUtils
         .getTempPath(UUID.randomUUID().toString());
     Path scmPath = Paths.get(path, "scm-meta");
     conf.set(OzoneConfigKeys.OZONE_METADATA_DIRS, scmPath.toString());
     conf.setBoolean(OzoneConfigKeys.OZONE_ENABLED, true);
     SCMStorage scmStore = new SCMStorage(conf);
-    String clusterId = UUID.randomUUID().toString();
-    String scmId = UUID.randomUUID().toString();
     scmStore.setClusterId(clusterId);
     scmStore.setScmId(scmId);
     // writes the version file properties
     scmStore.initialize();
-    StorageContainerManager scm = StorageContainerManager.createSCM(null, conf);
-    //Reads the SCM Info from SCM instance
-    ScmInfo scmInfo = scm.getClientProtocolServer().getScmInfo();
-    Assert.assertEquals(clusterId, scmInfo.getClusterId());
-    Assert.assertEquals(scmId, scmInfo.getScmId());
   }
 
   @Test
   public void testSecureScmStartupFailure() throws Exception {
-    final String path = GenericTestUtils
-        .getTempPath(UUID.randomUUID().toString());
-    Path scmPath = Paths.get(path, "scm-meta");
-
-    OzoneConfiguration conf = new OzoneConfiguration();
-    conf.setBoolean(OZONE_SECURITY_ENABLED_KEY, true);
-    conf.set(OzoneConfigKeys.OZONE_METADATA_DIRS, scmPath.toString());
-    conf.setBoolean(OzoneConfigKeys.OZONE_ENABLED, true);
-    conf.set(ScmConfigKeys.OZONE_SCM_KERBEROS_PRINCIPAL_KEY,
-        "scm@" + miniKdc.getRealm());
+    initSCM();
+    conf.set(ScmConfigKeys.OZONE_SCM_KERBEROS_KEYTAB_FILE_KEY, "");
     conf.set(CommonConfigurationKeysPublic.HADOOP_SECURITY_AUTHENTICATION,
         "kerberos");
 
-    SCMStorage scmStore = new SCMStorage(conf);
-    String clusterId = UUID.randomUUID().toString();
-    String scmId = UUID.randomUUID().toString();
-    scmStore.setClusterId(clusterId);
-    scmStore.setScmId(scmId);
-    // writes the version file properties
-    scmStore.initialize();
     LambdaTestUtils.intercept(IOException.class,
         "Running in secure mode, but config doesn't have a keytab",
         () -> {
@@ -178,28 +220,82 @@ public final class TestSecureOzoneCluster {
     conf.set(ScmConfigKeys.OZONE_SCM_KERBEROS_KEYTAB_FILE_KEY,
         "/etc/security/keytabs/scm.keytab");
 
+    testCommonKerberosFailures(
+        () -> StorageContainerManager.createSCM(null, conf));
+
+  }
+
+  private void testCommonKerberosFailures(Callable callable) throws Exception {
     LambdaTestUtils.intercept(KerberosAuthException.class, "failure "
-            + "to login: for principal:",
-        () -> {
-          StorageContainerManager.createSCM(null, conf);
-        });
+        + "to login: for principal:", callable);
     conf.set(CommonConfigurationKeysPublic.HADOOP_SECURITY_AUTHENTICATION,
         "OAuth2");
 
     LambdaTestUtils.intercept(IllegalArgumentException.class, "Invalid"
             + " attribute value for hadoop.security.authentication of OAuth2",
-        () -> {
-          StorageContainerManager.createSCM(null, conf);
-        });
+        callable);
 
     conf.set(CommonConfigurationKeysPublic.HADOOP_SECURITY_AUTHENTICATION,
         "KERBEROS_SSL");
     LambdaTestUtils.intercept(AuthenticationException.class,
-        "KERBEROS_SSL authentication method not support.",
-        () -> {
-          StorageContainerManager.createSCM(null, conf);
-        });
+        "KERBEROS_SSL authentication method not",
+        callable);
+  }
 
+  /**
+   * Tests the secure KSM Initialization Failure.
+   *
+   * @throws IOException
+   */
+  @Test
+  public void testSecureKsmInitializationFailure() throws Exception {
+    initSCM();
+    // Create a secure SCM instance as om client will connect to it
+    scm = StorageContainerManager.createSCM(null, conf);
+
+    final String path = GenericTestUtils
+        .getTempPath(UUID.randomUUID().toString());
+    OMStorage ksmStore = new OMStorage(conf);
+    ksmStore.setClusterId("testClusterId");
+    ksmStore.setScmId("testScmId");
+    // writes the version file properties
+    ksmStore.initialize();
+    conf.set(OMConfigKeys.OZONE_OM_KERBEROS_PRINCIPAL_KEY,
+        "non-existent-user@EXAMPLE.com");
+    testCommonKerberosFailures(() -> OzoneManager.createOm(null, conf));
+  }
+
+  /**
+   * Tests the secure KSM Initialization success.
+   *
+   * @throws IOException
+   */
+  @Test
+  public void testSecureKsmInitializationSuccess() throws Exception {
+    initSCM();
+    // Create a secure SCM instance as om client will connect to it
+    scm = StorageContainerManager.createSCM(null, conf);
+    LogCapturer logs = LogCapturer.captureLogs(OzoneManager.LOG);
+    GenericTestUtils
+        .setLogLevel(LoggerFactory.getLogger(OzoneManager.class.getName()),
+            org.slf4j.event.Level.INFO);
+
+    final String path = GenericTestUtils
+        .getTempPath(UUID.randomUUID().toString());
+    Path metaDirPath = Paths.get(path, "om-meta");
+
+    OMStorage omStore = new OMStorage(conf);
+    omStore.setClusterId("testClusterId");
+    omStore.setScmId("testScmId");
+    // writes the version file properties
+    omStore.initialize();
+    try {
+      om = OzoneManager.createOm(null, conf);
+    } catch (Exception ex) {
+      // Expects timeout failure from scmClient in KSM but KSM user login via
+      // kerberos should succeed
+      Assert.assertTrue(logs.getOutput().contains("KSM login successful"));
+    }
   }
 
 }
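
A note on the refactoring above: the repeated failing action is hoisted into a single Callable so each LambdaTestUtils.intercept call reuses it. A minimal standalone sketch of the same pattern (the failing action here is a placeholder, not a real createSCM/createOm call):

import java.util.concurrent.Callable;
import org.apache.hadoop.test.LambdaTestUtils;

public class InterceptPatternSketch {
  public static void main(String[] args) throws Exception {
    // Shared action under test; a stand-in for the SCM/OM factory calls.
    Callable<Object> failingAction = () -> {
      throw new IllegalArgumentException(
          "Invalid attribute value for hadoop.security.authentication");
    };
    // Each assertion reuses the same Callable instead of repeating a lambda.
    LambdaTestUtils.intercept(IllegalArgumentException.class,
        "Invalid attribute value", failingAction);
    System.out.println("intercept matched the expected failure");
  }
}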

http://git-wip-us.apache.org/repos/asf/hadoop/blob/47511e7e/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OzoneManager.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OzoneManager.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OzoneManager.java
index 6ea0fe7..db14063d 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OzoneManager.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OzoneManager.java
@@ -47,6 +47,7 @@ import org.apache.hadoop.ozone.audit.AuditLogger;
 import org.apache.hadoop.ozone.audit.AuditLoggerType;
 import org.apache.hadoop.ozone.audit.AuditMessage;
 import org.apache.hadoop.ozone.audit.OMAction;
+import org.apache.hadoop.ozone.OzoneConfigKeys;
 import org.apache.hadoop.ozone.common.Storage.StorageState;
 import org.apache.hadoop.ozone.om.exceptions.OMException;
 import org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes;
@@ -63,7 +64,10 @@ import org.apache.hadoop.ozone.om.protocolPB.OzoneManagerProtocolPB;
 import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OzoneAclInfo;
 import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.ServicePort;
 import org.apache.hadoop.ozone.protocolPB.OzoneManagerProtocolServerSideTranslatorPB;
+import org.apache.hadoop.security.SecurityUtil;
 import org.apache.hadoop.security.UserGroupInformation;
+import org.apache.hadoop.security.UserGroupInformation.AuthenticationMethod;
+import org.apache.hadoop.security.authentication.client.AuthenticationException;
 import org.apache.hadoop.util.GenericOptionsParser;
 import org.apache.hadoop.util.StringUtils;
 import org.apache.logging.log4j.Level;
@@ -91,6 +95,10 @@ import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_ADDRESS_KEY;
 import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_HANDLER_COUNT_DEFAULT;
 import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_HANDLER_COUNT_KEY;
 import static org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OzoneManagerService.newReflectiveBlockingService;
+import static org.apache.hadoop.ozone.om.OMConfigKeys
+    .OZONE_OM_KERBEROS_KEYTAB_FILE_KEY;
+import static org.apache.hadoop.ozone.om.OMConfigKeys
+    .OZONE_OM_KERBEROS_PRINCIPAL_KEY;
 import static org.apache.hadoop.util.ExitUtil.terminate;
 
 /**
@@ -99,7 +107,7 @@ import static org.apache.hadoop.util.ExitUtil.terminate;
 @InterfaceAudience.LimitedPrivate({"HDFS", "CBLOCK", "OZONE", "HBASE"})
 public final class OzoneManager extends ServiceRuntimeInfoImpl
     implements OzoneManagerProtocol, OMMXBean {
-  private static final Logger LOG =
+  public static final Logger LOG =
       LoggerFactory.getLogger(OzoneManager.class);
 
   private static final AuditLogger AUDIT =
@@ -127,14 +135,16 @@ public final class OzoneManager extends ServiceRuntimeInfoImpl
     Preconditions.checkNotNull(conf);
     configuration = conf;
     omStorage = new OMStorage(conf);
-    scmBlockClient = getScmBlockClient(configuration);
-    scmContainerClient = getScmContainerClient(configuration);
     if (omStorage.getState() != StorageState.INITIALIZED) {
       throw new OMException("OM not initialized.",
           ResultCodes.OM_NOT_INITIALIZED);
     }
 
+    scmContainerClient = getScmContainerClient(configuration);
+
     // verifies that the SCM info in the OM Version file is correct.
+    scmBlockClient = getScmBlockClient(configuration);
+
     ScmInfo scmInfo = scmBlockClient.getScmInfo();
     if (!(scmInfo.getClusterId().equals(omStorage.getClusterID()) && scmInfo
         .getScmId().equals(omStorage.getScmId()))) {
@@ -167,6 +177,35 @@ public final class OzoneManager extends ServiceRuntimeInfoImpl
   }
 
   /**
+   * Logs in the KSM service user if security and Kerberos are enabled.
+   *
+   * @param conf OzoneConfiguration
+   * @throws IOException if the keytab login fails
+   * @throws AuthenticationException if the configured method is not Kerberos
+   */
+  private static void loginKSMUser(OzoneConfiguration conf)
+      throws IOException, AuthenticationException {
+
+    if (SecurityUtil.getAuthenticationMethod(conf).equals
+        (AuthenticationMethod.KERBEROS)) {
+      LOG.debug("Ozone security is enabled. Attempting login for KSM user. "
+              + "Principal: {},keytab: {}", conf.get
+              (OZONE_OM_KERBEROS_PRINCIPAL_KEY),
+          conf.get(OZONE_OM_KERBEROS_KEYTAB_FILE_KEY));
+
+      UserGroupInformation.setConfiguration(conf);
+
+      InetSocketAddress socAddr = getOmAddress(conf);
+      SecurityUtil.login(conf, OZONE_OM_KERBEROS_KEYTAB_FILE_KEY,
+          OZONE_OM_KERBEROS_PRINCIPAL_KEY, socAddr.getHostName());
+    } else {
+      throw new AuthenticationException(
+          SecurityUtil.getAuthenticationMethod(conf)
+          + " authentication method not supported. KSM user login failed.");
+    }
+    LOG.info("KSM login successful.");
+  }
+
+  /**
    * Create a scm block client, used by putKey() and getKey().
    *
    * @return {@link ScmBlockLocationProtocol}
@@ -283,15 +322,16 @@ public final class OzoneManager extends ServiceRuntimeInfoImpl
    * @param argv Command line arguments
    * @param conf OzoneConfiguration
    * @return OM instance
-   * @throws IOException in case OM instance creation fails.
+   * @throws IOException in case OM instance creation fails.
+   * @throws AuthenticationException in case the Kerberos login fails.
    */
   @VisibleForTesting
   public static OzoneManager createOm(
-      String[] argv, OzoneConfiguration conf) throws IOException {
+      String[] argv, OzoneConfiguration conf)
+      throws IOException, AuthenticationException {
     return createOm(argv, conf, false);
   }
 
-
   /**
    * Constructs OM instance based on command line arguments.
    *
@@ -299,10 +339,12 @@ public final class OzoneManager extends ServiceRuntimeInfoImpl
    * @param conf OzoneConfiguration
    * @param printBanner if true then log a verbose startup message.
    * @return OM instance
-   * @throws IOException in case OM instance creation fails.
+   * @throws IOException in case OM instance creation fails.
+   * @throws AuthenticationException in case the Kerberos login fails.
    */
   private static OzoneManager createOm(String[] argv,
-      OzoneConfiguration conf, boolean printBanner) throws IOException {
+      OzoneConfiguration conf, boolean printBanner)
+      throws IOException, AuthenticationException {
     if (!isHddsEnabled(conf)) {
       System.err.println("OM cannot be started in secure mode or when " +
           OZONE_ENABLED + " is set to false");
@@ -314,6 +356,10 @@ public final class OzoneManager extends ServiceRuntimeInfoImpl
       terminate(1);
       return null;
     }
+    // Authenticate KSM if security is enabled
+    if (conf.getBoolean(OzoneConfigKeys.OZONE_SECURITY_ENABLED_KEY, true)) {
+      loginKSMUser(conf);
+    }
     switch (startOpt) {
     case CREATEOBJECTSTORE:
       if (printBanner) {
@@ -450,7 +496,12 @@ public final class OzoneManager extends ServiceRuntimeInfoImpl
     metadataManager.start();
     keyManager.start();
     omRpcServer.start();
-    httpServer.start();
+    try {
+      httpServer.start();
+    } catch (Exception ex) {
+      // Allow OM to start as Http Server failure is not fatal.
+      LOG.error("OM HttpServer failed to start.", ex);
+    }
     registerMXBean();
     setStartTime();
   }
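
For readers following the new login path: it reduces to pointing UserGroupInformation at the configuration and calling SecurityUtil.login with the two new OM keys. A hedged standalone sketch, assuming the key constants resolve to the literals below; the principal, keytab path, host, and port are placeholders:

import java.net.InetSocketAddress;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.security.SecurityUtil;
import org.apache.hadoop.security.UserGroupInformation;

public class OmKerberosLoginSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    conf.set("hadoop.security.authentication", "kerberos");
    // Assumption: the OZONE_OM_KERBEROS_* constants resolve to these literals.
    conf.set("ozone.om.kerberos.principal", "om/_HOST@EXAMPLE.COM");
    conf.set("ozone.om.kerberos.keytab.file",
        "/etc/security/keytabs/om.keytab");

    UserGroupInformation.setConfiguration(conf);
    // _HOST in the principal is substituted with the service hostname.
    InetSocketAddress addr = new InetSocketAddress("om.example.com", 9862);
    SecurityUtil.login(conf, "ozone.om.kerberos.keytab.file",
        "ozone.om.kerberos.principal", addr.getHostName());
    System.out.println("Logged in as "
        + UserGroupInformation.getLoginUser().getUserName());
  }
}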

http://git-wip-us.apache.org/repos/asf/hadoop/blob/47511e7e/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OzoneManagerHttpServer.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OzoneManagerHttpServer.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OzoneManagerHttpServer.java
index bd6ab69..be12fa8 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OzoneManagerHttpServer.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OzoneManagerHttpServer.java
@@ -18,7 +18,6 @@
 package org.apache.hadoop.ozone.om;
 
 import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.ozone.OzoneConfigKeys;
 import org.apache.hadoop.ozone.OzoneConsts;
 import org.apache.hadoop.hdds.server.BaseHttpServer;
 
@@ -65,11 +64,11 @@ public class OzoneManagerHttpServer extends BaseHttpServer {
   }
 
   @Override protected String getKeytabFile() {
-    return OMConfigKeys.OZONE_OM_KEYTAB_FILE;
+    return OMConfigKeys.OZONE_OM_WEB_AUTHENTICATION_KERBEROS_KEYTAB_FILE;
   }
 
   @Override protected String getSpnegoPrincipal() {
-    return OzoneConfigKeys.OZONE_SCM_WEB_AUTHENTICATION_KERBEROS_PRINCIPAL;
+    return OMConfigKeys.OZONE_OM_WEB_AUTHENTICATION_KERBEROS_PRINCIPAL_KEY;
   }
 
   @Override protected String getEnabledKey() {




[02/50] [abbrv] hadoop git commit: HADOOP-15755. StringUtils#createStartupShutdownMessage throws NPE when args is null. Contributed by Lokesh Jain and Dinesh Chitlangia

Posted by xy...@apache.org.
HADOOP-15755. StringUtils#createStartupShutdownMessage throws NPE when args is null. Contributed by Lokesh Jain and Dinesh Chitlangia


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/e71f61ec
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/e71f61ec
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/e71f61ec

Branch: refs/heads/HDDS-4
Commit: e71f61ecb87e04727a5a76e578a75714c9db6706
Parents: 5896372
Author: Jason Lowe <jl...@apache.org>
Authored: Tue Sep 18 15:55:09 2018 -0500
Committer: Jason Lowe <jl...@apache.org>
Committed: Tue Sep 18 15:57:33 2018 -0500

----------------------------------------------------------------------
 .../src/main/java/org/apache/hadoop/util/StringUtils.java   | 2 +-
 .../test/java/org/apache/hadoop/util/TestStringUtils.java   | 9 +++++++++
 2 files changed, 10 insertions(+), 1 deletion(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/e71f61ec/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/StringUtils.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/StringUtils.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/StringUtils.java
index 3db805f..f49698c 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/StringUtils.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/StringUtils.java
@@ -743,7 +743,7 @@ public class StringUtils {
     return toStartupShutdownString("STARTUP_MSG: ", new String[] {
         "Starting " + classname,
         "  host = " + hostname,
-        "  args = " + Arrays.asList(args),
+        "  args = " + (args != null ? Arrays.asList(args) : new ArrayList<>()),
         "  version = " + VersionInfo.getVersion(),
         "  classpath = " + System.getProperty("java.class.path"),
         "  build = " + VersionInfo.getUrl() + " -r "

http://git-wip-us.apache.org/repos/asf/hadoop/blob/e71f61ec/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestStringUtils.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestStringUtils.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestStringUtils.java
index 3fdc1bb..f05b589 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestStringUtils.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestStringUtils.java
@@ -503,6 +503,15 @@ public class TestStringUtils extends UnitTestcaseTimeLimit {
         escapedStr, StringUtils.escapeHTML(htmlStr));
   }
 
+  @Test
+  public void testCreateStartupShutdownMessage() {
+    // Pass null args; the method must still return a string beginning
+    // with "STARTUP_MSG"
+    String msg = StringUtils.createStartupShutdownMessage(
+        this.getClass().getName(), "test.host", null);
+    assertTrue(msg.startsWith("STARTUP_MSG:"));
+  }
+
   // Benchmark for StringUtils split
   public static void main(String []args) {
     final String TO_SPLIT = "foo,bar,baz,blah,blah";




[14/50] [abbrv] hadoop git commit: HDDS-506. Fields in AllocateScmBlockResponseProto should be optional. Contributed by Arpit Agarwal.

Posted by xy...@apache.org.
HDDS-506. Fields in AllocateScmBlockResponseProto should be optional. Contributed by Arpit Agarwal.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/e435e12f
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/e435e12f
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/e435e12f

Branch: refs/heads/HDDS-4
Commit: e435e12f1fd00a5f78621ec1933a60171fc224e6
Parents: fb85351
Author: Xiaoyu Yao <xy...@apache.org>
Authored: Tue Sep 18 23:21:29 2018 -0700
Committer: Xiaoyu Yao <xy...@apache.org>
Committed: Tue Sep 18 23:21:29 2018 -0700

----------------------------------------------------------------------
 .../common/src/main/proto/ScmBlockLocationProtocol.proto       | 6 +++---
 .../org/apache/hadoop/hdds/scm/exceptions/SCMException.java    | 3 ++-
 .../org/apache/hadoop/hdds/scm/pipelines/PipelineSelector.java | 4 ++++
 3 files changed, 9 insertions(+), 4 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/e435e12f/hadoop-hdds/common/src/main/proto/ScmBlockLocationProtocol.proto
----------------------------------------------------------------------
diff --git a/hadoop-hdds/common/src/main/proto/ScmBlockLocationProtocol.proto b/hadoop-hdds/common/src/main/proto/ScmBlockLocationProtocol.proto
index 53f408a..9b4e0ac 100644
--- a/hadoop-hdds/common/src/main/proto/ScmBlockLocationProtocol.proto
+++ b/hadoop-hdds/common/src/main/proto/ScmBlockLocationProtocol.proto
@@ -104,9 +104,9 @@ message AllocateScmBlockResponseProto {
     unknownFailure = 4;
   }
   required Error errorCode = 1;
-  required BlockID blockID = 2;
-  required hadoop.hdds.Pipeline pipeline = 3;
-  required bool createContainer = 4;
+  optional BlockID blockID = 2;
+  optional hadoop.hdds.Pipeline pipeline = 3;
+  optional bool createContainer = 4;
   optional string errorMessage = 5;
 }
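
With the fields optional, an error response no longer has to carry dummy values, and callers should probe field presence before reading. A standalone stand-in using plain Java rather than the generated protobuf classes (which expose hasBlockID()/hasPipeline() accessors for the same purpose); requires Java 16+ for records:

import java.util.Optional;

public class OptionalFieldSketch {
  enum Error { success, errorNotEnoughSpace, unknownFailure }

  // Stand-in for AllocateScmBlockResponseProto after the change.
  record AllocateResponse(Error errorCode, Optional<String> blockId) { }

  public static void main(String[] args) {
    AllocateResponse resp =
        new AllocateResponse(Error.errorNotEnoughSpace, Optional.empty());
    // With required fields, this error response could not even be built;
    // with optional fields, only the success path must populate them.
    if (resp.errorCode() == Error.success && resp.blockId().isPresent()) {
      System.out.println("allocated block " + resp.blockId().get());
    } else {
      System.out.println("allocation failed: " + resp.errorCode());
    }
  }
}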
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/e435e12f/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/exceptions/SCMException.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/exceptions/SCMException.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/exceptions/SCMException.java
index 87a29e3..dae0b06 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/exceptions/SCMException.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/exceptions/SCMException.java
@@ -118,6 +118,7 @@ public class SCMException extends IOException {
     SCM_NOT_INITIALIZED,
     DUPLICATE_DATANODE,
     NO_SUCH_DATANODE,
-    NO_REPLICA_FOUND
+    NO_REPLICA_FOUND,
+    FAILED_TO_FIND_ACTIVE_PIPELINE
   }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/e435e12f/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipelines/PipelineSelector.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipelines/PipelineSelector.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipelines/PipelineSelector.java
index c9f51f7..82946bd 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipelines/PipelineSelector.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipelines/PipelineSelector.java
@@ -62,6 +62,7 @@ import java.util.concurrent.TimeUnit;
 
 import static org.apache.hadoop.hdds.scm.exceptions.SCMException.ResultCodes
     .FAILED_TO_CHANGE_PIPELINE_STATE;
+import static org.apache.hadoop.hdds.scm.exceptions.SCMException.ResultCodes.FAILED_TO_FIND_ACTIVE_PIPELINE;
 import static org.apache.hadoop.hdds.server
         .ServerUtils.getOzoneMetaDirPath;
 import static org.apache.hadoop.ozone
@@ -285,6 +286,9 @@ public class PipelineSelector {
       // try to return a pipeline from already allocated pipelines
       PipelineID pipelineId =
               manager.getPipeline(replicationFactor, replicationType);
+      if (pipelineId == null) {
+        throw new SCMException(FAILED_TO_FIND_ACTIVE_PIPELINE);
+      }
       pipeline = pipelineMap.get(pipelineId);
       Preconditions.checkArgument(pipeline.getLifeCycleState() ==
               LifeCycleState.OPEN);
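
The null check above turns a latent NullPointerException (pipelineMap.get(null) followed by a precondition on the result) into a typed SCMException. A minimal standalone sketch of the fail-fast pattern, with the pipeline lookup stubbed out:

import java.io.IOException;

public class FailFastSketch {
  static class SCMException extends IOException {
    SCMException(String resultCode) { super(resultCode); }
  }

  // Stub for manager.getPipeline(...); may legitimately return null.
  static String getPipeline() { return null; }

  public static void main(String[] args) {
    try {
      String pipelineId = getPipeline();
      if (pipelineId == null) {
        // Surface a typed, actionable error at the point of failure.
        throw new SCMException("FAILED_TO_FIND_ACTIVE_PIPELINE");
      }
      System.out.println("using pipeline " + pipelineId);
    } catch (IOException e) {
      System.out.println("allocation failed: " + e.getMessage());
    }
  }
}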




[40/50] [abbrv] hadoop git commit: HDDS-394. Rename *Key Apis in DatanodeContainerProtocol to *Block apis. Contributed Dinesh Chitlangia.

Posted by xy...@apache.org.
http://git-wip-us.apache.org/repos/asf/hadoop/blob/096a7160/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/helpers/KeyValueContainerUtil.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/helpers/KeyValueContainerUtil.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/helpers/KeyValueContainerUtil.java
index ed4536f..4f2b3a2 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/helpers/KeyValueContainerUtil.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/helpers/KeyValueContainerUtil.java
@@ -31,7 +31,7 @@ import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos
 import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos
     .ContainerCommandResponseProto;
 import org.apache.hadoop.ozone.container.common.helpers.ContainerUtils;
-import org.apache.hadoop.ozone.container.common.helpers.KeyData;
+import org.apache.hadoop.ozone.container.common.helpers.BlockData;
 import org.apache.hadoop.ozone.container.keyvalue.KeyValueContainerData;
 import org.apache.hadoop.utils.MetadataKeyFilters;
 import org.apache.hadoop.utils.MetadataStore;
@@ -116,7 +116,7 @@ public final class KeyValueContainerUtil {
     File chunksPath = new File(containerData.getChunksPath());
 
     // Close the DB connection and remove the DB handler from cache
-    KeyUtils.removeDB(containerData, conf);
+    BlockUtils.removeDB(containerData, conf);
 
     // Delete the Container MetaData path.
     FileUtils.deleteDirectory(containerMetaDataPath);
@@ -175,16 +175,16 @@ public final class KeyValueContainerUtil {
     }
     kvContainerData.setDbFile(dbFile);
 
-    MetadataStore metadata = KeyUtils.getDB(kvContainerData, config);
+    MetadataStore metadata = BlockUtils.getDB(kvContainerData, config);
     long bytesUsed = 0;
     List<Map.Entry<byte[], byte[]>> liveKeys = metadata
         .getRangeKVs(null, Integer.MAX_VALUE,
             MetadataKeyFilters.getNormalKeyFilter());
     bytesUsed = liveKeys.parallelStream().mapToLong(e-> {
-      KeyData keyData;
+      BlockData blockData;
       try {
-        keyData = KeyUtils.getKeyData(e.getValue());
-        return keyData.getSize();
+        blockData = BlockUtils.getBlockData(e.getValue());
+        return blockData.getSize();
       } catch (IOException ex) {
         return 0L;
       }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/096a7160/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/helpers/SmallFileUtils.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/helpers/SmallFileUtils.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/helpers/SmallFileUtils.java
index df60c60..3495363 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/helpers/SmallFileUtils.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/helpers/SmallFileUtils.java
@@ -69,7 +69,7 @@ public final class SmallFileUtils {
         ContainerProtos.ReadChunkResponseProto.newBuilder();
     readChunkresponse.setChunkData(info.getProtoBufMessage());
     readChunkresponse.setData(ByteString.copyFrom(data));
-    readChunkresponse.setBlockID(msg.getGetSmallFile().getKey().getBlockID());
+    readChunkresponse.setBlockID(msg.getGetSmallFile().getBlock().getBlockID());
 
     ContainerProtos.GetSmallFileResponseProto.Builder getSmallFile =
         ContainerProtos.GetSmallFileResponseProto.newBuilder();

http://git-wip-us.apache.org/repos/asf/hadoop/blob/096a7160/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/impl/BlockManagerImpl.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/impl/BlockManagerImpl.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/impl/BlockManagerImpl.java
new file mode 100644
index 0000000..54c15fb
--- /dev/null
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/impl/BlockManagerImpl.java
@@ -0,0 +1,229 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ *  with the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ *  Unless required by applicable law or agreed to in writing, software
+ *  distributed under the License is distributed on an "AS IS" BASIS,
+ *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ *  See the License for the specific language governing permissions and
+ *  limitations under the License.
+ */
+
+package org.apache.hadoop.ozone.container.keyvalue.impl;
+
+import com.google.common.base.Preconditions;
+import com.google.common.primitives.Longs;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hdds.client.BlockID;
+import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos;
+import org.apache.hadoop.hdds.scm.container.common.helpers.StorageContainerException;
+
+import org.apache.hadoop.ozone.container.common.helpers.BlockData;
+import org.apache.hadoop.ozone.container.keyvalue.KeyValueContainerData;
+import org.apache.hadoop.ozone.container.keyvalue.helpers.BlockUtils;
+import org.apache.hadoop.ozone.container.common.interfaces.Container;
+import org.apache.hadoop.ozone.container.keyvalue.interfaces.BlockManager;
+import org.apache.hadoop.ozone.container.common.utils.ContainerCache;
+import org.apache.hadoop.utils.MetadataStore;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.List;
+import java.util.Map;
+
+import static org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.Result.NO_SUCH_BLOCK;
+
+/**
+ * This class is for performing block related operations on the KeyValue
+ * Container.
+ */
+public class BlockManagerImpl implements BlockManager {
+
+  static final Logger LOG = LoggerFactory.getLogger(BlockManagerImpl.class);
+
+  private Configuration config;
+
+  /**
+   * Constructs a Block Manager.
+   *
+   * @param conf - Ozone configuration
+   */
+  public BlockManagerImpl(Configuration conf) {
+    Preconditions.checkNotNull(conf, "Config cannot be null");
+    this.config = conf;
+  }
+
+  /**
+   * Puts or overwrites a block.
+   *
+   * @param container - Container to which the block needs to be added.
+   * @param data     - BlockData.
+   * @return length of the block.
+   * @throws IOException
+   */
+  public long putBlock(Container container, BlockData data) throws IOException {
+    Preconditions.checkNotNull(data, "BlockData cannot be null for put " +
+        "operation.");
+    Preconditions.checkState(data.getContainerID() >= 0, "Container Id " +
+        "cannot be negative");
+    // We are not locking the block manager since LevelDB serializes all
+    // against a single DB. We rely on DB level locking to avoid conflicts.
+    MetadataStore db = BlockUtils.getDB((KeyValueContainerData) container
+        .getContainerData(), config);
+
+    // This is a post condition that acts as a hint to the user.
+    // Should never fail.
+    Preconditions.checkNotNull(db, "DB cannot be null here");
+    db.put(Longs.toByteArray(data.getLocalID()), data.getProtoBufMessage()
+        .toByteArray());
+
+    // Increment keycount here
+    container.getContainerData().incrKeyCount();
+    return data.getSize();
+  }
+
+  /**
+   * Gets an existing block.
+   *
+   * @param container - Container from which the block needs to be fetched.
+   * @param blockID - BlockID of the block.
+   * @return Block Data.
+   * @throws IOException
+   */
+  public BlockData getBlock(Container container, BlockID blockID)
+      throws IOException {
+    Preconditions.checkNotNull(blockID,
+        "BlockID cannot be null in GetBlock request");
+    Preconditions.checkNotNull(blockID.getContainerID(),
+        "Container name cannot be null");
+
+    KeyValueContainerData containerData = (KeyValueContainerData) container
+        .getContainerData();
+    MetadataStore db = BlockUtils.getDB(containerData, config);
+    // This is a post condition that acts as a hint to the user.
+    // Should never fail.
+    Preconditions.checkNotNull(db, "DB cannot be null here");
+    byte[] kData = db.get(Longs.toByteArray(blockID.getLocalID()));
+    if (kData == null) {
+      throw new StorageContainerException("Unable to find the block.",
+          NO_SUCH_BLOCK);
+    }
+    ContainerProtos.BlockData blockData =
+        ContainerProtos.BlockData.parseFrom(kData);
+    return BlockData.getFromProtoBuf(blockData);
+  }
+
+  /**
+   * Returns the length of the committed block.
+   *
+   * @param container - Container from which the block needs to be fetched.
+   * @param blockID - BlockID of the block.
+   * @return length of the block.
+   * @throws IOException in case the block does not exist in the db.
+   */
+  @Override
+  public long getCommittedBlockLength(Container container, BlockID blockID)
+      throws IOException {
+    KeyValueContainerData containerData = (KeyValueContainerData) container
+        .getContainerData();
+    MetadataStore db = BlockUtils.getDB(containerData, config);
+    // This is a post condition that acts as a hint to the user.
+    // Should never fail.
+    Preconditions.checkNotNull(db, "DB cannot be null here");
+    byte[] kData = db.get(Longs.toByteArray(blockID.getLocalID()));
+    if (kData == null) {
+      throw new StorageContainerException("Unable to find the block.",
+          NO_SUCH_BLOCK);
+    }
+    ContainerProtos.BlockData blockData =
+        ContainerProtos.BlockData.parseFrom(kData);
+    return blockData.getSize();
+  }
+
+  /**
+   * Deletes an existing block.
+   *
+   * @param container - Container from which the block needs to be deleted.
+   * @param blockID - ID of the block.
+   * @throws StorageContainerException
+   */
+  public void deleteBlock(Container container, BlockID blockID) throws
+      IOException {
+    Preconditions.checkNotNull(blockID, "block ID cannot be null.");
+    Preconditions.checkState(blockID.getContainerID() >= 0,
+        "Container ID cannot be negative.");
+    Preconditions.checkState(blockID.getLocalID() >= 0,
+        "Local ID cannot be negative.");
+
+    KeyValueContainerData cData = (KeyValueContainerData) container
+        .getContainerData();
+    MetadataStore db = BlockUtils.getDB(cData, config);
+    // This is a post condition that acts as a hint to the user.
+    // Should never fail.
+    Preconditions.checkNotNull(db, "DB cannot be null here");
+    // Note : There is a race condition here, since get and delete
+    // are not atomic. Leaving it here since the impact is refusing
+    // to delete a Block which might have just gotten inserted after
+    // the get check.
+    byte[] kKey = Longs.toByteArray(blockID.getLocalID());
+    byte[] kData = db.get(kKey);
+    if (kData == null) {
+      throw new StorageContainerException("Unable to find the block.",
+          NO_SUCH_BLOCK);
+    }
+    db.delete(kKey);
+
+    // Decrement blockcount here
+    container.getContainerData().decrKeyCount();
+  }
+
+  /**
+   * List blocks in a container.
+   *
+   * @param container - Container from which blocks need to be listed.
+   * @param startLocalID  - Block to start from, 0 to begin.
+   * @param count    - Number of blocks to return.
+   * @return List of Blocks that match the criteria.
+   */
+  @Override
+  public List<BlockData> listBlock(Container container, long startLocalID, int
+      count) throws IOException {
+    Preconditions.checkNotNull(container, "container cannot be null");
+    Preconditions.checkState(startLocalID >= 0, "startLocal ID cannot be " +
+        "negative");
+    Preconditions.checkArgument(count > 0,
+        "Count must be a positive number.");
+    container.readLock();
+    List<BlockData> result = null;
+    KeyValueContainerData cData = (KeyValueContainerData) container
+        .getContainerData();
+    MetadataStore db = BlockUtils.getDB(cData, config);
+    result = new ArrayList<>();
+    byte[] startKeyInBytes = Longs.toByteArray(startLocalID);
+    List<Map.Entry<byte[], byte[]>> range = db.getSequentialRangeKVs(
+        startKeyInBytes, count, null);
+    for (Map.Entry<byte[], byte[]> entry : range) {
+      BlockData value = BlockUtils.getBlockData(entry.getValue());
+      BlockData data = new BlockData(value.getBlockID());
+      result.add(data);
+    }
+    return result;
+  }
+
+  /**
+   * Shuts down the block manager and its container DB cache.
+   */
+  public void shutdown() {
+    BlockUtils.shutdownCache(ContainerCache.getInstance(config));
+  }
+
+}
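
One detail worth noting in the put/get paths above: block entries are keyed in the container DB by the raw big-endian bytes of the block's local ID (Guava's Longs.toByteArray). A small standalone round-trip check of that encoding, assuming Guava is on the classpath:

import com.google.common.primitives.Longs;
import java.util.Arrays;

public class BlockKeyEncodingSketch {
  public static void main(String[] args) {
    long localId = 42L;
    byte[] key = Longs.toByteArray(localId);      // 8-byte big-endian key
    System.out.println(Arrays.toString(key));     // [0, 0, 0, 0, 0, 0, 0, 42]
    System.out.println(Longs.fromByteArray(key)); // 42
  }
}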

http://git-wip-us.apache.org/repos/asf/hadoop/blob/096a7160/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/impl/KeyManagerImpl.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/impl/KeyManagerImpl.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/impl/KeyManagerImpl.java
deleted file mode 100644
index 6370f8e..0000000
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/impl/KeyManagerImpl.java
+++ /dev/null
@@ -1,227 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- *  with the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- *  Unless required by applicable law or agreed to in writing, software
- *  distributed under the License is distributed on an "AS IS" BASIS,
- *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- *  See the License for the specific language governing permissions and
- *  limitations under the License.
- */
-
-package org.apache.hadoop.ozone.container.keyvalue.impl;
-
-import com.google.common.base.Preconditions;
-import com.google.common.primitives.Longs;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hdds.client.BlockID;
-import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos;
-import org.apache.hadoop.hdds.scm.container.common.helpers.StorageContainerException;
-
-import org.apache.hadoop.ozone.container.keyvalue.KeyValueContainerData;
-import org.apache.hadoop.ozone.container.keyvalue.helpers.KeyUtils;
-import org.apache.hadoop.ozone.container.common.helpers.KeyData;
-import org.apache.hadoop.ozone.container.common.interfaces.Container;
-import org.apache.hadoop.ozone.container.keyvalue.interfaces.KeyManager;
-import org.apache.hadoop.ozone.container.common.utils.ContainerCache;
-import org.apache.hadoop.utils.MetadataStore;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import java.io.IOException;
-import java.util.ArrayList;
-import java.util.List;
-import java.util.Map;
-
-import static org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.Result.NO_SUCH_KEY;
-
-/**
- * This class is for performing key related operations on the KeyValue
- * Container.
- */
-public class KeyManagerImpl implements KeyManager {
-
-  static final Logger LOG = LoggerFactory.getLogger(KeyManagerImpl.class);
-
-  private Configuration config;
-
-  /**
-   * Constructs a key Manager.
-   *
-   * @param conf - Ozone configuration
-   */
-  public KeyManagerImpl(Configuration conf) {
-    Preconditions.checkNotNull(conf, "Config cannot be null");
-    this.config = conf;
-  }
-
-  /**
-   * Puts or overwrites a key.
-   *
-   * @param container - Container for which key need to be added.
-   * @param data     - Key Data.
-   * @return length of the key.
-   * @throws IOException
-   */
-  public long putKey(Container container, KeyData data) throws IOException {
-    Preconditions.checkNotNull(data, "KeyData cannot be null for put " +
-        "operation.");
-    Preconditions.checkState(data.getContainerID() >= 0, "Container Id " +
-        "cannot be negative");
-    // We are not locking the key manager since LevelDb serializes all actions
-    // against a single DB. We rely on DB level locking to avoid conflicts.
-    MetadataStore db = KeyUtils.getDB((KeyValueContainerData) container
-        .getContainerData(), config);
-
-    // This is a post condition that acts as a hint to the user.
-    // Should never fail.
-    Preconditions.checkNotNull(db, "DB cannot be null here");
-    db.put(Longs.toByteArray(data.getLocalID()), data.getProtoBufMessage()
-        .toByteArray());
-
-    // Increment keycount here
-    container.getContainerData().incrKeyCount();
-    return data.getSize();
-  }
-
-  /**
-   * Gets an existing key.
-   *
-   * @param container - Container from which key need to be get.
-   * @param blockID - BlockID of the key.
-   * @return Key Data.
-   * @throws IOException
-   */
-  public KeyData getKey(Container container, BlockID blockID)
-      throws IOException {
-    Preconditions.checkNotNull(blockID,
-        "BlockID cannot be null in GetKet request");
-    Preconditions.checkNotNull(blockID.getContainerID(),
-        "Container name cannot be null");
-
-    KeyValueContainerData containerData = (KeyValueContainerData) container
-        .getContainerData();
-    MetadataStore db = KeyUtils.getDB(containerData, config);
-    // This is a post condition that acts as a hint to the user.
-    // Should never fail.
-    Preconditions.checkNotNull(db, "DB cannot be null here");
-    byte[] kData = db.get(Longs.toByteArray(blockID.getLocalID()));
-    if (kData == null) {
-      throw new StorageContainerException("Unable to find the key.",
-          NO_SUCH_KEY);
-    }
-    ContainerProtos.KeyData keyData = ContainerProtos.KeyData.parseFrom(kData);
-    return KeyData.getFromProtoBuf(keyData);
-  }
-
-  /**
-   * Returns the length of the committed block.
-   *
-   * @param container - Container from which key need to be get.
-   * @param blockID - BlockID of the key.
-   * @return length of the block.
-   * @throws IOException in case, the block key does not exist in db.
-   */
-  @Override
-  public long getCommittedBlockLength(Container container, BlockID blockID)
-      throws IOException {
-    KeyValueContainerData containerData = (KeyValueContainerData) container
-        .getContainerData();
-    MetadataStore db = KeyUtils.getDB(containerData, config);
-    // This is a post condition that acts as a hint to the user.
-    // Should never fail.
-    Preconditions.checkNotNull(db, "DB cannot be null here");
-    byte[] kData = db.get(Longs.toByteArray(blockID.getLocalID()));
-    if (kData == null) {
-      throw new StorageContainerException("Unable to find the key.",
-          NO_SUCH_KEY);
-    }
-    ContainerProtos.KeyData keyData = ContainerProtos.KeyData.parseFrom(kData);
-    return keyData.getSize();
-  }
-
-  /**
-   * Deletes an existing Key.
-   *
-   * @param container - Container from which key need to be deleted.
-   * @param blockID - ID of the block.
-   * @throws StorageContainerException
-   */
-  public void deleteKey(Container container, BlockID blockID) throws
-      IOException {
-    Preconditions.checkNotNull(blockID, "block ID cannot be null.");
-    Preconditions.checkState(blockID.getContainerID() >= 0,
-        "Container ID cannot be negative.");
-    Preconditions.checkState(blockID.getLocalID() >= 0,
-        "Local ID cannot be negative.");
-
-    KeyValueContainerData cData = (KeyValueContainerData) container
-        .getContainerData();
-    MetadataStore db = KeyUtils.getDB(cData, config);
-    // This is a post condition that acts as a hint to the user.
-    // Should never fail.
-    Preconditions.checkNotNull(db, "DB cannot be null here");
-    // Note : There is a race condition here, since get and delete
-    // are not atomic. Leaving it here since the impact is refusing
-    // to delete a key which might have just gotten inserted after
-    // the get check.
-    byte[] kKey = Longs.toByteArray(blockID.getLocalID());
-    byte[] kData = db.get(kKey);
-    if (kData == null) {
-      throw new StorageContainerException("Unable to find the key.",
-          NO_SUCH_KEY);
-    }
-    db.delete(kKey);
-
-    // Decrement keycount here
-    container.getContainerData().decrKeyCount();
-  }
-
-  /**
-   * List keys in a container.
-   *
-   * @param container - Container from which keys need to be listed.
-   * @param startLocalID  - Key to start from, 0 to begin.
-   * @param count    - Number of keys to return.
-   * @return List of Keys that match the criteria.
-   */
-  @Override
-  public List<KeyData> listKey(Container container, long startLocalID, int
-      count) throws IOException {
-    Preconditions.checkNotNull(container, "container cannot be null");
-    Preconditions.checkState(startLocalID >= 0, "startLocal ID cannot be " +
-        "negative");
-    Preconditions.checkArgument(count > 0,
-        "Count must be a positive number.");
-    container.readLock();
-    List<KeyData> result = null;
-    KeyValueContainerData cData = (KeyValueContainerData) container
-        .getContainerData();
-    MetadataStore db = KeyUtils.getDB(cData, config);
-    result = new ArrayList<>();
-    byte[] startKeyInBytes = Longs.toByteArray(startLocalID);
-    List<Map.Entry<byte[], byte[]>> range = db.getSequentialRangeKVs(
-        startKeyInBytes, count, null);
-    for (Map.Entry<byte[], byte[]> entry : range) {
-      KeyData value = KeyUtils.getKeyData(entry.getValue());
-      KeyData data = new KeyData(value.getBlockID());
-      result.add(data);
-    }
-    return result;
-  }
-
-  /**
-   * Shutdown KeyValueContainerManager.
-   */
-  public void shutdown() {
-    KeyUtils.shutdownCache(ContainerCache.getInstance(config));
-  }
-
-}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/096a7160/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/impl/package-info.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/impl/package-info.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/impl/package-info.java
index 525d51b..564b50e 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/impl/package-info.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/impl/package-info.java
@@ -17,6 +17,5 @@
  */
 package org.apache.hadoop.ozone.container.keyvalue.impl;
 /**
- This package contains chunk manager and key manager implementation for
- keyvalue container type.
- **/
\ No newline at end of file
+ * Chunk manager and block manager implementations for keyvalue container type.
+ */

http://git-wip-us.apache.org/repos/asf/hadoop/blob/096a7160/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/interfaces/BlockManager.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/interfaces/BlockManager.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/interfaces/BlockManager.java
new file mode 100644
index 0000000..35ed22a
--- /dev/null
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/interfaces/BlockManager.java
@@ -0,0 +1,84 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with this
+ * work for additional information regarding copyright ownership.  The ASF
+ * licenses this file to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations under
+ * the License.
+ */
+
+package org.apache.hadoop.ozone.container.keyvalue.interfaces;
+
+import org.apache.hadoop.hdds.client.BlockID;
+import org.apache.hadoop.hdds.scm.container.common.helpers.StorageContainerException;
+import org.apache.hadoop.ozone.container.common.helpers.BlockData;
+import org.apache.hadoop.ozone.container.common.interfaces.Container;
+
+import java.io.IOException;
+import java.util.List;
+
+/**
+ * BlockManager is for performing block related operations on the container.
+ */
+public interface BlockManager {
+
+  /**
+   * Puts or overwrites a block.
+   *
+   * @param container - Container to which the block needs to be added.
+   * @param data     - Block Data.
+   * @return length of the Block.
+   * @throws IOException
+   */
+  long putBlock(Container container, BlockData data) throws IOException;
+
+  /**
+   * Gets an existing block.
+   *
+   * @param container - Container from which the block needs to be fetched.
+   * @param blockID - BlockID of the Block.
+   * @return Block Data.
+   * @throws IOException
+   */
+  BlockData getBlock(Container container, BlockID blockID) throws IOException;
+
+  /**
+   * Deletes an existing block.
+   *
+   * @param container - Container from which the block needs to be deleted.
+   * @param blockID - ID of the block.
+   * @throws StorageContainerException
+   */
+  void deleteBlock(Container container, BlockID blockID) throws IOException;
+
+  /**
+   * List blocks in a container.
+   *
+   * @param container - Container from which blocks need to be listed.
+   * @param startLocalID  - Block to start from, 0 to begin.
+   * @param count    - Number of blocks to return.
+   * @return List of Blocks that match the criteria.
+   */
+  List<BlockData> listBlock(Container container, long startLocalID, int count)
+      throws IOException;
+
+  /**
+   * Returns the last committed block length for the block.
+   * @param blockID blockId
+   */
+  long getCommittedBlockLength(Container container, BlockID blockID)
+      throws IOException;
+
+  /**
+   * Shutdown BlockManager.
+   */
+  void shutdown();
+}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/096a7160/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/interfaces/KeyManager.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/interfaces/KeyManager.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/interfaces/KeyManager.java
deleted file mode 100644
index 84f771a..0000000
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/interfaces/KeyManager.java
+++ /dev/null
@@ -1,84 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with this
- * work for additional information regarding copyright ownership.  The ASF
- * licenses this file to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations under
- * the License.
- */
-
-package org.apache.hadoop.ozone.container.keyvalue.interfaces;
-
-import org.apache.hadoop.hdds.client.BlockID;
-import org.apache.hadoop.hdds.scm.container.common.helpers.StorageContainerException;
-import org.apache.hadoop.ozone.container.common.helpers.KeyData;
-import org.apache.hadoop.ozone.container.common.interfaces.Container;
-
-import java.io.IOException;
-import java.util.List;
-
-/**
- * KeyManager is for performing key related operations on the container.
- */
-public interface KeyManager {
-
-  /**
-   * Puts or overwrites a key.
-   *
-   * @param container - Container for which key need to be added.
-   * @param data     - Key Data.
-   * @return length of the Key.
-   * @throws IOException
-   */
-  long putKey(Container container, KeyData data) throws IOException;
-
-  /**
-   * Gets an existing key.
-   *
-   * @param container - Container from which key need to be get.
-   * @param blockID - BlockID of the Key.
-   * @return Key Data.
-   * @throws IOException
-   */
-  KeyData getKey(Container container, BlockID blockID) throws IOException;
-
-  /**
-   * Deletes an existing Key.
-   *
-   * @param container - Container from which key need to be deleted.
-   * @param blockID - ID of the block.
-   * @throws StorageContainerException
-   */
-  void deleteKey(Container container, BlockID blockID) throws IOException;
-
-  /**
-   * List keys in a container.
-   *
-   * @param container - Container from which keys need to be listed.
-   * @param startLocalID  - Key to start from, 0 to begin.
-   * @param count    - Number of keys to return.
-   * @return List of Keys that match the criteria.
-   */
-  List<KeyData> listKey(Container container, long startLocalID, int count)
-      throws IOException;
-
-  /**
-   * Returns the last committed block length for the block.
-   * @param blockID blockId
-   */
-  long getCommittedBlockLength(Container container, BlockID blockID)
-      throws IOException;
-
-  /**
-   * Shutdown ContainerManager.
-   */
-  void shutdown();
-}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/096a7160/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/interfaces/package-info.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/interfaces/package-info.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/interfaces/package-info.java
new file mode 100644
index 0000000..5129094
--- /dev/null
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/interfaces/package-info.java
@@ -0,0 +1,21 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.ozone.container.keyvalue.interfaces;
+/**
+ * Chunk manager and block manager interfaces for keyvalue container type.
+ */

http://git-wip-us.apache.org/repos/asf/hadoop/blob/096a7160/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/statemachine/background/BlockDeletingService.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/statemachine/background/BlockDeletingService.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/statemachine/background/BlockDeletingService.java
index 51eed7f..d96fbfa 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/statemachine/background/BlockDeletingService.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/statemachine/background/BlockDeletingService.java
@@ -25,7 +25,7 @@ import org.apache.hadoop.ozone.container.common.impl.ContainerSet;
 import org.apache.hadoop.ozone.container.common.impl.TopNOrderedContainerDeletionChoosingPolicy;
 import org.apache.hadoop.ozone.container.common.interfaces.ContainerDeletionChoosingPolicy;
 import org.apache.hadoop.ozone.container.keyvalue.KeyValueContainerData;
-import org.apache.hadoop.ozone.container.keyvalue.helpers.KeyUtils;
+import org.apache.hadoop.ozone.container.keyvalue.helpers.BlockUtils;
 import org.apache.hadoop.util.ReflectionUtils;
 import org.apache.ratis.shaded.com.google.protobuf
     .InvalidProtocolBufferException;
@@ -72,7 +72,7 @@ public class BlockDeletingService extends BackgroundService{
   private static final Logger LOG =
       LoggerFactory.getLogger(BlockDeletingService.class);
 
-  ContainerSet containerSet;
+  private ContainerSet containerSet;
   private ContainerDeletionChoosingPolicy containerDeletionPolicy;
   private final Configuration conf;
 
@@ -185,7 +185,7 @@ public class BlockDeletingService extends BackgroundService{
       ContainerBackgroundTaskResult crr = new ContainerBackgroundTaskResult();
       long startTime = Time.monotonicNow();
       // Scan container's db and get list of under deletion blocks
-      MetadataStore meta = KeyUtils.getDB(
+      MetadataStore meta = BlockUtils.getDB(
           (KeyValueContainerData) containerData, conf);
       // # of blocks to delete is throttled
       KeyPrefixFilter filter =
@@ -211,8 +211,8 @@ public class BlockDeletingService extends BackgroundService{
         String blockName = DFSUtil.bytes2String(entry.getKey());
         LOG.debug("Deleting block {}", blockName);
         try {
-          ContainerProtos.KeyData data =
-              ContainerProtos.KeyData.parseFrom(entry.getValue());
+          ContainerProtos.BlockData data =
+              ContainerProtos.BlockData.parseFrom(entry.getValue());
           for (ContainerProtos.ChunkInfo chunkInfo : data.getChunksList()) {
             File chunkFile = dataDir.toPath()
                 .resolve(chunkInfo.getChunkName()).toFile();

http://git-wip-us.apache.org/repos/asf/hadoop/blob/096a7160/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestBlockManagerImpl.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestBlockManagerImpl.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestBlockManagerImpl.java
new file mode 100644
index 0000000..6fe6d81
--- /dev/null
+++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestBlockManagerImpl.java
@@ -0,0 +1,211 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ *  with the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ *  Unless required by applicable law or agreed to in writing, software
+ *  distributed under the License is distributed on an "AS IS" BASIS,
+ *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ *  See the License for the specific language governing permissions and
+ *  limitations under the License.
+ */
+
+package org.apache.hadoop.ozone.container.keyvalue;
+
+import org.apache.hadoop.conf.StorageUnit;
+import org.apache.hadoop.hdds.client.BlockID;
+import org.apache.hadoop.hdds.conf.OzoneConfiguration;
+import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos;
+import org.apache.hadoop.hdds.scm.container.common.helpers.StorageContainerException;
+import org.apache.hadoop.ozone.container.common.helpers.BlockData;
+import org.apache.hadoop.ozone.container.common.helpers.ChunkInfo;
+import org.apache.hadoop.ozone.container.common.volume
+    .RoundRobinVolumeChoosingPolicy;
+import org.apache.hadoop.ozone.container.common.volume.VolumeSet;
+import org.apache.hadoop.ozone.container.common.volume.HddsVolume;
+import org.apache.hadoop.ozone.container.keyvalue.impl.BlockManagerImpl;
+import org.apache.hadoop.test.GenericTestUtils;
+import org.junit.Before;
+import org.junit.Rule;
+import org.junit.Test;
+import org.junit.rules.TemporaryFolder;
+import org.mockito.Mockito;
+
+import java.io.IOException;
+import java.util.LinkedList;
+import java.util.List;
+import java.util.UUID;
+
+import static org.junit.Assert.*;
+import static org.mockito.ArgumentMatchers.anyList;
+import static org.mockito.ArgumentMatchers.anyLong;
+import static org.mockito.Mockito.mock;
+
+/**
+ * This class is used to test block related operations on the container.
+ */
+public class TestBlockManagerImpl {
+
+  private OzoneConfiguration config;
+  private String scmId = UUID.randomUUID().toString();
+  private VolumeSet volumeSet;
+  private RoundRobinVolumeChoosingPolicy volumeChoosingPolicy;
+  private KeyValueContainerData keyValueContainerData;
+  private KeyValueContainer keyValueContainer;
+  private BlockData blockData;
+  private BlockManagerImpl blockManager;
+  private BlockID blockID;
+
+  @Rule
+  public TemporaryFolder folder = new TemporaryFolder();
+
+
+  @Before
+  public void setUp() throws Exception {
+    config = new OzoneConfiguration();
+
+    HddsVolume hddsVolume = new HddsVolume.Builder(folder.getRoot()
+        .getAbsolutePath()).conf(config).datanodeUuid(UUID.randomUUID()
+        .toString()).build();
+
+    volumeSet = mock(VolumeSet.class);
+
+    volumeChoosingPolicy = mock(RoundRobinVolumeChoosingPolicy.class);
+    Mockito.when(volumeChoosingPolicy.chooseVolume(anyList(), anyLong()))
+        .thenReturn(hddsVolume);
+
+    keyValueContainerData = new KeyValueContainerData(1L,
+        (long) StorageUnit.GB.toBytes(5));
+
+    keyValueContainer = new KeyValueContainer(
+        keyValueContainerData, config);
+
+    keyValueContainer.create(volumeSet, volumeChoosingPolicy, scmId);
+
+    // Creating BlockData
+    blockID = new BlockID(1L, 1L);
+    blockData = new BlockData(blockID);
+    blockData.addMetadata("VOLUME", "ozone");
+    blockData.addMetadata("OWNER", "hdfs");
+    List<ContainerProtos.ChunkInfo> chunkList = new LinkedList<>();
+    ChunkInfo info = new ChunkInfo(String.format("%d.data.%d", blockID
+        .getLocalID(), 0), 0, 1024);
+    chunkList.add(info.getProtoBufMessage());
+    blockData.setChunks(chunkList);
+
+    // Create KeyValueContainerManager
+    blockManager = new BlockManagerImpl(config);
+
+  }
+
+  @Test
+  public void testPutAndGetBlock() throws Exception {
+    assertEquals(0, keyValueContainer.getContainerData().getKeyCount());
+    //Put Block
+    blockManager.putBlock(keyValueContainer, blockData);
+
+    assertEquals(1, keyValueContainer.getContainerData().getKeyCount());
+    //Get Block
+    BlockData fromGetBlockData = blockManager.getBlock(keyValueContainer,
+        blockData.getBlockID());
+
+    assertEquals(blockData.getContainerID(), fromGetBlockData.getContainerID());
+    assertEquals(blockData.getLocalID(), fromGetBlockData.getLocalID());
+    assertEquals(blockData.getChunks().size(),
+        fromGetBlockData.getChunks().size());
+    assertEquals(blockData.getMetadata().size(), fromGetBlockData.getMetadata()
+        .size());
+
+  }
+
+
+  @Test
+  public void testDeleteBlock() throws Exception {
+    try {
+      assertEquals(0,
+          keyValueContainer.getContainerData().getKeyCount());
+      //Put Block
+      blockManager.putBlock(keyValueContainer, blockData);
+      assertEquals(1,
+          keyValueContainer.getContainerData().getKeyCount());
+      //Delete Block
+      blockManager.deleteBlock(keyValueContainer, blockID);
+      assertEquals(0,
+          keyValueContainer.getContainerData().getKeyCount());
+      try {
+        blockManager.getBlock(keyValueContainer, blockID);
+        fail("testDeleteBlock");
+      } catch (StorageContainerException ex) {
+        GenericTestUtils.assertExceptionContains(
+            "Unable to find the block", ex);
+      }
+    } catch (IOException ex) {
+      fail("testDeleteBlock failed");
+    }
+  }
+
+  @Test
+  public void testListBlock() throws Exception {
+    try {
+      blockManager.putBlock(keyValueContainer, blockData);
+      List<BlockData> listBlockData = blockManager.listBlock(
+          keyValueContainer, 1, 10);
+      assertNotNull(listBlockData);
+      assertTrue(listBlockData.size() == 1);
+
+      for (long i = 2; i <= 10; i++) {
+        blockID = new BlockID(1L, i);
+        blockData = new BlockData(blockID);
+        blockData.addMetadata("VOLUME", "ozone");
+        blockData.addMetadata("OWNER", "hdfs");
+        List<ContainerProtos.ChunkInfo> chunkList = new LinkedList<>();
+        ChunkInfo info = new ChunkInfo(String.format("%d.data.%d", blockID
+            .getLocalID(), 0), 0, 1024);
+        chunkList.add(info.getProtoBufMessage());
+        blockData.setChunks(chunkList);
+        blockManager.putBlock(keyValueContainer, blockData);
+      }
+
+      listBlockData = blockManager.listBlock(
+          keyValueContainer, 1, 10);
+      assertNotNull(listBlockData);
+      assertTrue(listBlockData.size() == 10);
+
+    } catch (IOException ex) {
+      fail("testListBlock failed");
+    }
+  }
+
+  @Test
+  public void testGetNoSuchBlock() throws Exception {
+    try {
+      assertEquals(0,
+          keyValueContainer.getContainerData().getKeyCount());
+      //Put Block
+      blockManager.putBlock(keyValueContainer, blockData);
+      assertEquals(1,
+          keyValueContainer.getContainerData().getKeyCount());
+      //Delete Block
+      blockManager.deleteBlock(keyValueContainer, blockID);
+      assertEquals(0,
+          keyValueContainer.getContainerData().getKeyCount());
+      try {
+        //Since the block has been deleted, we should not be able to find it
+        blockManager.getBlock(keyValueContainer, blockID);
+        fail("testGetNoSuchBlock failed");
+      } catch (StorageContainerException ex) {
+        GenericTestUtils.assertExceptionContains(
+            "Unable to find the block", ex);
+        assertEquals(ContainerProtos.Result.NO_SUCH_BLOCK, ex.getResult());
+      }
+    } catch (IOException ex) {
+      fail("testGetNoSuchBlock failed");
+    }
+  }
+}
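
Beyond replacing the deleted TestKeyManagerImpl further down, the new test doubles as a reference for the renamed datanode block API. The following is a condensed sketch of the lifecycle it exercises, assuming the container, config, and chunk list are set up exactly as in setUp() above; every identifier is taken from the test itself:

    // Condensed put/get/list/delete lifecycle from TestBlockManagerImpl.
    BlockManagerImpl blockManager = new BlockManagerImpl(config);
    BlockID blockID = new BlockID(1L, 1L);           // (containerID, localID)
    BlockData blockData = new BlockData(blockID);
    blockData.setChunks(chunkList);                  // protobuf ChunkInfo list

    blockManager.putBlock(keyValueContainer, blockData);       // was putKey
    BlockData stored =
        blockManager.getBlock(keyValueContainer, blockID);     // was getKey
    List<BlockData> firstTen =
        blockManager.listBlock(keyValueContainer, 1, 10);      // was listKey
    blockManager.deleteBlock(keyValueContainer, blockID);      // was deleteKey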

http://git-wip-us.apache.org/repos/asf/hadoop/blob/096a7160/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestChunkManagerImpl.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestChunkManagerImpl.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestChunkManagerImpl.java
index 9664052..3c0876b 100644
--- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestChunkManagerImpl.java
+++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestChunkManagerImpl.java
@@ -88,7 +88,7 @@ public class TestChunkManagerImpl {
     keyValueContainer.create(volumeSet, volumeChoosingPolicy, scmId);
 
     data = "testing write chunks".getBytes();
-    // Creating KeyData
+    // Creating BlockData
     blockID = new BlockID(1L, 1L);
     chunkInfo = new ChunkInfo(String.format("%d.data.%d", blockID
         .getLocalID(), 0), 0, data.length);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/096a7160/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyManagerImpl.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyManagerImpl.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyManagerImpl.java
deleted file mode 100644
index b05dbca..0000000
--- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyManagerImpl.java
+++ /dev/null
@@ -1,191 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- *  with the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- *  Unless required by applicable law or agreed to in writing, software
- *  distributed under the License is distributed on an "AS IS" BASIS,
- *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- *  See the License for the specific language governing permissions and
- *  limitations under the License.
- */
-
-package org.apache.hadoop.ozone.container.keyvalue;
-
-import org.apache.hadoop.conf.StorageUnit;
-import org.apache.hadoop.hdds.client.BlockID;
-import org.apache.hadoop.hdds.conf.OzoneConfiguration;
-import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos;
-import org.apache.hadoop.hdds.scm.container.common.helpers.StorageContainerException;
-import org.apache.hadoop.ozone.container.common.helpers.ChunkInfo;
-import org.apache.hadoop.ozone.container.common.helpers.KeyData;
-import org.apache.hadoop.ozone.container.common.volume
-    .RoundRobinVolumeChoosingPolicy;
-import org.apache.hadoop.ozone.container.common.volume.VolumeSet;
-import org.apache.hadoop.ozone.container.common.volume.HddsVolume;
-import org.apache.hadoop.ozone.container.keyvalue.impl.KeyManagerImpl;
-import org.apache.hadoop.test.GenericTestUtils;
-import org.junit.Before;
-import org.junit.Rule;
-import org.junit.Test;
-import org.junit.rules.TemporaryFolder;
-import org.mockito.Mockito;
-
-import java.io.IOException;
-import java.util.LinkedList;
-import java.util.List;
-import java.util.UUID;
-
-import static org.junit.Assert.*;
-import static org.mockito.ArgumentMatchers.anyList;
-import static org.mockito.ArgumentMatchers.anyLong;
-import static org.mockito.Mockito.mock;
-
-/**
- * This class is used to test key related operations on the container.
- */
-public class TestKeyManagerImpl {
-
-  private OzoneConfiguration config;
-  private String scmId = UUID.randomUUID().toString();
-  private VolumeSet volumeSet;
-  private RoundRobinVolumeChoosingPolicy volumeChoosingPolicy;
-  private KeyValueContainerData keyValueContainerData;
-  private KeyValueContainer keyValueContainer;
-  private KeyData keyData;
-  private KeyManagerImpl keyManager;
-  private BlockID blockID;
-
-  @Rule
-  public TemporaryFolder folder = new TemporaryFolder();
-
-
-  @Before
-  public void setUp() throws Exception {
-    config = new OzoneConfiguration();
-
-    HddsVolume hddsVolume = new HddsVolume.Builder(folder.getRoot()
-        .getAbsolutePath()).conf(config).datanodeUuid(UUID.randomUUID()
-        .toString()).build();
-
-    volumeSet = mock(VolumeSet.class);
-
-    volumeChoosingPolicy = mock(RoundRobinVolumeChoosingPolicy.class);
-    Mockito.when(volumeChoosingPolicy.chooseVolume(anyList(), anyLong()))
-        .thenReturn(hddsVolume);
-
-    keyValueContainerData = new KeyValueContainerData(1L,
-        (long) StorageUnit.GB.toBytes(5));
-
-    keyValueContainer = new KeyValueContainer(
-        keyValueContainerData, config);
-
-    keyValueContainer.create(volumeSet, volumeChoosingPolicy, scmId);
-
-    // Creating KeyData
-    blockID = new BlockID(1L, 1L);
-    keyData = new KeyData(blockID);
-    keyData.addMetadata("VOLUME", "ozone");
-    keyData.addMetadata("OWNER", "hdfs");
-    List<ContainerProtos.ChunkInfo> chunkList = new LinkedList<>();
-    ChunkInfo info = new ChunkInfo(String.format("%d.data.%d", blockID
-        .getLocalID(), 0), 0, 1024);
-    chunkList.add(info.getProtoBufMessage());
-    keyData.setChunks(chunkList);
-
-    // Create KeyValueContainerManager
-    keyManager = new KeyManagerImpl(config);
-
-  }
-
-  @Test
-  public void testPutAndGetKey() throws Exception {
-    assertEquals(0, keyValueContainer.getContainerData().getKeyCount());
-    //Put Key
-    keyManager.putKey(keyValueContainer, keyData);
-
-    assertEquals(1, keyValueContainer.getContainerData().getKeyCount());
-    //Get Key
-    KeyData fromGetKeyData = keyManager.getKey(keyValueContainer,
-        keyData.getBlockID());
-
-    assertEquals(keyData.getContainerID(), fromGetKeyData.getContainerID());
-    assertEquals(keyData.getLocalID(), fromGetKeyData.getLocalID());
-    assertEquals(keyData.getChunks().size(), fromGetKeyData.getChunks().size());
-    assertEquals(keyData.getMetadata().size(), fromGetKeyData.getMetadata()
-        .size());
-
-  }
-
-
-  @Test
-  public void testDeleteKey() throws Exception {
-    try {
-      assertEquals(0, keyValueContainer.getContainerData().getKeyCount());
-      //Put Key
-      keyManager.putKey(keyValueContainer, keyData);
-      assertEquals(1, keyValueContainer.getContainerData().getKeyCount());
-      //Delete Key
-      keyManager.deleteKey(keyValueContainer, blockID);
-      assertEquals(0, keyValueContainer.getContainerData().getKeyCount());
-      try {
-        keyManager.getKey(keyValueContainer, blockID);
-        fail("testDeleteKey");
-      } catch (StorageContainerException ex) {
-        GenericTestUtils.assertExceptionContains("Unable to find the key", ex);
-      }
-    } catch (IOException ex) {
-      fail("testDeleteKey failed");
-    }
-  }
-
-  @Test
-  public void testListKey() throws Exception {
-    try {
-      keyManager.putKey(keyValueContainer, keyData);
-      List<KeyData> listKeyData = keyManager.listKey(
-          keyValueContainer, 1, 10);
-      assertNotNull(listKeyData);
-      assertTrue(listKeyData.size() == 1);
-
-      for (long i = 2; i <= 10; i++) {
-        blockID = new BlockID(1L, i);
-        keyData = new KeyData(blockID);
-        keyData.addMetadata("VOLUME", "ozone");
-        keyData.addMetadata("OWNER", "hdfs");
-        List<ContainerProtos.ChunkInfo> chunkList = new LinkedList<>();
-        ChunkInfo info = new ChunkInfo(String.format("%d.data.%d", blockID
-            .getLocalID(), 0), 0, 1024);
-        chunkList.add(info.getProtoBufMessage());
-        keyData.setChunks(chunkList);
-        keyManager.putKey(keyValueContainer, keyData);
-      }
-
-      listKeyData = keyManager.listKey(
-          keyValueContainer, 1, 10);
-      assertNotNull(listKeyData);
-      assertTrue(listKeyData.size() == 10);
-
-    } catch (IOException ex) {
-      fail("testListKey failed");
-    }
-  }
-
-  @Test
-  public void testGetNoSuchKey() throws Exception {
-    try {
-      keyData = new KeyData(new BlockID(1L, 2L));
-      keyManager.getKey(keyValueContainer, new BlockID(1L, 2L));
-      fail("testGetNoSuchKey failed");
-    } catch (StorageContainerException ex) {
-      GenericTestUtils.assertExceptionContains("Unable to find the key.", ex);
-      assertEquals(ContainerProtos.Result.NO_SUCH_KEY, ex.getResult());
-    }
-  }
-}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/096a7160/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueBlockIterator.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueBlockIterator.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueBlockIterator.java
index f1fe88e..fbc5ad0 100644
--- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueBlockIterator.java
+++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueBlockIterator.java
@@ -27,11 +27,11 @@ import org.apache.hadoop.hdds.conf.OzoneConfiguration;
 import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos;
 import org.apache.hadoop.hdfs.DFSUtil;
 import org.apache.hadoop.ozone.OzoneConsts;
+import org.apache.hadoop.ozone.container.common.helpers.BlockData;
 import org.apache.hadoop.ozone.container.common.helpers.ChunkInfo;
-import org.apache.hadoop.ozone.container.common.helpers.KeyData;
 import org.apache.hadoop.ozone.container.common.volume.RoundRobinVolumeChoosingPolicy;
 import org.apache.hadoop.ozone.container.common.volume.VolumeSet;
-import org.apache.hadoop.ozone.container.keyvalue.helpers.KeyUtils;
+import org.apache.hadoop.ozone.container.keyvalue.helpers.BlockUtils;
 import org.apache.hadoop.test.GenericTestUtils;
 import org.apache.hadoop.utils.MetadataKeyFilters;
 import org.apache.hadoop.utils.MetadataStore;
@@ -114,8 +114,8 @@ public class TestKeyValueBlockIterator {
 
     int counter = 0;
     while(keyValueBlockIterator.hasNext()) {
-      KeyData keyData = keyValueBlockIterator.nextBlock();
-      assertEquals(keyData.getLocalID(), counter++);
+      BlockData blockData = keyValueBlockIterator.nextBlock();
+      assertEquals(blockData.getLocalID(), counter++);
     }
 
     assertFalse(keyValueBlockIterator.hasNext());
@@ -123,8 +123,8 @@ public class TestKeyValueBlockIterator {
     keyValueBlockIterator.seekToFirst();
     counter = 0;
     while(keyValueBlockIterator.hasNext()) {
-      KeyData keyData = keyValueBlockIterator.nextBlock();
-      assertEquals(keyData.getLocalID(), counter++);
+      BlockData blockData = keyValueBlockIterator.nextBlock();
+      assertEquals(blockData.getLocalID(), counter++);
     }
     assertFalse(keyValueBlockIterator.hasNext());
 
@@ -214,8 +214,8 @@ public class TestKeyValueBlockIterator {
 
     int counter = 5;
     while(keyValueBlockIterator.hasNext()) {
-      KeyData keyData = keyValueBlockIterator.nextBlock();
-      assertEquals(keyData.getLocalID(), counter++);
+      BlockData blockData = keyValueBlockIterator.nextBlock();
+      assertEquals(blockData.getLocalID(), counter++);
     }
   }
 
@@ -250,7 +250,7 @@ public class TestKeyValueBlockIterator {
     container = new KeyValueContainer(containerData, conf);
     container.create(volumeSet, new RoundRobinVolumeChoosingPolicy(), UUID
         .randomUUID().toString());
-    MetadataStore metadataStore = KeyUtils.getDB(containerData, conf);
+    MetadataStore metadataStore = BlockUtils.getDB(containerData, conf);
 
     List<ContainerProtos.ChunkInfo> chunkList = new LinkedList<>();
     ChunkInfo info = new ChunkInfo("chunkfile", 0, 1024);
@@ -258,18 +258,18 @@ public class TestKeyValueBlockIterator {
 
     for (int i=0; i<normalBlocks; i++) {
       BlockID blockID = new BlockID(containerId, i);
-      KeyData keyData = new KeyData(blockID);
-      keyData.setChunks(chunkList);
-      metadataStore.put(Longs.toByteArray(blockID.getLocalID()), keyData
+      BlockData blockData = new BlockData(blockID);
+      blockData.setChunks(chunkList);
+      metadataStore.put(Longs.toByteArray(blockID.getLocalID()), blockData
           .getProtoBufMessage().toByteArray());
     }
 
     for (int i=normalBlocks; i<deletedBlocks; i++) {
       BlockID blockID = new BlockID(containerId, i);
-      KeyData keyData = new KeyData(blockID);
-      keyData.setChunks(chunkList);
+      BlockData blockData = new BlockData(blockID);
+      blockData.setChunks(chunkList);
       metadataStore.put(DFSUtil.string2Bytes(OzoneConsts
-          .DELETING_KEY_PREFIX + blockID.getLocalID()), keyData
+          .DELETING_KEY_PREFIX + blockID.getLocalID()), blockData
           .getProtoBufMessage().toByteArray());
     }
   }
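
The iterator test above also pins down the on-disk key layout it depends on: a live block is keyed by its raw localID bytes, while a block queued for deletion is keyed under OzoneConsts.DELETING_KEY_PREFIX, so a MetadataKeyFilters filter can include or skip it. A minimal sketch using only names from the hunk (the prefix value itself is left to OzoneConsts):

    // Live block: keyed by the raw localID bytes.
    metadataStore.put(Longs.toByteArray(blockID.getLocalID()),
        blockData.getProtoBufMessage().toByteArray());

    // Block pending deletion: same value bytes, but keyed under the
    // deleting prefix so deletion-aware iteration can filter it out.
    metadataStore.put(
        DFSUtil.string2Bytes(
            OzoneConsts.DELETING_KEY_PREFIX + blockID.getLocalID()),
        blockData.getProtoBufMessage().toByteArray());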

http://git-wip-us.apache.org/repos/asf/hadoop/blob/096a7160/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueContainer.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueContainer.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueContainer.java
index f84ba7d..bf6b8b0 100644
--- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueContainer.java
+++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueContainer.java
@@ -28,14 +28,14 @@ import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos
     .ContainerLifeCycleState;
 import org.apache.hadoop.hdds.scm.container.common.helpers
     .StorageContainerException;
+import org.apache.hadoop.ozone.container.common.helpers.BlockData;
 import org.apache.hadoop.ozone.container.common.helpers.ChunkInfo;
-import org.apache.hadoop.ozone.container.common.helpers.KeyData;
 import org.apache.hadoop.ozone.container.common.impl.ContainerDataYaml;
 import org.apache.hadoop.ozone.container.common.volume.HddsVolume;
 import org.apache.hadoop.ozone.container.common.volume
     .RoundRobinVolumeChoosingPolicy;
 import org.apache.hadoop.ozone.container.common.volume.VolumeSet;
-import org.apache.hadoop.ozone.container.keyvalue.helpers.KeyUtils;
+import org.apache.hadoop.ozone.container.keyvalue.helpers.BlockUtils;
 import org.apache.hadoop.test.GenericTestUtils;
 import org.apache.hadoop.util.DiskChecker;
 import org.apache.hadoop.utils.MetadataStore;
@@ -117,11 +117,11 @@ public class TestKeyValueContainer {
     addBlocks(blockCount);
     blockIterator = keyValueContainer.blockIterator();
     assertTrue(blockIterator.hasNext());
-    KeyData keyData;
+    BlockData blockData;
     int blockCounter = 0;
     while(blockIterator.hasNext()) {
-      keyData = blockIterator.nextBlock();
-      assertEquals(blockCounter++, keyData.getBlockID().getLocalID());
+      blockData = blockIterator.nextBlock();
+      assertEquals(blockCounter++, blockData.getBlockID().getLocalID());
     }
     assertEquals(blockCount, blockCounter);
   }
@@ -129,20 +129,20 @@ public class TestKeyValueContainer {
   private void addBlocks(int count) throws Exception {
     long containerId = keyValueContainerData.getContainerID();
 
-    MetadataStore metadataStore = KeyUtils.getDB(keyValueContainer
+    MetadataStore metadataStore = BlockUtils.getDB(keyValueContainer
         .getContainerData(), conf);
     for (int i=0; i < count; i++) {
-      // Creating KeyData
+      // Creating BlockData
       BlockID blockID = new BlockID(containerId, i);
-      KeyData keyData = new KeyData(blockID);
-      keyData.addMetadata("VOLUME", "ozone");
-      keyData.addMetadata("OWNER", "hdfs");
+      BlockData blockData = new BlockData(blockID);
+      blockData.addMetadata("VOLUME", "ozone");
+      blockData.addMetadata("OWNER", "hdfs");
       List<ContainerProtos.ChunkInfo> chunkList = new LinkedList<>();
       ChunkInfo info = new ChunkInfo(String.format("%d.data.%d", blockID
           .getLocalID(), 0), 0, 1024);
       chunkList.add(info.getProtoBufMessage());
-      keyData.setChunks(chunkList);
-      metadataStore.put(Longs.toByteArray(blockID.getLocalID()), keyData
+      blockData.setChunks(chunkList);
+      metadataStore.put(Longs.toByteArray(blockID.getLocalID()), blockData
           .getProtoBufMessage().toByteArray());
     }
 
@@ -189,7 +189,7 @@ public class TestKeyValueContainer {
 
     int numberOfKeysToWrite = 12;
     //write one few keys to check the key count after import
-    MetadataStore metadataStore = KeyUtils.getDB(keyValueContainerData, conf);
+    MetadataStore metadataStore = BlockUtils.getDB(keyValueContainerData, conf);
     for (int i = 0; i < numberOfKeysToWrite; i++) {
       metadataStore.put(("test" + i).getBytes(), "test".getBytes());
     }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/096a7160/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueHandler.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueHandler.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueHandler.java
index d91bbf7..e1904c1 100644
--- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueHandler.java
+++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueHandler.java
@@ -142,31 +142,31 @@ public class TestKeyValueHandler {
     Mockito.verify(handler, times(1)).handleCloseContainer(
         any(ContainerCommandRequestProto.class), any());
 
-    // Test Put Key Request handling
-    ContainerCommandRequestProto putKeyRequest =
-        getDummyCommandRequestProto(ContainerProtos.Type.PutKey);
-    dispatcher.dispatch(putKeyRequest);
-    Mockito.verify(handler, times(1)).handlePutKey(
+    // Test Put Block Request handling
+    ContainerCommandRequestProto putBlockRequest =
+        getDummyCommandRequestProto(ContainerProtos.Type.PutBlock);
+    dispatcher.dispatch(putBlockRequest);
+    Mockito.verify(handler, times(1)).handlePutBlock(
         any(ContainerCommandRequestProto.class), any());
 
-    // Test Get Key Request handling
-    ContainerCommandRequestProto getKeyRequest =
-        getDummyCommandRequestProto(ContainerProtos.Type.GetKey);
-    dispatcher.dispatch(getKeyRequest);
-    Mockito.verify(handler, times(1)).handleGetKey(
+    // Test Get Block Request handling
+    ContainerCommandRequestProto getBlockRequest =
+        getDummyCommandRequestProto(ContainerProtos.Type.GetBlock);
+    dispatcher.dispatch(getBlockRequest);
+    Mockito.verify(handler, times(1)).handleGetBlock(
         any(ContainerCommandRequestProto.class), any());
 
-    // Test Delete Key Request handling
-    ContainerCommandRequestProto deleteKeyRequest =
-        getDummyCommandRequestProto(ContainerProtos.Type.DeleteKey);
-    dispatcher.dispatch(deleteKeyRequest);
-    Mockito.verify(handler, times(1)).handleDeleteKey(
+    // Test Delete Block Request handling
+    ContainerCommandRequestProto deleteBlockRequest =
+        getDummyCommandRequestProto(ContainerProtos.Type.DeleteBlock);
+    dispatcher.dispatch(deleteBlockRequest);
+    Mockito.verify(handler, times(1)).handleDeleteBlock(
         any(ContainerCommandRequestProto.class), any());
 
-    // Test List Key Request handling
-    ContainerCommandRequestProto listKeyRequest =
-        getDummyCommandRequestProto(ContainerProtos.Type.ListKey);
-    dispatcher.dispatch(listKeyRequest);
+    // Test List Block Request handling
+    ContainerCommandRequestProto listBlockRequest =
+        getDummyCommandRequestProto(ContainerProtos.Type.ListBlock);
+    dispatcher.dispatch(listBlockRequest);
     Mockito.verify(handler, times(2)).handleUnsupportedOp(
         any(ContainerCommandRequestProto.class));
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/096a7160/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/ChunkGroupInputStream.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/ChunkGroupInputStream.java b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/ChunkGroupInputStream.java
index 94966f6..2b10578 100644
--- a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/ChunkGroupInputStream.java
+++ b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/ChunkGroupInputStream.java
@@ -281,10 +281,10 @@ public class ChunkGroupInputStream extends InputStream implements Seekable {
         groupInputStream.streamOffset[i] = length;
         ContainerProtos.DatanodeBlockID datanodeBlockID = blockID
             .getDatanodeBlockIDProtobuf();
-        ContainerProtos.GetKeyResponseProto response = ContainerProtocolCalls
-            .getKey(xceiverClient, datanodeBlockID, requestId);
+        ContainerProtos.GetBlockResponseProto response = ContainerProtocolCalls
+            .getBlock(xceiverClient, datanodeBlockID, requestId);
         List<ContainerProtos.ChunkInfo> chunks =
-            response.getKeyData().getChunksList();
+            response.getBlockData().getChunksList();
         for (ContainerProtos.ChunkInfo chunk : chunks) {
           length += chunk.getLen();
         }
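
Apart from the mechanical rename, this hunk is worth a second look for what the client is doing: the block length is derived by fetching the block's metadata and summing the lengths of its chunks; no separate length field is consulted. Condensed, using exactly the names from the hunk above:

    // Ask the datanode for the block's metadata, then sum its chunk lengths.
    ContainerProtos.GetBlockResponseProto response = ContainerProtocolCalls
        .getBlock(xceiverClient, datanodeBlockID, requestId);
    long blockLength = 0;
    for (ContainerProtos.ChunkInfo chunk :
        response.getBlockData().getChunksList()) {
      blockLength += chunk.getLen();
    }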

http://git-wip-us.apache.org/repos/asf/hadoop/blob/096a7160/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestStorageContainerManagerHelper.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestStorageContainerManagerHelper.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestStorageContainerManagerHelper.java
index d0e173c..0537f8a 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestStorageContainerManagerHelper.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestStorageContainerManagerHelper.java
@@ -27,7 +27,7 @@ import org.apache.hadoop.hdds.scm.container.common.helpers.ContainerWithPipeline
 import org.apache.hadoop.hdfs.DFSUtil;
 import org.apache.hadoop.hdfs.server.datanode.ObjectStoreHandler;
 import org.apache.hadoop.hdds.protocol.DatanodeDetails;
-import org.apache.hadoop.ozone.container.keyvalue.helpers.KeyUtils;
+import org.apache.hadoop.ozone.container.keyvalue.helpers.BlockUtils;
 import org.apache.hadoop.ozone.container.keyvalue.KeyValueContainerData;
 import org.apache.hadoop.ozone.container.ozoneimpl.OzoneContainer;
 import org.apache.hadoop.ozone.om.helpers.OmKeyArgs;
@@ -165,10 +165,10 @@ public class TestStorageContainerManagerHelper {
     DatanodeDetails leadDN = containerWithPipeline.getPipeline().getLeader();
     OzoneContainer containerServer =
         getContainerServerByDatanodeUuid(leadDN.getUuidString());
-    KeyValueContainerData containerData = (KeyValueContainerData) containerServer
-        .getContainerSet()
+    KeyValueContainerData containerData =
+        (KeyValueContainerData) containerServer.getContainerSet()
         .getContainer(containerID).getContainerData();
-    return KeyUtils.getDB(containerData, conf);
+    return BlockUtils.getDB(containerData, conf);
   }
 
   private OzoneContainer getContainerServerByDatanodeUuid(String dnUUID)

http://git-wip-us.apache.org/repos/asf/hadoop/blob/096a7160/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rest/TestOzoneRestClient.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rest/TestOzoneRestClient.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rest/TestOzoneRestClient.java
index ddff0c5..b4a0ba7 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rest/TestOzoneRestClient.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rest/TestOzoneRestClient.java
@@ -31,7 +31,7 @@ import org.apache.hadoop.hdds.client.ReplicationFactor;
 import org.apache.hadoop.hdds.client.ReplicationType;
 import org.apache.hadoop.ozone.client.io.OzoneInputStream;
 import org.apache.hadoop.ozone.client.io.OzoneOutputStream;
-import org.apache.hadoop.ozone.container.common.helpers.KeyData;
+import org.apache.hadoop.ozone.container.common.helpers.BlockData;
 import org.apache.hadoop.ozone.container.keyvalue.KeyValueBlockIterator;
 import org.apache.hadoop.ozone.container.keyvalue.KeyValueContainerData;
 import org.junit.AfterClass;
@@ -469,9 +469,9 @@ public class TestOzoneRestClient {
         containerID, new File(containerPath));
     long valueLength = 0;
     while (keyValueBlockIterator.hasNext()) {
-      KeyData keyData = keyValueBlockIterator.nextBlock();
-      if (keyData.getBlockID().getLocalID() == localID) {
-        List<ContainerProtos.ChunkInfo> chunks = keyData.getChunks();
+      BlockData blockData = keyValueBlockIterator.nextBlock();
+      if (blockData.getBlockID().getLocalID() == localID) {
+        List<ContainerProtos.ChunkInfo> chunks = blockData.getChunks();
         for (ContainerProtos.ChunkInfo chunk : chunks) {
           valueLength += chunk.getLen();
         }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/096a7160/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneRpcClient.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneRpcClient.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneRpcClient.java
index bf1eba6..cc045d0 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneRpcClient.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneRpcClient.java
@@ -33,7 +33,7 @@ import org.apache.hadoop.hdds.client.ReplicationFactor;
 import org.apache.hadoop.hdds.client.ReplicationType;
 import org.apache.hadoop.ozone.client.io.OzoneInputStream;
 import org.apache.hadoop.ozone.client.io.OzoneOutputStream;
-import org.apache.hadoop.ozone.container.common.helpers.KeyData;
+import org.apache.hadoop.ozone.container.common.helpers.BlockData;
 import org.apache.hadoop.ozone.container.keyvalue.KeyValueBlockIterator;
 import org.apache.hadoop.ozone.container.keyvalue.KeyValueContainerData;
 import org.apache.hadoop.ozone.om.OzoneManager;
@@ -603,10 +603,10 @@ public class TestOzoneRpcClient {
     KeyValueBlockIterator keyValueBlockIterator = new KeyValueBlockIterator(
         containerID, new File(containerPath));
     while (keyValueBlockIterator.hasNext()) {
-      KeyData keyData = keyValueBlockIterator.nextBlock();
-      if (keyData.getBlockID().getLocalID() == localID) {
+      BlockData blockData = keyValueBlockIterator.nextBlock();
+      if (blockData.getBlockID().getLocalID() == localID) {
         long length = 0;
-        List<ContainerProtos.ChunkInfo> chunks = keyData.getChunks();
+        List<ContainerProtos.ChunkInfo> chunks = blockData.getChunks();
         for (ContainerProtos.ChunkInfo chunk : chunks) {
           length += chunk.getLen();
         }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/096a7160/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/ContainerTestHelper.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/ContainerTestHelper.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/ContainerTestHelper.java
index 0c86828..f278479 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/ContainerTestHelper.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/ContainerTestHelper.java
@@ -40,7 +40,7 @@ import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationType;
 import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.KeyValue;
 import org.apache.hadoop.ozone.OzoneConsts;
 import org.apache.hadoop.ozone.container.common.helpers.ChunkInfo;
-import org.apache.hadoop.ozone.container.common.helpers.KeyData;
+import org.apache.hadoop.ozone.container.common.helpers.BlockData;
 import org.apache.hadoop.hdds.scm.container.common.helpers.Pipeline;
 import org.apache.hadoop.util.Time;
 import org.junit.Assert;
@@ -241,18 +241,18 @@ public final class ContainerTestHelper {
     setDataChecksum(info, data);
 
 
-    ContainerProtos.PutKeyRequestProto.Builder putRequest =
-        ContainerProtos.PutKeyRequestProto.newBuilder();
+    ContainerProtos.PutBlockRequestProto.Builder putRequest =
+        ContainerProtos.PutBlockRequestProto.newBuilder();
 
-    KeyData keyData = new KeyData(blockID);
+    BlockData blockData = new BlockData(blockID);
     List<ContainerProtos.ChunkInfo> newList = new LinkedList<>();
     newList.add(info.getProtoBufMessage());
-    keyData.setChunks(newList);
-    putRequest.setKeyData(keyData.getProtoBufMessage());
+    blockData.setChunks(newList);
+    putRequest.setBlockData(blockData.getProtoBufMessage());
 
     smallFileRequest.setChunkInfo(info.getProtoBufMessage());
     smallFileRequest.setData(ByteString.copyFrom(data));
-    smallFileRequest.setKey(putRequest);
+    smallFileRequest.setBlock(putRequest);
 
     ContainerCommandRequestProto.Builder request =
         ContainerCommandRequestProto.newBuilder();
@@ -266,17 +266,17 @@ public final class ContainerTestHelper {
 
 
   public static ContainerCommandRequestProto getReadSmallFileRequest(
-      Pipeline pipeline, ContainerProtos.PutKeyRequestProto putKey)
+      Pipeline pipeline, ContainerProtos.PutBlockRequestProto putKey)
       throws Exception {
     ContainerProtos.GetSmallFileRequestProto.Builder smallFileRequest =
         ContainerProtos.GetSmallFileRequestProto.newBuilder();
-    ContainerCommandRequestProto getKey = getKeyRequest(pipeline, putKey);
-    smallFileRequest.setKey(getKey.getGetKey());
+    ContainerCommandRequestProto getKey = getBlockRequest(pipeline, putKey);
+    smallFileRequest.setBlock(getKey.getGetBlock());
 
     ContainerCommandRequestProto.Builder request =
         ContainerCommandRequestProto.newBuilder();
     request.setCmdType(ContainerProtos.Type.GetSmallFile);
-    request.setContainerID(getKey.getGetKey().getBlockID().getContainerID());
+    request.setContainerID(getKey.getGetBlock().getBlockID().getContainerID());
     request.setGetSmallFile(smallFileRequest);
     request.setTraceID(UUID.randomUUID().toString());
     request.setDatanodeUuid(pipeline.getLeader().getUuidString());
@@ -421,58 +421,58 @@ public final class ContainerTestHelper {
   }
 
   /**
-   * Returns the PutKeyRequest for test purpose.
+   * Returns the PutBlockRequest for test purpose.
    * @param pipeline - pipeline.
    * @param writeRequest - Write Chunk Request.
    * @return - Request
    */
-  public static ContainerCommandRequestProto getPutKeyRequest(
+  public static ContainerCommandRequestProto getPutBlockRequest(
       Pipeline pipeline, ContainerProtos.WriteChunkRequestProto writeRequest) {
-    LOG.trace("putKey: {} to pipeline={}",
+    LOG.trace("putBlock: {} to pipeline={}",
         writeRequest.getBlockID());
 
-    ContainerProtos.PutKeyRequestProto.Builder putRequest =
-        ContainerProtos.PutKeyRequestProto.newBuilder();
+    ContainerProtos.PutBlockRequestProto.Builder putRequest =
+        ContainerProtos.PutBlockRequestProto.newBuilder();
 
-    KeyData keyData = new KeyData(
+    BlockData blockData = new BlockData(
         BlockID.getFromProtobuf(writeRequest.getBlockID()));
     List<ContainerProtos.ChunkInfo> newList = new LinkedList<>();
     newList.add(writeRequest.getChunkData());
-    keyData.setChunks(newList);
-    putRequest.setKeyData(keyData.getProtoBufMessage());
+    blockData.setChunks(newList);
+    putRequest.setBlockData(blockData.getProtoBufMessage());
 
     ContainerCommandRequestProto.Builder request =
         ContainerCommandRequestProto.newBuilder();
-    request.setCmdType(ContainerProtos.Type.PutKey);
-    request.setContainerID(keyData.getContainerID());
-    request.setPutKey(putRequest);
+    request.setCmdType(ContainerProtos.Type.PutBlock);
+    request.setContainerID(blockData.getContainerID());
+    request.setPutBlock(putRequest);
     request.setTraceID(UUID.randomUUID().toString());
     request.setDatanodeUuid(pipeline.getLeader().getUuidString());
     return request.build();
   }
 
   /**
-   * Gets a GetKeyRequest for test purpose.
+   * Gets a GetBlockRequest for test purpose.
    * @param  pipeline - pipeline
-   * @param putKeyRequest - putKeyRequest.
+   * @param putBlockRequest - putBlockRequest.
    * @return - Request
    * immediately.
    */
-  public static ContainerCommandRequestProto getKeyRequest(
-      Pipeline pipeline, ContainerProtos.PutKeyRequestProto putKeyRequest) {
+  public static ContainerCommandRequestProto getBlockRequest(
+      Pipeline pipeline, ContainerProtos.PutBlockRequestProto putBlockRequest) {
     ContainerProtos.DatanodeBlockID blockID =
-        putKeyRequest.getKeyData().getBlockID();
+        putBlockRequest.getBlockData().getBlockID();
     LOG.trace("getKey: blockID={}", blockID);
 
-    ContainerProtos.GetKeyRequestProto.Builder getRequest =
-        ContainerProtos.GetKeyRequestProto.newBuilder();
+    ContainerProtos.GetBlockRequestProto.Builder getRequest =
+        ContainerProtos.GetBlockRequestProto.newBuilder();
     getRequest.setBlockID(blockID);
 
     ContainerCommandRequestProto.Builder request =
         ContainerCommandRequestProto.newBuilder();
-    request.setCmdType(ContainerProtos.Type.GetKey);
+    request.setCmdType(ContainerProtos.Type.GetBlock);
     request.setContainerID(blockID.getContainerID());
-    request.setGetKey(getRequest);
+    request.setGetBlock(getRequest);
     request.setTraceID(UUID.randomUUID().toString());
     request.setDatanodeUuid(pipeline.getLeader().getUuidString());
     return request.build();
@@ -484,32 +484,32 @@ public final class ContainerTestHelper {
    * @param request - Request
    * @param response - Response
    */
-  public static void verifyGetKey(ContainerCommandRequestProto request,
+  public static void verifyGetBlock(ContainerCommandRequestProto request,
       ContainerCommandResponseProto response, int expectedChunksCount) {
     Assert.assertEquals(request.getTraceID(), response.getTraceID());
     Assert.assertEquals(ContainerProtos.Result.SUCCESS, response.getResult());
     Assert.assertEquals(expectedChunksCount,
-        response.getGetKey().getKeyData().getChunksCount());
+        response.getGetBlock().getBlockData().getChunksCount());
   }
 
   /**
    * @param pipeline - pipeline.
-   * @param putKeyRequest - putKeyRequest.
+   * @param putBlockRequest - putBlockRequest.
    * @return - Request
    */
-  public static ContainerCommandRequestProto getDeleteKeyRequest(
-      Pipeline pipeline, ContainerProtos.PutKeyRequestProto putKeyRequest) {
-    ContainerProtos.DatanodeBlockID blockID = putKeyRequest.getKeyData()
+  public static ContainerCommandRequestProto getDeleteBlockRequest(
+      Pipeline pipeline, ContainerProtos.PutBlockRequestProto putBlockRequest) {
+    ContainerProtos.DatanodeBlockID blockID = putBlockRequest.getBlockData()
         .getBlockID();
-    LOG.trace("deleteKey: name={}", blockID);
-    ContainerProtos.DeleteKeyRequestProto.Builder delRequest =
-        ContainerProtos.DeleteKeyRequestProto.newBuilder();
+    LOG.trace("deleteBlock: name={}", blockID);
+    ContainerProtos.DeleteBlockRequestProto.Builder delRequest =
+        ContainerProtos.DeleteBlockRequestProto.newBuilder();
     delRequest.setBlockID(blockID);
     ContainerCommandRequestProto.Builder request =
         ContainerCommandRequestProto.newBuilder();
-    request.setCmdType(ContainerProtos.Type.DeleteKey);
+    request.setCmdType(ContainerProtos.Type.DeleteBlock);
     request.setContainerID(blockID.getContainerID());
-    request.setDeleteKey(delRequest);
+    request.setDeleteBlock(delRequest);
     request.setTraceID(UUID.randomUUID().toString());
     request.setDatanodeUuid(pipeline.getLeader().getUuidString());
     return request.build();
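
Taken together, the renamed helpers compose into the write-then-read round trip the integration tests drive. The following is a hedged usage sketch: the getWriteChunkRequest helper and the pipeline/client/blockID variables are assumed to exist as elsewhere in these tests and are not part of this hunk:

    // Assumed helper: getWriteChunkRequest(pipeline, blockID, datalen).
    ContainerCommandRequestProto writeChunk = ContainerTestHelper
        .getWriteChunkRequest(pipeline, blockID, 1024);
    client.sendCommand(writeChunk);

    ContainerCommandRequestProto putBlock = ContainerTestHelper
        .getPutBlockRequest(pipeline, writeChunk.getWriteChunk());
    client.sendCommand(putBlock);

    ContainerCommandRequestProto getBlock = ContainerTestHelper
        .getBlockRequest(pipeline, putBlock.getPutBlock());
    ContainerTestHelper.verifyGetBlock(
        getBlock, client.sendCommand(getBlock), 1);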

http://git-wip-us.apache.org/repos/asf/hadoop/blob/096a7160/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/TestContainerReplication.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/TestContainerReplication.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/TestContainerReplication.java
index 7391b25..52cebb3 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/TestContainerReplication.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/TestContainerReplication.java
@@ -39,7 +39,7 @@ import org.apache.hadoop.hdds.scm.XceiverClientSpi;
 import org.apache.hadoop.hdds.scm.container.common.helpers.Pipeline;
 import org.apache.hadoop.ozone.HddsDatanodeService;
 import org.apache.hadoop.ozone.MiniOzoneCluster;
-import org.apache.hadoop.ozone.container.common.helpers.KeyData;
+import org.apache.hadoop.ozone.container.common.helpers.BlockData;
 import org.apache.hadoop.ozone.container.common.interfaces.Container;
 import org.apache.hadoop.ozone.container.keyvalue.KeyValueContainerData;
 import org.apache.hadoop.ozone.container.keyvalue.KeyValueHandler;
@@ -100,17 +100,20 @@ public class TestContainerReplication {
 
     DatanodeBlockID blockID = requestProto.getWriteChunk().getBlockID();
 
-    // Put Key to the test container
-    ContainerCommandRequestProto putKeyRequest = ContainerTestHelper
-        .getPutKeyRequest(sourcePipelines, requestProto.getWriteChunk());
+    // Put Block to the test container
+    ContainerCommandRequestProto putBlockRequest = ContainerTestHelper
+        .getPutBlockRequest(sourcePipelines, requestProto.getWriteChunk());
 
-    ContainerProtos.KeyData keyData = putKeyRequest.getPutKey().getKeyData();
+    ContainerProtos.BlockData blockData =
+        putBlockRequest.getPutBlock().getBlockData();
 
-    ContainerCommandResponseProto response = client.sendCommand(putKeyRequest);
+    ContainerCommandResponseProto response =
+        client.sendCommand(putBlockRequest);
 
     Assert.assertNotNull(response);
     Assert.assertEquals(ContainerProtos.Result.SUCCESS, response.getResult());
-    Assert.assertTrue(putKeyRequest.getTraceID().equals(response.getTraceID()));
+    Assert.assertTrue(
+        putBlockRequest.getTraceID().equals(response.getTraceID()));
 
     HddsDatanodeService destinationDatanode =
         chooseDatanodeWithoutContainer(sourcePipelines,
@@ -147,8 +150,8 @@ public class TestContainerReplication {
     KeyValueHandler handler = (KeyValueHandler) ozoneContainer.getDispatcher()
         .getHandler(ContainerType.KeyValueContainer);
 
-    KeyData key = handler.getKeyManager()
-        .getKey(container, BlockID.getFromProtobuf(blockID));
+    BlockData key = handler.getBlockManager()
+        .getBlock(container, BlockID.getFromProtobuf(blockID));
 
     Assert.assertNotNull(key);
     Assert.assertEquals(1, key.getChunks().size());
@@ -164,7 +167,8 @@ public class TestContainerReplication {
         return datanode;
       }
     }
-    throw new AssertionError("No datanode outside of the pipeline");
+    throw new AssertionError(
+        "No datanode outside of the pipeline");
   }
 
   static OzoneConfiguration newOzoneConfiguration() {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/096a7160/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/TestBlockDeletingService.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/TestBlockDeletingService.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/TestBlockDeletingService.java
index 25c8c6b..7e30c5f 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/TestBlockDeletingService.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/TestBlockDeletingService.java
@@ -26,6 +26,7 @@ import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos;
 import org.apache.hadoop.hdds.conf.OzoneConfiguration;
 import org.apache.hadoop.ozone.OzoneConsts;
 import org.apache.hadoop.ozone.container.ContainerTestHelper;
+import org.apache.hadoop.ozone.container.common.helpers.BlockData;
 import org.apache.hadoop.ozone.container.common.impl.ContainerData;
 import org.apache.hadoop.ozone.container.common.impl.ContainerSet;
 import org.apache.hadoop.ozone.container.common.interfaces.Container;
@@ -33,9 +34,8 @@ import org.apache.hadoop.ozone.container.common.volume.RoundRobinVolumeChoosingP
 import org.apache.hadoop.ozone.container.common.volume.VolumeSet;
 import org.apache.hadoop.ozone.container.keyvalue.KeyValueContainer;
 import org.apache.hadoop.ozone.container.keyvalue.KeyValueContainerData;
-import org.apache.hadoop.ozone.container.keyvalue.helpers.KeyUtils;
+import org.apache.hadoop.ozone.container.keyvalue.helpers.BlockUtils;
 import org.apache.hadoop.ozone.container.testutils.BlockDeletingServiceTestImpl;
-import org.apache.hadoop.ozone.container.common.helpers.KeyData;
 import org.apache.hadoop.ozone.container.common.impl.RandomContainerDeletionChoosingPolicy;
 import org.apache.hadoop.ozone.container.keyvalue.statemachine.background
     .BlockDeletingService;
@@ -117,13 +117,13 @@ public class TestBlockDeletingService {
       containerSet.addContainer(container);
       data = (KeyValueContainerData) containerSet.getContainer(
           containerID).getContainerData();
-      MetadataStore metadata = KeyUtils.getDB(data, conf);
+      MetadataStore metadata = BlockUtils.getDB(data, conf);
       for (int j = 0; j<numOfBlocksPerContainer; j++) {
         BlockID blockID =
             ContainerTestHelper.getTestBlockID(containerID);
         String deleteStateName = OzoneConsts.DELETING_KEY_PREFIX +
             blockID.getLocalID();
-        KeyData kd = new KeyData(blockID);
+        BlockData kd = new BlockData(blockID);
         List<ContainerProtos.ChunkInfo> chunks = Lists.newArrayList();
         for (int k = 0; k<numOfChunksPerBlock; k++) {
           // offset doesn't matter here
@@ -200,7 +200,7 @@ public class TestBlockDeletingService {
     containerSet.listContainer(0L, 1, containerData);
     Assert.assertEquals(1, containerData.size());
 
-    MetadataStore meta = KeyUtils.getDB(
+    MetadataStore meta = BlockUtils.getDB(
         (KeyValueContainerData) containerData.get(0), conf);
     Map<Long, Container> containerMap = containerSet.getContainerMap();
     // NOTE: this test assumes that all the container is KetValueContainer and
@@ -309,7 +309,7 @@ public class TestBlockDeletingService {
     // get container meta data
     List<ContainerData> containerData = Lists.newArrayList();
     containerSet.listContainer(0L, 1, containerData);
-    MetadataStore meta = KeyUtils.getDB(
+    MetadataStore meta = BlockUtils.getDB(
         (KeyValueContainerData) containerData.get(0), conf);
 
     LogCapturer newLog = LogCapturer.captureLogs(BackgroundService.LOG);




[15/50] [abbrv] hadoop git commit: HDFS-13868. WebHDFS: GETSNAPSHOTDIFF API NPE when param "snapshotname" is given but "oldsnapshotname" is not. Contributed by Pranay Singh.

Posted by xy...@apache.org.
HDFS-13868. WebHDFS: GETSNAPSHOTDIFF API NPE when param "snapshotname" is given but "oldsnapshotname" is not. Contributed by Pranay Singh.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/28ceb34a
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/28ceb34a
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/28ceb34a

Branch: refs/heads/HDDS-4
Commit: 28ceb34a725cd06d28fb51361c49bb45464f5368
Parents: e435e12
Author: Wei-Chiu Chuang <we...@apache.org>
Authored: Wed Sep 19 03:11:29 2018 -0700
Committer: Wei-Chiu Chuang <we...@apache.org>
Committed: Wed Sep 19 03:12:20 2018 -0700

----------------------------------------------------------------------
 .../src/main/java/org/apache/hadoop/hdfs/DFSClient.java        | 4 ++++
 .../namenode/snapshot/DirectorySnapshottableFeature.java       | 2 +-
 .../hdfs/server/namenode/snapshot/TestSnapshotDiffReport.java  | 6 ++++++
 .../src/test/java/org/apache/hadoop/hdfs/web/TestWebHDFS.java  | 6 ++++++
 4 files changed, 17 insertions(+), 1 deletion(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/28ceb34a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSClient.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
index f4d11b9..38072b2 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
@@ -2158,6 +2158,10 @@ public class DFSClient implements java.io.Closeable, RemotePeerFactory,
       String fromSnapshot, String toSnapshot) throws IOException {
     checkOpen();
     try (TraceScope ignored = tracer.newScope("getSnapshotDiffReport")) {
+      Preconditions.checkArgument(fromSnapshot != null,
+          "null fromSnapshot");
+      Preconditions.checkArgument(toSnapshot != null,
+          "null toSnapshot");
       return namenode
           .getSnapshotDiffReport(snapshotDir, fromSnapshot, toSnapshot);
     } catch (RemoteException re) {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/28ceb34a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/DirectorySnapshottableFeature.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/DirectorySnapshottableFeature.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/DirectorySnapshottableFeature.java
index d3083cf..15aa22a 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/DirectorySnapshottableFeature.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/DirectorySnapshottableFeature.java
@@ -277,7 +277,7 @@ public class DirectorySnapshottableFeature extends DirectoryWithSnapshotFeature
     Snapshot fromSnapshot = getSnapshotByName(snapshotRootDir, from);
     Snapshot toSnapshot = getSnapshotByName(snapshotRootDir, to);
     // if the start point is equal to the end point, return null
-    if (from.equals(to)) {
+    if (from != null && from.equals(to)) {
       return null;
     }
     SnapshotDiffInfo diffs = new SnapshotDiffInfo(snapshotRootDir,

http://git-wip-us.apache.org/repos/asf/hadoop/blob/28ceb34a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestSnapshotDiffReport.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestSnapshotDiffReport.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestSnapshotDiffReport.java
index 4625988..18ec3c5 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestSnapshotDiffReport.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestSnapshotDiffReport.java
@@ -230,6 +230,12 @@ public class TestSnapshotDiffReport {
     LOG.info(report.toString());
     assertEquals(0, report.getDiffList().size());
 
+    try {
+      report = hdfs.getSnapshotDiffReport(subsubsub1, null, "s2");
+      fail("Expect exception when providing null fromSnapshot ");
+    } catch (IllegalArgumentException e) {
+      GenericTestUtils.assertExceptionContains("null fromSnapshot", e);
+    }
     report = hdfs.getSnapshotDiffReport(subsubsub1, "s0", "s2");
     LOG.info(report.toString());
     assertEquals(0, report.getDiffList().size());

http://git-wip-us.apache.org/repos/asf/hadoop/blob/28ceb34a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHDFS.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHDFS.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHDFS.java
index 5d33220..a766de6 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHDFS.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHDFS.java
@@ -743,6 +743,12 @@ public class TestWebHDFS {
       Assert.assertTrue(diffReport.getDiffList().contains(entry3));
       Assert.assertTrue(diffReport.getDiffList().contains(entry4));
       Assert.assertEquals(diffReport.getDiffList().size(), 5);
+
+      // Test with fromSnapshot and toSnapshot as null.
+      diffReport = webHdfs.getSnapshotDiffReport(foo, null, "s2");
+      Assert.assertEquals(diffReport.getDiffList().size(), 0);
+      diffReport = webHdfs.getSnapshotDiffReport(foo, "s1", null);
+      Assert.assertEquals(diffReport.getDiffList().size(), 5);
     } finally {
       if (cluster != null) {
         cluster.shutdown();
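
The net client-side contract after this patch, sketched with hypothetical paths and snapshot names (hdfs is a DistributedFileSystem, as in the tests above): a null snapshot name now fails fast in DFSClient with an IllegalArgumentException instead of surfacing as an NPE from the NameNode. Note the asymmetry the tests pin down: the WebHDFS client still accepts nulls and resolves them server-side.

    // Both snapshots named: works as before.
    SnapshotDiffReport report =
        hdfs.getSnapshotDiffReport(new Path("/dir"), "s0", "s2");
    // Missing fromSnapshot: now rejected up front by the client.
    hdfs.getSnapshotDiffReport(new Path("/dir"), null, "s2");
    // throws IllegalArgumentException("null fromSnapshot")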




[05/50] [abbrv] hadoop git commit: MAPREDUCE-7137. MRAppBenchmark.benchmark1() fails with NullPointerException. Contributed by Oleksandr Shevchenko

Posted by xy...@apache.org.
MAPREDUCE-7137. MRAppBenchmark.benchmark1() fails with NullPointerException. Contributed by Oleksandr Shevchenko


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/34b2237e
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/34b2237e
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/34b2237e

Branch: refs/heads/HDDS-4
Commit: 34b2237e420cfbe3a97ddd44968de8bbe1ed30ab
Parents: 5c2ae7e
Author: Jason Lowe <jl...@apache.org>
Authored: Tue Sep 18 16:56:31 2018 -0500
Committer: Jason Lowe <jl...@apache.org>
Committed: Tue Sep 18 16:56:31 2018 -0500

----------------------------------------------------------------------
 .../java/org/apache/hadoop/mapreduce/v2/app/MRAppBenchmark.java | 5 ++++-
 1 file changed, 4 insertions(+), 1 deletion(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/34b2237e/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/MRAppBenchmark.java
----------------------------------------------------------------------
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/MRAppBenchmark.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/MRAppBenchmark.java
index 025a8fa..5e6697b 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/MRAppBenchmark.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/MRAppBenchmark.java
@@ -47,6 +47,7 @@ import org.apache.hadoop.yarn.api.protocolrecords.RegisterApplicationMasterRespo
 import org.apache.hadoop.yarn.api.records.Container;
 import org.apache.hadoop.yarn.api.records.ContainerId;
 import org.apache.hadoop.yarn.api.records.NodeId;
+import org.apache.hadoop.yarn.api.records.Priority;
 import org.apache.hadoop.yarn.api.records.Resource;
 import org.apache.hadoop.yarn.api.records.ResourceRequest;
 import org.apache.hadoop.yarn.exceptions.YarnRuntimeException;
@@ -184,7 +185,7 @@ public class MRAppBenchmark {
     }
   }
 
-  @Test
+  @Test(timeout = 60000)
   public void benchmark1() throws Exception {
     int maps = 100; // Adjust for benchmarking. Start with thousands.
     int reduces = 0;
@@ -211,6 +212,7 @@ public class MRAppBenchmark {
                     Records.newRecord(RegisterApplicationMasterResponse.class);
                 response.setMaximumResourceCapability(Resource.newInstance(
                   10240, 1));
+                response.setQueue("queue1");
                 return response;
               }
 
@@ -252,6 +254,7 @@ public class MRAppBenchmark {
                 response.setAllocatedContainers(containers);
                 response.setResponseId(request.getResponseId() + 1);
                 response.setNumClusterNodes(350);
+                response.setApplicationPriority(Priority.newInstance(100));
                 return response;
               }
             };
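
The NullPointerException came from the benchmark's stubbed scheduler: the MR
app master reads the queue from the registration response and the application
priority from the allocation response, and the original stubs left both null.
A condensed sketch of a fully populated stub pair (the helper class here is
illustrative, not part of the patch):

    import org.apache.hadoop.yarn.api.protocolrecords.AllocateResponse;
    import org.apache.hadoop.yarn.api.protocolrecords.RegisterApplicationMasterResponse;
    import org.apache.hadoop.yarn.api.records.Priority;
    import org.apache.hadoop.yarn.api.records.Resource;
    import org.apache.hadoop.yarn.util.Records;

    final class StubRmResponses {

      // Registration: the queue must be non-null or the AM fails later.
      static RegisterApplicationMasterResponse register() {
        RegisterApplicationMasterResponse r =
            Records.newRecord(RegisterApplicationMasterResponse.class);
        r.setMaximumResourceCapability(Resource.newInstance(10240, 1));
        r.setQueue("queue1");
        return r;
      }

      // Allocation: the application priority must likewise be set.
      static AllocateResponse allocate(int lastResponseId) {
        AllocateResponse r = Records.newRecord(AllocateResponse.class);
        r.setResponseId(lastResponseId + 1);
        r.setNumClusterNodes(350);
        r.setApplicationPriority(Priority.newInstance(100));
        return r;
      }
    }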


---------------------------------------------------------------------
To unsubscribe, e-mail: common-commits-unsubscribe@hadoop.apache.org
For additional commands, e-mail: common-commits-help@hadoop.apache.org


[12/50] [abbrv] hadoop git commit: HDFS-13833. Improve BlockPlacementPolicyDefault's consider load logic. Contributed by Shweta.

Posted by xy...@apache.org.
HDFS-13833. Improve BlockPlacementPolicyDefault's consider load logic. Contributed by Shweta.

Signed-off-by: Xiao Chen <xi...@apache.org>


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/27978bcb
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/27978bcb
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/27978bcb

Branch: refs/heads/HDDS-4
Commit: 27978bcb66a9130cbf26d37ec454c0b7fcdc2530
Parents: 3929653
Author: Shweta <sh...@cloudera.com>
Authored: Tue Sep 18 20:22:25 2018 -0700
Committer: Xiao Chen <xi...@apache.org>
Committed: Tue Sep 18 20:23:50 2018 -0700

----------------------------------------------------------------------
 .../BlockPlacementPolicyDefault.java            | 29 ++++++++++++++------
 .../blockmanagement/TestReplicationPolicy.java  | 28 +++++++++++++++++++
 2 files changed, 49 insertions(+), 8 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/27978bcb/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicyDefault.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicyDefault.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicyDefault.java
index d00f961..d396845 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicyDefault.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicyDefault.java
@@ -913,6 +913,24 @@ public class BlockPlacementPolicyDefault extends BlockPlacementPolicy {
   }
 
   /**
+   * Determine if a datanode should be chosen based on current workload.
+   *
+   * @param node The target datanode
+   * @return Return true if the datanode should be excluded, otherwise false
+   */
+  boolean excludeNodeByLoad(DatanodeDescriptor node){
+    final double maxLoad = considerLoadFactor *
+        stats.getInServiceXceiverAverage();
+    final int nodeLoad = node.getXceiverCount();
+    if ((nodeLoad > maxLoad) && (maxLoad > 0)) {
+      logNodeIsNotChosen(node, NodeNotChosenReason.NODE_TOO_BUSY,
+          "(load: " + nodeLoad + " > " + maxLoad + ")");
+      return true;
+    }
+    return false;
+  }
+
+  /**
    * Determine if a datanode is good for placing block.
    *
    * @param node The target datanode
@@ -923,7 +941,7 @@ public class BlockPlacementPolicyDefault extends BlockPlacementPolicy {
    * @param results A list containing currently chosen nodes. Used to check if
    *                too many nodes has been chosen in the target rack.
    * @param avoidStaleNodes Whether or not to avoid choosing stale nodes
-   * @return Reture true if the datanode is good candidate, otherwise false
+   * @return Return true if the datanode is a good candidate, otherwise false
    */
   boolean isGoodDatanode(DatanodeDescriptor node,
                          int maxTargetPerRack, boolean considerLoad,
@@ -943,13 +961,8 @@ public class BlockPlacementPolicyDefault extends BlockPlacementPolicy {
     }
 
     // check the communication traffic of the target machine
-    if (considerLoad) {
-      final double maxLoad = considerLoadFactor *
-          stats.getInServiceXceiverAverage();
-      final int nodeLoad = node.getXceiverCount();
-      if (nodeLoad > maxLoad) {
-        logNodeIsNotChosen(node, NodeNotChosenReason.NODE_TOO_BUSY,
-            "(load: " + nodeLoad + " > " + maxLoad + ")");
+    if (considerLoad) {
+      if (excludeNodeByLoad(node)) {
         return false;
       }
     }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/27978bcb/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestReplicationPolicy.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestReplicationPolicy.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestReplicationPolicy.java
index 27dcbf1..f08fa13 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestReplicationPolicy.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestReplicationPolicy.java
@@ -65,6 +65,7 @@ import org.apache.hadoop.hdfs.server.namenode.Namesystem;
 import org.apache.hadoop.hdfs.server.namenode.TestINodeFile;
 import org.apache.hadoop.hdfs.server.protocol.DatanodeStorage;
 import org.apache.hadoop.net.Node;
+import org.apache.hadoop.util.ReflectionUtils;
 import org.apache.log4j.Level;
 import org.apache.log4j.Logger;
 import org.apache.log4j.spi.LoggingEvent;
@@ -1559,4 +1560,31 @@ public class TestReplicationPolicy extends BaseReplicationPolicyTest {
     }
     assertTrue(found);
   }
+
+  @Test
+  public void testMaxLoad() {
+    FSClusterStats statistics = mock(FSClusterStats.class);
+    DatanodeDescriptor node = mock(DatanodeDescriptor.class);
+
+    when(statistics.getInServiceXceiverAverage()).thenReturn(0.0);
+    when(node.getXceiverCount()).thenReturn(1);
+
+    final Configuration conf = new Configuration();
+    final Class<? extends BlockPlacementPolicy> replicatorClass = conf
+        .getClass(DFSConfigKeys.DFS_BLOCK_REPLICATOR_CLASSNAME_KEY,
+            DFSConfigKeys.DFS_BLOCK_REPLICATOR_CLASSNAME_DEFAULT,
+            BlockPlacementPolicy.class);
+    BlockPlacementPolicy bpp = ReflectionUtils.
+        newInstance(replicatorClass, conf);
+    assertTrue(bpp instanceof  BlockPlacementPolicyDefault);
+
+    BlockPlacementPolicyDefault bppd = (BlockPlacementPolicyDefault) bpp;
+    bppd.initialize(conf, statistics, null, null);
+    assertFalse(bppd.excludeNodeByLoad(node));
+
+    when(statistics.getInServiceXceiverAverage()).thenReturn(1.0);
+    when(node.getXceiverCount()).thenReturn(10);
+    assertTrue(bppd.excludeNodeByLoad(node));
+
+  }
 }
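
Besides the extraction, excludeNodeByLoad() slightly tightens the rule: the
old inline check could exclude a node whenever nodeLoad > maxLoad, even when
maxLoad was 0 (an idle cluster average), while the new guard additionally
requires maxLoad > 0. Restated as standalone Java (illustrative names, not
the Hadoop API):

    // true means "too busy, pick another datanode"
    static boolean excludeByLoad(int nodeXceivers, double clusterAvgXceivers,
                                 double considerLoadFactor) {
      double maxLoad = considerLoadFactor * clusterAvgXceivers;
      // An idle cluster (average 0) never excludes a node.
      return maxLoad > 0 && nodeXceivers > maxLoad;
    }

This matches the new unit test: an average of 0.0 keeps a node with one
xceiver, while an average of 1.0 excludes a node with ten (with the default
load factor of 2.0, maxLoad is 2.0).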


---------------------------------------------------------------------
To unsubscribe, e-mail: common-commits-unsubscribe@hadoop.apache.org
For additional commands, e-mail: common-commits-help@hadoop.apache.org


[25/50] [abbrv] hadoop git commit: HDDS-460. Replication manager failed to import container data. Contributed by Elek, Marton.

Posted by xy...@apache.org.
HDDS-460. Replication manager failed to import container data. Contributed by Elek, Marton.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/042bf74d
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/042bf74d
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/042bf74d

Branch: refs/heads/HDDS-4
Commit: 042bf74d5eb3dc627658b8c4c027628569169513
Parents: efdea85
Author: Nanda kumar <na...@apache.org>
Authored: Wed Sep 19 23:51:50 2018 +0530
Committer: Nanda kumar <na...@apache.org>
Committed: Wed Sep 19 23:51:50 2018 +0530

----------------------------------------------------------------------
 .../statemachine/DatanodeStateMachine.java      |  19 ++-
 .../ReplicateContainerCommandHandler.java       | 120 ++------------
 .../replication/ContainerReplicator.java        |  27 +++
 .../DownloadAndImportReplicator.java            | 136 ++++++++++++++++
 .../replication/GrpcReplicationClient.java      |   2 +-
 .../replication/ReplicationSupervisor.java      | 142 ++++++++++++++++
 .../container/replication/ReplicationTask.java  | 102 ++++++++++++
 .../TestReplicateContainerCommandHandler.java   | 163 -------------------
 .../replication/TestReplicationSupervisor.java  | 143 ++++++++++++++++
 .../container/replication/package-info.java     |  22 +++
 10 files changed, 602 insertions(+), 274 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/042bf74d/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/DatanodeStateMachine.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/DatanodeStateMachine.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/DatanodeStateMachine.java
index 875d063..1bade8e 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/DatanodeStateMachine.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/DatanodeStateMachine.java
@@ -40,7 +40,12 @@ import org.apache.hadoop.ozone.container.common.statemachine.commandhandler
     .DeleteBlocksCommandHandler;
 import org.apache.hadoop.ozone.container.common.statemachine.commandhandler
     .ReplicateContainerCommandHandler;
+import org.apache.hadoop.ozone.container.keyvalue.TarContainerPacker;
 import org.apache.hadoop.ozone.container.ozoneimpl.OzoneContainer;
+import org.apache.hadoop.ozone.container.replication.ContainerReplicator;
+import org.apache.hadoop.ozone.container.replication.DownloadAndImportReplicator;
+import org.apache.hadoop.ozone.container.replication.ReplicationSupervisor;
+import org.apache.hadoop.ozone.container.replication.SimpleContainerDownloader;
 import org.apache.hadoop.ozone.protocol.commands.SCMCommand;
 import org.apache.hadoop.util.Time;
 import org.apache.hadoop.util.concurrent.HadoopExecutors;
@@ -69,6 +74,7 @@ public class DatanodeStateMachine implements Closeable {
   private AtomicLong nextHB;
   private Thread stateMachineThread = null;
   private Thread cmdProcessThread = null;
+  private final ReplicationSupervisor supervisor;
 
   /**
   * Constructs a datanode state machine.
@@ -89,14 +95,21 @@ public class DatanodeStateMachine implements Closeable {
         new OzoneConfiguration(conf), context);
     nextHB = new AtomicLong(Time.monotonicNow());
 
+    ContainerReplicator replicator =
+        new DownloadAndImportReplicator(container.getContainerSet(),
+            container.getDispatcher(),
+            new SimpleContainerDownloader(conf), new TarContainerPacker());
+
+    supervisor =
+        new ReplicationSupervisor(container.getContainerSet(), replicator, 10);
+
     // When we add new handlers just adding a new handler here should do the
      // trick.
     commandDispatcher = CommandDispatcher.newBuilder()
         .addHandler(new CloseContainerCommandHandler())
         .addHandler(new DeleteBlocksCommandHandler(container.getContainerSet(),
             conf))
-        .addHandler(new ReplicateContainerCommandHandler(conf,
-            container.getContainerSet(), container.getDispatcher()))
+        .addHandler(new ReplicateContainerCommandHandler(conf, supervisor))
         .setConnectionManager(connectionManager)
         .setContainer(container)
         .setContext(context)
@@ -295,6 +308,7 @@ public class DatanodeStateMachine implements Closeable {
   public void startDaemon() {
     Runnable startStateMachineTask = () -> {
       try {
+        supervisor.start();
         start();
         LOG.info("Ozone container server started.");
       } catch (Exception ex) {
@@ -323,6 +337,7 @@ public class DatanodeStateMachine implements Closeable {
    */
   public synchronized void stopDaemon() {
     try {
+      supervisor.stop();
       context.setState(DatanodeStates.SHUTDOWN);
       reportManager.shutdown();
       this.close();

http://git-wip-us.apache.org/repos/asf/hadoop/blob/042bf74d/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/ReplicateContainerCommandHandler.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/ReplicateContainerCommandHandler.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/ReplicateContainerCommandHandler.java
index cb677c2..09c379f 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/ReplicateContainerCommandHandler.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/ReplicateContainerCommandHandler.java
@@ -16,33 +16,17 @@
  */
 package org.apache.hadoop.ozone.container.common.statemachine.commandhandler;
 
-import java.io.FileInputStream;
-import java.nio.file.Files;
-import java.nio.file.Path;
 import java.util.List;
-import java.util.concurrent.CompletableFuture;
-import java.util.concurrent.locks.Lock;
-import java.util.concurrent.locks.ReentrantLock;
 
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hdds.protocol.DatanodeDetails;
-import org.apache.hadoop.hdds.protocol.proto
-    .StorageContainerDatanodeProtocolProtos.SCMCommandProto;
-import org.apache.hadoop.hdds.protocol.proto
-    .StorageContainerDatanodeProtocolProtos.SCMCommandProto.Type;
-import org.apache.hadoop.ozone.container.common.impl.ContainerData;
-import org.apache.hadoop.ozone.container.common.impl.ContainerDataYaml;
-import org.apache.hadoop.ozone.container.common.impl.ContainerSet;
-import org.apache.hadoop.ozone.container.common.interfaces.Container;
-import org.apache.hadoop.ozone.container.common.interfaces.ContainerDispatcher;
-import org.apache.hadoop.ozone.container.common.interfaces.Handler;
-import org.apache.hadoop.ozone.container.common.statemachine
-    .SCMConnectionManager;
+import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.SCMCommandProto;
+import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.SCMCommandProto.Type;
+import org.apache.hadoop.ozone.container.common.statemachine.SCMConnectionManager;
 import org.apache.hadoop.ozone.container.common.statemachine.StateContext;
-import org.apache.hadoop.ozone.container.keyvalue.TarContainerPacker;
 import org.apache.hadoop.ozone.container.ozoneimpl.OzoneContainer;
-import org.apache.hadoop.ozone.container.replication.ContainerDownloader;
-import org.apache.hadoop.ozone.container.replication.SimpleContainerDownloader;
+import org.apache.hadoop.ozone.container.replication.ReplicationSupervisor;
+import org.apache.hadoop.ozone.container.replication.ReplicationTask;
 import org.apache.hadoop.ozone.protocol.commands.ReplicateContainerCommand;
 import org.apache.hadoop.ozone.protocol.commands.SCMCommand;
 
@@ -58,39 +42,19 @@ public class ReplicateContainerCommandHandler implements CommandHandler {
   static final Logger LOG =
       LoggerFactory.getLogger(ReplicateContainerCommandHandler.class);
 
-  private ContainerDispatcher containerDispatcher;
-
   private int invocationCount;
 
   private long totalTime;
 
-  private ContainerDownloader downloader;
-
   private Configuration conf;
 
-  private TarContainerPacker packer = new TarContainerPacker();
-
-  private ContainerSet containerSet;
-
-  private Lock lock = new ReentrantLock();
+  private ReplicationSupervisor supervisor;
 
   public ReplicateContainerCommandHandler(
       Configuration conf,
-      ContainerSet containerSet,
-      ContainerDispatcher containerDispatcher,
-      ContainerDownloader downloader) {
+      ReplicationSupervisor supervisor) {
     this.conf = conf;
-    this.containerSet = containerSet;
-    this.downloader = downloader;
-    this.containerDispatcher = containerDispatcher;
-  }
-
-  public ReplicateContainerCommandHandler(
-      Configuration conf,
-      ContainerSet containerSet,
-      ContainerDispatcher containerDispatcher) {
-    this(conf, containerSet, containerDispatcher,
-        new SimpleContainerDownloader(conf));
+    this.supervisor = supervisor;
   }
 
   @Override
@@ -108,72 +72,12 @@ public class ReplicateContainerCommandHandler implements CommandHandler {
           String.format("Replication command is received for container %d "
               + "but the size of source datanodes was 0.", containerID));
 
-      LOG.info("Starting replication of container {} from {}", containerID,
-          sourceDatanodes);
-      CompletableFuture<Path> tempTarFile = downloader
-          .getContainerDataFromReplicas(containerID,
-              sourceDatanodes);
-
-      CompletableFuture<Void> result =
-          tempTarFile.thenAccept(path -> {
-            LOG.info("Container {} is downloaded, starting to import.",
-                containerID);
-            importContainer(containerID, path);
-          });
-
-      result.whenComplete((aVoid, throwable) -> {
-        if (throwable != null) {
-          LOG.error("Container replication was unsuccessful .", throwable);
-        } else {
-          LOG.info("Container {} is replicated successfully", containerID);
-        }
-      });
-    } finally {
-      updateCommandStatus(context, command, true, LOG);
+      ReplicationTask replicationTask =
+          new ReplicationTask(containerID, sourceDatanodes);
+      supervisor.addTask(replicationTask);
 
-    }
-  }
-
-  protected void importContainer(long containerID, Path tarFilePath) {
-    lock.lock();
-    try {
-      ContainerData originalContainerData;
-      try (FileInputStream tempContainerTarStream = new FileInputStream(
-          tarFilePath.toFile())) {
-        byte[] containerDescriptorYaml =
-            packer.unpackContainerDescriptor(tempContainerTarStream);
-        originalContainerData = ContainerDataYaml.readContainer(
-            containerDescriptorYaml);
-      }
-
-      try (FileInputStream tempContainerTarStream = new FileInputStream(
-          tarFilePath.toFile())) {
-
-        Handler handler = containerDispatcher.getHandler(
-            originalContainerData.getContainerType());
-
-        Container container = handler.importContainer(containerID,
-            originalContainerData.getMaxSize(),
-            tempContainerTarStream,
-            packer);
-
-        containerSet.addContainer(container);
-      }
-
-    } catch (Exception e) {
-      LOG.error(
-          "Can't import the downloaded container data id=" + containerID,
-          e);
-      try {
-        Files.delete(tarFilePath);
-      } catch (Exception ex) {
-        LOG.error(
-            "Container import is failed and the downloaded file can't be "
-                + "deleted: "
-                + tarFilePath.toAbsolutePath().toString());
-      }
     } finally {
-      lock.unlock();
+      updateCommandStatus(context, command, true, LOG);
     }
   }
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/042bf74d/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/replication/ContainerReplicator.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/replication/ContainerReplicator.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/replication/ContainerReplicator.java
new file mode 100644
index 0000000..827b9d6
--- /dev/null
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/replication/ContainerReplicator.java
@@ -0,0 +1,27 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.ozone.container.replication;
+
+/**
+ * Service to do the real replication task.
+ *
+ * An implementation should download the container and import it.
+ */
+public interface ContainerReplicator {
+  void replicate(ReplicationTask task);
+}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/042bf74d/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/replication/DownloadAndImportReplicator.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/replication/DownloadAndImportReplicator.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/replication/DownloadAndImportReplicator.java
new file mode 100644
index 0000000..5ef5841
--- /dev/null
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/replication/DownloadAndImportReplicator.java
@@ -0,0 +1,136 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.ozone.container.replication;
+
+import java.io.FileInputStream;
+import java.nio.file.Files;
+import java.nio.file.Path;
+import java.util.List;
+import java.util.concurrent.CompletableFuture;
+
+import org.apache.hadoop.hdds.protocol.DatanodeDetails;
+import org.apache.hadoop.ozone.container.common.impl.ContainerData;
+import org.apache.hadoop.ozone.container.common.impl.ContainerDataYaml;
+import org.apache.hadoop.ozone.container.common.impl.ContainerSet;
+import org.apache.hadoop.ozone.container.common.interfaces.Container;
+import org.apache.hadoop.ozone.container.common.interfaces.ContainerDispatcher;
+import org.apache.hadoop.ozone.container.common.interfaces.Handler;
+import org.apache.hadoop.ozone.container.keyvalue.TarContainerPacker;
+import org.apache.hadoop.ozone.container.replication.ReplicationTask.Status;
+
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+/**
+ * Default replication implementation.
+ * <p>
+ * This class does the real job. Executes the download and import the container
+ * to the container set.
+ */
+public class DownloadAndImportReplicator implements ContainerReplicator {
+
+  private static final Logger LOG =
+      LoggerFactory.getLogger(DownloadAndImportReplicator.class);
+
+  private final ContainerSet containerSet;
+
+  private final ContainerDispatcher containerDispatcher;
+
+  private final ContainerDownloader downloader;
+
+  private final TarContainerPacker packer;
+
+  public DownloadAndImportReplicator(
+      ContainerSet containerSet,
+      ContainerDispatcher containerDispatcher,
+      ContainerDownloader downloader,
+      TarContainerPacker packer) {
+    this.containerSet = containerSet;
+    this.containerDispatcher = containerDispatcher;
+    this.downloader = downloader;
+    this.packer = packer;
+  }
+
+  public void importContainer(long containerID, Path tarFilePath) {
+    try {
+      ContainerData originalContainerData;
+      try (FileInputStream tempContainerTarStream = new FileInputStream(
+          tarFilePath.toFile())) {
+        byte[] containerDescriptorYaml =
+            packer.unpackContainerDescriptor(tempContainerTarStream);
+        originalContainerData = ContainerDataYaml.readContainer(
+            containerDescriptorYaml);
+      }
+
+      try (FileInputStream tempContainerTarStream = new FileInputStream(
+          tarFilePath.toFile())) {
+
+        Handler handler = containerDispatcher.getHandler(
+            originalContainerData.getContainerType());
+
+        Container container = handler.importContainer(containerID,
+            originalContainerData.getMaxSize(),
+            tempContainerTarStream,
+            packer);
+
+        containerSet.addContainer(container);
+      }
+
+    } catch (Exception e) {
+      LOG.error(
+          "Can't import the downloaded container data id=" + containerID,
+          e);
+      try {
+        Files.delete(tarFilePath);
+      } catch (Exception ex) {
+        LOG.error(
+            "Container import is failed and the downloaded file can't be "
+                + "deleted: "
+                + tarFilePath.toAbsolutePath().toString());
+      }
+    }
+  }
+
+  @Override
+  public void replicate(ReplicationTask task) {
+    long containerID = task.getContainerId();
+
+    List<DatanodeDetails> sourceDatanodes = task.getSources();
+
+    LOG.info("Starting replication of container {} from {}", containerID,
+        sourceDatanodes);
+
+    CompletableFuture<Path> tempTarFile = downloader
+        .getContainerDataFromReplicas(containerID,
+            sourceDatanodes);
+
+    try {
+      //wait for the download. This thread pool is limiting the parallel
+      //downloads, so it's ok to block here and wait for the full download.
+      Path path = tempTarFile.get();
+      LOG.info("Container {} is downloaded, starting to import.",
+          containerID);
+      importContainer(containerID, path);
+      LOG.info("Container {} is replicated successfully", containerID);
+      task.setStatus(Status.DONE);
+    } catch (Exception e) {
+      LOG.error("Container replication was unsuccessful .", e);
+      task.setStatus(Status.FAILED);
+    }
+  }
+}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/042bf74d/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/replication/GrpcReplicationClient.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/replication/GrpcReplicationClient.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/replication/GrpcReplicationClient.java
index 91d098f..3aafb0c 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/replication/GrpcReplicationClient.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/replication/GrpcReplicationClient.java
@@ -157,8 +157,8 @@ public class GrpcReplicationClient {
     public void onCompleted() {
       try {
         stream.close();
-        response.complete(outputPath);
         LOG.info("Container is downloaded to {}", outputPath);
+        response.complete(outputPath);
       } catch (IOException e) {
         response.completeExceptionally(e);
       }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/042bf74d/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/replication/ReplicationSupervisor.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/replication/ReplicationSupervisor.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/replication/ReplicationSupervisor.java
new file mode 100644
index 0000000..1d8d5f6
--- /dev/null
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/replication/ReplicationSupervisor.java
@@ -0,0 +1,142 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.ozone.container.replication;
+
+import java.util.HashSet;
+import java.util.Map;
+import java.util.Set;
+import java.util.TreeMap;
+
+import org.apache.hadoop.ozone.container.common.impl.ContainerSet;
+import org.apache.hadoop.ozone.container.replication.ReplicationTask.Status;
+
+import com.google.common.annotations.VisibleForTesting;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+/**
+ * Single point to schedule the downloading tasks based on priorities.
+ */
+public class ReplicationSupervisor {
+
+  private static final Logger LOG =
+      LoggerFactory.getLogger(ReplicationSupervisor.class);
+
+  private final Set<Worker> threadPool = new HashSet<>();
+
+  private final Map<Long, ReplicationTask> queue = new TreeMap();
+
+  private final ContainerSet containerSet;
+
+  private final ContainerReplicator replicator;
+
+  private final int poolSize;
+
+  public ReplicationSupervisor(
+      ContainerSet containerSet,
+      ContainerReplicator replicator, int poolSize) {
+    this.containerSet = containerSet;
+    this.replicator = replicator;
+    this.poolSize = poolSize;
+  }
+
+  public synchronized void addTask(ReplicationTask task) {
+    queue.putIfAbsent(task.getContainerId(), task);
+    synchronized (threadPool) {
+      threadPool.notify();
+    }
+  }
+
+  public void start() {
+    for (int i = 0; i < poolSize; i++) {
+      Worker worker = new Worker();
+      Thread thread = new Thread(worker, "ContainerReplication-" + i);
+      thread.setDaemon(true);
+      thread.start();
+      threadPool.add(worker);
+    }
+  }
+
+  public synchronized ReplicationTask selectTask() {
+    for (ReplicationTask task : queue.values()) {
+      if (task.getStatus() == Status.QUEUED) {
+        if (containerSet.getContainer(task.getContainerId()) == null) {
+          task.setStatus(Status.DOWNLOADING);
+          return task;
+        } else {
+          LOG.debug("Container {} has already been downloaded.",
+              task.getContainerId());
+          queue.remove(task.getContainerId());
+        }
+      } else if (task.getStatus() == Status.FAILED) {
+        LOG.error(
+            "Container {} can't be downloaded from any of the datanodes.",
+            task.getContainerId());
+        queue.remove(task.getContainerId());
+      } else if (task.getStatus() == Status.DONE) {
+        queue.remove(task.getContainerId());
+        LOG.info("Container {} is replicated.", task.getContainerId());
+      }
+    }
+    //no available task.
+    return null;
+  }
+
+  public void stop() {
+    for (Worker worker : threadPool) {
+      worker.stop();
+    }
+  }
+
+  @VisibleForTesting
+  public int getQueueSize() {
+    return queue.size();
+  }
+
+  private class Worker implements Runnable {
+
+    private volatile boolean running = true;
+
+    @Override
+    public void run() {
+      try {
+        while (running) {
+          ReplicationTask task = selectTask();
+          if (task == null) {
+            synchronized (threadPool) {
+              threadPool.wait();
+            }
+          } else {
+            replicator.replicate(task);
+          }
+        }
+      } catch (Exception ex) {
+        LOG.error("Error on doing replication", ex);
+        try {
+          Thread.sleep(200);
+        } catch (InterruptedException e) {
+          LOG.error("Error on waiting after failed replication task", e);
+        }
+      }
+    }
+
+    public void stop() {
+      running = false;
+    }
+  }
+}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/042bf74d/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/replication/ReplicationTask.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/replication/ReplicationTask.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/replication/ReplicationTask.java
new file mode 100644
index 0000000..9019811
--- /dev/null
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/replication/ReplicationTask.java
@@ -0,0 +1,102 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.ozone.container.replication;
+
+import java.time.Instant;
+import java.util.List;
+import java.util.Objects;
+
+import org.apache.hadoop.hdds.protocol.DatanodeDetails;
+
+/**
+ * The task to download a container from the sources.
+ */
+public class ReplicationTask {
+
+  private volatile Status status = Status.QUEUED;
+
+  private final long containerId;
+
+  private List<DatanodeDetails> sources;
+
+  private final Instant queued = Instant.now();
+
+  public ReplicationTask(long containerId,
+      List<DatanodeDetails> sources) {
+    this.containerId = containerId;
+    this.sources = sources;
+  }
+
+  @Override
+  public boolean equals(Object o) {
+    if (this == o) {
+      return true;
+    }
+    if (o == null || getClass() != o.getClass()) {
+      return false;
+    }
+    ReplicationTask that = (ReplicationTask) o;
+    return containerId == that.containerId;
+  }
+
+  @Override
+  public int hashCode() {
+    return Objects.hash(containerId);
+  }
+
+  public long getContainerId() {
+    return containerId;
+  }
+
+  public List<DatanodeDetails> getSources() {
+    return sources;
+  }
+
+  public Status getStatus() {
+    return status;
+  }
+
+  public void setStatus(
+      Status status) {
+    this.status = status;
+  }
+
+  @Override
+  public String toString() {
+    return "ReplicationTask{" +
+        "status=" + status +
+        ", containerId=" + containerId +
+        ", sources=" + sources +
+        ", queued=" + queued +
+        '}';
+  }
+
+  public Instant getQueued() {
+    return queued;
+  }
+
+  /**
+   * Status of the replication.
+   */
+  public enum Status {
+    QUEUED,
+    DOWNLOADING,
+    FAILED,
+    DONE
+  }
+}
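
Taken together, the new classes form a small producer/consumer pipeline: the
command handler enqueues ReplicationTasks, the supervisor's worker threads
pick QUEUED tasks (duplicates collapse via the containerId-based
equals/hashCode and the putIfAbsent in addTask), and the replicator blocks on
the download, so the fixed pool size also bounds concurrent downloads. The
wiring, condensed from the DatanodeStateMachine change above:

    ContainerReplicator replicator = new DownloadAndImportReplicator(
        container.getContainerSet(), container.getDispatcher(),
        new SimpleContainerDownloader(conf), new TarContainerPacker());

    ReplicationSupervisor supervisor =
        new ReplicationSupervisor(container.getContainerSet(), replicator, 10);

    supervisor.start();   // spawns 10 daemon worker threads
    supervisor.addTask(new ReplicationTask(containerID, sourceDatanodes));
    // ...
    supervisor.stop();    // on shutdown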

http://git-wip-us.apache.org/repos/asf/hadoop/blob/042bf74d/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestReplicateContainerCommandHandler.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestReplicateContainerCommandHandler.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestReplicateContainerCommandHandler.java
deleted file mode 100644
index 6529922..0000000
--- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestReplicateContainerCommandHandler.java
+++ /dev/null
@@ -1,163 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with this
- * work for additional information regarding copyright ownership.  The ASF
- * licenses this file to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations under
- * the License.
- */
-
-package org.apache.hadoop.ozone.container.common.statemachine.commandhandler;
-
-import java.nio.file.Path;
-import java.nio.file.Paths;
-import java.util.ArrayList;
-import java.util.HashMap;
-import java.util.List;
-import java.util.Map;
-import java.util.concurrent.CompletableFuture;
-import java.util.concurrent.TimeoutException;
-
-import org.apache.hadoop.hdds.conf.OzoneConfiguration;
-import org.apache.hadoop.hdds.protocol.DatanodeDetails;
-import org.apache.hadoop.ozone.container.common.impl.ContainerSet;
-import org.apache.hadoop.ozone.container.common.interfaces.ContainerDispatcher;
-import org.apache.hadoop.ozone.container.common.statemachine.StateContext;
-import org.apache.hadoop.ozone.container.replication.ContainerDownloader;
-import org.apache.hadoop.ozone.protocol.commands.ReplicateContainerCommand;
-import org.apache.hadoop.test.GenericTestUtils;
-import org.apache.hadoop.test.TestGenericTestUtils;
-
-import org.junit.Assert;
-import org.junit.Before;
-import org.junit.Test;
-import org.mockito.Mockito;
-
-/**
- * Test replication command handler.
- */
-public class TestReplicateContainerCommandHandler {
-
-  private static final String EXCEPTION_MESSAGE = "Oh my god";
-  private ReplicateContainerCommandHandler handler;
-  private StubDownloader downloader;
-  private ReplicateContainerCommand command;
-  private List<Long> importedContainerIds;
-
-  @Before
-  public void init() {
-    importedContainerIds = new ArrayList<>();
-
-    OzoneConfiguration conf = new OzoneConfiguration();
-    ContainerSet containerSet = Mockito.mock(ContainerSet.class);
-    ContainerDispatcher containerDispatcher =
-        Mockito.mock(ContainerDispatcher.class);
-
-    downloader = new StubDownloader();
-
-    handler = new ReplicateContainerCommandHandler(conf, containerSet,
-        containerDispatcher, downloader) {
-      @Override
-      protected void importContainer(long containerID, Path tarFilePath) {
-        importedContainerIds.add(containerID);
-      }
-    };
-
-    //the command
-    ArrayList<DatanodeDetails> datanodeDetails = new ArrayList<>();
-    datanodeDetails.add(Mockito.mock(DatanodeDetails.class));
-    datanodeDetails.add(Mockito.mock(DatanodeDetails.class));
-
-    command = new ReplicateContainerCommand(1L, datanodeDetails);
-  }
-
-  @Test
-  public void handle() throws TimeoutException, InterruptedException {
-    //GIVEN
-
-    //WHEN
-    handler.handle(command, null, Mockito.mock(StateContext.class), null);
-
-    TestGenericTestUtils
-        .waitFor(() -> downloader.futureByContainers.size() == 1, 100, 2000);
-
-    Assert.assertNotNull(downloader.futureByContainers.get(1L));
-    downloader.futureByContainers.get(1L).complete(Paths.get("/tmp/test"));
-
-    TestGenericTestUtils
-        .waitFor(() -> importedContainerIds.size() == 1, 100, 2000);
-  }
-
-  @Test
-  public void handleWithErrors() throws TimeoutException, InterruptedException {
-    //GIVEN
-    GenericTestUtils.LogCapturer logCapturer = GenericTestUtils.LogCapturer
-        .captureLogs(ReplicateContainerCommandHandler.LOG);
-
-    //WHEN
-    handler.handle(command, null, Mockito.mock(StateContext.class), null);
-
-    //THEN
-    TestGenericTestUtils
-        .waitFor(() -> downloader.futureByContainers.size() == 1, 100, 2000);
-
-    Assert.assertNotNull(downloader.futureByContainers.get(1L));
-    downloader.futureByContainers.get(1L)
-        .completeExceptionally(new IllegalArgumentException(
-            EXCEPTION_MESSAGE));
-
-    TestGenericTestUtils
-        .waitFor(() -> {
-          String output = logCapturer.getOutput();
-          return output.contains("unsuccessful") && output
-            .contains(EXCEPTION_MESSAGE); },
-            100,
-            2000);
-  }
-
-  /**
-   * Can't handle a command if there are no source replicas.
-   */
-  @Test(expected = IllegalArgumentException.class)
-  public void handleWithoutReplicas()
-      throws TimeoutException, InterruptedException {
-    //GIVEN
-    ReplicateContainerCommand commandWithoutReplicas =
-        new ReplicateContainerCommand(1L, new ArrayList<>());
-
-    //WHEN
-    handler
-        .handle(commandWithoutReplicas,
-            null,
-            Mockito.mock(StateContext.class),
-            null);
-
-  }
-  private static class StubDownloader implements ContainerDownloader {
-
-    private Map<Long, CompletableFuture<Path>> futureByContainers =
-        new HashMap<>();
-
-    @Override
-    public void close() {
-
-    }
-
-    @Override
-    public CompletableFuture<Path> getContainerDataFromReplicas(
-        long containerId, List<DatanodeDetails> sources) {
-      CompletableFuture<Path> future = new CompletableFuture<>();
-      futureByContainers.put(containerId, future);
-      return future;
-    }
-  }
-
-}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hadoop/blob/042bf74d/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/replication/TestReplicationSupervisor.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/replication/TestReplicationSupervisor.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/replication/TestReplicationSupervisor.java
new file mode 100644
index 0000000..d433319
--- /dev/null
+++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/replication/TestReplicationSupervisor.java
@@ -0,0 +1,143 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ *  with the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ *  Unless required by applicable law or agreed to in writing, software
+ *  distributed under the License is distributed on an "AS IS" BASIS,
+ *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ *  See the License for the specific language governing permissions and
+ *  limitations under the License.
+ */
+
+package org.apache.hadoop.ozone.container.replication;
+
+import java.util.ArrayList;
+import java.util.List;
+import java.util.stream.Collectors;
+import java.util.stream.IntStream;
+
+import org.apache.hadoop.hdds.conf.OzoneConfiguration;
+import org.apache.hadoop.hdds.protocol.DatanodeDetails;
+import org.apache.hadoop.ozone.container.common.impl.ContainerSet;
+import org.apache.hadoop.ozone.container.keyvalue.KeyValueContainer;
+import org.apache.hadoop.ozone.container.keyvalue.KeyValueContainerData;
+
+import org.junit.Assert;
+import org.junit.Test;
+import org.mockito.Mockito;
+
+/**
+ * Test the replication supervisor.
+ */
+public class TestReplicationSupervisor {
+
+  private OzoneConfiguration conf = new OzoneConfiguration();
+
+  @Test
+  public void normal() {
+    //GIVEN
+    ContainerSet set = new ContainerSet();
+
+    FakeReplicator replicator = new FakeReplicator(set);
+    ReplicationSupervisor supervisor =
+        new ReplicationSupervisor(set, replicator, 5);
+
+    List<DatanodeDetails> datanodes = IntStream.range(1, 3)
+        .mapToObj(v -> Mockito.mock(DatanodeDetails.class))
+        .collect(Collectors.toList());
+
+    try {
+      supervisor.start();
+      //WHEN
+      supervisor.addTask(new ReplicationTask(1L, datanodes));
+      supervisor.addTask(new ReplicationTask(1L, datanodes));
+      supervisor.addTask(new ReplicationTask(1L, datanodes));
+      supervisor.addTask(new ReplicationTask(2L, datanodes));
+      supervisor.addTask(new ReplicationTask(2L, datanodes));
+      supervisor.addTask(new ReplicationTask(3L, datanodes));
+      try {
+        Thread.sleep(300);
+      } catch (InterruptedException e) {
+        e.printStackTrace();
+      }
+      //THEN
+      System.out.println(replicator.replicated.get(0));
+
+      Assert
+          .assertEquals(3, replicator.replicated.size());
+
+    } finally {
+      supervisor.stop();
+    }
+  }
+
+  @Test
+  public void duplicateMessageAfterAWhile() throws InterruptedException {
+    //GIVEN
+    ContainerSet set = new ContainerSet();
+
+    FakeReplicator replicator = new FakeReplicator(set);
+    ReplicationSupervisor supervisor =
+        new ReplicationSupervisor(set, replicator, 2);
+
+    List<DatanodeDetails> datanodes = IntStream.range(1, 3)
+        .mapToObj(v -> Mockito.mock(DatanodeDetails.class))
+        .collect(Collectors.toList());
+
+    try {
+      supervisor.start();
+      //WHEN
+      supervisor.addTask(new ReplicationTask(1L, datanodes));
+      Thread.sleep(400);
+      supervisor.addTask(new ReplicationTask(1L, datanodes));
+      Thread.sleep(300);
+
+      //THEN
+      System.out.println(replicator.replicated.get(0));
+
+      Assert
+          .assertEquals(1, replicator.replicated.size());
+
+      //the last item is still in the queue as we clean up the queue during the
+      // selection
+      Assert.assertEquals(1, supervisor.getQueueSize());
+
+    } finally {
+      supervisor.stop();
+    }
+  }
+
+  private class FakeReplicator implements ContainerReplicator {
+
+    private List<ReplicationTask> replicated = new ArrayList<>();
+
+    private ContainerSet containerSet;
+
+    FakeReplicator(ContainerSet set) {
+      this.containerSet = set;
+    }
+
+    @Override
+    public void replicate(ReplicationTask task) {
+      KeyValueContainerData kvcd =
+          new KeyValueContainerData(task.getContainerId(), 100L);
+      KeyValueContainer kvc =
+          new KeyValueContainer(kvcd, conf);
+      try {
+        //download is slow
+        Thread.sleep(100);
+        replicated.add(task);
+        containerSet.addContainer(kvc);
+      } catch (Exception e) {
+        throw new RuntimeException(e);
+      }
+    }
+  }
+}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hadoop/blob/042bf74d/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/replication/package-info.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/replication/package-info.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/replication/package-info.java
new file mode 100644
index 0000000..5c905e0
--- /dev/null
+++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/replication/package-info.java
@@ -0,0 +1,22 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ *  with the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+/**
+ * Tests for the container replication.
+ */
+package org.apache.hadoop.ozone.container.replication;
\ No newline at end of file


---------------------------------------------------------------------
To unsubscribe, e-mail: common-commits-unsubscribe@hadoop.apache.org
For additional commands, e-mail: common-commits-help@hadoop.apache.org


[19/50] [abbrv] hadoop git commit: HDDS-502. Exception in OM startup when running unit tests. Contributed by Arpit Agarwal.

Posted by xy...@apache.org.
HDDS-502. Exception in OM startup when running unit tests. Contributed by Arpit Agarwal.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/15ed74fa
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/15ed74fa
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/15ed74fa

Branch: refs/heads/HDDS-4
Commit: 15ed74fa244eacbe7a8eac877ea64d7a53a1bf9c
Parents: c0956ee
Author: Nanda kumar <na...@apache.org>
Authored: Wed Sep 19 19:46:25 2018 +0530
Committer: Nanda kumar <na...@apache.org>
Committed: Wed Sep 19 19:46:25 2018 +0530

----------------------------------------------------------------------
 .../scm/server/StorageContainerManager.java     | 55 ++++++++++++++++----
 .../apache/hadoop/ozone/om/OzoneManager.java    | 33 ++++++++++--
 2 files changed, 73 insertions(+), 15 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/15ed74fa/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/StorageContainerManager.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/StorageContainerManager.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/StorageContainerManager.java
index 2169149..86c061b 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/StorageContainerManager.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/StorageContainerManager.java
@@ -374,7 +374,8 @@ public final class StorageContainerManager extends ServiceRuntimeInfoImpl
         hParser.printGenericCommandUsage(System.err);
         System.exit(1);
       }
-      StorageContainerManager scm = createSCM(hParser.getRemainingArgs(), conf);
+      StorageContainerManager scm = createSCM(
+          hParser.getRemainingArgs(), conf, true);
       if (scm != null) {
         scm.start();
         scm.join();
@@ -389,9 +390,37 @@ public final class StorageContainerManager extends ServiceRuntimeInfoImpl
     out.println(USAGE + "\n");
   }
 
-  public static StorageContainerManager createSCM(String[] args,
-      OzoneConfiguration conf)
-      throws IOException {
+  /**
+   * Create an SCM instance based on the supplied command-line arguments.
+   *
+   * This method is intended for unit tests only. It suppresses the
+   * startup/shutdown message and skips registering Unix signal
+   * handlers.
+   *
+   * @param args command line arguments.
+   * @param conf HDDS configuration
+   * @return SCM instance
+   * @throws IOException
+   */
+  @VisibleForTesting
+  public static StorageContainerManager createSCM(
+      String[] args, OzoneConfiguration conf) throws IOException {
+    return createSCM(args, conf, false);
+  }
+
+  /**
+   * Create an SCM instance based on the supplied command-line arguments.
+   *
+   * @param args command-line arguments.
+   * @param conf HDDS configuration
+   * @param printBanner if true, then log a verbose startup message.
+   * @return SCM instance
+   * @throws IOException
+   */
+  private static StorageContainerManager createSCM(
+      String[] args,
+      OzoneConfiguration conf,
+      boolean printBanner) throws IOException {
     String[] argv = (args == null) ? new String[0] : args;
     if (!HddsUtils.isHddsEnabled(conf)) {
       System.err.println(
@@ -407,13 +436,17 @@ public final class StorageContainerManager extends ServiceRuntimeInfoImpl
     }
     switch (startOpt) {
     case INIT:
-      StringUtils.startupShutdownMessage(StorageContainerManager.class, argv,
-          LOG);
+      if (printBanner) {
+        StringUtils.startupShutdownMessage(StorageContainerManager.class, argv,
+            LOG);
+      }
       terminate(scmInit(conf) ? 0 : 1);
       return null;
     case GENCLUSTERID:
-      StringUtils.startupShutdownMessage(StorageContainerManager.class, argv,
-          LOG);
+      if (printBanner) {
+        StringUtils.startupShutdownMessage(StorageContainerManager.class, argv,
+            LOG);
+      }
       System.out.println("Generating new cluster id:");
       System.out.println(StorageInfo.newClusterID());
       terminate(0);
@@ -423,8 +456,10 @@ public final class StorageContainerManager extends ServiceRuntimeInfoImpl
       terminate(0);
       return null;
     default:
-      StringUtils.startupShutdownMessage(StorageContainerManager.class, argv,
-          LOG);
+      if (printBanner) {
+        StringUtils.startupShutdownMessage(StorageContainerManager.class, argv,
+            LOG);
+      }
       return new StorageContainerManager(conf);
     }
   }
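
The pattern above, a public @VisibleForTesting overload that forwards to a private worker with the banner flag forced off, can be read in isolation. A minimal sketch of the idiom, using illustrative names rather than the actual Hadoop classes:

    import java.io.IOException;

    /** Minimal sketch of the banner-gating factory idiom; not Hadoop code. */
    public final class Server {

      /** Test-only entry point: never logs the startup banner. */
      static Server createForTest(String[] args) throws IOException {
        return create(args, false);
      }

      /** Production entry point: logs the banner before starting. */
      public static void main(String[] args) throws IOException {
        create(args, true).start();
      }

      private static Server create(String[] args, boolean printBanner)
          throws IOException {
        if (printBanner) {
          // Stands in for StringUtils.startupShutdownMessage(...), which
          // also registers signal handlers; both are noise in unit tests.
          System.out.println("STARTUP_MSG: starting Server");
        }
        return new Server();
      }

      private void start() {
      }
    }

Keeping the boolean on the private method means production callers never see the flag, and tests never trip the banner or the signal-handler registration.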

http://git-wip-us.apache.org/repos/asf/hadoop/blob/15ed74fa/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OzoneManager.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OzoneManager.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OzoneManager.java
index 71cc6ba..6ea0fe7 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OzoneManager.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OzoneManager.java
@@ -258,7 +258,7 @@ public final class OzoneManager extends ServiceRuntimeInfoImpl
         hParser.printGenericCommandUsage(System.err);
         System.exit(1);
       }
-      OzoneManager om = createOm(hParser.getRemainingArgs(), conf);
+      OzoneManager om = createOm(hParser.getRemainingArgs(), conf, true);
       if (om != null) {
         om.start();
         om.join();
@@ -276,14 +276,33 @@ public final class OzoneManager extends ServiceRuntimeInfoImpl
   /**
    * Constructs OM instance based on command line arguments.
    *
+   * This method is intended for unit tests only. It suppresses the
+   * startup/shutdown message and skips registering Unix signal
+   * handlers.
+   *
    * @param argv Command line arguments
    * @param conf OzoneConfiguration
    * @return OM instance
    * @throws IOException in case OM instance creation fails.
    */
+  @VisibleForTesting
+  public static OzoneManager createOm(
+      String[] argv, OzoneConfiguration conf) throws IOException {
+    return createOm(argv, conf, false);
+  }
 
-  public static OzoneManager createOm(String[] argv,
-      OzoneConfiguration conf) throws IOException {
+
+  /**
+   * Constructs OM instance based on command line arguments.
+   *
+   * @param argv Command line arguments
+   * @param conf OzoneConfiguration
+   * @param printBanner if true, then log a verbose startup message.
+   * @return OM instance
+   * @throws IOException in case OM instance creation fails.
+   */
+  private static OzoneManager createOm(String[] argv,
+      OzoneConfiguration conf, boolean printBanner) throws IOException {
     if (!isHddsEnabled(conf)) {
       System.err.println("OM cannot be started in secure mode or when " +
           OZONE_ENABLED + " is set to false");
@@ -297,7 +316,9 @@ public final class OzoneManager extends ServiceRuntimeInfoImpl
     }
     switch (startOpt) {
     case CREATEOBJECTSTORE:
-      StringUtils.startupShutdownMessage(OzoneManager.class, argv, LOG);
+      if (printBanner) {
+        StringUtils.startupShutdownMessage(OzoneManager.class, argv, LOG);
+      }
       terminate(omInit(conf) ? 0 : 1);
       return null;
     case HELP:
@@ -308,7 +329,9 @@ public final class OzoneManager extends ServiceRuntimeInfoImpl
       if (argv == null) {
         argv = new String[]{};
       }
-      StringUtils.startupShutdownMessage(OzoneManager.class, argv, LOG);
+      if (printBanner) {
+        StringUtils.startupShutdownMessage(OzoneManager.class, argv, LOG);
+      }
       return new OzoneManager(conf);
     }
   }
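
With both factories in place, a unit test can construct the daemons without banner noise along these lines (a rough sketch; real tests also initialize SCM/OM storage state and set a fuller configuration):

    OzoneConfiguration conf = new OzoneConfiguration();
    // Ozone must be enabled, otherwise both factories print an error and exit.
    conf.setBoolean(OzoneConfigKeys.OZONE_ENABLED, true);

    StorageContainerManager scm = StorageContainerManager.createSCM(null, conf);
    OzoneManager om = OzoneManager.createOm(null, conf);

Both calls take the @VisibleForTesting path, so neither logs the startup banner nor registers signal handlers.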




[39/50] [abbrv] hadoop git commit: HDDS-394. Rename *Key APIs in DatanodeContainerProtocol to *Block APIs. Contributed by Dinesh Chitlangia.

Posted by xy...@apache.org.
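
For context: the mechanical core of this rename, as it surfaces in the test diffs below, is that requests once assembled from the PutKey protobuf types are now assembled from their *Block counterparts. Condensed from the changed tests, with imports, cluster setup, and the blockID, chunkProtoList and pipeline variables elided:

    // BlockData replaces KeyData as the container-level block descriptor.
    BlockData blockData = new BlockData(blockID);
    blockData.setChunks(chunkProtoList);

    // PutKeyRequestProto / Type.PutKey become PutBlockRequestProto / Type.PutBlock.
    ContainerProtos.PutBlockRequestProto.Builder putBlock =
        ContainerProtos.PutBlockRequestProto.newBuilder()
            .setBlockData(blockData.getProtoBufMessage());

    ContainerProtos.ContainerCommandRequestProto request =
        ContainerProtos.ContainerCommandRequestProto.newBuilder()
            .setCmdType(ContainerProtos.Type.PutBlock)
            .setContainerID(blockID.getContainerID())
            .setPutBlock(putBlock)
            .setTraceID(UUID.randomUUID().toString())
            .setDatanodeUuid(pipeline.getLeader().getUuidString())
            .build();
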
http://git-wip-us.apache.org/repos/asf/hadoop/blob/096a7160/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/helpers/TestBlockData.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/helpers/TestBlockData.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/helpers/TestBlockData.java
new file mode 100644
index 0000000..9df4249
--- /dev/null
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/helpers/TestBlockData.java
@@ -0,0 +1,127 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with this
+ * work for additional information regarding copyright ownership.  The ASF
+ * licenses this file to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations under
+ * the License.
+ */
+
+package org.apache.hadoop.ozone.container.common.helpers;
+
+import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos;
+import org.junit.Assert;
+import org.junit.Rule;
+import org.junit.Test;
+import org.junit.rules.TestRule;
+import org.junit.rules.Timeout;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.util.ArrayList;
+import java.util.List;
+import java.util.concurrent.ThreadLocalRandom;
+
+/**
+ * Tests for the BlockData helper.
+ */
+public class TestBlockData {
+  static final Logger LOG = LoggerFactory.getLogger(TestBlockData.class);
+  @Rule
+  public TestRule timeout = new Timeout(10000);
+
+  static ContainerProtos.ChunkInfo buildChunkInfo(String name, long offset,
+      long len) {
+    return ContainerProtos.ChunkInfo.newBuilder()
+        .setChunkName(name).setOffset(offset).setLen(len).build();
+  }
+
+  @Test
+  public void testAddAndRemove() {
+    final BlockData computed = new BlockData(null);
+    final List<ContainerProtos.ChunkInfo> expected = new ArrayList<>();
+
+    assertChunks(expected, computed);
+    long offset = 0;
+    int n = 5;
+    for(int i = 0; i < n; i++) {
+      offset += assertAddChunk(expected, computed, offset);
+    }
+
+    for(; !expected.isEmpty();) {
+      removeChunk(expected, computed);
+    }
+  }
+
+  private static int chunkCount = 0;
+  static ContainerProtos.ChunkInfo addChunk(
+      List<ContainerProtos.ChunkInfo> expected, long offset) {
+    final long length = ThreadLocalRandom.current().nextLong(1000);
+    final ContainerProtos.ChunkInfo info =
+        buildChunkInfo("c" + ++chunkCount, offset, length);
+    expected.add(info);
+    return info;
+  }
+
+  static long assertAddChunk(List<ContainerProtos.ChunkInfo> expected,
+      BlockData computed, long offset) {
+    final ContainerProtos.ChunkInfo info = addChunk(expected, offset);
+    LOG.info("addChunk: " + toString(info));
+    computed.addChunk(info);
+    assertChunks(expected, computed);
+    return info.getLen();
+  }
+
+
+  static void removeChunk(List<ContainerProtos.ChunkInfo> expected,
+      BlockData computed) {
+    final int i = ThreadLocalRandom.current().nextInt(expected.size());
+    final ContainerProtos.ChunkInfo info = expected.remove(i);
+    LOG.info("removeChunk: " + toString(info));
+    computed.removeChunk(info);
+    assertChunks(expected, computed);
+  }
+
+  static void assertChunks(List<ContainerProtos.ChunkInfo> expected,
+      BlockData computed) {
+    final List<ContainerProtos.ChunkInfo> computedChunks = computed.getChunks();
+    Assert.assertEquals("expected=" + expected + "\ncomputed=" +
+        computedChunks, expected, computedChunks);
+    Assert.assertEquals(expected.stream().mapToLong(i -> i.getLen()).sum(),
+        computed.getSize());
+  }
+
+  static String toString(ContainerProtos.ChunkInfo info) {
+    return info.getChunkName() + ":" + info.getOffset() + "," + info.getLen();
+  }
+
+  static String toString(List<ContainerProtos.ChunkInfo> infos) {
+    return infos.stream().map(TestBlockData::toString)
+        .reduce((left, right) -> left + ", " + right)
+        .orElse("");
+  }
+
+  @Test
+  public void testSetChunks() {
+    final BlockData computed = new BlockData(null);
+    final List<ContainerProtos.ChunkInfo> expected = new ArrayList<>();
+
+    assertChunks(expected, computed);
+    long offset = 0;
+    int n = 5;
+    for(int i = 0; i < n; i++) {
+      offset += addChunk(expected, offset).getLen();
+      LOG.info("setChunk: " + toString(expected));
+      computed.setChunks(expected);
+      assertChunks(expected, computed);
+    }
+  }
+}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/096a7160/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/helpers/TestKeyData.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/helpers/TestKeyData.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/helpers/TestKeyData.java
deleted file mode 100644
index f57fe99..0000000
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/helpers/TestKeyData.java
+++ /dev/null
@@ -1,119 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with this
- * work for additional information regarding copyright ownership.  The ASF
- * licenses this file to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations under
- * the License.
- */
-
-package org.apache.hadoop.ozone.container.common.helpers;
-
-import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos;
-import org.junit.Assert;
-import org.junit.Rule;
-import org.junit.Test;
-import org.junit.rules.TestRule;
-import org.junit.rules.Timeout;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import java.util.ArrayList;
-import java.util.List;
-import java.util.concurrent.ThreadLocalRandom;
-
-/**
- * Tests to test block deleting service.
- */
-public class TestKeyData {
-  static final Logger LOG = LoggerFactory.getLogger(TestKeyData.class);
-  @Rule
-  public TestRule timeout = new Timeout(10000);
-
-  static ContainerProtos.ChunkInfo buildChunkInfo(String name, long offset, long len) {
-    return ContainerProtos.ChunkInfo.newBuilder()
-        .setChunkName(name).setOffset(offset).setLen(len).build();
-  }
-
-  @Test
-  public void testAddAndRemove() {
-    final KeyData computed = new KeyData(null);
-    final List<ContainerProtos.ChunkInfo> expected = new ArrayList<>();
-
-    assertChunks(expected, computed);
-    long offset = 0;
-    int n = 5;
-    for(int i = 0; i < n; i++) {
-      offset += assertAddChunk(expected, computed, offset);
-    }
-
-    for(; !expected.isEmpty(); ) {
-      removeChunk(expected, computed);
-    }
-  }
-
-  private static int chunkCount = 0;
-  static ContainerProtos.ChunkInfo addChunk(List<ContainerProtos.ChunkInfo> expected, long offset) {
-    final long length = ThreadLocalRandom.current().nextLong(1000);
-    final ContainerProtos.ChunkInfo info = buildChunkInfo("c" + ++chunkCount, offset, length);
-    expected.add(info);
-    return info;
-  }
-
-  static long assertAddChunk(List<ContainerProtos.ChunkInfo> expected, KeyData computed, long offset) {
-    final ContainerProtos.ChunkInfo info = addChunk(expected, offset);
-    LOG.info("addChunk: " + toString(info));
-    computed.addChunk(info);
-    assertChunks(expected, computed);
-    return info.getLen();
-  }
-
-
-  static void removeChunk(List<ContainerProtos.ChunkInfo> expected, KeyData computed) {
-    final int i = ThreadLocalRandom.current().nextInt(expected.size());
-    final ContainerProtos.ChunkInfo info = expected.remove(i);
-    LOG.info("removeChunk: " + toString(info));
-    computed.removeChunk(info);
-    assertChunks(expected, computed);
-  }
-
-  static void assertChunks(List<ContainerProtos.ChunkInfo> expected, KeyData computed) {
-    final List<ContainerProtos.ChunkInfo> computedChunks = computed.getChunks();
-    Assert.assertEquals("expected=" + expected + "\ncomputed=" + computedChunks, expected, computedChunks);
-    Assert.assertEquals(expected.stream().mapToLong(i -> i.getLen()).sum(), computed.getSize());
-  }
-
-  static String toString(ContainerProtos.ChunkInfo info) {
-    return info.getChunkName() + ":" + info.getOffset() + "," + info.getLen();
-  }
-
-  static String toString(List<ContainerProtos.ChunkInfo> infos) {
-    return infos.stream().map(TestKeyData::toString)
-        .reduce((left, right) -> left + ", " + right)
-        .orElse("");
-  }
-
-  @Test
-  public void testSetChunks() {
-    final KeyData computed = new KeyData(null);
-    final List<ContainerProtos.ChunkInfo> expected = new ArrayList<>();
-
-    assertChunks(expected, computed);
-    long offset = 0;
-    int n = 5;
-    for(int i = 0; i < n; i++) {
-      offset += addChunk(expected, offset).getLen();
-      LOG.info("setChunk: " + toString(expected));
-      computed.setChunks(expected);
-      assertChunks(expected, computed);
-    }
-  }
-}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/096a7160/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/impl/TestCloseContainerHandler.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/impl/TestCloseContainerHandler.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/impl/TestCloseContainerHandler.java
index 73fa70d..78bf008 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/impl/TestCloseContainerHandler.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/impl/TestCloseContainerHandler.java
@@ -25,12 +25,12 @@ import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.ozone.container.ContainerTestHelper;
+import org.apache.hadoop.ozone.container.common.helpers.BlockData;
 import org.apache.hadoop.ozone.container.common.interfaces.Container;
 import org.apache.hadoop.ozone.container.common.volume.HddsVolume;
 import org.apache.hadoop.ozone.container.common.volume.VolumeSet;
 import org.apache.hadoop.ozone.container.keyvalue.KeyValueHandler;
 import org.apache.hadoop.ozone.container.common.helpers.ChunkInfo;
-import org.apache.hadoop.ozone.container.common.helpers.KeyData;
 import org.apache.hadoop.hdds.scm.container.common.helpers.Pipeline;
 import org.apache.ratis.shaded.com.google.protobuf.ByteString;
 import org.junit.AfterClass;
@@ -71,14 +71,14 @@ public class TestCloseContainerHandler {
 
   private final static String DATANODE_UUID = UUID.randomUUID().toString();
 
-  private static final String baseDir = MiniDFSCluster.getBaseDirectory();
-  private static final String volume1 = baseDir + "disk1";
-  private static final String volume2 = baseDir + "disk2";
+  private static final String BASE_DIR = MiniDFSCluster.getBaseDirectory();
+  private static final String VOLUME_1 = BASE_DIR + "disk1";
+  private static final String VOLUME_2 = BASE_DIR + "disk2";
 
   @BeforeClass
   public static void setup() throws Exception {
     conf = new Configuration();
-    String dataDirKey = volume1 + "," + volume2;
+    String dataDirKey = VOLUME_1 + "," + VOLUME_2;
     conf.set(DFSConfigKeys.DFS_DATANODE_DATA_DIR_KEY, dataDirKey);
     containerSet = new ContainerSet();
     DatanodeDetails datanodeDetails =
@@ -160,31 +160,31 @@ public class TestCloseContainerHandler {
         getTestBlockID(testContainerID);
     Pipeline pipeline = createSingleNodePipeline();
     List<ChunkInfo> chunkList = writeChunkBuilder(blockID, pipeline, 3);
-    // the key should exist in the map
+    // the block should exist in the map
     Assert.assertNotNull(
-        openContainerBlockMap.getKeyDataMap(testContainerID)
+        openContainerBlockMap.getBlockDataMap(testContainerID)
             .get(blockID.getLocalID()));
-    KeyData keyData = new KeyData(blockID);
+    BlockData blockData = new BlockData(blockID);
     List<ContainerProtos.ChunkInfo> chunkProtoList = new LinkedList<>();
     for (ChunkInfo i : chunkList) {
       chunkProtoList.add(i.getProtoBufMessage());
     }
-    keyData.setChunks(chunkProtoList);
-    ContainerProtos.PutKeyRequestProto.Builder putKeyRequestProto =
-        ContainerProtos.PutKeyRequestProto.newBuilder();
-    putKeyRequestProto.setKeyData(keyData.getProtoBufMessage());
+    blockData.setChunks(chunkProtoList);
+    ContainerProtos.PutBlockRequestProto.Builder putBlockRequestProto =
+        ContainerProtos.PutBlockRequestProto.newBuilder();
+    putBlockRequestProto.setBlockData(blockData.getProtoBufMessage());
     ContainerProtos.ContainerCommandRequestProto.Builder request =
         ContainerProtos.ContainerCommandRequestProto.newBuilder();
-    request.setCmdType(ContainerProtos.Type.PutKey);
+    request.setCmdType(ContainerProtos.Type.PutBlock);
     request.setContainerID(blockID.getContainerID());
-    request.setPutKey(putKeyRequestProto);
+    request.setPutBlock(putBlockRequestProto);
     request.setTraceID(UUID.randomUUID().toString());
     request.setDatanodeUuid(pipeline.getLeader().getUuidString());
     dispatcher.dispatch(request.build());
 
-    //the open key should be removed from Map
+    //the open block should be removed from Map
     Assert.assertNull(
-        openContainerBlockMap.getKeyDataMap(testContainerID));
+        openContainerBlockMap.getBlockDataMap(testContainerID));
   }
 
   @Test
@@ -197,10 +197,10 @@ public class TestCloseContainerHandler {
     List<ChunkInfo> chunkList = writeChunkBuilder(blockID, pipeline, 3);
     // the key should exist in the map
     Assert.assertNotNull(
-        openContainerBlockMap.getKeyDataMap(testContainerID)
+        openContainerBlockMap.getBlockDataMap(testContainerID)
             .get(blockID.getLocalID()));
     Assert.assertTrue(
-        openContainerBlockMap.getKeyDataMap(testContainerID)
+        openContainerBlockMap.getBlockDataMap(testContainerID)
             .get(blockID.getLocalID()).getChunks().size() == 3);
     ContainerProtos.DeleteChunkRequestProto.Builder deleteChunkProto =
         ContainerProtos.DeleteChunkRequestProto.newBuilder();
@@ -220,7 +220,7 @@ public class TestCloseContainerHandler {
     request.setDatanodeUuid(pipeline.getLeader().getUuidString());
     dispatcher.dispatch(request.build());
     Assert.assertTrue(
-        openContainerBlockMap.getKeyDataMap(testContainerID)
+        openContainerBlockMap.getBlockDataMap(testContainerID)
             .get(blockID.getLocalID()).getChunks().size() == 2);
 
   }
@@ -235,14 +235,14 @@ public class TestCloseContainerHandler {
     List<ChunkInfo> chunkList = writeChunkBuilder(blockID, pipeline, 3);
 
     Container container = containerSet.getContainer(testContainerID);
-    KeyData keyData = openContainerBlockMap.
-        getKeyDataMap(testContainerID).get(blockID.getLocalID());
+    BlockData blockData = openContainerBlockMap.
+        getBlockDataMap(testContainerID).get(blockID.getLocalID());
     // the key should exist in the map
     Assert.assertNotNull(
-        openContainerBlockMap.getKeyDataMap(testContainerID)
+        openContainerBlockMap.getBlockDataMap(testContainerID)
             .get(blockID.getLocalID()));
     Assert.assertTrue(
-        keyData.getChunks().size() == chunkList.size());
+        blockData.getChunks().size() == chunkList.size());
     ContainerProtos.ContainerCommandRequestProto.Builder request =
         ContainerProtos.ContainerCommandRequestProto.newBuilder();
     request.setCmdType(ContainerProtos.Type.CloseContainer);
@@ -253,8 +253,9 @@ public class TestCloseContainerHandler {
     request.setDatanodeUuid(pipeline.getLeader().getUuidString());
     dispatcher.dispatch(request.build());
     Assert.assertNull(
-        openContainerBlockMap.getKeyDataMap(testContainerID));
+        openContainerBlockMap.getBlockDataMap(testContainerID));
     // Make sure the key got committed
-    Assert.assertNotNull(handler.getKeyManager().getKey(container, blockID));
+    Assert.assertNotNull(handler.getBlockManager()
+        .getBlock(container, blockID));
   }
 }
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hadoop/blob/096a7160/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/impl/TestContainerPersistence.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/impl/TestContainerPersistence.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/impl/TestContainerPersistence.java
index 3a36331..b6cdc9d 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/impl/TestContainerPersistence.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/impl/TestContainerPersistence.java
@@ -29,20 +29,20 @@ import org.apache.hadoop.hdds.scm.container.common.helpers.StorageContainerExcep
 import org.apache.hadoop.hdfs.server.datanode.StorageLocation;
 import org.apache.hadoop.ozone.OzoneConsts;
 import org.apache.hadoop.ozone.container.ContainerTestHelper;
+import org.apache.hadoop.ozone.container.common.helpers.BlockData;
 import org.apache.hadoop.ozone.container.common.helpers.ChunkInfo;
 import org.apache.hadoop.ozone.container.common.helpers.ContainerUtils;
-import org.apache.hadoop.ozone.container.common.helpers.KeyData;
 import org.apache.hadoop.ozone.container.common.interfaces.Container;
 import org.apache.hadoop.ozone.container.common.interfaces.VolumeChoosingPolicy;
 import org.apache.hadoop.ozone.container.common.volume.RoundRobinVolumeChoosingPolicy;
 import org.apache.hadoop.ozone.container.common.volume.VolumeSet;
 import org.apache.hadoop.ozone.container.keyvalue.KeyValueContainer;
 import org.apache.hadoop.ozone.container.keyvalue.KeyValueContainerData;
-import org.apache.hadoop.ozone.container.keyvalue.helpers.KeyUtils;
+import org.apache.hadoop.ozone.container.keyvalue.helpers.BlockUtils;
 import org.apache.hadoop.ozone.container.keyvalue.impl.ChunkManagerImpl;
-import org.apache.hadoop.ozone.container.keyvalue.impl.KeyManagerImpl;
+import org.apache.hadoop.ozone.container.keyvalue.impl.BlockManagerImpl;
 import org.apache.hadoop.ozone.container.keyvalue.interfaces.ChunkManager;
-import org.apache.hadoop.ozone.container.keyvalue.interfaces.KeyManager;
+import org.apache.hadoop.ozone.container.keyvalue.interfaces.BlockManager;
 import org.apache.hadoop.test.GenericTestUtils;
 import org.apache.hadoop.utils.MetadataStore;
 import org.junit.After;
@@ -97,7 +97,7 @@ public class TestContainerPersistence {
   private static ContainerSet containerSet;
   private static VolumeSet volumeSet;
   private static VolumeChoosingPolicy volumeChoosingPolicy;
-  private static KeyManager keyManager;
+  private static BlockManager blockManager;
   private static ChunkManager chunkManager;
   @Rule
   public ExpectedException exception = ExpectedException.none();
@@ -126,7 +126,7 @@ public class TestContainerPersistence {
   public void setupPaths() throws IOException {
     containerSet = new ContainerSet();
     volumeSet = new VolumeSet(DATANODE_UUID, conf);
-    keyManager = new KeyManagerImpl(conf);
+    blockManager = new BlockManagerImpl(conf);
     chunkManager = new ChunkManagerImpl();
 
     for (String dir : conf.getStrings(DFS_DATANODE_DATA_DIR_KEY)) {
@@ -152,15 +152,15 @@ public class TestContainerPersistence {
     return ContainerTestHelper.getTestContainerID();
   }
 
-  private Container addContainer(ContainerSet containerSet, long containerID)
+  private Container addContainer(ContainerSet cSet, long cID)
       throws IOException {
-    KeyValueContainerData data = new KeyValueContainerData(containerID,
+    KeyValueContainerData data = new KeyValueContainerData(cID,
         ContainerTestHelper.CONTAINER_MAX_SIZE);
     data.addMetadata("VOLUME", "shire");
     data.addMetadata("owner)", "bilbo");
     KeyValueContainer container = new KeyValueContainer(data, conf);
     container.create(volumeSet, volumeChoosingPolicy, SCM_ID);
-    containerSet.addContainer(container);
+    cSet.addContainer(container);
     return container;
   }
 
@@ -184,7 +184,7 @@ public class TestContainerPersistence {
 
     MetadataStore store = null;
     try {
-      store = KeyUtils.getDB(kvData, conf);
+      store = BlockUtils.getDB(kvData, conf);
       Assert.assertNotNull(store);
     } finally {
       if (store != null) {
@@ -227,19 +227,19 @@ public class TestContainerPersistence {
     Assert.assertFalse(containerSet.getContainerMap()
         .containsKey(testContainerID1));
 
-    // Adding key to a deleted container should fail.
+    // Adding block to a deleted container should fail.
     exception.expect(StorageContainerException.class);
     exception.expectMessage("Error opening DB.");
     BlockID blockID1 = ContainerTestHelper.getTestBlockID(testContainerID1);
-    KeyData someKey1 = new KeyData(blockID1);
+    BlockData someKey1 = new BlockData(blockID1);
     someKey1.setChunks(new LinkedList<ContainerProtos.ChunkInfo>());
-    keyManager.putKey(container1, someKey1);
+    blockManager.putBlock(container1, someKey1);
 
     // Deleting a non-empty container should fail.
     BlockID blockID2 = ContainerTestHelper.getTestBlockID(testContainerID2);
-    KeyData someKey2 = new KeyData(blockID2);
+    BlockData someKey2 = new BlockData(blockID2);
     someKey2.setChunks(new LinkedList<ContainerProtos.ChunkInfo>());
-    keyManager.putKey(container2, someKey2);
+    blockManager.putBlock(container2, someKey2);
 
     exception.expect(StorageContainerException.class);
     exception.expectMessage(
@@ -325,7 +325,8 @@ public class TestContainerPersistence {
     if (container == null) {
       container = addContainer(containerSet, testContainerID);
     }
-    ChunkInfo info = getChunk(blockID.getLocalID(), 0, 0, datalen);
+    ChunkInfo info = getChunk(
+        blockID.getLocalID(), 0, 0, datalen);
     byte[] data = getData(datalen);
     setDataChecksum(info, data);
     chunkManager.writeChunk(container, blockID, info, data, COMBINED);
@@ -348,8 +349,8 @@ public class TestContainerPersistence {
   }
 
   /**
-   * Writes many chunks of the same key into different chunk files and verifies
-   * that we have that data in many files.
+   * Writes many chunks of the same block into different chunk files and
+   * verifies that we have that data in many files.
    *
    * @throws IOException
    * @throws NoSuchAlgorithmException
@@ -425,7 +426,8 @@ public class TestContainerPersistence {
     Container container = addContainer(containerSet, testContainerID);
 
     BlockID blockID = ContainerTestHelper.getTestBlockID(testContainerID);
-    ChunkInfo info = getChunk(blockID.getLocalID(), 0, 0, datalen);
+    ChunkInfo info = getChunk(
+        blockID.getLocalID(), 0, 0, datalen);
     byte[] data = getData(datalen);
     setDataChecksum(info, data);
     chunkManager.writeChunk(container, blockID, info, data, COMBINED);
@@ -456,7 +458,8 @@ public class TestContainerPersistence {
     Container container = addContainer(containerSet, testContainerID);
 
     BlockID blockID = ContainerTestHelper.getTestBlockID(testContainerID);
-    ChunkInfo info = getChunk(blockID.getLocalID(), 0, 0, datalen);
+    ChunkInfo info = getChunk(
+        blockID.getLocalID(), 0, 0, datalen);
     byte[] data = getData(datalen);
     setDataChecksum(info, data);
     chunkManager.writeChunk(container, blockID, info, data, COMBINED);
@@ -500,7 +503,8 @@ public class TestContainerPersistence {
     for (int x = 0; x < chunkCount; x++) {
       // we are writing to the same chunk file but at different offsets.
       long offset = x * datalen;
-      ChunkInfo info = getChunk(blockID.getLocalID(), 0, offset, datalen);
+      ChunkInfo info = getChunk(
+          blockID.getLocalID(), 0, offset, datalen);
       byte[] data = getData(datalen);
       oldSha.update(data);
       setDataChecksum(info, data);
@@ -531,7 +535,8 @@ public class TestContainerPersistence {
     Container container = addContainer(containerSet, testContainerID);
 
     BlockID blockID = ContainerTestHelper.getTestBlockID(testContainerID);
-    ChunkInfo info = getChunk(blockID.getLocalID(), 0, 0, datalen);
+    ChunkInfo info = getChunk(
+        blockID.getLocalID(), 0, 0, datalen);
     byte[] data = getData(datalen);
     setDataChecksum(info, data);
     chunkManager.writeChunk(container, blockID, info, data, COMBINED);
@@ -542,37 +547,38 @@ public class TestContainerPersistence {
   }
 
   /**
-   * Tests a put key and read key.
+   * Tests a put block and read block.
    *
    * @throws IOException
    * @throws NoSuchAlgorithmException
    */
   @Test
-  public void testPutKey() throws IOException, NoSuchAlgorithmException {
+  public void testPutBlock() throws IOException, NoSuchAlgorithmException {
     long testContainerID = getTestContainerID();
     Container container = addContainer(containerSet, testContainerID);
 
     BlockID blockID = ContainerTestHelper.getTestBlockID(testContainerID);
     ChunkInfo info = writeChunkHelper(blockID);
-    KeyData keyData = new KeyData(blockID);
+    BlockData blockData = new BlockData(blockID);
     List<ContainerProtos.ChunkInfo> chunkList = new LinkedList<>();
     chunkList.add(info.getProtoBufMessage());
-    keyData.setChunks(chunkList);
-    keyManager.putKey(container, keyData);
-    KeyData readKeyData = keyManager.getKey(container, keyData.getBlockID());
+    blockData.setChunks(chunkList);
+    blockManager.putBlock(container, blockData);
+    BlockData readBlockData = blockManager.
+        getBlock(container, blockData.getBlockID());
     ChunkInfo readChunk =
-        ChunkInfo.getFromProtoBuf(readKeyData.getChunks().get(0));
+        ChunkInfo.getFromProtoBuf(readBlockData.getChunks().get(0));
     Assert.assertEquals(info.getChecksum(), readChunk.getChecksum());
   }
 
   /**
-   * Tests a put key and read key.
+   * Tests a put block and read block.
    *
    * @throws IOException
    * @throws NoSuchAlgorithmException
    */
   @Test
-  public void testPutKeyWithLotsOfChunks() throws IOException,
+  public void testPutBlockWithLotsOfChunks() throws IOException,
       NoSuchAlgorithmException {
     final int chunkCount = 2;
     final int datalen = 1024;
@@ -603,66 +609,67 @@ public class TestContainerPersistence {
     long writeCount = container.getContainerData().getWriteCount();
     Assert.assertEquals(chunkCount, writeCount);
 
-    KeyData keyData = new KeyData(blockID);
+    BlockData blockData = new BlockData(blockID);
     List<ContainerProtos.ChunkInfo> chunkProtoList = new LinkedList<>();
     for (ChunkInfo i : chunkList) {
       chunkProtoList.add(i.getProtoBufMessage());
     }
-    keyData.setChunks(chunkProtoList);
-    keyManager.putKey(container, keyData);
-    KeyData readKeyData = keyManager.getKey(container, keyData.getBlockID());
+    blockData.setChunks(chunkProtoList);
+    blockManager.putBlock(container, blockData);
+    BlockData readBlockData = blockManager.
+        getBlock(container, blockData.getBlockID());
     ChunkInfo lastChunk = chunkList.get(chunkList.size() - 1);
     ChunkInfo readChunk =
-        ChunkInfo.getFromProtoBuf(readKeyData.getChunks().get(readKeyData
+        ChunkInfo.getFromProtoBuf(readBlockData.getChunks().get(readBlockData
             .getChunks().size() - 1));
     Assert.assertEquals(lastChunk.getChecksum(), readChunk.getChecksum());
   }
 
   /**
-   * Deletes a key and tries to read it back.
+   * Deletes a block and tries to read it back.
    *
    * @throws IOException
    * @throws NoSuchAlgorithmException
    */
   @Test
-  public void testDeleteKey() throws IOException, NoSuchAlgorithmException {
+  public void testDeleteBlock() throws IOException, NoSuchAlgorithmException {
     long testContainerID = getTestContainerID();
     Container container = addContainer(containerSet, testContainerID);
     BlockID blockID = ContainerTestHelper.getTestBlockID(testContainerID);
     ChunkInfo info = writeChunkHelper(blockID);
-    KeyData keyData = new KeyData(blockID);
+    BlockData blockData = new BlockData(blockID);
     List<ContainerProtos.ChunkInfo> chunkList = new LinkedList<>();
     chunkList.add(info.getProtoBufMessage());
-    keyData.setChunks(chunkList);
-    keyManager.putKey(container, keyData);
-    keyManager.deleteKey(container, blockID);
+    blockData.setChunks(chunkList);
+    blockManager.putBlock(container, blockData);
+    blockManager.deleteBlock(container, blockID);
     exception.expect(StorageContainerException.class);
-    exception.expectMessage("Unable to find the key.");
-    keyManager.getKey(container, keyData.getBlockID());
+    exception.expectMessage("Unable to find the block.");
+    blockManager.getBlock(container, blockData.getBlockID());
   }
 
   /**
-   * Tries to Deletes a key twice.
+   * Tries to Deletes a block twice.
    *
    * @throws IOException
    * @throws NoSuchAlgorithmException
    */
   @Test
-  public void testDeleteKeyTwice() throws IOException,
+  public void testDeleteBlockTwice() throws IOException,
       NoSuchAlgorithmException {
     long testContainerID = getTestContainerID();
     Container container = addContainer(containerSet, testContainerID);
     BlockID blockID = ContainerTestHelper.getTestBlockID(testContainerID);
     ChunkInfo info = writeChunkHelper(blockID);
-    KeyData keyData = new KeyData(blockID);
+    BlockData blockData = new BlockData(blockID);
     List<ContainerProtos.ChunkInfo> chunkList = new LinkedList<>();
     chunkList.add(info.getProtoBufMessage());
-    keyData.setChunks(chunkList);
-    keyManager.putKey(container, keyData);
-    keyManager.deleteKey(container, blockID);
+    blockData.setChunks(chunkList);
+    blockManager.putBlock(container, blockData);
+    blockManager.deleteBlock(container, blockID);
     exception.expect(StorageContainerException.class);
-    exception.expectMessage("Unable to find the key.");
-    keyManager.deleteKey(container, blockID);
+    exception.expectMessage("Unable to find the block.");
+    blockManager.deleteBlock(container, blockID);
   }
 
   /**
@@ -722,8 +729,9 @@ public class TestContainerPersistence {
     try {
       container.update(newMetadata, false);
     } catch (StorageContainerException ex) {
-      Assert.assertEquals("Updating a closed container without force option " +
-          "is not allowed. ContainerID: " + testContainerID, ex.getMessage());
+      Assert.assertEquals("Updating a closed container without " +
+          "force option is not allowed. ContainerID: " +
+          testContainerID, ex.getMessage());
     }
 
     // Update with force flag, it should be success.
@@ -741,53 +749,55 @@ public class TestContainerPersistence {
 
   }
 
-  private KeyData writeKeyHelper(BlockID blockID)
+  private BlockData writeBlockHelper(BlockID blockID)
       throws IOException, NoSuchAlgorithmException {
     ChunkInfo info = writeChunkHelper(blockID);
-    KeyData keyData = new KeyData(blockID);
+    BlockData blockData = new BlockData(blockID);
     List<ContainerProtos.ChunkInfo> chunkList = new LinkedList<>();
     chunkList.add(info.getProtoBufMessage());
-    keyData.setChunks(chunkList);
-    return keyData;
+    blockData.setChunks(chunkList);
+    return blockData;
   }
 
   @Test
-  public void testListKey() throws Exception {
+  public void testListBlock() throws Exception {
     long testContainerID = getTestContainerID();
     Container container = addContainer(containerSet, testContainerID);
-    List<BlockID> expectedKeys = new ArrayList<>();
+    List<BlockID> expectedBlocks = new ArrayList<>();
     for (int i = 0; i < 10; i++) {
       BlockID blockID = new BlockID(testContainerID, i);
-      expectedKeys.add(blockID);
-      KeyData kd = writeKeyHelper(blockID);
-      keyManager.putKey(container, kd);
+      expectedBlocks.add(blockID);
+      BlockData kd = writeBlockHelper(blockID);
+      blockManager.putBlock(container, kd);
     }
 
-    // List all keys
-    List<KeyData> result = keyManager.listKey(container, 0, 100);
+    // List all blocks
+    List<BlockData> result = blockManager.listBlock(
+        container, 0, 100);
     Assert.assertEquals(10, result.size());
 
     int index = 0;
     for (int i = index; i < result.size(); i++) {
-      KeyData data = result.get(i);
+      BlockData data = result.get(i);
       Assert.assertEquals(testContainerID, data.getContainerID());
-      Assert.assertEquals(expectedKeys.get(i).getLocalID(), data.getLocalID());
+      Assert.assertEquals(expectedBlocks.get(i).getLocalID(),
+          data.getLocalID());
       index++;
     }
 
-    // List key with startKey filter
-    long k6 = expectedKeys.get(6).getLocalID();
-    result = keyManager.listKey(container, k6, 100);
+    // List block with startBlock filter
+    long k6 = expectedBlocks.get(6).getLocalID();
+    result = blockManager.listBlock(container, k6, 100);
 
     Assert.assertEquals(4, result.size());
     for (int i = 6; i < 10; i++) {
-      Assert.assertEquals(expectedKeys.get(i).getLocalID(),
+      Assert.assertEquals(expectedBlocks.get(i).getLocalID(),
           result.get(i - 6).getLocalID());
     }
 
     // Count must be >0
     exception.expect(IllegalArgumentException.class);
     exception.expectMessage("Count must be a positive number.");
-    keyManager.listKey(container, 0, -1);
+    blockManager.listBlock(container, 0, -1);
   }
 }
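
Stripped of the container bootstrap, the renamed BlockManager API that this test drives is a put/get/delete round-trip keyed by BlockID, roughly (container, blockID and chunkList as set up in the test):

    BlockData blockData = new BlockData(blockID);
    blockData.setChunks(chunkList);               // ChunkInfo protos written earlier
    blockManager.putBlock(container, blockData);  // formerly keyManager.putKey(...)

    BlockData read = blockManager.getBlock(container, blockData.getBlockID());
    blockManager.deleteBlock(container, blockID); // formerly keyManager.deleteKey(...)
    // Any further getBlock/deleteBlock now throws
    // StorageContainerException("Unable to find the block.")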

http://git-wip-us.apache.org/repos/asf/hadoop/blob/096a7160/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestBlockDeletion.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestBlockDeletion.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestBlockDeletion.java
index 3c77687..a129ed0 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestBlockDeletion.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestBlockDeletion.java
@@ -41,7 +41,7 @@ import org.apache.hadoop.ozone.client.io.OzoneOutputStream;
 import org.apache.hadoop.ozone.container.common.impl.ContainerData;
 import org.apache.hadoop.ozone.container.common.impl.ContainerSet;
 import org.apache.hadoop.ozone.container.keyvalue.KeyValueContainerData;
-import org.apache.hadoop.ozone.container.keyvalue.helpers.KeyUtils;
+import org.apache.hadoop.ozone.container.keyvalue.helpers.BlockUtils;
 import org.apache.hadoop.ozone.om.OzoneManager;
 import org.apache.hadoop.ozone.om.helpers.OmKeyArgs;
 import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfoGroup;
@@ -69,6 +69,9 @@ import static org.apache.hadoop.hdds
 import static org.apache.hadoop.ozone
     .OzoneConfigKeys.OZONE_BLOCK_DELETING_SERVICE_INTERVAL;
 
+/**
+ * Tests for Block deletion.
+ */
 public class TestBlockDeletion {
   private static OzoneConfiguration conf = null;
   private static ObjectStore store;
@@ -229,7 +232,7 @@ public class TestBlockDeletion {
       throws IOException {
     return OzoneTestUtils.performOperationOnKeyContainers((blockID) -> {
       try {
-        MetadataStore db = KeyUtils.getDB((KeyValueContainerData)
+        MetadataStore db = BlockUtils.getDB((KeyValueContainerData)
                 dnContainerSet.getContainer(blockID.getContainerID())
                     .getContainerData(), conf);
         Assert.assertNotNull(db.get(Longs.toByteArray(blockID.getLocalID())));
@@ -244,7 +247,7 @@ public class TestBlockDeletion {
       throws IOException {
     return OzoneTestUtils.performOperationOnKeyContainers((blockID) -> {
       try {
-        MetadataStore db = KeyUtils.getDB((KeyValueContainerData)
+        MetadataStore db = BlockUtils.getDB((KeyValueContainerData)
             dnContainerSet.getContainer(blockID.getContainerID())
                 .getContainerData(), conf);
         Assert.assertNull(db.get(Longs.toByteArray(blockID.getLocalID())));

http://git-wip-us.apache.org/repos/asf/hadoop/blob/096a7160/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestOzoneContainer.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestOzoneContainer.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestOzoneContainer.java
index 5dd88fb..a3c92fb 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestOzoneContainer.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestOzoneContainer.java
@@ -68,7 +68,8 @@ public class TestOzoneContainer {
       conf.set(HDDS_DATANODE_DIR_KEY, tempFolder.getRoot().getPath());
       conf.setInt(OzoneConfigKeys.DFS_CONTAINER_IPC_PORT, pipeline.getLeader()
               .getPort(DatanodeDetails.Port.Name.STANDALONE).getValue());
-      conf.setBoolean(OzoneConfigKeys.DFS_CONTAINER_IPC_RANDOM_PORT, false);
+      conf.setBoolean(
+          OzoneConfigKeys.DFS_CONTAINER_IPC_RANDOM_PORT, false);
 
       container = new OzoneContainer(TestUtils.randomDatanodeDetails(),
           conf, null);
@@ -129,7 +130,7 @@ public class TestOzoneContainer {
   static void runTestOzoneContainerViaDataNode(
       long testContainerID, XceiverClientSpi client) throws Exception {
     ContainerProtos.ContainerCommandRequestProto
-        request, writeChunkRequest, putKeyRequest,
+        request, writeChunkRequest, putBlockRequest,
         updateRequest1, updateRequest2;
     ContainerProtos.ContainerCommandResponseProto response,
         updateResponse1, updateResponse2;
@@ -138,46 +139,50 @@ public class TestOzoneContainer {
 
       Pipeline pipeline = client.getPipeline();
       createContainerForTesting(client, testContainerID);
-      writeChunkRequest = writeChunkForContainer(client, testContainerID, 1024);
+      writeChunkRequest = writeChunkForContainer(client, testContainerID,
+          1024);
 
       // Read Chunk
-      request = ContainerTestHelper.getReadChunkRequest(pipeline, writeChunkRequest
-          .getWriteChunk());
+      request = ContainerTestHelper.getReadChunkRequest(
+          pipeline, writeChunkRequest.getWriteChunk());
 
       response = client.sendCommand(request);
       Assert.assertNotNull(response);
       Assert.assertEquals(ContainerProtos.Result.SUCCESS, response.getResult());
       Assert.assertTrue(request.getTraceID().equals(response.getTraceID()));
 
-      // Put Key
-      putKeyRequest = ContainerTestHelper.getPutKeyRequest(pipeline, writeChunkRequest
-              .getWriteChunk());
+      // Put Block
+      putBlockRequest = ContainerTestHelper.getPutBlockRequest(
+          pipeline, writeChunkRequest.getWriteChunk());
 
 
-      response = client.sendCommand(putKeyRequest);
+      response = client.sendCommand(putBlockRequest);
       Assert.assertNotNull(response);
       Assert.assertEquals(ContainerProtos.Result.SUCCESS, response.getResult());
-      Assert
-          .assertTrue(putKeyRequest.getTraceID().equals(response.getTraceID()));
+      Assert.assertTrue(putBlockRequest.getTraceID()
+          .equals(response.getTraceID()));
 
-      // Get Key
-      request = ContainerTestHelper.getKeyRequest(pipeline, putKeyRequest.getPutKey());
+      // Get Block
+      request = ContainerTestHelper.
+          getBlockRequest(pipeline, putBlockRequest.getPutBlock());
       response = client.sendCommand(request);
-      int chunksCount = putKeyRequest.getPutKey().getKeyData().getChunksCount();
-      ContainerTestHelper.verifyGetKey(request, response, chunksCount);
+      int chunksCount = putBlockRequest.getPutBlock().getBlockData().
+          getChunksCount();
+      ContainerTestHelper.verifyGetBlock(request, response, chunksCount);
 
 
-      // Delete Key
+      // Delete Block
       request =
-          ContainerTestHelper.getDeleteKeyRequest(pipeline, putKeyRequest.getPutKey());
+          ContainerTestHelper.getDeleteBlockRequest(
+              pipeline, putBlockRequest.getPutBlock());
       response = client.sendCommand(request);
       Assert.assertNotNull(response);
       Assert.assertEquals(ContainerProtos.Result.SUCCESS, response.getResult());
       Assert.assertTrue(request.getTraceID().equals(response.getTraceID()));
 
       //Delete Chunk
-      request = ContainerTestHelper.getDeleteChunkRequest(pipeline, writeChunkRequest
-          .getWriteChunk());
+      request = ContainerTestHelper.getDeleteChunkRequest(
+          pipeline, writeChunkRequest.getWriteChunk());
 
       response = client.sendCommand(request);
       Assert.assertNotNull(response);
@@ -249,7 +254,7 @@ public class TestOzoneContainer {
 
       final ContainerProtos.ContainerCommandRequestProto getSmallFileRequest
           = ContainerTestHelper.getReadSmallFileRequest(client.getPipeline(),
-          smallFileRequest.getPutSmallFile().getKey());
+          smallFileRequest.getPutSmallFile().getBlock());
       response = client.sendCommand(getSmallFileRequest);
       Assert.assertArrayEquals(
           smallFileRequest.getPutSmallFile().getData().toByteArray(),
@@ -269,7 +274,7 @@ public class TestOzoneContainer {
     XceiverClientGrpc client = null;
     ContainerProtos.ContainerCommandResponseProto response;
     ContainerProtos.ContainerCommandRequestProto
-        writeChunkRequest, putKeyRequest, request;
+        writeChunkRequest, putBlockRequest, request;
     try {
 
       OzoneConfiguration conf = newOzoneConfiguration();
@@ -283,18 +288,19 @@ public class TestOzoneContainer {
 
       long containerID = ContainerTestHelper.getTestContainerID();
       createContainerForTesting(client, containerID);
-      writeChunkRequest = writeChunkForContainer(client, containerID, 1024);
+      writeChunkRequest = writeChunkForContainer(client, containerID,
+          1024);
 
 
-      putKeyRequest = ContainerTestHelper.getPutKeyRequest(client.getPipeline(),
-          writeChunkRequest.getWriteChunk());
-      // Put key before closing.
-      response = client.sendCommand(putKeyRequest);
+      putBlockRequest = ContainerTestHelper.getPutBlockRequest(
+          client.getPipeline(), writeChunkRequest.getWriteChunk());
+      // Put block before closing.
+      response = client.sendCommand(putBlockRequest);
       Assert.assertNotNull(response);
       Assert.assertEquals(ContainerProtos.Result.SUCCESS,
           response.getResult());
       Assert.assertTrue(
-          putKeyRequest.getTraceID().equals(response.getTraceID()));
+          putBlockRequest.getTraceID().equals(response.getTraceID()));
 
      // Close the container.
       request = ContainerTestHelper.getCloseContainer(
@@ -325,25 +331,26 @@ public class TestOzoneContainer {
       Assert.assertTrue(request.getTraceID().equals(response.getTraceID()));
 
 
-      // Put key will fail on a closed container.
-      response = client.sendCommand(putKeyRequest);
+      // Put block will fail on a closed container.
+      response = client.sendCommand(putBlockRequest);
       Assert.assertNotNull(response);
       Assert.assertEquals(ContainerProtos.Result.CLOSED_CONTAINER_IO,
           response.getResult());
-      Assert
-          .assertTrue(putKeyRequest.getTraceID().equals(response.getTraceID()));
+      Assert.assertTrue(putBlockRequest.getTraceID()
+          .equals(response.getTraceID()));
 
-      // Get key must work on the closed container.
-      request = ContainerTestHelper.getKeyRequest(client.getPipeline(),
-          putKeyRequest.getPutKey());
+      // Get block must work on the closed container.
+      request = ContainerTestHelper.getBlockRequest(client.getPipeline(),
+          putBlockRequest.getPutBlock());
       response = client.sendCommand(request);
-      int chunksCount = putKeyRequest.getPutKey().getKeyData().getChunksCount();
-      ContainerTestHelper.verifyGetKey(request, response, chunksCount);
+      int chunksCount = putBlockRequest.getPutBlock().getBlockData()
+          .getChunksCount();
+      ContainerTestHelper.verifyGetBlock(request, response, chunksCount);
 
-      // Delete Key must fail on a closed container.
+      // Delete block must fail on a closed container.
       request =
-          ContainerTestHelper.getDeleteKeyRequest(client.getPipeline(),
-              putKeyRequest.getPutKey());
+          ContainerTestHelper.getDeleteBlockRequest(client.getPipeline(),
+              putBlockRequest.getPutBlock());
       response = client.sendCommand(request);
       Assert.assertNotNull(response);
       Assert.assertEquals(ContainerProtos.Result.CLOSED_CONTAINER_IO,
@@ -365,7 +372,7 @@ public class TestOzoneContainer {
     XceiverClientGrpc client = null;
     ContainerProtos.ContainerCommandResponseProto response;
     ContainerProtos.ContainerCommandRequestProto request,
-        writeChunkRequest, putKeyRequest;
+        writeChunkRequest, putBlockRequest;
     try {
       OzoneConfiguration conf = newOzoneConfiguration();
 
@@ -378,17 +385,18 @@ public class TestOzoneContainer {
 
       long containerID = ContainerTestHelper.getTestContainerID();
       createContainerForTesting(client, containerID);
-      writeChunkRequest = writeChunkForContainer(client, containerID, 1024);
+      writeChunkRequest = writeChunkForContainer(
+          client, containerID, 1024);
 
-      putKeyRequest = ContainerTestHelper.getPutKeyRequest(client.getPipeline(),
-          writeChunkRequest.getWriteChunk());
+      putBlockRequest = ContainerTestHelper.getPutBlockRequest(
+          client.getPipeline(), writeChunkRequest.getWriteChunk());
       // Put key before deleting.
-      response = client.sendCommand(putKeyRequest);
+      response = client.sendCommand(putBlockRequest);
       Assert.assertNotNull(response);
       Assert.assertEquals(ContainerProtos.Result.SUCCESS,
           response.getResult());
       Assert.assertTrue(
-          putKeyRequest.getTraceID().equals(response.getTraceID()));
+          putBlockRequest.getTraceID().equals(response.getTraceID()));
 
       // Container cannot be deleted forcibly because
       // the container is not closed.
@@ -529,7 +537,7 @@ public class TestOzoneContainer {
       writeChunkForContainer(XceiverClientSpi client,
       long containerID, int dataLen) throws Exception {
     // Write Chunk
-    BlockID blockID = ContainerTestHelper.getTestBlockID(containerID);;
+    BlockID blockID = ContainerTestHelper.getTestBlockID(containerID);
     ContainerProtos.ContainerCommandRequestProto writeChunkRequest =
         ContainerTestHelper.getWriteChunkRequest(client.getPipeline(),
             blockID, dataLen);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/096a7160/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/server/TestContainerStateMachine.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/server/TestContainerStateMachine.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/server/TestContainerStateMachine.java
index 8c83fd3..c875a7e 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/server/TestContainerStateMachine.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/server/TestContainerStateMachine.java
@@ -89,7 +89,7 @@ public class TestContainerStateMachine {
 
     // add putKey request
     ContainerCommandRequestProto putKeyProto = ContainerTestHelper
-            .getPutKeyRequest(pipeline, writeChunkProto.getWriteChunk());
+            .getPutBlockRequest(pipeline, writeChunkProto.getWriteChunk());
     RaftClientRequest putKeyRequest = getRaftClientRequest(putKeyProto);
 
     TransactionContext createContainerCtxt =

http://git-wip-us.apache.org/repos/asf/hadoop/blob/096a7160/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOzoneManager.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOzoneManager.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOzoneManager.java
index f309715..7144005 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOzoneManager.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOzoneManager.java
@@ -1209,8 +1209,8 @@ public class TestOzoneManager {
   //Disabling this test
   @Ignore("Disabling this test until Open Key is fixed.")
   public void testExpiredOpenKey() throws Exception {
-//    BackgroundService openKeyCleanUpService = ((KeyManagerImpl)cluster
-//        .getOzoneManager().getKeyManager()).getOpenKeyCleanupService();
+//    BackgroundService openKeyCleanUpService = ((BlockManagerImpl)cluster
+//        .getOzoneManager().getBlockManager()).getOpenKeyCleanupService();
 
     String userName = "user" + RandomStringUtils.randomNumeric(5);
     String adminName = "admin" + RandomStringUtils.randomNumeric(5);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/096a7160/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestContainerSmallFile.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestContainerSmallFile.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestContainerSmallFile.java
index a2d95e8..84a4028 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestContainerSmallFile.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestContainerSmallFile.java
@@ -103,7 +103,7 @@ public class TestContainerSmallFile {
   }
 
   @Test
-  public void testInvalidKeyRead() throws Exception {
+  public void testInvalidBlockRead() throws Exception {
     String traceID = UUID.randomUUID().toString();
     ContainerWithPipeline container =
         storageContainerLocationClient.allocateContainer(
@@ -116,7 +116,7 @@ public class TestContainerSmallFile {
         container.getContainerInfo().getContainerID(), traceID);
 
     thrown.expect(StorageContainerException.class);
-    thrown.expectMessage("Unable to find the key");
+    thrown.expectMessage("Unable to find the block");
 
     BlockID blockID = ContainerTestHelper.getTestBlockID(
         container.getContainerInfo().getContainerID());

http://git-wip-us.apache.org/repos/asf/hadoop/blob/096a7160/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestGetCommittedBlockLengthAndPutKey.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestGetCommittedBlockLengthAndPutKey.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestGetCommittedBlockLengthAndPutKey.java
index f82b0d3..08e7808 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestGetCommittedBlockLengthAndPutKey.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestGetCommittedBlockLengthAndPutKey.java
@@ -107,7 +107,7 @@ public class TestGetCommittedBlockLengthAndPutKey {
     // Now, explicitly make a putKey request for the block.
     ContainerProtos.ContainerCommandRequestProto putKeyRequest =
         ContainerTestHelper
-            .getPutKeyRequest(pipeline, writeChunkRequest.getWriteChunk());
+            .getPutBlockRequest(pipeline, writeChunkRequest.getWriteChunk());
     client.sendCommand(putKeyRequest);
     response = ContainerProtocolCalls
         .getCommittedBlockLength(client, blockID, traceID);
@@ -155,7 +155,7 @@ public class TestGetCommittedBlockLengthAndPutKey {
   }
 
   @Test
-  public void tesGetCommittedBlockLengthForInvalidBlock() throws Exception {
+  public void testGetCommittedBlockLengthForInvalidBlock() throws Exception {
     String traceID = UUID.randomUUID().toString();
     ContainerWithPipeline container = storageContainerLocationClient
         .allocateContainer(xceiverClientManager.getType(),
@@ -174,7 +174,7 @@ public class TestGetCommittedBlockLengthAndPutKey {
       ContainerProtocolCalls.getCommittedBlockLength(client, blockID, traceID);
       Assert.fail("Expected exception not thrown");
     } catch (StorageContainerException sce) {
-      Assert.assertTrue(sce.getMessage().contains("Unable to find the key"));
+      Assert.assertTrue(sce.getMessage().contains("Unable to find the block"));
     }
     xceiverClientManager.releaseClient(client);
   }
@@ -216,7 +216,7 @@ public class TestGetCommittedBlockLengthAndPutKey {
 
   @Test
   public void tesPutKeyResposne() throws Exception {
-    ContainerProtos.PutKeyResponseProto response;
+    ContainerProtos.PutBlockResponseProto response;
     String traceID = UUID.randomUUID().toString();
     ContainerWithPipeline container = storageContainerLocationClient
         .allocateContainer(xceiverClientManager.getType(),
@@ -239,8 +239,8 @@ public class TestGetCommittedBlockLengthAndPutKey {
     // Now, explicitly make a putKey request for the block.
     ContainerProtos.ContainerCommandRequestProto putKeyRequest =
         ContainerTestHelper
-            .getPutKeyRequest(pipeline, writeChunkRequest.getWriteChunk());
-    response = client.sendCommand(putKeyRequest).getPutKey();
+            .getPutBlockRequest(pipeline, writeChunkRequest.getWriteChunk());
+    response = client.sendCommand(putKeyRequest).getPutBlock();
     // make sure the block ids in the request and response are same.
     // This will also ensure that closing the container committed the block
     // on the Datanodes.

http://git-wip-us.apache.org/repos/asf/hadoop/blob/096a7160/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/web/client/TestKeys.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/web/client/TestKeys.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/web/client/TestKeys.java
index 02cd985..7eb2ec2 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/web/client/TestKeys.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/web/client/TestKeys.java
@@ -44,7 +44,7 @@ import org.apache.hadoop.ozone.client.io.OzoneInputStream;
 import org.apache.hadoop.ozone.client.io.OzoneOutputStream;
 import org.apache.hadoop.ozone.client.protocol.ClientProtocol;
 import org.apache.hadoop.ozone.client.rpc.RpcClient;
-import org.apache.hadoop.ozone.container.common.helpers.KeyData;
+import org.apache.hadoop.ozone.container.common.helpers.BlockData;
 import org.apache.hadoop.ozone.container.keyvalue.KeyValueContainer;
 import org.apache.hadoop.ozone.container.keyvalue.KeyValueContainerData;
 import org.apache.hadoop.ozone.container.keyvalue.KeyValueHandler;
@@ -278,29 +278,29 @@ public class TestKeys {
   }
 
   static void runTestPutKey(PutHelper helper) throws Exception {
-    final ClientProtocol client = helper.client;
+    final ClientProtocol helperClient = helper.client;
     helper.putKey();
     assertNotNull(helper.getBucket());
     assertNotNull(helper.getFile());
-    List<OzoneKey> keyList = client
+    List<OzoneKey> keyList = helperClient
         .listKeys(helper.getVol().getName(), helper.getBucket().getName(), null,
             null, 10);
     Assert.assertEquals(1, keyList.size());
 
     // test list key using a more efficient call
     String newkeyName = OzoneUtils.getRequestID().toLowerCase();
-    OzoneOutputStream ozoneOutputStream = client
+    OzoneOutputStream ozoneOutputStream = helperClient
         .createKey(helper.getVol().getName(), helper.getBucket().getName(),
             newkeyName, 0, replicationType, replicationFactor);
     ozoneOutputStream.close();
-    keyList = client
+    keyList = helperClient
         .listKeys(helper.getVol().getName(), helper.getBucket().getName(), null,
             null, 10);
     Assert.assertEquals(2, keyList.size());
 
     // test new put key with invalid volume/bucket name
     try {
-      ozoneOutputStream = client
+      ozoneOutputStream = helperClient
           .createKey("invalid-volume", helper.getBucket().getName(), newkeyName,
               0, replicationType, replicationFactor);
       ozoneOutputStream.close();
@@ -312,7 +312,7 @@ public class TestKeys {
     }
 
     try {
-      ozoneOutputStream = client
+      ozoneOutputStream = helperClient
           .createKey(helper.getVol().getName(), "invalid-bucket", newkeyName, 0,
               replicationType, replicationFactor);
       ozoneOutputStream.close();
@@ -380,7 +380,7 @@ public class TestKeys {
   }
 
   static void runTestPutAndGetKey(PutHelper helper) throws Exception {
-    final ClientProtocol client = helper.client;
+    final ClientProtocol helperClient = helper.client;
 
     String keyName = helper.putKey();
     assertNotNull(helper.getBucket());
@@ -427,7 +427,8 @@ public class TestKeys {
 
       // test new get key with invalid volume/bucket name
       try {
-        client.getKey("invalid-volume", helper.getBucket().getName(), keyName);
+        helperClient.getKey(
+            "invalid-volume", helper.getBucket().getName(), keyName);
         fail("Get key should have thrown " + "when using invalid volume name.");
       } catch (IOException e) {
         GenericTestUtils
@@ -435,7 +436,8 @@ public class TestKeys {
       }
 
       try {
-        client.getKey(helper.getVol().getName(), "invalid-bucket", keyName);
+        helperClient.getKey(
+            helper.getVol().getName(), "invalid-bucket", keyName);
         fail("Get key should have thrown " + "when using invalid bucket name.");
       } catch (IOException e) {
         GenericTestUtils.assertExceptionContains(
@@ -476,7 +478,7 @@ public class TestKeys {
   }
 
   static void runTestPutAndListKey(PutHelper helper) throws Exception {
-    ClientProtocol client = helper.client;
+    ClientProtocol helperClient = helper.client;
     helper.putKey();
     assertNotNull(helper.getBucket());
     assertNotNull(helper.getFile());
@@ -495,7 +497,7 @@ public class TestKeys {
     List<OzoneKey> keyList1 =
         IteratorUtils.toList(helper.getBucket().listKeys(null, null));
     // test list key using a more efficient call
-    List<OzoneKey> keyList2 = client
+    List<OzoneKey> keyList2 = helperClient
         .listKeys(helper.getVol().getName(), helper.getBucket().getName(), null,
             null, 100);
 
@@ -515,7 +517,7 @@ public class TestKeys {
     }
 
     // test maxLength parameter of list keys
-    keyList2 = client
+    keyList2 = helperClient
         .listKeys(helper.getVol().getName(), helper.getBucket().getName(), null,
             null, 1);
     Assert.assertEquals(1, keyList2.size());
@@ -523,7 +525,7 @@ public class TestKeys {
     // test startKey parameter of list keys
     keyList1 = IteratorUtils
         .toList(helper.getBucket().listKeys("list-key", "list-key4"));
-    keyList2 = client
+    keyList2 = helperClient
         .listKeys(helper.getVol().getName(), helper.getBucket().getName(),
             "list-key", "list-key4", 100);
     Assert.assertEquals(5, keyList1.size());
@@ -532,7 +534,7 @@ public class TestKeys {
     // test prefix parameter of list keys
     keyList1 =
         IteratorUtils.toList(helper.getBucket().listKeys("list-key2", null));
-    keyList2 = client
+    keyList2 = helperClient
         .listKeys(helper.getVol().getName(), helper.getBucket().getName(),
             "list-key2", null, 100);
     Assert.assertTrue(
@@ -542,7 +544,7 @@ public class TestKeys {
 
     // test new list keys with invalid volume/bucket name
     try {
-      client.listKeys("invalid-volume", helper.getBucket().getName(),
+      helperClient.listKeys("invalid-volume", helper.getBucket().getName(),
           null, null, 100);
       fail("List keys should have thrown when using invalid volume name.");
     } catch (IOException e) {
@@ -551,7 +553,7 @@ public class TestKeys {
     }
 
     try {
-      client.listKeys(helper.getVol().getName(), "invalid-bucket", null,
+      helperClient.listKeys(helper.getVol().getName(), "invalid-bucket", null,
           null, 100);
       fail("List keys should have thrown when using invalid bucket name.");
     } catch (IOException e) {
@@ -697,10 +699,10 @@ public class TestKeys {
                   .KeyValueContainer);
           KeyValueContainer container = (KeyValueContainer) cm.getContainerSet()
               .getContainer(location.getBlockID().getContainerID());
-          KeyData blockInfo = keyValueHandler
-              .getKeyManager().getKey(container, location.getBlockID());
-          KeyValueContainerData containerData = (KeyValueContainerData) container
-              .getContainerData();
+          BlockData blockInfo = keyValueHandler
+              .getBlockManager().getBlock(container, location.getBlockID());
+          KeyValueContainerData containerData =
+              (KeyValueContainerData) container.getContainerData();
           File dataDir = new File(containerData.getChunksPath());
           for (ContainerProtos.ChunkInfo chunkInfo : blockInfo.getChunks()) {
             File chunkFile = dataDir.toPath()
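
For reference, the handler-side read path after this rename reduces to the
sketch below. It uses only calls visible in the hunk above
(getBlockManager()/getBlock() replacing getKeyManager()/getKey()); the
import paths and the test fixtures (handler, container, block id) are
assumed here, not part of the commit.

    import java.io.IOException;

    import org.apache.hadoop.hdds.client.BlockID;
    import org.apache.hadoop.ozone.container.common.helpers.BlockData;
    import org.apache.hadoop.ozone.container.keyvalue.KeyValueContainer;
    import org.apache.hadoop.ozone.container.keyvalue.KeyValueHandler;

    /** Sketch: read block metadata through the renamed BlockManager API. */
    static BlockData readBlock(KeyValueHandler handler,
        KeyValueContainer container, BlockID blockID) throws IOException {
      // getBlockManager()/getBlock() replace getKeyManager()/getKey().
      return handler.getBlockManager().getBlock(container, blockID);
    }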

http://git-wip-us.apache.org/repos/asf/hadoop/blob/096a7160/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/BucketManagerImpl.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/BucketManagerImpl.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/BucketManagerImpl.java
index 9c451e2..3e740d9 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/BucketManagerImpl.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/BucketManagerImpl.java
@@ -54,7 +54,7 @@ public class BucketManagerImpl implements BucketManager {
 
   /**
    * MetadataDB is maintained in MetadataManager and shared between
-   * BucketManager and VolumeManager. (and also by KeyManager)
+   * BucketManager and VolumeManager. (and also by BlockManager)
    *
    * BucketManager uses MetadataDB to store bucket level information.
    *

http://git-wip-us.apache.org/repos/asf/hadoop/blob/096a7160/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/ScmBlockLocationTestIngClient.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/ScmBlockLocationTestIngClient.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/ScmBlockLocationTestIngClient.java
index 2da60de..eb533e8 100644
--- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/ScmBlockLocationTestIngClient.java
+++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/ScmBlockLocationTestIngClient.java
@@ -102,7 +102,7 @@ public class ScmBlockLocationTestIngClient implements ScmBlockLocationProtocol {
   }
 
   /**
-   * Returns Fake blocks to the KeyManager so we get blocks in the Database.
+   * Returns Fake blocks to the BlockManager so we get blocks in the Database.
    * @param size - size of the block.
    * @param type Replication Type
    * @param factor - Replication factor

http://git-wip-us.apache.org/repos/asf/hadoop/blob/096a7160/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/genesis/BenchMarkDatanodeDispatcher.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/genesis/BenchMarkDatanodeDispatcher.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/genesis/BenchMarkDatanodeDispatcher.java
index 5ac7e0a..8811d91 100644
--- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/genesis/BenchMarkDatanodeDispatcher.java
+++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/genesis/BenchMarkDatanodeDispatcher.java
@@ -48,17 +48,16 @@ import java.util.Random;
 import java.util.UUID;
 import java.util.concurrent.atomic.AtomicInteger;
 
-
+import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos
+    .PutBlockRequestProto;
+import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos
+    .GetBlockRequestProto;
 import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos
     .ContainerCommandRequestProto;
 import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos
     .ReadChunkRequestProto;
 import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos
     .WriteChunkRequestProto;
-import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos
-    .PutKeyRequestProto;
-import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos
-    .GetKeyRequestProto;
 
 import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos;
 
@@ -141,7 +140,7 @@ public class BenchMarkDatanodeDispatcher {
         long containerID = containers.get(y);
         BlockID  blockID = new BlockID(containerID, key);
         dispatcher
-            .dispatch(getPutKeyCommand(blockID, chunkName));
+            .dispatch(getPutBlockCommand(blockID, chunkName));
         dispatcher.dispatch(getWriteChunkCommand(blockID, chunkName));
       }
     }
@@ -213,38 +212,39 @@ public class BenchMarkDatanodeDispatcher {
     return builder.build();
   }
 
-  private ContainerCommandRequestProto getPutKeyCommand(
+  private ContainerCommandRequestProto getPutBlockCommand(
       BlockID blockID, String chunkKey) {
-    PutKeyRequestProto.Builder putKeyRequest = PutKeyRequestProto
+    PutBlockRequestProto.Builder putBlockRequest = PutBlockRequestProto
         .newBuilder()
-        .setKeyData(getKeyData(blockID, chunkKey));
+        .setBlockData(getBlockData(blockID, chunkKey));
 
     ContainerCommandRequestProto.Builder request = ContainerCommandRequestProto
         .newBuilder();
-    request.setCmdType(ContainerProtos.Type.PutKey)
+    request.setCmdType(ContainerProtos.Type.PutBlock)
         .setContainerID(blockID.getContainerID())
         .setTraceID(getBlockTraceID(blockID))
         .setDatanodeUuid(datanodeUuid)
-        .setPutKey(putKeyRequest);
+        .setPutBlock(putBlockRequest);
     return request.build();
   }
 
-  private ContainerCommandRequestProto getGetKeyCommand(BlockID blockID) {
-    GetKeyRequestProto.Builder readKeyRequest = GetKeyRequestProto.newBuilder()
+  private ContainerCommandRequestProto getGetBlockCommand(BlockID blockID) {
+    GetBlockRequestProto.Builder readBlockRequest =
+        GetBlockRequestProto.newBuilder()
         .setBlockID(blockID.getDatanodeBlockIDProtobuf());
     ContainerCommandRequestProto.Builder request = ContainerCommandRequestProto
         .newBuilder()
-        .setCmdType(ContainerProtos.Type.GetKey)
+        .setCmdType(ContainerProtos.Type.GetBlock)
         .setContainerID(blockID.getContainerID())
         .setTraceID(getBlockTraceID(blockID))
         .setDatanodeUuid(datanodeUuid)
-        .setGetKey(readKeyRequest);
+        .setGetBlock(readBlockRequest);
     return request.build();
   }
 
-  private ContainerProtos.KeyData getKeyData(
+  private ContainerProtos.BlockData getBlockData(
       BlockID blockID, String chunkKey) {
-    ContainerProtos.KeyData.Builder builder =  ContainerProtos.KeyData
+    ContainerProtos.BlockData.Builder builder =  ContainerProtos.BlockData
         .newBuilder()
         .setBlockID(blockID.getDatanodeBlockIDProtobuf())
         .addChunks(getChunkInfo(blockID, chunkKey));
@@ -275,16 +275,16 @@ public class BenchMarkDatanodeDispatcher {
   }
 
   @Benchmark
-  public void putKey(BenchMarkDatanodeDispatcher bmdd) {
+  public void putBlock(BenchMarkDatanodeDispatcher bmdd) {
     BlockID blockID = getRandomBlockID();
     String chunkKey = getNewChunkToWrite();
-    bmdd.dispatcher.dispatch(getPutKeyCommand(blockID, chunkKey));
+    bmdd.dispatcher.dispatch(getPutBlockCommand(blockID, chunkKey));
   }
 
   @Benchmark
-  public void getKey(BenchMarkDatanodeDispatcher bmdd) {
+  public void getBlock(BenchMarkDatanodeDispatcher bmdd) {
     BlockID blockID = getRandomBlockID();
-    bmdd.dispatcher.dispatch(getGetKeyCommand(blockID));
+    bmdd.dispatcher.dispatch(getGetBlockCommand(blockID));
   }
 
   // Chunks writes from benchmark only reaches certain containers
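
Taken together, the dispatcher changes above amount to the construction
below: a minimal sketch built only from the renamed protobuf calls shown in
this diff (PutBlockRequestProto, setBlockData, Type.PutBlock, setPutBlock);
the block data is assumed to be prepared as in getBlockData() above.

    import org.apache.hadoop.hdds.client.BlockID;
    import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos;
    import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos
        .ContainerCommandRequestProto;
    import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos
        .PutBlockRequestProto;

    /** Sketch: build a PutBlock command with the renamed protobuf types. */
    static ContainerCommandRequestProto buildPutBlock(BlockID blockID,
        ContainerProtos.BlockData blockData, String datanodeUuid,
        String traceID) {
      PutBlockRequestProto.Builder putBlock = PutBlockRequestProto
          .newBuilder()
          .setBlockData(blockData);                   // was setKeyData(...)
      return ContainerCommandRequestProto.newBuilder()
          .setCmdType(ContainerProtos.Type.PutBlock)  // was Type.PutKey
          .setContainerID(blockID.getContainerID())
          .setTraceID(traceID)
          .setDatanodeUuid(datanodeUuid)
          .setPutBlock(putBlock)                      // was setPutKey(...)
          .build();
    }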




[37/50] [abbrv] hadoop git commit: HADOOP-15764. [JDK10] Migrate from sun.net.dns.ResolverConfiguration to the replacement. Contributed by Akira Ajisaka.

Posted by xy...@apache.org.
HADOOP-15764. [JDK10] Migrate from sun.net.dns.ResolverConfiguration to the replacement. Contributed by Akira Ajisaka.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/429a07e0
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/429a07e0
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/429a07e0

Branch: refs/heads/HDDS-4
Commit: 429a07e08c8c919b1679c0a80df73d147d95e8a6
Parents: 3da94a3
Author: Ewan Higgs <ew...@wdc.com>
Authored: Thu Sep 20 15:13:55 2018 +0200
Committer: Ewan Higgs <ew...@wdc.com>
Committed: Thu Sep 20 15:13:55 2018 +0200

----------------------------------------------------------------------
 .../hadoop-client-minicluster/pom.xml            | 17 ++++-------------
 .../hadoop-client-runtime/pom.xml                | 11 +++++++++++
 hadoop-common-project/hadoop-common/pom.xml      |  5 +++++
 .../org/apache/hadoop/security/SecurityUtil.java | 19 +++++++++++++------
 4 files changed, 33 insertions(+), 19 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/429a07e0/hadoop-client-modules/hadoop-client-minicluster/pom.xml
----------------------------------------------------------------------
diff --git a/hadoop-client-modules/hadoop-client-minicluster/pom.xml b/hadoop-client-modules/hadoop-client-minicluster/pom.xml
index ea8d680..70fca8a 100644
--- a/hadoop-client-modules/hadoop-client-minicluster/pom.xml
+++ b/hadoop-client-modules/hadoop-client-minicluster/pom.xml
@@ -318,6 +318,10 @@
           <groupId>commons-net</groupId>
           <artifactId>commons-net</artifactId>
         </exclusion>
+        <exclusion>
+          <groupId>dnsjava</groupId>
+          <artifactId>dnsjava</artifactId>
+        </exclusion>
       </exclusions>
     </dependency>
     <!-- Add optional runtime dependency on the in-development timeline server module
@@ -773,19 +777,6 @@
                         <exclude>ehcache-core.xsd</exclude>
                       </excludes>
                     </filter>
-
-                   <!-- remove utility classes which are not required from
-                        dnsjava -->
-                    <filter>
-                      <artifact>dnsjava:dnsjava</artifact>
-                      <excludes>
-                        <excldue>dig*</excldue>
-                        <exclude>jnamed*</exclude>
-                        <exlcude>lookup*</exlcude>
-                        <exclude>update*</exclude>
-                      </excludes>
-                    </filter>
-
                   </filters>
 
                   <!-- relocate classes from mssql-jdbc -->

http://git-wip-us.apache.org/repos/asf/hadoop/blob/429a07e0/hadoop-client-modules/hadoop-client-runtime/pom.xml
----------------------------------------------------------------------
diff --git a/hadoop-client-modules/hadoop-client-runtime/pom.xml b/hadoop-client-modules/hadoop-client-runtime/pom.xml
index 532fae9..bfa6c15 100644
--- a/hadoop-client-modules/hadoop-client-runtime/pom.xml
+++ b/hadoop-client-modules/hadoop-client-runtime/pom.xml
@@ -212,6 +212,17 @@
                         <exclude>ccache.txt</exclude>
                       </excludes>
                     </filter>
+                    <!-- remove utility classes which are not required from
+                         dnsjava -->
+                    <filter>
+                      <artifact>dnsjava:dnsjava</artifact>
+                      <excludes>
+                        <exclude>dig*</exclude>
+                        <exclude>jnamed*</exclude>
+                        <exclude>lookup*</exclude>
+                        <exclude>update*</exclude>
+                      </excludes>
+                    </filter>
                   </filters>
                   <relocations>
                     <relocation>

http://git-wip-us.apache.org/repos/asf/hadoop/blob/429a07e0/hadoop-common-project/hadoop-common/pom.xml
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/pom.xml b/hadoop-common-project/hadoop-common/pom.xml
index 695dcde..1e6da92 100644
--- a/hadoop-common-project/hadoop-common/pom.xml
+++ b/hadoop-common-project/hadoop-common/pom.xml
@@ -324,6 +324,11 @@
       <artifactId>mockwebserver</artifactId>
       <scope>test</scope>
     </dependency>
+    <dependency>
+      <groupId>dnsjava</groupId>
+      <artifactId>dnsjava</artifactId>
+      <scope>compile</scope>
+    </dependency>
   </dependencies>
 
   <build>

http://git-wip-us.apache.org/repos/asf/hadoop/blob/429a07e0/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/SecurityUtil.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/SecurityUtil.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/SecurityUtil.java
index 0de334a..9fea535 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/SecurityUtil.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/SecurityUtil.java
@@ -27,6 +27,7 @@ import java.net.URI;
 import java.net.UnknownHostException;
 import java.security.PrivilegedAction;
 import java.security.PrivilegedExceptionAction;
+import java.util.ArrayList;
 import java.util.Arrays;
 import java.util.Collections;
 import java.util.List;
@@ -52,8 +53,9 @@ import org.apache.hadoop.util.StringUtils;
 import org.apache.hadoop.util.ZKUtil;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
-//this will need to be replaced someday when there is a suitable replacement
-import sun.net.dns.ResolverConfiguration;
+import org.xbill.DNS.Name;
+import org.xbill.DNS.ResolverConfig;
+
 
 import com.google.common.annotations.VisibleForTesting;
 import com.google.common.net.InetAddresses;
@@ -584,10 +586,15 @@ public final class SecurityUtil {
    *       hadoop.security.token.service.use_ip=false 
    */
   protected static class QualifiedHostResolver implements HostResolver {
-    @SuppressWarnings("unchecked")
-    private List<String> searchDomains =
-        ResolverConfiguration.open().searchlist();
-    
+    private List<String> searchDomains;
+    {
+      ResolverConfig resolverConfig = ResolverConfig.getCurrentConfig();
+      searchDomains = new ArrayList<>();
+      for (Name name : resolverConfig.searchPath()) {
+        searchDomains.add(name.toString());
+      }
+    }
+
     /**
      * Create an InetAddress with a fully qualified hostname of the given
      * hostname.  InetAddress does not qualify an incomplete hostname that
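
As a standalone illustration, the dnsjava replacement can be exercised as
below; a minimal sketch assuming the dnsjava version wired in here, where
ResolverConfig.getCurrentConfig().searchPath() returns an array of Name
(exactly what the patched SecurityUtil relies on).

    import java.util.ArrayList;
    import java.util.List;

    import org.xbill.DNS.Name;
    import org.xbill.DNS.ResolverConfig;

    public class PrintSearchDomains {
      public static void main(String[] args) {
        // Equivalent of the old ResolverConfiguration.open().searchlist().
        // Like the patched code, this assumes a non-null search path.
        List<String> searchDomains = new ArrayList<>();
        for (Name name : ResolverConfig.getCurrentConfig().searchPath()) {
          searchDomains.add(name.toString());
        }
        System.out.println(searchDomains);
      }
    }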




[49/50] [abbrv] hadoop git commit: Revert "Bad merge with 996a627b289947af3894bf83e7b63ec702a665cd"

Posted by xy...@apache.org.
Revert "Bad merge with 996a627b289947af3894bf83e7b63ec702a665cd"

This reverts commit 996a627b289947af3894bf83e7b63ec702a665cd.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/a9aeff85
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/a9aeff85
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/a9aeff85

Branch: refs/heads/HDDS-4
Commit: a9aeff85efd6b8869b3a525916440203a6ab56fb
Parents: 0c93793
Author: Xiaoyu Yao <xy...@apache.org>
Authored: Tue May 15 16:56:24 2018 -0700
Committer: Xiaoyu Yao <xy...@apache.org>
Committed: Thu Sep 20 15:13:07 2018 -0700

----------------------------------------------------------------------
 hadoop-hdds/common/src/main/resources/ozone-default.xml | 4 ----
 1 file changed, 4 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/a9aeff85/hadoop-hdds/common/src/main/resources/ozone-default.xml
----------------------------------------------------------------------
diff --git a/hadoop-hdds/common/src/main/resources/ozone-default.xml b/hadoop-hdds/common/src/main/resources/ozone-default.xml
index bd4dbfd..efb5aa6 100644
--- a/hadoop-hdds/common/src/main/resources/ozone-default.xml
+++ b/hadoop-hdds/common/src/main/resources/ozone-default.xml
@@ -152,7 +152,6 @@
     </description>
   </property>
   <property>
-<<<<<<< HEAD
     <name>dfs.ratis.client.request.timeout.duration</name>
     <value>3s</value>
     <tag>OZONE, RATIS, MANAGEMENT</tag>
@@ -210,9 +209,6 @@
   </property>
   <property>
     <name>hdds.container.report.interval</name>
-=======
-    <name>ozone.container.report.interval</name>
->>>>>>> HDDS-6. Enable SCM kerberos auth. Contributed by Ajay Kumar.
     <value>60000ms</value>
     <tag>OZONE, CONTAINER, MANAGEMENT</tag>
     <description>Time interval of the datanode to send container report. Each




[07/50] [abbrv] hadoop git commit: HDFS-13886. HttpFSFileSystem.getFileStatus() doesn't return "snapshot enabled" bit. Contributed by Siyao Meng.

Posted by xy...@apache.org.
HDFS-13886. HttpFSFileSystem.getFileStatus() doesn't return "snapshot enabled" bit. Contributed by Siyao Meng.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/44857476
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/44857476
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/44857476

Branch: refs/heads/HDDS-4
Commit: 44857476fa993fbf9c97f979b91e19d27632c10a
Parents: 8382b86
Author: Wei-Chiu Chuang <we...@apache.org>
Authored: Tue Sep 18 15:33:02 2018 -0700
Committer: Wei-Chiu Chuang <we...@apache.org>
Committed: Tue Sep 18 15:33:02 2018 -0700

----------------------------------------------------------------------
 .../hadoop/fs/http/client/HttpFSFileSystem.java |  2 +-
 .../hadoop/fs/http/server/FSOperations.java     |  3 ++
 .../fs/http/client/BaseTestHttpFSWith.java      | 35 +++++++++++++++++++-
 3 files changed, 38 insertions(+), 2 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/44857476/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/client/HttpFSFileSystem.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/client/HttpFSFileSystem.java b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/client/HttpFSFileSystem.java
index ce76f05..dd285d4 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/client/HttpFSFileSystem.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/client/HttpFSFileSystem.java
@@ -199,7 +199,7 @@ public class HttpFSFileSystem extends FileSystem
 
   public static final String ENC_BIT_JSON = "encBit";
   public static final String EC_BIT_JSON = "ecBit";
-  public static final String SNAPSHOT_BIT_JSON = "seBit";
+  public static final String SNAPSHOT_BIT_JSON = "snapshotEnabled";
 
   public static final String DIRECTORY_LISTING_JSON = "DirectoryListing";
   public static final String PARTIAL_LISTING_JSON = "partialListing";

http://git-wip-us.apache.org/repos/asf/hadoop/blob/44857476/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/server/FSOperations.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/server/FSOperations.java b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/server/FSOperations.java
index 1d47a61..a3c45c79 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/server/FSOperations.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/server/FSOperations.java
@@ -120,6 +120,9 @@ public class FSOperations {
     if (fileStatus.getPermission().getErasureCodedBit()) {
       json.put(HttpFSFileSystem.EC_BIT_JSON, true);
     }
+    if (fileStatus.isSnapshotEnabled()) {
+      json.put(HttpFSFileSystem.SNAPSHOT_BIT_JSON, true);
+    }
     return json;
   }
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/44857476/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/fs/http/client/BaseTestHttpFSWith.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/fs/http/client/BaseTestHttpFSWith.java b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/fs/http/client/BaseTestHttpFSWith.java
index a6dce4d..8dabdea 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/fs/http/client/BaseTestHttpFSWith.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/fs/http/client/BaseTestHttpFSWith.java
@@ -376,6 +376,35 @@ public abstract class BaseTestHttpFSWith extends HFSTestCase {
     Assert.assertEquals(stati[0].getPath(), statl[0].getPath());
   }
 
+  private void testFileStatusAttr() throws Exception {
+    if (!this.isLocalFS()) {
+      // Create a directory
+      Path path = new Path("/tmp/tmp-snap-test");
+      DistributedFileSystem distributedFs = (DistributedFileSystem) FileSystem
+          .get(path.toUri(), this.getProxiedFSConf());
+      distributedFs.mkdirs(path);
+      // Get the FileSystem instance that's being tested
+      FileSystem fs = this.getHttpFSFileSystem();
+      // Check FileStatus
+      assertFalse("Snapshot should be disallowed by default",
+          fs.getFileStatus(path).isSnapshotEnabled());
+      // Allow snapshot
+      distributedFs.allowSnapshot(path);
+      // Check FileStatus
+      assertTrue("Snapshot enabled bit is not set in FileStatus",
+          fs.getFileStatus(path).isSnapshotEnabled());
+      // Disallow snapshot
+      distributedFs.disallowSnapshot(path);
+      // Check FileStatus
+      assertFalse("Snapshot enabled bit is not cleared in FileStatus",
+          fs.getFileStatus(path).isSnapshotEnabled());
+      // Cleanup
+      fs.delete(path, true);
+      fs.close();
+      distributedFs.close();
+    }
+  }
+
   private static void assertSameListing(FileSystem expected, FileSystem
       actual, Path p) throws IOException {
     // Consume all the entries from both iterators
@@ -1041,7 +1070,8 @@ public abstract class BaseTestHttpFSWith extends HFSTestCase {
     SET_REPLICATION, CHECKSUM, CONTENT_SUMMARY, FILEACLS, DIRACLS, SET_XATTR,
     GET_XATTRS, REMOVE_XATTR, LIST_XATTRS, ENCRYPTION, LIST_STATUS_BATCH,
     GETTRASHROOT, STORAGEPOLICY, ERASURE_CODING,
-    CREATE_SNAPSHOT, RENAME_SNAPSHOT, DELETE_SNAPSHOT
+    CREATE_SNAPSHOT, RENAME_SNAPSHOT, DELETE_SNAPSHOT,
+    FILE_STATUS_ATTR
   }
 
   private void operation(Operation op) throws Exception {
@@ -1139,6 +1169,9 @@ public abstract class BaseTestHttpFSWith extends HFSTestCase {
     case DELETE_SNAPSHOT:
       testDeleteSnapshot();
       break;
+    case FILE_STATUS_ATTR:
+      testFileStatusAttr();
+      break;
     }
   }
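
A minimal end-to-end sketch of what the new test verifies, runnable only
against a live cluster: the hdfs:// and webhdfs:// URIs below are
placeholders, while allowSnapshot() and isSnapshotEnabled() are the same
calls used in testFileStatusAttr() above.

    import java.net.URI;

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.hdfs.DistributedFileSystem;

    public class SnapshotBitCheck {
      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        Path dir = new Path("/tmp/tmp-snap-test");

        DistributedFileSystem dfs = (DistributedFileSystem) FileSystem
            .get(URI.create("hdfs://namenode:8020"), conf);  // placeholder
        dfs.mkdirs(dir);
        dfs.allowSnapshot(dir);  // admin call that sets the bit

        // Before this fix the HttpFS JSON used "seBit", so the client saw
        // false even after allowSnapshot(); it now reports the real bit.
        FileSystem httpfs = FileSystem
            .get(URI.create("webhdfs://httpfs-host:14000"), conf); // placeholder
        System.out.println(httpfs.getFileStatus(dir).isSnapshotEnabled());
      }
    }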
 




[13/50] [abbrv] hadoop git commit: HDDS-497. Suppress license warnings for error log files. Contributed by Arpit Agarwal.

Posted by xy...@apache.org.
HDDS-497. Suppress license warnings for error log files. Contributed by Arpit Agarwal.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/fb85351d
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/fb85351d
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/fb85351d

Branch: refs/heads/HDDS-4
Commit: fb85351dc6506e92a7c8c3878d1897291b7850d0
Parents: 27978bc
Author: Arpit Agarwal <ar...@apache.org>
Authored: Tue Sep 18 21:18:15 2018 -0700
Committer: Arpit Agarwal <ar...@apache.org>
Committed: Tue Sep 18 21:18:15 2018 -0700

----------------------------------------------------------------------
 hadoop-hdds/pom.xml  |  7 ++++---
 hadoop-ozone/pom.xml | 30 +++++++++++++++---------------
 2 files changed, 19 insertions(+), 18 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/fb85351d/hadoop-hdds/pom.xml
----------------------------------------------------------------------
diff --git a/hadoop-hdds/pom.xml b/hadoop-hdds/pom.xml
index 563ce9e..ce4f234 100644
--- a/hadoop-hdds/pom.xml
+++ b/hadoop-hdds/pom.xml
@@ -93,6 +93,7 @@ http://maven.apache.org/xsd/maven-4.0.0.xsd">
         <artifactId>apache-rat-plugin</artifactId>
         <configuration>
           <excludes>
+            <exclude>**/hs_err*.log</exclude>
             <exclude>**/target/**</exclude>
             <exclude>.gitattributes</exclude>
             <exclude>.idea/**</exclude>
@@ -100,13 +101,13 @@ http://maven.apache.org/xsd/maven-4.0.0.xsd">
             <exclude>src/main/resources/webapps/static/angular-nvd3-1.0.9.min.js</exclude>
             <exclude>src/main/resources/webapps/static/angular-route-1.6.4.min.js</exclude>
             <exclude>src/main/resources/webapps/static/d3-3.5.17.min.js</exclude>
-            <exclude>src/main/resources/webapps/static/nvd3-1.8.5.min.css</exclude>
             <exclude>src/main/resources/webapps/static/nvd3-1.8.5.min.css.map</exclude>
-            <exclude>src/main/resources/webapps/static/nvd3-1.8.5.min.js</exclude>
+            <exclude>src/main/resources/webapps/static/nvd3-1.8.5.min.css</exclude>
             <exclude>src/main/resources/webapps/static/nvd3-1.8.5.min.js.map</exclude>
-            <exclude>src/test/resources/incorrect.container</exclude>
+            <exclude>src/main/resources/webapps/static/nvd3-1.8.5.min.js</exclude>
             <exclude>src/test/resources/additionalfields.container</exclude>
             <exclude>src/test/resources/incorrect.checksum.container</exclude>
+            <exclude>src/test/resources/incorrect.container</exclude>
             <exclude>src/test/resources/test.db.ini</exclude>
           </excludes>
         </configuration>

http://git-wip-us.apache.org/repos/asf/hadoop/blob/fb85351d/hadoop-ozone/pom.xml
----------------------------------------------------------------------
diff --git a/hadoop-ozone/pom.xml b/hadoop-ozone/pom.xml
index 3c07b64..c73be60 100644
--- a/hadoop-ozone/pom.xml
+++ b/hadoop-ozone/pom.xml
@@ -120,37 +120,37 @@ http://maven.apache.org/xsd/maven-4.0.0.xsd">
         <artifactId>apache-rat-plugin</artifactId>
         <configuration>
           <excludes>
+            <exclude>**/*.json</exclude>
+            <exclude>**/hs_err*.log</exclude>
             <exclude>**/target/**</exclude>
             <exclude>.gitattributes</exclude>
             <exclude>.idea/**</exclude>
+            <exclude>dev-support/*tests</exclude>
             <exclude>dev-support/checkstyle*</exclude>
             <exclude>dev-support/jdiff/**</exclude>
-            <exclude>dev-support/*tests</exclude>
-            <exclude>src/test/empty-file</exclude>
-            <exclude>src/test/all-tests</exclude>
-            <exclude>src/test/resources/*.tgz</exclude>
-            <exclude>src/test/resources/data*</exclude>
-            <exclude>**/*.json</exclude>
-            <exclude>src/test/resources/empty-file</exclude>
+            <exclude>src/contrib/**</exclude>
             <exclude>src/main/webapps/datanode/robots.txt</exclude>
             <exclude>src/main/webapps/hdfs/robots.txt</exclude>
             <exclude>src/main/webapps/journal/robots.txt</exclude>
-            <exclude>src/main/webapps/secondary/robots.txt</exclude>
             <exclude>src/main/webapps/router/robots.txt</exclude>
-            <exclude>src/contrib/**</exclude>
+            <exclude>src/main/webapps/secondary/robots.txt</exclude>
             <exclude>src/site/resources/images/*</exclude>
+            <exclude>src/test/all-tests</exclude>
+            <exclude>src/test/empty-file</exclude>
+            <exclude>src/test/resources/*.tgz</exclude>
+            <exclude>src/test/resources/data*</exclude>
+            <exclude>src/test/resources/empty-file</exclude>
+            <exclude>webapps/static/angular-1.6.4.min.js</exclude>
+            <exclude>webapps/static/angular-nvd3-1.0.9.min.js</exclude>
+            <exclude>webapps/static/angular-route-1.6.4.min.js</exclude>
             <exclude>webapps/static/bootstrap-3.3.7/**</exclude>
+            <exclude>webapps/static/d3-3.5.17.min.js</exclude>
             <exclude>webapps/static/jquery-3.3.1.min.js</exclude>
             <exclude>webapps/static/jquery.dataTables.min.js</exclude>
             <exclude>webapps/static/nvd3-1.8.5.min.css.map</exclude>
-            <exclude>webapps/static/nvd3-1.8.5.min.js</exclude>
-            <exclude>webapps/static/angular-route-1.6.4.min.js
-            </exclude>
             <exclude>webapps/static/nvd3-1.8.5.min.css</exclude>
-            <exclude>webapps/static/angular-nvd3-1.0.9.min.js</exclude>
             <exclude>webapps/static/nvd3-1.8.5.min.js.map</exclude>
-            <exclude>webapps/static/angular-1.6.4.min.js</exclude>
-            <exclude>webapps/static/d3-3.5.17.min.js</exclude>
+            <exclude>webapps/static/nvd3-1.8.5.min.js</exclude>
           </excludes>
         </configuration>
       </plugin>




[27/50] [abbrv] hadoop git commit: HADOOP-15684. triggerActiveLogRoll stuck on dead name node, when ConnectTimeoutException happens. Contributed by Rong Tang.

Posted by xy...@apache.org.
HADOOP-15684. triggerActiveLogRoll stuck on dead name node, when ConnectTimeoutException happens. Contributed by Rong Tang.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/090272d7
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/090272d7
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/090272d7

Branch: refs/heads/HDDS-4
Commit: 090272d7de7cb5f1133359d66780aef7c5cce5c9
Parents: 236d16e
Author: Inigo Goiri <in...@apache.org>
Authored: Wed Sep 19 12:58:31 2018 -0700
Committer: Inigo Goiri <in...@apache.org>
Committed: Wed Sep 19 13:00:30 2018 -0700

----------------------------------------------------------------------
 .../hdfs/server/namenode/ha/EditLogTailer.java  | 34 ++-----
 .../apache/hadoop/hdfs/MiniDFSNNTopology.java   | 17 ++++
 .../server/namenode/ha/TestEditLogTailer.java   | 98 +++++++++++++-------
 3 files changed, 93 insertions(+), 56 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/090272d7/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/EditLogTailer.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/EditLogTailer.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/EditLogTailer.java
index 4ba2aa3..fc5f3a3 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/EditLogTailer.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/EditLogTailer.java
@@ -53,8 +53,6 @@ import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
 import org.apache.hadoop.hdfs.server.namenode.NameNode;
 import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocol;
 import org.apache.hadoop.ipc.RPC;
-import org.apache.hadoop.ipc.RemoteException;
-import org.apache.hadoop.ipc.StandbyException;
 import org.apache.hadoop.security.SecurityUtil;
 
 import static org.apache.hadoop.util.Time.monotonicNow;
@@ -382,15 +380,6 @@ public class EditLogTailer {
       future.get(rollEditsTimeoutMs, TimeUnit.MILLISECONDS);
       lastRollTriggerTxId = lastLoadedTxnId;
     } catch (ExecutionException e) {
-      Throwable cause = e.getCause();
-      if (cause instanceof RemoteException) {
-        IOException ioe = ((RemoteException) cause).unwrapRemoteException();
-        if (ioe instanceof StandbyException) {
-          LOG.info("Skipping log roll. Remote node is not in Active state: " +
-              ioe.getMessage().split("\n")[0]);
-          return;
-        }
-      }
       LOG.warn("Unable to trigger a roll of the active NN", e);
     } catch (TimeoutException e) {
       if (future != null) {
@@ -497,7 +486,8 @@ public class EditLogTailer {
    * This mechanism is <b>very bad</b> for cases where we care about being <i>fast</i>; it just
    * blindly goes and tries namenodes.
    */
-  private abstract class MultipleNameNodeProxy<T> implements Callable<T> {
+  @VisibleForTesting
+  abstract class MultipleNameNodeProxy<T> implements Callable<T> {
 
     /**
      * Do the actual work to the remote namenode via the {@link #cachedActiveProxy}.
@@ -513,19 +503,13 @@ public class EditLogTailer {
         try {
           T ret = doWork();
           return ret;
-        } catch (RemoteException e) {
-          Throwable cause = e.unwrapRemoteException(StandbyException.class);
-          // if its not a standby exception, then we need to re-throw it, something bad has happened
-          if (cause == e) {
-            throw e;
-          } else {
-            // it is a standby exception, so we try the other NN
-            LOG.warn("Failed to reach remote node: " + currentNN
-                + ", retrying with remaining remote NNs");
-            cachedActiveProxy = null;
-            // this NN isn't responding to requests, try the next one
-            nnLoopCount++;
-          }
+        } catch (IOException e) {
+          LOG.warn("Exception from remote name node " + currentNN
+              + ", try next.", e);
+
+          // Try next name node if exception happens.
+          cachedActiveProxy = null;
+          nnLoopCount++;
         }
       }
       throw new IOException("Cannot find any valid remote NN to service request!");

http://git-wip-us.apache.org/repos/asf/hadoop/blob/090272d7/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSNNTopology.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSNNTopology.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSNNTopology.java
index c21ff80..390c61c 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSNNTopology.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSNNTopology.java
@@ -73,6 +73,23 @@ public class MiniDFSNNTopology {
   }
 
   /**
+   * Set up an HA topology with a single HA nameservice.
+   * @param nnCount of namenodes to use with the nameservice
+   * @param basePort for IPC and Http ports of namenodes.
+   */
+  public static MiniDFSNNTopology simpleHATopology(int nnCount, int basePort) {
+    MiniDFSNNTopology.NSConf ns = new MiniDFSNNTopology.NSConf("minidfs-ns");
+    for (int i = 0; i < nnCount; i++) {
+      ns.addNN(new MiniDFSNNTopology.NNConf("nn" + i)
+          .setIpcPort(basePort++)
+          .setHttpPort(basePort++));
+    }
+    MiniDFSNNTopology topology = new MiniDFSNNTopology()
+        .addNameservice(ns);
+    return topology;
+  }
+
+  /**
    * Set up federated cluster with the given number of nameservices, each
    * of which has only a single NameNode.
    */

http://git-wip-us.apache.org/repos/asf/hadoop/blob/090272d7/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestEditLogTailer.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestEditLogTailer.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestEditLogTailer.java
index 68b3e2b..b94cd2a 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestEditLogTailer.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestEditLogTailer.java
@@ -28,6 +28,7 @@ import java.net.BindException;
 import java.net.URI;
 import java.util.ArrayList;
 import java.util.Collection;
+import java.util.Random;
 import java.util.concurrent.Callable;
 import java.util.concurrent.TimeoutException;
 import java.util.concurrent.atomic.AtomicInteger;
@@ -46,7 +47,6 @@ import org.apache.hadoop.hdfs.server.namenode.FSImage;
 import org.apache.hadoop.hdfs.server.namenode.NNStorage;
 import org.apache.hadoop.hdfs.server.namenode.NameNode;
 import org.apache.hadoop.hdfs.server.namenode.NameNodeAdapter;
-import org.apache.hadoop.net.ServerSocketUtil;
 import org.apache.hadoop.test.GenericTestUtils;
 import org.slf4j.event.Level;
 import org.junit.Test;
@@ -177,21 +177,7 @@ public class TestEditLogTailer {
     MiniDFSCluster cluster = null;
     for (int i = 0; i < 5; i++) {
       try {
-        // Have to specify IPC ports so the NNs can talk to each other.
-        int[] ports = ServerSocketUtil.getPorts(3);
-        MiniDFSNNTopology topology = new MiniDFSNNTopology()
-            .addNameservice(new MiniDFSNNTopology.NSConf("ns1")
-                .addNN(new MiniDFSNNTopology.NNConf("nn1")
-                    .setIpcPort(ports[0]))
-                .addNN(new MiniDFSNNTopology.NNConf("nn2")
-                    .setIpcPort(ports[1]))
-                .addNN(new MiniDFSNNTopology.NNConf("nn3")
-                    .setIpcPort(ports[2])));
-
-        cluster = new MiniDFSCluster.Builder(conf)
-          .nnTopology(topology)
-          .numDataNodes(0)
-          .build();
+        cluster = createMiniDFSCluster(conf, 3);
         break;
       } catch (BindException e) {
         // retry if race on ports given by ServerSocketUtil#getPorts
@@ -222,21 +208,9 @@ public class TestEditLogTailer {
     conf.setInt(DFSConfigKeys.DFS_HA_TAILEDITS_PERIOD_KEY, 1);
     conf.setInt(DFSConfigKeys.DFS_HA_TAILEDITS_ALL_NAMESNODES_RETRY_KEY, 100);
 
-    // Have to specify IPC ports so the NNs can talk to each other.
-    MiniDFSNNTopology topology = new MiniDFSNNTopology()
-        .addNameservice(new MiniDFSNNTopology.NSConf("ns1")
-            .addNN(new MiniDFSNNTopology.NNConf("nn1")
-                .setIpcPort(ServerSocketUtil.getPort(0, 100)))
-            .addNN(new MiniDFSNNTopology.NNConf("nn2")
-                .setIpcPort(ServerSocketUtil.getPort(0, 100)))
-            .addNN(new MiniDFSNNTopology.NNConf("nn3")
-                .setIpcPort(ServerSocketUtil.getPort(0, 100))));
-
-    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
-        .nnTopology(topology)
-        .numDataNodes(0)
-        .build();
+    MiniDFSCluster cluster = null;
     try {
+      cluster = createMiniDFSCluster(conf, 3);
       cluster.transitionToStandby(0);
       cluster.transitionToStandby(1);
       cluster.transitionToStandby(2);
@@ -249,7 +223,9 @@ public class TestEditLogTailer {
       cluster.transitionToActive(0);
       waitForLogRollInSharedDir(cluster, 3);
     } finally {
-      cluster.shutdown();
+      if (cluster != null) {
+        cluster.shutdown();
+      }
     }
   }
   
@@ -316,4 +292,64 @@ public class TestEditLogTailer {
       cluster.shutdown();
     }
   }
+
+  @Test
+  public void testRollEditLogIOExceptionForRemoteNN() throws IOException {
+    Configuration conf = getConf();
+
+    // Roll every 1s
+    conf.setInt(DFSConfigKeys.DFS_HA_LOGROLL_PERIOD_KEY, 1);
+    conf.setInt(DFSConfigKeys.DFS_HA_TAILEDITS_PERIOD_KEY, 1);
+
+    MiniDFSCluster cluster = null;
+    try {
+      cluster = createMiniDFSCluster(conf, 3);
+      cluster.transitionToActive(0);
+      EditLogTailer tailer = Mockito.spy(
+          cluster.getNamesystem(1).getEditLogTailer());
+
+      final AtomicInteger invokedTimes = new AtomicInteger(0);
+
+      // It should go on to next name node when IOException happens.
+      when(tailer.getNameNodeProxy()).thenReturn(
+          tailer.new MultipleNameNodeProxy<Void>() {
+            @Override
+            protected Void doWork() throws IOException {
+              invokedTimes.getAndIncrement();
+              throw new IOException("It is an IO Exception.");
+            }
+          }
+      );
+
+      tailer.triggerActiveLogRoll();
+
+      // MultipleNameNodeProxy uses Round-robin to look for active NN
+      // to do RollEditLog. If doWork() fails, then IOException throws,
+      // it continues to try next NN. triggerActiveLogRoll finishes
+      // either due to success, or using up retries.
+      // In this test case, there are 2 remote name nodes, default retry is 3.
+      // For test purpose, doWork() always returns IOException,
+      // so the total invoked times will be default retry 3 * remote NNs 2 = 6
+      assertEquals(6, invokedTimes.get());
+    } finally {
+      if (cluster != null) {
+        cluster.shutdown();
+      }
+    }
+  }
+
+  private static MiniDFSCluster createMiniDFSCluster(Configuration conf,
+      int nnCount) throws IOException {
+    int basePort = 10060 + new Random().nextInt(100) * 2;
+
+    // By passing in basePort, name node will have IPC port set,
+    // which is needed for enabling roll log.
+    MiniDFSNNTopology topology =
+            MiniDFSNNTopology.simpleHATopology(nnCount, basePort);
+    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
+        .nnTopology(topology)
+        .numDataNodes(0)
+        .build();
+    return cluster;
+  }
 }
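
The retry policy this patch moves to can be summarized in isolation as
below: any IOException from the current remote NN (StandbyException,
ConnectTimeoutException, and so on) advances to the next candidate instead
of aborting. The class and field names are illustrative, not the actual
EditLogTailer members; only the loop shape mirrors MultipleNameNodeProxy.

    import java.io.IOException;
    import java.util.List;
    import java.util.concurrent.Callable;

    /** Sketch: round-robin fallthrough over remote NNs on any IOException. */
    abstract class RoundRobinCall<T> implements Callable<T> {
      private final List<String> namenodes; // candidates, round-robin order
      private final int maxLoops;           // full passes before giving up

      RoundRobinCall(List<String> namenodes, int maxLoops) {
        this.namenodes = namenodes;
        this.maxLoops = maxLoops;
      }

      protected abstract T doWork(String nn) throws IOException;

      @Override
      public T call() throws IOException {
        int attempts = 0;
        // With 2 remote NNs and maxLoops = 3 this gives the 6 attempts
        // asserted in testRollEditLogIOExceptionForRemoteNN above.
        while (attempts < namenodes.size() * maxLoops) {
          String nn = namenodes.get(attempts % namenodes.size());
          try {
            return doWork(nn);
          } catch (IOException e) {
            attempts++; // dead or standby node: try the next one
          }
        }
        throw new IOException(
            "Cannot find any valid remote NN to service request!");
      }
    }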




[03/50] [abbrv] hadoop git commit: HDDS-440. Datanode loops forever if it cannot create directories. Contributed by Bharat Viswanadham.

Posted by xy...@apache.org.
HDDS-440. Datanode loops forever if it cannot create directories.
Contributed by Bharat Viswanadham.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/a968ea48
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/a968ea48
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/a968ea48

Branch: refs/heads/HDDS-4
Commit: a968ea489743ed09d63a6e267e34491e490cd2d8
Parents: e71f61e
Author: Anu Engineer <ae...@apache.org>
Authored: Tue Sep 18 14:31:50 2018 -0700
Committer: Anu Engineer <ae...@apache.org>
Committed: Tue Sep 18 14:31:50 2018 -0700

----------------------------------------------------------------------
 .../states/datanode/InitDatanodeState.java      | 12 +++++-
 .../common/TestDatanodeStateMachine.java        | 42 ++++++++++++++++++++
 2 files changed, 52 insertions(+), 2 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/a968ea48/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/states/datanode/InitDatanodeState.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/states/datanode/InitDatanodeState.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/states/datanode/InitDatanodeState.java
index b348327..995f172 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/states/datanode/InitDatanodeState.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/states/datanode/InitDatanodeState.java
@@ -116,7 +116,7 @@ public class InitDatanodeState implements DatanodeState,
   /**
    * Persist DatanodeDetails to datanode.id file.
    */
-  private void persistContainerDatanodeDetails() throws IOException {
+  private void persistContainerDatanodeDetails() {
     String dataNodeIDPath = HddsUtils.getDatanodeIdFilePath(conf);
     if (Strings.isNullOrEmpty(dataNodeIDPath)) {
       LOG.error("A valid file path is needed for config setting {}",
@@ -128,7 +128,15 @@ public class InitDatanodeState implements DatanodeState,
     DatanodeDetails datanodeDetails = this.context.getParent()
         .getDatanodeDetails();
     if (datanodeDetails != null && !idPath.exists()) {
-      ContainerUtils.writeDatanodeDetailsTo(datanodeDetails, idPath);
+      try {
+        ContainerUtils.writeDatanodeDetailsTo(datanodeDetails, idPath);
+      } catch (IOException ex) {
+        // As writing DatanodeDetails in to datanodeid file failed, which is
+        // a critical thing, so shutting down the state machine.
+        LOG.error("Writing to {} failed {}", dataNodeIDPath, ex.getMessage());
+        this.context.setState(DatanodeStateMachine.DatanodeStates.SHUTDOWN);
+        return;
+      }
       LOG.info("DatanodeDetails is persisted to {}", dataNodeIDPath);
     }
   }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a968ea48/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/TestDatanodeStateMachine.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/TestDatanodeStateMachine.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/TestDatanodeStateMachine.java
index 59029db..3fc0dd0 100644
--- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/TestDatanodeStateMachine.java
+++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/TestDatanodeStateMachine.java
@@ -311,6 +311,48 @@ public class TestDatanodeStateMachine {
     }
   }
 
+  @Test
+  public void testDatanodeStateMachineWithIdWriteFail() throws Exception {
+
+    File idPath = new File(
+        conf.get(ScmConfigKeys.OZONE_SCM_DATANODE_ID));
+    idPath.delete();
+    DatanodeDetails datanodeDetails = getNewDatanodeDetails();
+    DatanodeDetails.Port port = DatanodeDetails.newPort(
+        DatanodeDetails.Port.Name.STANDALONE,
+        OzoneConfigKeys.DFS_CONTAINER_IPC_PORT_DEFAULT);
+    datanodeDetails.setPort(port);
+
+    try (DatanodeStateMachine stateMachine =
+             new DatanodeStateMachine(datanodeDetails, conf)) {
+      DatanodeStateMachine.DatanodeStates currentState =
+          stateMachine.getContext().getState();
+      Assert.assertEquals(DatanodeStateMachine.DatanodeStates.INIT,
+          currentState);
+
+      DatanodeState<DatanodeStateMachine.DatanodeStates> task =
+          stateMachine.getContext().getTask();
+      Assert.assertEquals(InitDatanodeState.class, task.getClass());
+
+      // Set the parent of idPath to read-only so the state machine fails to
+      // write the datanode.id file and transitions to SHUTDOWN.
+      idPath.getParentFile().mkdirs();
+      idPath.getParentFile().setReadOnly();
+
+      task.execute(executorService);
+      DatanodeStateMachine.DatanodeStates newState =
+          task.await(2, TimeUnit.SECONDS);
+
+      // Since the parent of idPath is now read-only, the write fails and the
+      // state machine moves to SHUTDOWN.
+      Assert.assertEquals(DatanodeStateMachine.DatanodeStates.SHUTDOWN,
+          newState);
+
+      // Set the directory back to writable so cleanup can remove it.
+      idPath.getParentFile().setWritable(true);
+    }
+  }
+
   /**
    * Test state transition with a list of invalid scm configurations,
    * and verify the state transits to SHUTDOWN each time.


---------------------------------------------------------------------
To unsubscribe, e-mail: common-commits-unsubscribe@hadoop.apache.org
For additional commands, e-mail: common-commits-help@hadoop.apache.org


[08/50] [abbrv] hadoop git commit: HDDS-501. AllocateBlockResponse.keyLocation must be an optional field. Contributed by Arpit Agarwal.

Posted by xy...@apache.org.
HDDS-501. AllocateBlockResponse.keyLocation must be an optional field. Contributed by Arpit Agarwal.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/f176e8a3
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/f176e8a3
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/f176e8a3

Branch: refs/heads/HDDS-4
Commit: f176e8a3aeca2f72896a55e9d28d320ce3d3f76c
Parents: 44857476
Author: Arpit Agarwal <ar...@apache.org>
Authored: Tue Sep 18 15:41:28 2018 -0700
Committer: Arpit Agarwal <ar...@apache.org>
Committed: Tue Sep 18 15:41:28 2018 -0700

----------------------------------------------------------------------
 hadoop-ozone/common/src/main/proto/OzoneManagerProtocol.proto | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/f176e8a3/hadoop-ozone/common/src/main/proto/OzoneManagerProtocol.proto
----------------------------------------------------------------------
diff --git a/hadoop-ozone/common/src/main/proto/OzoneManagerProtocol.proto b/hadoop-ozone/common/src/main/proto/OzoneManagerProtocol.proto
index 242e3b5..975c790 100644
--- a/hadoop-ozone/common/src/main/proto/OzoneManagerProtocol.proto
+++ b/hadoop-ozone/common/src/main/proto/OzoneManagerProtocol.proto
@@ -324,7 +324,7 @@ message AllocateBlockRequest {
 
 message AllocateBlockResponse {
     required Status status = 1;
-    required KeyLocation keyLocation = 2;
+    optional KeyLocation keyLocation = 2;
 }
 
 message CommitKeyRequest {
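
For context, a short sketch (not part of this patch) of caller-side handling
once keyLocation is optional. It assumes the generated Java classes from
OzoneManagerProtocol.proto; the wrapper class and helper name are
illustrative:

    import java.io.IOException;

    final class AllocateBlockResponses {
      // With `required`, a response missing keyLocation failed protobuf
      // parsing outright; with `optional`, the message parses and callers
      // check hasKeyLocation() before use.
      static KeyLocation keyLocationOrFail(AllocateBlockResponse response)
          throws IOException {
        if (!response.hasKeyLocation()) {
          throw new IOException(
              "Block allocation failed, status: " + response.getStatus());
        }
        return response.getKeyLocation();
      }
    }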


---------------------------------------------------------------------
To unsubscribe, e-mail: common-commits-unsubscribe@hadoop.apache.org
For additional commands, e-mail: common-commits-help@hadoop.apache.org


[44/50] [abbrv] hadoop git commit: HDDS-6. Enable SCM kerberos auth. Contributed by Ajay Kumar.

Posted by xy...@apache.org.
HDDS-6. Enable SCM kerberos auth. Contributed by Ajay Kumar.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/9dc72d12
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/9dc72d12
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/9dc72d12

Branch: refs/heads/HDDS-4
Commit: 9dc72d12181acf42ce3f807eb485356154ecda34
Parents: 524f7cd
Author: Xiaoyu Yao <xy...@apache.org>
Authored: Wed May 9 15:56:03 2018 -0700
Committer: Xiaoyu Yao <xy...@apache.org>
Committed: Thu Sep 20 15:13:06 2018 -0700

----------------------------------------------------------------------
 .../authentication/util/KerberosUtil.java       |   2 +-
 .../conf/TestConfigurationFieldsBase.java       |   2 +
 .../java/org/apache/hadoop/hdds/HddsUtils.java  |  13 +-
 .../apache/hadoop/hdds/scm/ScmConfigKeys.java   |   9 +-
 .../scm/protocol/ScmBlockLocationProtocol.java  |   3 +
 .../StorageContainerLocationProtocol.java       |   4 +
 .../protocolPB/ScmBlockLocationProtocolPB.java  |   6 +
 .../StorageContainerLocationProtocolPB.java     |   4 +
 .../apache/hadoop/ozone/OzoneConfigKeys.java    |   3 +
 .../common/src/main/resources/ozone-default.xml |  35 +++-
 .../StorageContainerDatanodeProtocol.java       |   4 +
 .../StorageContainerDatanodeProtocolPB.java     |   6 +
 .../scm/server/StorageContainerManager.java     |  51 ++++-
 .../StorageContainerManagerHttpServer.java      |   5 +-
 .../ozone/client/protocol/ClientProtocol.java   |   3 +
 hadoop-ozone/common/src/main/bin/start-ozone.sh |   7 +
 hadoop-ozone/common/src/main/bin/stop-ozone.sh  |  13 +-
 hadoop-ozone/integration-test/pom.xml           |   6 +
 .../hadoop/ozone/MiniOzoneClusterImpl.java      |  17 +-
 .../hadoop/ozone/TestSecureOzoneCluster.java    | 205 +++++++++++++++++++
 .../ozone/TestStorageContainerManager.java      |   5 +-
 21 files changed, 366 insertions(+), 37 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/9dc72d12/hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/util/KerberosUtil.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/util/KerberosUtil.java b/hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/util/KerberosUtil.java
index c011045..4459928 100644
--- a/hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/util/KerberosUtil.java
+++ b/hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/util/KerberosUtil.java
@@ -167,7 +167,7 @@ public class KerberosUtil {
   }
 
   /* Return fqdn of the current host */
-  static String getLocalHostName() throws UnknownHostException {
+  public static String getLocalHostName() throws UnknownHostException {
     return InetAddress.getLocalHost().getCanonicalHostName();
   }
   

http://git-wip-us.apache.org/repos/asf/hadoop/blob/9dc72d12/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestConfigurationFieldsBase.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestConfigurationFieldsBase.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestConfigurationFieldsBase.java
index 152159b..bce1cd5 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestConfigurationFieldsBase.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestConfigurationFieldsBase.java
@@ -436,6 +436,8 @@ public abstract class TestConfigurationFieldsBase {
     // Create XML key/value map
     LOG_XML.debug("Reading XML property files\n");
     xmlKeyValueMap = extractPropertiesFromXml(xmlFilename);
+    // Remove hadoop property set in ozone-default.xml
+    xmlKeyValueMap.remove("hadoop.custom.tags");
     LOG_XML.debug("\n=====\n");
 
     // Create default configuration variable key/value map

http://git-wip-us.apache.org/repos/asf/hadoop/blob/9dc72d12/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/HddsUtils.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/HddsUtils.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/HddsUtils.java
index db9d374..efafb5c 100644
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/HddsUtils.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/HddsUtils.java
@@ -242,18 +242,7 @@ public final class HddsUtils {
   }
 
   public static boolean isHddsEnabled(Configuration conf) {
-    String securityEnabled =
-        conf.get(CommonConfigurationKeysPublic.HADOOP_SECURITY_AUTHENTICATION,
-            "simple");
-    boolean securityAuthorizationEnabled = conf.getBoolean(
-        CommonConfigurationKeysPublic.HADOOP_SECURITY_AUTHORIZATION, false);
-
-    if (securityEnabled.equals("kerberos") || securityAuthorizationEnabled) {
-      LOG.error("Ozone is not supported in a security enabled cluster. ");
-      return false;
-    } else {
-      return conf.getBoolean(OZONE_ENABLED, OZONE_ENABLED_DEFAULT);
-    }
+    return conf.getBoolean(OZONE_ENABLED, OZONE_ENABLED_DEFAULT);
   }
 
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/9dc72d12/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/ScmConfigKeys.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/ScmConfigKeys.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/ScmConfigKeys.java
index 63f5916..e8b0930 100644
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/ScmConfigKeys.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/ScmConfigKeys.java
@@ -158,8 +158,9 @@ public final class ScmConfigKeys {
       "ozone.scm.http-address";
   public static final String OZONE_SCM_HTTPS_ADDRESS_KEY =
       "ozone.scm.https-address";
-  public static final String OZONE_SCM_KEYTAB_FILE =
-      "ozone.scm.keytab.file";
+  public static final String OZONE_SCM_KERBEROS_KEYTAB_FILE_KEY =
+      "ozone.scm.kerberos.keytab.file";
+  public static final String OZONE_SCM_KERBEROS_PRINCIPAL_KEY =
+      "ozone.scm.kerberos.principal";
   public static final String OZONE_SCM_HTTP_BIND_HOST_DEFAULT = "0.0.0.0";
   public static final int OZONE_SCM_HTTP_BIND_PORT_DEFAULT = 9876;
   public static final int OZONE_SCM_HTTPS_BIND_PORT_DEFAULT = 9877;
@@ -278,6 +279,10 @@ public final class ScmConfigKeys {
   public static final String HDDS_SCM_WATCHER_TIMEOUT_DEFAULT =
       "10m";
 
+  public static final String SCM_WEB_AUTHENTICATION_KERBEROS_PRINCIPAL_KEY =
+      "ozone.scm.web.authentication.kerberos.principal";
+  public static final String SCM_WEB_AUTHENTICATION_KERBEROS_KEYTAB_FILE_KEY =
+      "ozone.scm.web.authentication.kerberos.keytab";
   /**
    * Never constructed.
    */

http://git-wip-us.apache.org/repos/asf/hadoop/blob/9dc72d12/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/protocol/ScmBlockLocationProtocol.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/protocol/ScmBlockLocationProtocol.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/protocol/ScmBlockLocationProtocol.java
index c8d4a80..e17f1c2 100644
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/protocol/ScmBlockLocationProtocol.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/protocol/ScmBlockLocationProtocol.java
@@ -17,6 +17,8 @@
  */
 package org.apache.hadoop.hdds.scm.protocol;
 
+import org.apache.hadoop.hdds.scm.ScmConfigKeys;
+import org.apache.hadoop.security.KerberosInfo;
 import org.apache.hadoop.hdds.scm.ScmInfo;
 import org.apache.hadoop.hdds.scm.container.common.helpers.AllocatedBlock;
 import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor;
@@ -31,6 +33,7 @@ import java.util.List;
  * ScmBlockLocationProtocol is used by an HDFS node to find the set of nodes
  * to read/write a block.
  */
+@KerberosInfo(serverPrincipal = ScmConfigKeys.OZONE_SCM_KERBEROS_PRINCIPAL_KEY)
 public interface ScmBlockLocationProtocol {
 
   /**

http://git-wip-us.apache.org/repos/asf/hadoop/blob/9dc72d12/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/protocol/StorageContainerLocationProtocol.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/protocol/StorageContainerLocationProtocol.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/protocol/StorageContainerLocationProtocol.java
index c55062b..454b9f3 100644
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/protocol/StorageContainerLocationProtocol.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/protocol/StorageContainerLocationProtocol.java
@@ -17,6 +17,8 @@
 
 package org.apache.hadoop.hdds.scm.protocol;
 
+import org.apache.hadoop.hdds.HddsConfigKeys;
+import org.apache.hadoop.hdds.scm.ScmConfigKeys;
 import org.apache.hadoop.hdds.scm.ScmInfo;
 import org.apache.hadoop.hdds.scm.container.common.helpers.ContainerWithPipeline;
 import org.apache.hadoop.hdds.scm.container.common.helpers.ContainerInfo;
@@ -27,11 +29,13 @@ import org.apache.hadoop.hdds.protocol.proto
 
 import java.io.IOException;
 import java.util.List;
+import org.apache.hadoop.security.KerberosInfo;
 
 /**
  * ContainerLocationProtocol is used by an HDFS node to find the set of nodes
  * that currently host a container.
  */
+@KerberosInfo(serverPrincipal = ScmConfigKeys.OZONE_SCM_KERBEROS_PRINCIPAL_KEY)
 public interface StorageContainerLocationProtocol {
   /**
    * Asks SCM where a container should be allocated. SCM responds with the

http://git-wip-us.apache.org/repos/asf/hadoop/blob/9dc72d12/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/protocolPB/ScmBlockLocationProtocolPB.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/protocolPB/ScmBlockLocationProtocolPB.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/protocolPB/ScmBlockLocationProtocolPB.java
index 837c95b..89bb066 100644
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/protocolPB/ScmBlockLocationProtocolPB.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/protocolPB/ScmBlockLocationProtocolPB.java
@@ -18,9 +18,13 @@
 package org.apache.hadoop.hdds.scm.protocolPB;
 
 import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.hdds.HddsConfigKeys;
 import org.apache.hadoop.hdds.protocol.proto.ScmBlockLocationProtocolProtos
     .ScmBlockLocationProtocolService;
+import org.apache.hadoop.hdds.scm.ScmConfigKeys;
+import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.ipc.ProtocolInfo;
+import org.apache.hadoop.security.KerberosInfo;
 
 /**
  * Protocol used from an HDFS node to StorageContainerManager.  This extends the
@@ -30,6 +34,8 @@ import org.apache.hadoop.ipc.ProtocolInfo;
     "org.apache.hadoop.ozone.protocol.ScmBlockLocationProtocol",
     protocolVersion = 1)
 @InterfaceAudience.Private
+@KerberosInfo(
+    serverPrincipal = ScmConfigKeys.OZONE_SCM_KERBEROS_PRINCIPAL_KEY)
 public interface ScmBlockLocationProtocolPB
     extends ScmBlockLocationProtocolService.BlockingInterface {
 }
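
For background: @KerberosInfo is how Hadoop RPC learns which configuration
key names the server's Kerberos principal, which is why the same annotation
is repeated on every SCM-facing protocol in this patch. A minimal sketch on
a hypothetical protocol (the interface and method are illustrative):

    import org.apache.hadoop.security.KerberosInfo;

    // The RPC layer resolves the expected server principal from the config
    // key named here when a client sets up a secure connection.
    @KerberosInfo(serverPrincipal = "ozone.scm.kerberos.principal")
    interface ExampleSecuredProtocol {
      // an arbitrary illustrative method
      String ping();
    }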

http://git-wip-us.apache.org/repos/asf/hadoop/blob/9dc72d12/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/protocolPB/StorageContainerLocationProtocolPB.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/protocolPB/StorageContainerLocationProtocolPB.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/protocolPB/StorageContainerLocationProtocolPB.java
index f234ad3..3bd83f9 100644
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/protocolPB/StorageContainerLocationProtocolPB.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/protocolPB/StorageContainerLocationProtocolPB.java
@@ -21,7 +21,9 @@ import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.hdds.protocol.proto
     .StorageContainerLocationProtocolProtos
     .StorageContainerLocationProtocolService;
+import org.apache.hadoop.hdds.scm.ScmConfigKeys;
 import org.apache.hadoop.ipc.ProtocolInfo;
+import org.apache.hadoop.security.KerberosInfo;
 
 /**
  * Protocol used from an HDFS node to StorageContainerManager.  This extends the
@@ -30,6 +32,8 @@ import org.apache.hadoop.ipc.ProtocolInfo;
 @ProtocolInfo(protocolName =
     "org.apache.hadoop.ozone.protocol.StorageContainerLocationProtocol",
     protocolVersion = 1)
+@KerberosInfo(
+    serverPrincipal = ScmConfigKeys.OZONE_SCM_KERBEROS_PRINCIPAL_KEY)
 @InterfaceAudience.Private
 public interface StorageContainerLocationProtocolPB
     extends StorageContainerLocationProtocolService.BlockingInterface {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/9dc72d12/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConfigKeys.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConfigKeys.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConfigKeys.java
index 599b4e8..fb83052 100644
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConfigKeys.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConfigKeys.java
@@ -296,6 +296,9 @@ public final class OzoneConfigKeys {
       "hdds.lock.suppress.warning.interval.ms";
   public static final long
       HDDS_LOCK_SUPPRESS_WARNING_INTERVAL_MS_DEAFULT = 10000L;
+  public static final String OZONE_SECURITY_ENABLED_KEY =
+      "ozone.security.enabled";
+  public static final String OZONE_SYSTEM_TAGS_KEY = "ozone.system.tags";
+  public static final boolean OZONE_SECURITY_ENABLED_DEFAULT = false;
 
   public static final String OZONE_CONTAINER_COPY_WORKDIR =
       "hdds.datanode.replication.work.dir";

http://git-wip-us.apache.org/repos/asf/hadoop/blob/9dc72d12/hadoop-hdds/common/src/main/resources/ozone-default.xml
----------------------------------------------------------------------
diff --git a/hadoop-hdds/common/src/main/resources/ozone-default.xml b/hadoop-hdds/common/src/main/resources/ozone-default.xml
index f7681e8..65b4dc4 100644
--- a/hadoop-hdds/common/src/main/resources/ozone-default.xml
+++ b/hadoop-hdds/common/src/main/resources/ozone-default.xml
@@ -1115,7 +1115,6 @@
       to occur.
     </description>
   </property>
-
   <property>
     <name>hdds.lock.suppress.warning.interval.ms</name>
     <value>10000</value>
@@ -1125,7 +1124,13 @@
       consecutive warnings within this interval.
     </description>
   </property>
-
+  <property>
+    <name>ozone.security.enabled</name>
+    <value>false</value>
+    <tag> OZONE, SECURITY, FLAG</tag>
+    <description>True if security is enabled for Ozone. When this property
+      is true, hadoop.security.authentication should be set to kerberos.
+    </description>
+  </property>
   <property>
     <name>hdds.command.status.report.interval</name>
     <value>30s</value>
@@ -1193,7 +1198,6 @@
       datanode unless the datanode confirms the completion.
     </description>
   </property>
-
   <property>
     <name>hdds.db.profile</name>
     <value>SSD</value>
@@ -1202,7 +1206,6 @@
     that tunes the RocksDB settings for the hardware it is running
     on. Right now, we have SSD and DISK as profile options.</description>
   </property>
-
   <property>
     <name>hdds.datanode.replication.work.dir</name>
     <tag>DATANODE</tag>
@@ -1222,4 +1225,28 @@
     </description>
   </property>
 
+  <property>
+    <name>ozone.scm.kerberos.keytab.file</name>
+    <value></value>
+    <tag> OZONE, SECURITY</tag>
+    <description> The keytab file used by each SCM daemon to login as its
+      service principal. The principal name is configured with
+      ozone.scm.kerberos.principal.
+    </description>
+  </property>
+  <property>
+    <name>ozone.scm.kerberos.principal</name>
+    <value></value>
+    <tag> OZONE, SECURITY</tag>
+    <description>The SCM service principal, e.g. scm/_HOST@REALM.TLD.</description>
+  </property>
+
+  <property>
+    <name>ozone.scm.web.authentication.kerberos.principal</name>
+    <value>HTTP/_HOST@EXAMPLE.COM</value>
+  </property>
+  <property>
+    <name>ozone.scm.web.authentication.kerberos.keytab</name>
+    <value>/etc/security/keytabs/HTTP.keytab</value>
+  </property>
 </configuration>
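
For context, a minimal sketch of setting the new security keys
programmatically, mirroring what TestSecureOzoneCluster does below; in a
real deployment these belong in ozone-site.xml, and the principal and
keytab values here are illustrative, not defaults:

    import org.apache.hadoop.hdds.conf.OzoneConfiguration;

    final class SecureScmConfDemo {
      // Illustrative values; the key names are the ones introduced above.
      static OzoneConfiguration secureConf() {
        OzoneConfiguration conf = new OzoneConfiguration();
        conf.setBoolean("ozone.security.enabled", true);
        conf.set("hadoop.security.authentication", "kerberos");
        conf.set("ozone.scm.kerberos.principal", "scm/_HOST@EXAMPLE.COM");
        conf.set("ozone.scm.kerberos.keytab.file",
            "/etc/security/keytabs/scm.keytab");
        conf.set("ozone.scm.web.authentication.kerberos.principal",
            "HTTP/_HOST@EXAMPLE.COM");
        conf.set("ozone.scm.web.authentication.kerberos.keytab",
            "/etc/security/keytabs/HTTP.keytab");
        return conf;
      }
    }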

http://git-wip-us.apache.org/repos/asf/hadoop/blob/9dc72d12/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/protocol/StorageContainerDatanodeProtocol.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/protocol/StorageContainerDatanodeProtocol.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/protocol/StorageContainerDatanodeProtocol.java
index 9296524..b723d28 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/protocol/StorageContainerDatanodeProtocol.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/protocol/StorageContainerDatanodeProtocol.java
@@ -41,11 +41,15 @@ import org.apache.hadoop.hdds.protocol.proto
     .StorageContainerDatanodeProtocolProtos.SCMVersionResponseProto;
 
 import java.io.IOException;
+import org.apache.hadoop.hdds.scm.ScmConfigKeys;
+import org.apache.hadoop.security.KerberosInfo;
 
 /**
  * The protocol spoken between datanodes and SCM. For specifics, please see
  * the Protoc file that defines this protocol.
  */
+@KerberosInfo(
+    serverPrincipal = ScmConfigKeys.OZONE_SCM_KERBEROS_PRINCIPAL_KEY)
 @InterfaceAudience.Private
 public interface StorageContainerDatanodeProtocol {
   /**

http://git-wip-us.apache.org/repos/asf/hadoop/blob/9dc72d12/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/protocolPB/StorageContainerDatanodeProtocolPB.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/protocolPB/StorageContainerDatanodeProtocolPB.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/protocolPB/StorageContainerDatanodeProtocolPB.java
index 9b28b5a..9c32ef8 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/protocolPB/StorageContainerDatanodeProtocolPB.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/protocolPB/StorageContainerDatanodeProtocolPB.java
@@ -19,7 +19,10 @@ package org.apache.hadoop.ozone.protocolPB;
 import org.apache.hadoop.hdds.protocol.proto
     .StorageContainerDatanodeProtocolProtos
     .StorageContainerDatanodeProtocolService;
+import org.apache.hadoop.hdds.scm.ScmConfigKeys;
+import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.ipc.ProtocolInfo;
+import org.apache.hadoop.security.KerberosInfo;
 
 /**
  * Protocol used from a datanode to StorageContainerManager.  This extends
@@ -29,6 +32,9 @@ import org.apache.hadoop.ipc.ProtocolInfo;
 @ProtocolInfo(protocolName =
     "org.apache.hadoop.ozone.protocol.StorageContainerDatanodeProtocol",
     protocolVersion = 1)
+@KerberosInfo(
+    serverPrincipal = ScmConfigKeys.OZONE_SCM_KERBEROS_PRINCIPAL_KEY,
+    clientPrincipal = DFSConfigKeys.DFS_DATANODE_KERBEROS_PRINCIPAL_KEY)
 public interface StorageContainerDatanodeProtocolPB extends
     StorageContainerDatanodeProtocolService.BlockingInterface {
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/9dc72d12/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/StorageContainerManager.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/StorageContainerManager.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/StorageContainerManager.java
index b2cbc93..7d1ebb4 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/StorageContainerManager.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/StorageContainerManager.java
@@ -28,10 +28,12 @@ import com.google.common.cache.RemovalListener;
 import com.google.common.cache.RemovalNotification;
 import com.google.protobuf.BlockingService;
 import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hdds.HddsUtils;
 import org.apache.hadoop.hdds.conf.OzoneConfiguration;
 import org.apache.hadoop.hdds.protocol.proto.HddsProtos.NodeState;
 import org.apache.hadoop.hdds.scm.ScmConfigKeys;
+import org.apache.hadoop.hdds.scm.HddsServerUtil;
 import org.apache.hadoop.hdds.scm.block.BlockManager;
 import org.apache.hadoop.hdds.scm.block.BlockManagerImpl;
 import org.apache.hadoop.hdds.scm.block.PendingDeleteHandler;
@@ -78,6 +80,9 @@ import org.apache.hadoop.ozone.common.Storage.StorageState;
 import org.apache.hadoop.ozone.common.StorageInfo;
 import org.apache.hadoop.ozone.lease.LeaseManager;
 import org.apache.hadoop.security.UserGroupInformation;
+import org.apache.hadoop.security.UserGroupInformation.AuthenticationMethod;
+import org.apache.hadoop.security.SecurityUtil;
+import org.apache.hadoop.security.authentication.client.AuthenticationException;
 import org.apache.hadoop.util.GenericOptionsParser;
 import org.apache.hadoop.util.StringUtils;
 
@@ -100,6 +105,10 @@ import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_DB_CACHE_SIZE_D
 import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_DB_CACHE_SIZE_MB;
 
 import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_ENABLED;
+import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_SECURITY_ENABLED_DEFAULT;
+import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_SECURITY_ENABLED_KEY;
+import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_KERBEROS_PRINCIPAL_KEY;
+import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_KERBEROS_KEYTAB_FILE_KEY;
 import static org.apache.hadoop.util.ExitUtil.terminate;
 
 /**
@@ -176,6 +185,7 @@ public final class StorageContainerManager extends ServiceRuntimeInfoImpl
   private final ReplicationManager replicationManager;
 
   private final LeaseManager<Long> commandWatcherLeaseManager;
+  private Configuration scmConf;
 
   private final ReplicationActivityStatus replicationStatus;
   private final SCMChillModeManager scmChillModeManager;
@@ -187,13 +197,19 @@ public final class StorageContainerManager extends ServiceRuntimeInfoImpl
    *
    * @param conf configuration
    */
-  private StorageContainerManager(OzoneConfiguration conf) throws IOException {
+  private StorageContainerManager(OzoneConfiguration conf)
+      throws IOException, AuthenticationException {
 
     final int cacheSize = conf.getInt(OZONE_SCM_DB_CACHE_SIZE_MB,
         OZONE_SCM_DB_CACHE_SIZE_DEFAULT);
-
+    this.scmConf = conf;
     StorageContainerManager.initMetrics();
     initContainerReportCache(conf);
+    // Authenticate SCM if security is enabled
+    if (this.scmConf.getBoolean(OZONE_SECURITY_ENABLED_KEY,
+        OZONE_SECURITY_ENABLED_DEFAULT)) {
+      loginAsSCMUser(this.scmConf);
+    }
 
     scmStorage = new SCMStorage(conf);
     if (scmStorage.getState() != StorageState.INITIALIZED) {
@@ -310,6 +326,33 @@ public final class StorageContainerManager extends ServiceRuntimeInfoImpl
   }
 
   /**
+   * Login as the configured user for SCM.
+   *
+   * @param conf
+   */
+  private void loginAsSCMUser(Configuration conf)
+      throws IOException, AuthenticationException {
+    LOG.debug("Ozone security is enabled. Attempting login for SCM user. "
+            + "Principal: {}, keytab: {}",
+        this.scmConf.get(OZONE_SCM_KERBEROS_PRINCIPAL_KEY),
+        this.scmConf.get(OZONE_SCM_KERBEROS_KEYTAB_FILE_KEY));
+
+    if (SecurityUtil.getAuthenticationMethod(conf)
+        .equals(AuthenticationMethod.KERBEROS)) {
+      UserGroupInformation.setConfiguration(this.scmConf);
+      InetSocketAddress socAddr = HddsServerUtil
+          .getScmBlockClientBindAddress(conf);
+      SecurityUtil.login(conf, OZONE_SCM_KERBEROS_KEYTAB_FILE_KEY,
+          OZONE_SCM_KERBEROS_PRINCIPAL_KEY, socAddr.getHostName());
+    } else {
+      throw new AuthenticationException(
+          SecurityUtil.getAuthenticationMethod(conf)
+          + " authentication method not supported. SCM user login failed.");
+    }
+    LOG.info("SCM login successful.");
+  }
+
+  /**
    * Builds a message for logging startup information about an RPC server.
    *
    * @param description RPC server description
@@ -390,6 +433,7 @@ public final class StorageContainerManager extends ServiceRuntimeInfoImpl
     out.println(USAGE + "\n");
   }
 
   /**
    * Create an SCM instance based on the supplied command-line arguments.
    *
@@ -420,7 +464,8 @@ public final class StorageContainerManager extends ServiceRuntimeInfoImpl
   private static StorageContainerManager createSCM(
       String[] args,
       OzoneConfiguration conf,
-      boolean printBanner) throws IOException {
+      boolean printBanner)
+      throws IOException, AuthenticationException {
     String[] argv = (args == null) ? new String[0] : args;
     if (!HddsUtils.isHddsEnabled(conf)) {
       System.err.println(

http://git-wip-us.apache.org/repos/asf/hadoop/blob/9dc72d12/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/StorageContainerManagerHttpServer.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/StorageContainerManagerHttpServer.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/StorageContainerManagerHttpServer.java
index 75b2036..da936ad 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/StorageContainerManagerHttpServer.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/StorageContainerManagerHttpServer.java
@@ -20,7 +20,6 @@ package org.apache.hadoop.hdds.scm.server;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hdds.scm.ScmConfigKeys;
 import org.apache.hadoop.hdds.server.BaseHttpServer;
-import org.apache.hadoop.ozone.OzoneConfigKeys;
 
 import java.io.IOException;
 
@@ -63,11 +62,11 @@ public class StorageContainerManagerHttpServer extends BaseHttpServer {
   }
 
   @Override protected String getKeytabFile() {
-    return ScmConfigKeys.OZONE_SCM_KEYTAB_FILE;
+    return ScmConfigKeys.SCM_WEB_AUTHENTICATION_KERBEROS_KEYTAB_FILE_KEY;
   }
 
   @Override protected String getSpnegoPrincipal() {
-    return OzoneConfigKeys.OZONE_SCM_WEB_AUTHENTICATION_KERBEROS_PRINCIPAL;
+    return ScmConfigKeys.SCM_WEB_AUTHENTICATION_KERBEROS_PRINCIPAL_KEY;
   }
 
   @Override protected String getEnabledKey() {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/9dc72d12/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/protocol/ClientProtocol.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/protocol/ClientProtocol.java b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/protocol/ClientProtocol.java
index 008b69d..f3c710b 100644
--- a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/protocol/ClientProtocol.java
+++ b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/protocol/ClientProtocol.java
@@ -19,6 +19,7 @@
 package org.apache.hadoop.ozone.client.protocol;
 
 import org.apache.hadoop.fs.StorageType;
+import org.apache.hadoop.hdds.scm.ScmConfigKeys;
 import org.apache.hadoop.ozone.OzoneAcl;
 import org.apache.hadoop.ozone.client.*;
 import org.apache.hadoop.hdds.client.OzoneQuota;
@@ -29,6 +30,7 @@ import org.apache.hadoop.ozone.client.io.OzoneOutputStream;
 
 import java.io.IOException;
 import java.util.List;
+import org.apache.hadoop.security.KerberosInfo;
 
 /**
  * An implementer of this interface is capable of connecting to Ozone Cluster
@@ -38,6 +40,7 @@ import java.util.List;
  * includes: {@link org.apache.hadoop.ozone.client.rpc.RpcClient} for RPC and
  * {@link  org.apache.hadoop.ozone.client.rest.RestClient} for REST.
  */
+@KerberosInfo(serverPrincipal = ScmConfigKeys.OZONE_SCM_KERBEROS_PRINCIPAL_KEY)
 public interface ClientProtocol {
 
   /**

http://git-wip-us.apache.org/repos/asf/hadoop/blob/9dc72d12/hadoop-ozone/common/src/main/bin/start-ozone.sh
----------------------------------------------------------------------
diff --git a/hadoop-ozone/common/src/main/bin/start-ozone.sh b/hadoop-ozone/common/src/main/bin/start-ozone.sh
index cfb54e0..536d09e 100755
--- a/hadoop-ozone/common/src/main/bin/start-ozone.sh
+++ b/hadoop-ozone/common/src/main/bin/start-ozone.sh
@@ -75,6 +75,13 @@ if [[ ${SECURITY_ENABLED} == "kerberos" || ${SECURITY_AUTHORIZATION_ENABLED} ==
   exit 1
 fi
 
+#SECURITY_ENABLED=$("${HADOOP_HDFS_HOME}/bin/ozone" getozoneconf -confKey hadoop.security.authentication | tr '[:upper:]' '[:lower:]' 2>&-)
+#SECURITY_AUTHORIZATION_ENABLED=$("${HADOOP_HDFS_HOME}/bin/ozone" getozoneconf -confKey hadoop.security.authorization | tr '[:upper:]' '[:lower:]' 2>&-)
+#if [[ ${SECURITY_ENABLED} == "kerberos" || ${SECURITY_AUTHORIZATION_ENABLED} == "true" ]]; then
+#  echo "Ozone is not supported in a security enabled cluster."
+#  exit 1
+#fi
+
 #---------------------------------------------------------
 # Check if ozone is enabled
 OZONE_ENABLED=$("${HADOOP_HDFS_HOME}/bin/ozone" getozoneconf -confKey ozone.enabled | tr '[:upper:]' '[:lower:]' 2>&-)

http://git-wip-us.apache.org/repos/asf/hadoop/blob/9dc72d12/hadoop-ozone/common/src/main/bin/stop-ozone.sh
----------------------------------------------------------------------
diff --git a/hadoop-ozone/common/src/main/bin/stop-ozone.sh b/hadoop-ozone/common/src/main/bin/stop-ozone.sh
index 97e1df4..22b2f9f 100755
--- a/hadoop-ozone/common/src/main/bin/stop-ozone.sh
+++ b/hadoop-ozone/common/src/main/bin/stop-ozone.sh
@@ -47,13 +47,12 @@ else
   exit 1
 fi
 
-SECURITY_ENABLED=$("${HADOOP_HDFS_HOME}/bin/ozone" getozoneconf -confKey hadoop.security.authentication | tr '[:upper:]' '[:lower:]' 2>&-)
-SECURITY_AUTHORIZATION_ENABLED=$("${HADOOP_HDFS_HOME}/bin/ozone" getozoneconf -confKey hadoop.security.authorization | tr '[:upper:]' '[:lower:]' 2>&-)
-
-if [[ ${SECURITY_ENABLED} == "kerberos" || ${SECURITY_AUTHORIZATION_ENABLED} == "true" ]]; then
-  echo "Ozone is not supported in a security enabled cluster."
-  exit 1
-fi
+#SECURITY_ENABLED=$("${HADOOP_HDFS_HOME}/bin/ozone" getozoneconf -confKey hadoop.security.authentication | tr '[:upper:]' '[:lower:]' 2>&-)
+#SECURITY_AUTHORIZATION_ENABLED=$("${HADOOP_HDFS_HOME}/bin/ozone" getozoneconf -confKey hadoop.security.authorization | tr '[:upper:]' '[:lower:]' 2>&-)
+#if [[ ${SECURITY_ENABLED} == "kerberos" || ${SECURITY_AUTHORIZATION_ENABLED} == "true" ]]; then
+#  echo "Ozone is not supported in a security enabled cluster."
+#  exit 1
+#fi
 
 #---------------------------------------------------------
 # Check if ozone is enabled

http://git-wip-us.apache.org/repos/asf/hadoop/blob/9dc72d12/hadoop-ozone/integration-test/pom.xml
----------------------------------------------------------------------
diff --git a/hadoop-ozone/integration-test/pom.xml b/hadoop-ozone/integration-test/pom.xml
index 993e91f..2331cfd 100644
--- a/hadoop-ozone/integration-test/pom.xml
+++ b/hadoop-ozone/integration-test/pom.xml
@@ -47,6 +47,12 @@ http://maven.apache.org/xsd/maven-4.0.0.xsd">
     </dependency>
     <dependency>
       <groupId>org.apache.hadoop</groupId>
+      <artifactId>hadoop-minikdc</artifactId>
+      <scope>test</scope>
+    </dependency>
+
+    <dependency>
+      <groupId>org.apache.hadoop</groupId>
       <artifactId>hadoop-ozone-objectstore-service</artifactId>
       <scope>provided</scope>
     </dependency>

http://git-wip-us.apache.org/repos/asf/hadoop/blob/9dc72d12/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/MiniOzoneClusterImpl.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/MiniOzoneClusterImpl.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/MiniOzoneClusterImpl.java
index b34a7d1..6b341d3 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/MiniOzoneClusterImpl.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/MiniOzoneClusterImpl.java
@@ -45,6 +45,7 @@ import org.apache.hadoop.hdds.scm.protocolPB
 import org.apache.hadoop.hdds.scm.protocolPB.StorageContainerLocationProtocolPB;
 import org.apache.hadoop.hdds.scm.server.StorageContainerManager;
 import org.apache.hadoop.security.UserGroupInformation;
+import org.apache.hadoop.security.authentication.client.AuthenticationException;
 import org.apache.hadoop.test.GenericTestUtils;
 
 import org.slf4j.Logger;
@@ -347,9 +348,16 @@ public final class MiniOzoneClusterImpl implements MiniOzoneCluster {
     public MiniOzoneCluster build() throws IOException {
       DefaultMetricsSystem.setMiniClusterMode(true);
       initializeConfiguration();
-      StorageContainerManager scm = createSCM();
-      scm.start();
-      OzoneManager om = createOM();
+      StorageContainerManager scm;
+      OzoneManager om;
+      try {
+        scm = createSCM();
+        scm.start();
+        om = createOM();
+      } catch (AuthenticationException ex) {
+        throw new IOException("Unable to build MiniOzoneCluster. ", ex);
+      }
+
       om.start();
       final List<HddsDatanodeService> hddsDatanodes = createHddsDatanodes(scm);
       MiniOzoneClusterImpl cluster = new MiniOzoneClusterImpl(conf, om, scm,
@@ -380,7 +388,8 @@ public final class MiniOzoneClusterImpl implements MiniOzoneCluster {
      *
      * @throws IOException
      */
-    private StorageContainerManager createSCM() throws IOException {
+    private StorageContainerManager createSCM()
+        throws IOException, AuthenticationException {
       configureSCM();
       SCMStorage scmStore = new SCMStorage(conf);
       initializeScmStorage(scmStore);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/9dc72d12/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestSecureOzoneCluster.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestSecureOzoneCluster.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestSecureOzoneCluster.java
new file mode 100644
index 0000000..9c430ad
--- /dev/null
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestSecureOzoneCluster.java
@@ -0,0 +1,205 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.ozone;
+
+import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_ADMINISTRATORS;
+import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_SECURITY_ENABLED_KEY;
+
+import java.io.File;
+import java.io.IOException;
+import java.nio.file.Path;
+import java.nio.file.Paths;
+import java.util.Properties;
+import java.util.UUID;
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
+import org.apache.hadoop.hdds.conf.OzoneConfiguration;
+import org.apache.hadoop.hdds.scm.ScmConfigKeys;
+import org.apache.hadoop.hdds.scm.ScmInfo;
+import org.apache.hadoop.hdds.scm.server.SCMStorage;
+import org.apache.hadoop.hdds.scm.server.StorageContainerManager;
+import org.apache.hadoop.minikdc.MiniKdc;
+import org.apache.hadoop.security.KerberosAuthException;
+import org.apache.hadoop.security.UserGroupInformation;
+import org.apache.hadoop.security.authentication.client.AuthenticationException;
+import org.apache.hadoop.security.authentication.util.KerberosUtil;
+import org.apache.hadoop.test.GenericTestUtils;
+import org.apache.hadoop.test.LambdaTestUtils;
+import org.junit.Assert;
+import org.junit.Before;
+import org.junit.Test;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+/**
+ * Test class for a security-enabled Ozone cluster.
+ */
+@InterfaceAudience.Private
+public final class TestSecureOzoneCluster {
+
+  private Logger LOGGER = LoggerFactory
+      .getLogger(TestSecureOzoneCluster.class);
+
+  private MiniKdc miniKdc;
+  private OzoneConfiguration conf;
+  private File workDir;
+  private static Properties securityProperties;
+  private File scmKeytab;
+  private File spnegoKeytab;
+  private String curUser;
+
+  @Before
+  public void init() {
+    try {
+      conf = new OzoneConfiguration();
+      startMiniKdc();
+      setSecureConfig(conf);
+      createCredentialsInKDC(conf, miniKdc);
+    } catch (Exception e) {
+      LOGGER.error("Failed to initialize TestSecureOzoneCluster", e);
+    }
+  }
+
+  private void createCredentialsInKDC(Configuration conf, MiniKdc miniKdc)
+      throws Exception {
+    createPrincipal(scmKeytab,
+        conf.get(ScmConfigKeys.OZONE_SCM_KERBEROS_PRINCIPAL_KEY));
+    createPrincipal(spnegoKeytab,
+        conf.get(ScmConfigKeys.SCM_WEB_AUTHENTICATION_KERBEROS_PRINCIPAL_KEY));
+  }
+
+  private void createPrincipal(File keytab, String... principal)
+      throws Exception {
+    miniKdc.createPrincipal(keytab, principal);
+  }
+
+  private void startMiniKdc() throws Exception {
+    workDir = GenericTestUtils
+        .getTestDir(TestSecureOzoneCluster.class.getSimpleName());
+    securityProperties = MiniKdc.createConf();
+    miniKdc = new MiniKdc(securityProperties, workDir);
+    miniKdc.start();
+  }
+
+  private void setSecureConfig(Configuration conf) throws IOException {
+    conf.setBoolean(OZONE_SECURITY_ENABLED_KEY, true);
+    String host = KerberosUtil.getLocalHostName();
+    String realm = miniKdc.getRealm();
+    curUser = UserGroupInformation.getCurrentUser()
+        .getUserName();
+    conf.set(CommonConfigurationKeysPublic.HADOOP_SECURITY_AUTHENTICATION,
+        "kerberos");
+    conf.set(OZONE_ADMINISTRATORS, curUser);
+
+    conf.set(ScmConfigKeys.OZONE_SCM_KERBEROS_PRINCIPAL_KEY,
+        "scm/" + host + "@" + realm);
+    conf.set(ScmConfigKeys.SCM_WEB_AUTHENTICATION_KERBEROS_PRINCIPAL_KEY,
+        "HTTP_SCM/" + host + "@" + realm);
+
+    scmKeytab = new File(workDir, "scm.keytab");
+    spnegoKeytab = new File(workDir, "http.keytab");
+
+    conf.set(ScmConfigKeys.OZONE_SCM_KERBEROS_KEYTAB_FILE_KEY,
+        scmKeytab.getAbsolutePath());
+    conf.set(ScmConfigKeys.SCM_WEB_AUTHENTICATION_KERBEROS_KEYTAB_FILE_KEY,
+        spnegoKeytab.getAbsolutePath());
+
+  }
+
+  @Test
+  public void testSecureScmStartupSuccess() throws Exception {
+    final String path = GenericTestUtils
+        .getTempPath(UUID.randomUUID().toString());
+    Path scmPath = Paths.get(path, "scm-meta");
+    conf.set(OzoneConfigKeys.OZONE_METADATA_DIRS, scmPath.toString());
+    conf.setBoolean(OzoneConfigKeys.OZONE_ENABLED, true);
+    SCMStorage scmStore = new SCMStorage(conf);
+    String clusterId = UUID.randomUUID().toString();
+    String scmId = UUID.randomUUID().toString();
+    scmStore.setClusterId(clusterId);
+    scmStore.setScmId(scmId);
+    // writes the version file properties
+    scmStore.initialize();
+    StorageContainerManager scm = StorageContainerManager.createSCM(null, conf);
+    //Reads the SCM Info from SCM instance
+    ScmInfo scmInfo = scm.getClientProtocolServer().getScmInfo();
+    Assert.assertEquals(clusterId, scmInfo.getClusterId());
+    Assert.assertEquals(scmId, scmInfo.getScmId());
+  }
+
+  @Test
+  public void testSecureScmStartupFailure() throws Exception {
+    final String path = GenericTestUtils
+        .getTempPath(UUID.randomUUID().toString());
+    Path scmPath = Paths.get(path, "scm-meta");
+
+    OzoneConfiguration conf = new OzoneConfiguration();
+    conf.setBoolean(OZONE_SECURITY_ENABLED_KEY, true);
+    conf.set(OzoneConfigKeys.OZONE_METADATA_DIRS, scmPath.toString());
+    conf.setBoolean(OzoneConfigKeys.OZONE_ENABLED, true);
+    conf.set(ScmConfigKeys.OZONE_SCM_KERBEROS_PRINCIPAL_KEY,
+        "scm@" + miniKdc.getRealm());
+    conf.set(CommonConfigurationKeysPublic.HADOOP_SECURITY_AUTHENTICATION,
+        "kerberos");
+
+    SCMStorage scmStore = new SCMStorage(conf);
+    String clusterId = UUID.randomUUID().toString();
+    String scmId = UUID.randomUUID().toString();
+    scmStore.setClusterId(clusterId);
+    scmStore.setScmId(scmId);
+    // writes the version file properties
+    scmStore.initialize();
+    LambdaTestUtils.intercept(IOException.class,
+        "Running in secure mode, but config doesn't have a keytab",
+        () -> {
+          StorageContainerManager.createSCM(null, conf);
+        });
+
+    conf.set(ScmConfigKeys.OZONE_SCM_KERBEROS_PRINCIPAL_KEY,
+        "scm/_HOST@EXAMPLE.com");
+    conf.set(ScmConfigKeys.OZONE_SCM_KERBEROS_KEYTAB_FILE_KEY,
+        "/etc/security/keytabs/scm.keytab");
+
+    LambdaTestUtils.intercept(KerberosAuthException.class, "failure "
+            + "to login: for principal:",
+        () -> {
+          StorageContainerManager.createSCM(null, conf);
+        });
+    conf.set(CommonConfigurationKeysPublic.HADOOP_SECURITY_AUTHENTICATION,
+        "OAuth2");
+
+    LambdaTestUtils.intercept(IllegalArgumentException.class, "Invalid"
+            + " attribute value for hadoop.security.authentication of OAuth2",
+        () -> {
+          StorageContainerManager.createSCM(null, conf);
+        });
+
+    conf.set(CommonConfigurationKeysPublic.HADOOP_SECURITY_AUTHENTICATION,
+        "KERBEROS_SSL");
+    LambdaTestUtils.intercept(AuthenticationException.class,
+        "KERBEROS_SSL authentication method not supported.",
+        () -> {
+          StorageContainerManager.createSCM(null, conf);
+        });
+
+  }
+
+}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/9dc72d12/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestStorageContainerManager.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestStorageContainerManager.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestStorageContainerManager.java
index 94ab6c8..301a0c3 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestStorageContainerManager.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestStorageContainerManager.java
@@ -68,8 +68,10 @@ import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;
 import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfo;
 import org.apache.hadoop.ozone.protocol.commands.DeleteBlocksCommand;
 import org.apache.hadoop.ozone.protocol.commands.SCMCommand;
+import org.apache.hadoop.security.authentication.client.AuthenticationException;
 import org.apache.hadoop.test.GenericTestUtils;
 import org.apache.hadoop.test.LambdaTestUtils;
+
 import org.junit.Assert;
 import org.junit.Rule;
 import org.junit.Test;
@@ -434,7 +436,8 @@ public class TestStorageContainerManager {
   }
 
   @Test
-  public void testSCMInitializationFailure() throws IOException {
+  public void testSCMInitializationFailure()
+      throws IOException, AuthenticationException {
     OzoneConfiguration conf = new OzoneConfiguration();
     final String path =
         GenericTestUtils.getTempPath(UUID.randomUUID().toString());


---------------------------------------------------------------------
To unsubscribe, e-mail: common-commits-unsubscribe@hadoop.apache.org
For additional commands, e-mail: common-commits-help@hadoop.apache.org


[36/50] [abbrv] hadoop git commit: HADOOP-15756. [JDK10] Migrate from sun.net.util.IPAddressUtil to the replacement. Contributed by Akira Ajisaka.

Posted by xy...@apache.org.
HADOOP-15756. [JDK10] Migrate from sun.net.util.IPAddressUtil to the replacement. Contributed by Akira Ajisaka.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/3da94a36
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/3da94a36
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/3da94a36

Branch: refs/heads/HDDS-4
Commit: 3da94a36e21a315c09ec7edb7702820fe2b524f9
Parents: 646874c
Author: Ewan Higgs <ew...@wdc.com>
Authored: Thu Sep 20 14:53:21 2018 +0200
Committer: Ewan Higgs <ew...@wdc.com>
Committed: Thu Sep 20 14:53:21 2018 +0200

----------------------------------------------------------------------
 .../org/apache/hadoop/security/SecurityUtil.java     | 15 ++++++---------
 1 file changed, 6 insertions(+), 9 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/3da94a36/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/SecurityUtil.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/SecurityUtil.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/SecurityUtil.java
index 5f8cb29..0de334a 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/SecurityUtil.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/SecurityUtil.java
@@ -54,9 +54,9 @@ import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 //this will need to be replaced someday when there is a suitable replacement
 import sun.net.dns.ResolverConfiguration;
-import sun.net.util.IPAddressUtil;
 
 import com.google.common.annotations.VisibleForTesting;
+import com.google.common.net.InetAddresses;
 
 /**
  * Security Utils.
@@ -604,14 +604,11 @@ public final class SecurityUtil {
     public InetAddress getByName(String host) throws UnknownHostException {
       InetAddress addr = null;
 
-      if (IPAddressUtil.isIPv4LiteralAddress(host)) {
-        // use ipv4 address as-is
-        byte[] ip = IPAddressUtil.textToNumericFormatV4(host);
-        addr = InetAddress.getByAddress(host, ip);
-      } else if (IPAddressUtil.isIPv6LiteralAddress(host)) {
-        // use ipv6 address as-is
-        byte[] ip = IPAddressUtil.textToNumericFormatV6(host);
-        addr = InetAddress.getByAddress(host, ip);
+      if (InetAddresses.isInetAddress(host)) {
+        // valid ip address. use it as-is
+        addr = InetAddresses.forString(host);
+        // set hostname
+        addr = InetAddress.getByAddress(host, addr.getAddress());
       } else if (host.endsWith(".")) {
         // a rooted host ends with a dot, ex. "host."
         // rooted hosts never use the search path, so only try an exact lookup
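
For context, a standalone sketch of the replacement pattern: Guava's
InetAddresses handles IPv4 and IPv6 literals uniformly, so the two sun.net
branches collapse into one. The class and method names here are
illustrative:

    import com.google.common.net.InetAddresses;

    import java.net.InetAddress;
    import java.net.UnknownHostException;

    final class LiteralResolver {
      // Resolve a host string without touching DNS when it is already an
      // IP literal, re-attaching the original string as the hostname,
      // exactly as the patched SecurityUtil does.
      static InetAddress resolveLiteral(String host)
          throws UnknownHostException {
        if (InetAddresses.isInetAddress(host)) {
          InetAddress addr = InetAddresses.forString(host);
          return InetAddress.getByAddress(host, addr.getAddress());
        }
        throw new UnknownHostException(host + " is not an IP literal");
      }
    }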


---------------------------------------------------------------------
To unsubscribe, e-mail: common-commits-unsubscribe@hadoop.apache.org
For additional commands, e-mail: common-commits-help@hadoop.apache.org


[34/50] [abbrv] hadoop git commit: HADOOP-15736. Trash : Negative Value For Deletion Interval Leads To Abnormal Behaviour. Contributed by Ayush Saxena.

Posted by xy...@apache.org.
HADOOP-15736. Trash : Negative Value For Deletion Interval Leads To Abnormal Behaviour. Contributed by Ayush Saxena.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/7ad27e97
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/7ad27e97
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/7ad27e97

Branch: refs/heads/HDDS-4
Commit: 7ad27e97f05b13b33fdcef9cb63ace9c1728bfb5
Parents: 6fc293f
Author: Vinayakumar B <vi...@apache.org>
Authored: Thu Sep 20 09:31:35 2018 +0530
Committer: Vinayakumar B <vi...@apache.org>
Committed: Thu Sep 20 09:31:35 2018 +0530

----------------------------------------------------------------------
 .../main/java/org/apache/hadoop/fs/TrashPolicyDefault.java   | 8 +++++++-
 .../src/test/java/org/apache/hadoop/fs/TestTrash.java        | 6 ++++++
 2 files changed, 13 insertions(+), 1 deletion(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/7ad27e97/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/TrashPolicyDefault.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/TrashPolicyDefault.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/TrashPolicyDefault.java
index 6e101a2..39d5e73 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/TrashPolicyDefault.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/TrashPolicyDefault.java
@@ -101,6 +101,12 @@ public class TrashPolicyDefault extends TrashPolicy {
     this.emptierInterval = (long)(conf.getFloat(
         FS_TRASH_CHECKPOINT_INTERVAL_KEY, FS_TRASH_CHECKPOINT_INTERVAL_DEFAULT)
         * MSECS_PER_MINUTE);
+    if (deletionInterval < 0) {
+      LOG.warn("Invalid value {} for deletion interval;"
+          + " the deletion interval cannot be negative."
+          + " Changing to default value 0.", deletionInterval);
+      this.deletionInterval = 0;
+    }
   }
 
   private Path makeTrashRelativePath(Path basePath, Path rmFilePath) {
@@ -109,7 +115,7 @@ public class TrashPolicyDefault extends TrashPolicy {
 
   @Override
   public boolean isEnabled() {
-    return deletionInterval != 0;
+    return deletionInterval > 0;
   }
 
   @SuppressWarnings("deprecation")

http://git-wip-us.apache.org/repos/asf/hadoop/blob/7ad27e97/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestTrash.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestTrash.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestTrash.java
index 568821b..04f56fb 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestTrash.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestTrash.java
@@ -132,6 +132,9 @@ public class TestTrash {
     conf.setLong(FS_TRASH_INTERVAL_KEY, 0); // disabled
     assertFalse(new Trash(conf).isEnabled());
 
+    conf.setLong(FS_TRASH_INTERVAL_KEY, -1); // disabled
+    assertFalse(new Trash(conf).isEnabled());
+
     conf.setLong(FS_TRASH_INTERVAL_KEY, 10); // 10 minute
     assertTrue(new Trash(conf).isEnabled());
 
@@ -526,6 +529,9 @@ public class TestTrash {
     conf.setLong(FS_TRASH_INTERVAL_KEY, 0); // disabled
     assertFalse(new Trash(conf).isEnabled());
 
+    conf.setLong(FS_TRASH_INTERVAL_KEY, -1); // disabled
+    assertFalse(new Trash(conf).isEnabled());
+
     conf.setLong(FS_TRASH_INTERVAL_KEY, 10); // 10 minute
     assertTrue(new Trash(conf).isEnabled());
 

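To make the new contract concrete, here is a minimal, self-contained sketch of the resulting behaviour (the class name is illustrative; the API calls mirror the test above):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.Trash;

    public class TrashIntervalSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        // A negative fs.trash.interval is now coerced to 0, so trash is
        // simply reported as disabled instead of behaving abnormally.
        conf.setLong("fs.trash.interval", -1);
        System.out.println(new Trash(conf).isEnabled()); // prints false
      }
    }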



[18/50] [abbrv] hadoop git commit: HDDS-476. Add Pipeline reports to make pipeline active on SCM restart. Contributed by Mukul Kumar Singh.

Posted by xy...@apache.org.
HDDS-476. Add Pipeline reports to make pipeline active on SCM restart.
Contributed by Mukul Kumar Singh.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/c0956ee2
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/c0956ee2
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/c0956ee2

Branch: refs/heads/HDDS-4
Commit: c0956ee2a879d1f82938dd2b8bab79b09ae32eac
Parents: 0712537e
Author: Nanda kumar <na...@apache.org>
Authored: Wed Sep 19 18:49:13 2018 +0530
Committer: Nanda kumar <na...@apache.org>
Committed: Wed Sep 19 18:52:08 2018 +0530

----------------------------------------------------------------------
 .../hadoop/hdds/scm/XceiverClientRatis.java     |   2 +-
 .../org/apache/hadoop/hdds/HddsConfigKeys.java  |   5 +
 .../scm/container/common/helpers/Pipeline.java  |  22 ++-
 .../container/common/helpers/PipelineID.java    |  13 +-
 .../common/src/main/resources/ozone-default.xml |   8 +
 .../apache/hadoop/hdds/scm/HddsServerUtil.java  |  21 +++
 .../common/report/PipelineReportPublisher.java  |  73 +++++++++
 .../common/report/ReportPublisherFactory.java   |   4 +
 .../states/endpoint/RegisterEndpointTask.java   |   8 +-
 .../transport/server/XceiverServerGrpc.java     |  16 ++
 .../transport/server/XceiverServerSpi.java      |   9 ++
 .../server/ratis/XceiverServerRatis.java        |  63 ++++----
 .../container/ozoneimpl/OzoneContainer.java     |  12 ++
 .../StorageContainerDatanodeProtocol.java       |  10 +-
 .../protocol/StorageContainerNodeProtocol.java  |   6 +-
 ...rDatanodeProtocolClientSideTranslatorPB.java |   6 +-
 ...rDatanodeProtocolServerSideTranslatorPB.java |   5 +-
 .../StorageContainerDatanodeProtocol.proto      |  10 ++
 .../ozone/container/common/ScmTestMock.java     |   8 +-
 .../hdds/scm/container/ContainerMapping.java    |  19 ---
 .../scm/container/ContainerReportHandler.java   |   6 +-
 .../hadoop/hdds/scm/container/Mapping.java      |  15 +-
 .../hadoop/hdds/scm/events/SCMEvents.java       |  15 +-
 .../hadoop/hdds/scm/node/SCMNodeManager.java    |   5 +-
 .../hadoop/hdds/scm/node/StaleNodeHandler.java  |  19 +--
 .../hdds/scm/node/states/Node2ContainerMap.java | 123 ++------------
 .../hdds/scm/node/states/Node2ObjectsMap.java   | 162 +++++++++++++++++++
 .../hdds/scm/node/states/ReportResult.java      | 105 ++++++------
 .../hdds/scm/pipelines/Node2PipelineMap.java    |  45 +-----
 .../scm/pipelines/PipelineCloseHandler.java     |  24 ++-
 .../hdds/scm/pipelines/PipelineManager.java     |  52 +++---
 .../scm/pipelines/PipelineReportHandler.java    |  59 +++++++
 .../hdds/scm/pipelines/PipelineSelector.java    | 103 ++++++------
 .../scm/pipelines/ratis/RatisManagerImpl.java   |  41 ++---
 .../standalone/StandaloneManagerImpl.java       |  44 ++---
 .../server/SCMDatanodeHeartbeatDispatcher.java  |  23 +++
 .../scm/server/SCMDatanodeProtocolServer.java   |  16 +-
 .../scm/server/StorageContainerManager.java     |  11 +-
 .../org/apache/hadoop/hdds/scm/TestUtils.java   |  10 +-
 .../hdds/scm/container/MockNodeManager.java     |   4 +-
 .../hadoop/hdds/scm/node/TestNodeManager.java   |   6 +-
 .../scm/node/states/TestNode2ContainerMap.java  |  28 ++--
 .../ozone/container/common/TestEndPoint.java    |   5 +-
 .../testutils/ReplicationNodeManagerMock.java   |   5 +-
 .../hdds/scm/pipeline/TestNode2PipelineMap.java |   2 +-
 .../hdds/scm/pipeline/TestPipelineClose.java    |   2 +-
 .../hdds/scm/pipeline/TestSCMRestart.java       |  20 ++-
 .../apache/hadoop/ozone/RatisTestHelper.java    |   9 ++
 .../hadoop/ozone/web/client/TestKeys.java       |   2 +-
 .../hadoop/ozone/web/client/TestKeysRatis.java  |   2 -
 hadoop-project/pom.xml                          |   2 +-
 51 files changed, 809 insertions(+), 476 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/c0956ee2/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientRatis.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientRatis.java b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientRatis.java
index 946abfb..4c4de7f 100644
--- a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientRatis.java
+++ b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientRatis.java
@@ -110,7 +110,7 @@ public final class XceiverClientRatis extends XceiverClientSpi {
     final RaftGroup group = RatisHelper.newRaftGroup(pipeline);
     LOG.debug("destroying pipeline:{} with {}", pipeline.getId(), group);
     callRatisRpc(pipeline.getMachines(), (raftClient, peer) -> raftClient
-        .groupRemove(group.getGroupId(), peer.getId()));
+        .groupRemove(group.getGroupId(), true, peer.getId()));
   }
 
   /**

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c0956ee2/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/HddsConfigKeys.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/HddsConfigKeys.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/HddsConfigKeys.java
index 492be82..856d113 100644
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/HddsConfigKeys.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/HddsConfigKeys.java
@@ -46,6 +46,11 @@ public final class HddsConfigKeys {
   public static final String HDDS_CONTAINER_REPORT_INTERVAL_DEFAULT =
       "60s";
 
+  public static final String HDDS_PIPELINE_REPORT_INTERVAL =
+          "hdds.pipeline.report.interval";
+  public static final String HDDS_PIPELINE_REPORT_INTERVAL_DEFAULT =
+          "60s";
+
   public static final String HDDS_COMMAND_STATUS_REPORT_INTERVAL =
       "hdds.command.status.report.interval";
   public static final String HDDS_COMMAND_STATUS_REPORT_INTERVAL_DEFAULT =

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c0956ee2/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/container/common/helpers/Pipeline.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/container/common/helpers/Pipeline.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/container/common/helpers/Pipeline.java
index ef148e5..777efa7 100644
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/container/common/helpers/Pipeline.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/container/common/helpers/Pipeline.java
@@ -34,7 +34,7 @@ import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
 import java.io.IOException;
 import java.util.ArrayList;
 import java.util.Map;
-import java.util.TreeMap;
+import java.util.concurrent.ConcurrentHashMap;
 import java.util.List;
 
 /**
@@ -83,7 +83,7 @@ public class Pipeline {
     this.type = replicationType;
     this.factor = replicationFactor;
     this.id = id;
-    datanodes = new TreeMap<>();
+    datanodes = new ConcurrentHashMap<>();
   }
 
   @Override
@@ -151,9 +151,21 @@ public class Pipeline {
     return getDatanodes().get(leaderID);
   }
 
-  public void addMember(DatanodeDetails datanodeDetails) {
-    datanodes.put(datanodeDetails.getUuid().toString(),
-        datanodeDetails);
+  /**
+   * Adds a datanode to the pipeline.
+   * @param datanodeDetails datanode to be added.
+   * @return true if the datanode was not already present, false otherwise
+   */
+  public boolean addMember(DatanodeDetails datanodeDetails) {
+    return datanodes.put(datanodeDetails.getUuid().toString(),
+        datanodeDetails) == null;
+
+  }
+
+  public void resetPipeline() {
+    // reset datanodes in pipeline and learn about them through
+    // pipeline reports on SCM restart
+    datanodes.clear();
   }
 
   public Map<String, DatanodeDetails> getDatanodes() {

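The boolean return value makes repeated pipeline reports idempotent for callers. A minimal standalone sketch of the same put-returns-null contract (plain JDK types; the real Pipeline object is not constructed here):

    import java.util.UUID;
    import java.util.concurrent.ConcurrentHashMap;
    import java.util.concurrent.ConcurrentMap;

    public class AddMemberSketch {
      // Mirrors the new Pipeline.addMember() contract: ConcurrentMap.put()
      // returns null only when the key was absent, so the boolean signals
      // "datanode newly added to the pipeline".
      private static final ConcurrentMap<String, UUID> DATANODES =
          new ConcurrentHashMap<>();

      static boolean addMember(UUID dnId) {
        return DATANODES.put(dnId.toString(), dnId) == null;
      }

      public static void main(String[] args) {
        UUID dn = UUID.randomUUID();
        System.out.println(addMember(dn)); // true  - first report
        System.out.println(addMember(dn)); // false - duplicate report
      }
    }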
http://git-wip-us.apache.org/repos/asf/hadoop/blob/c0956ee2/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/container/common/helpers/PipelineID.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/container/common/helpers/PipelineID.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/container/common/helpers/PipelineID.java
index 473ebc5..6e27a71 100644
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/container/common/helpers/PipelineID.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/container/common/helpers/PipelineID.java
@@ -28,7 +28,7 @@ import java.util.UUID;
  * in Ratis as RaftGroupId, GroupID is used by the datanodes to initialize
  * the ratis group they are part of.
  */
-public class PipelineID {
+public final class PipelineID implements Comparable<PipelineID> {
 
   private UUID id;
   private RaftGroupId groupId;
@@ -42,8 +42,12 @@ public class PipelineID {
     return new PipelineID(UUID.randomUUID());
   }
 
+  public static PipelineID valueOf(UUID id) {
+    return new PipelineID(id);
+  }
+
   public static PipelineID valueOf(RaftGroupId groupId) {
-    return new PipelineID(groupId.getUuid());
+    return valueOf(groupId.getUuid());
   }
 
   public RaftGroupId getRaftGroupID() {
@@ -68,6 +72,11 @@ public class PipelineID {
   }
 
   @Override
+  public int compareTo(PipelineID o) {
+    return this.id.compareTo(o.id);
+  }
+
+  @Override
   public boolean equals(Object o) {
     if (this == o) {
       return true;

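Since PipelineID is now Comparable, it can be used directly in sorted collections. A small sketch (valueOf(UUID) is the factory added above):

    import java.util.TreeSet;
    import java.util.UUID;
    import org.apache.hadoop.hdds.scm.container.common.helpers.PipelineID;

    public class PipelineIdOrderingSketch {
      public static void main(String[] args) {
        // compareTo() delegates to the underlying UUID, so PipelineIDs can
        // now live in TreeSet/TreeMap without a custom Comparator.
        TreeSet<PipelineID> ids = new TreeSet<>();
        ids.add(PipelineID.valueOf(UUID.randomUUID()));
        ids.add(PipelineID.valueOf(UUID.randomUUID()));
        System.out.println(ids.first());
      }
    }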
http://git-wip-us.apache.org/repos/asf/hadoop/blob/c0956ee2/hadoop-hdds/common/src/main/resources/ozone-default.xml
----------------------------------------------------------------------
diff --git a/hadoop-hdds/common/src/main/resources/ozone-default.xml b/hadoop-hdds/common/src/main/resources/ozone-default.xml
index a74124e..f7681e8 100644
--- a/hadoop-hdds/common/src/main/resources/ozone-default.xml
+++ b/hadoop-hdds/common/src/main/resources/ozone-default.xml
@@ -224,6 +224,14 @@
       received from SCM to SCM. Unit could be defined with postfix
       (ns,ms,s,m,h,d)</description>
   </property>
+  <property>
+    <name>hdds.pipeline.report.interval</name>
+    <value>60000ms</value>
+    <tag>OZONE, PIPELINE, MANAGEMENT</tag>
+    <description>Time interval of the datanode to send pipeline report. Each
+      datanode periodically send pipeline report to SCM. Unit could be
+      defined with postfix (ns,ms,s,m,h,d)</description>
+  </property>
   <!--Ozone Settings-->
   <property>
     <name>ozone.administrators</name>

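The new property is read with Configuration.getTimeDuration(), which accepts the suffixed values described above. A minimal sketch (key and default copied from the patch):

    import java.util.concurrent.TimeUnit;
    import org.apache.hadoop.conf.Configuration;

    public class PipelineReportIntervalSketch {
      public static void main(String[] args) {
        Configuration conf = new Configuration();
        // Parses "60000ms", "60s", "1m", ... and falls back to the default.
        long intervalMs = conf.getTimeDuration(
            "hdds.pipeline.report.interval", "60s", TimeUnit.MILLISECONDS);
        System.out.println(intervalMs); // 60000 unless overridden
      }
    }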
http://git-wip-us.apache.org/repos/asf/hadoop/blob/c0956ee2/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/hdds/scm/HddsServerUtil.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/hdds/scm/HddsServerUtil.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/hdds/scm/HddsServerUtil.java
index 580d027..d505be3 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/hdds/scm/HddsServerUtil.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/hdds/scm/HddsServerUtil.java
@@ -18,12 +18,15 @@
 package org.apache.hadoop.hdds.scm;
 
 import com.google.common.base.Optional;
+import com.google.common.base.Preconditions;
+import com.google.common.base.Strings;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.net.NetUtils;
 import org.apache.hadoop.ozone.OzoneConfigKeys;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
+import java.io.File;
 import java.net.InetSocketAddress;
 import java.util.HashMap;
 import java.util.Map;
@@ -312,4 +315,22 @@ public final class HddsServerUtil {
     services.put(OZONE_SCM_SERVICE_ID, serviceInstances);
     return services;
   }
+
+  public static String getOzoneDatanodeRatisDirectory(Configuration conf) {
+    final String ratisDir = File.separator + "ratis";
+    String storageDir = conf.get(
+            OzoneConfigKeys.DFS_CONTAINER_RATIS_DATANODE_STORAGE_DIR);
+
+    if (Strings.isNullOrEmpty(storageDir)) {
+      storageDir = conf.get(OzoneConfigKeys
+              .OZONE_METADATA_DIRS);
+      Preconditions.checkNotNull(storageDir, "ozone.metadata.dirs " +
+              "cannot be null, Please check your configs.");
+      storageDir = storageDir.concat(ratisDir);
+      LOG.warn("Storage directory for Ratis is not configured." +
+               "Mapping Ratis storage under {}. It is a good idea " +
+               "to map this to an SSD disk.", storageDir);
+    }
+    return storageDir;
+  }
 }

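A sketch of the fallback behaviour of the new helper (the metadata path is illustrative; on a POSIX filesystem the result is that path plus "/ratis"):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hdds.scm.HddsServerUtil;

    public class RatisDirSketch {
      public static void main(String[] args) {
        Configuration conf = new Configuration();
        // With no explicit Ratis storage dir configured, the helper falls
        // back to <ozone.metadata.dirs>/ratis and logs the warning above.
        conf.set("ozone.metadata.dirs", "/tmp/ozone-meta"); // illustrative
        System.out.println(
            HddsServerUtil.getOzoneDatanodeRatisDirectory(conf));
        // prints /tmp/ozone-meta/ratis
      }
    }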
http://git-wip-us.apache.org/repos/asf/hadoop/blob/c0956ee2/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/report/PipelineReportPublisher.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/report/PipelineReportPublisher.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/report/PipelineReportPublisher.java
new file mode 100644
index 0000000..e7f4347
--- /dev/null
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/report/PipelineReportPublisher.java
@@ -0,0 +1,73 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with this
+ * work for additional information regarding copyright ownership.  The ASF
+ * licenses this file to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations under
+ * the License.
+ */
+
+package org.apache.hadoop.ozone.container.common.report;
+
+import com.google.common.base.Preconditions;
+import org.apache.commons.lang3.RandomUtils;
+import org.apache.hadoop.hdds.protocol.proto
+        .StorageContainerDatanodeProtocolProtos.PipelineReportsProto;
+import org.apache.hadoop.hdds.scm.HddsServerUtil;
+
+import java.util.concurrent.TimeUnit;
+
+import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_PIPELINE_REPORT_INTERVAL;
+import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_PIPELINE_REPORT_INTERVAL_DEFAULT;
+
+
+/**
+ * Publishes the pipeline report which will be sent to SCM as part of
+ * the heartbeat. A PipelineReport consists of the following information
+ * about each pipeline:
+ *   - pipelineID
+ *
+ */
+public class PipelineReportPublisher extends
+    ReportPublisher<PipelineReportsProto> {
+
+  private Long pipelineReportInterval = null;
+
+  @Override
+  protected long getReportFrequency() {
+    if (pipelineReportInterval == null) {
+      pipelineReportInterval = getConf().getTimeDuration(
+          HDDS_PIPELINE_REPORT_INTERVAL,
+          HDDS_PIPELINE_REPORT_INTERVAL_DEFAULT,
+          TimeUnit.MILLISECONDS);
+
+      long heartbeatFrequency = HddsServerUtil.getScmHeartbeatInterval(
+          getConf());
+
+      Preconditions.checkState(
+          heartbeatFrequency <= pipelineReportInterval,
+              HDDS_PIPELINE_REPORT_INTERVAL +
+              " cannot be configured lower than heartbeat frequency.");
+    }
+    // Add a random delay (0 to pipelineReportInterval) on top of the
+    // pipeline report interval so that the SCM is not overwhelmed by
+    // pipeline reports sent in sync.
+    return pipelineReportInterval + getRandomReportDelay();
+  }
+
+  private long getRandomReportDelay() {
+    return RandomUtils.nextLong(0, pipelineReportInterval);
+  }
+
+  @Override
+  protected PipelineReportsProto getReport() {
+    return getContext().getParent().getContainer().getPipelineReport();
+  }
+}

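The scheduling idea in getReportFrequency() above can be shown in isolation: spread the next report over [interval, 2 * interval) so datanodes do not report in lockstep. A standalone sketch using the same commons-lang3 helper:

    import org.apache.commons.lang3.RandomUtils;

    public class ReportJitterSketch {
      public static void main(String[] args) {
        long intervalMs = 60_000L;
        // Same pattern as the publisher: base interval plus a random
        // delay in [0, interval), i.e. a value in [60s, 120s) here.
        long nextReportMs = intervalMs + RandomUtils.nextLong(0, intervalMs);
        System.out.println(nextReportMs);
      }
    }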
http://git-wip-us.apache.org/repos/asf/hadoop/blob/c0956ee2/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/report/ReportPublisherFactory.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/report/ReportPublisherFactory.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/report/ReportPublisherFactory.java
index ea89280..1c456a0 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/report/ReportPublisherFactory.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/report/ReportPublisherFactory.java
@@ -20,6 +20,8 @@ package org.apache.hadoop.ozone.container.common.report;
 import com.google.protobuf.GeneratedMessage;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hdds.protocol.proto.
+        StorageContainerDatanodeProtocolProtos.PipelineReportsProto;
+import org.apache.hadoop.hdds.protocol.proto.
     StorageContainerDatanodeProtocolProtos.CommandStatusReportsProto;
 import org.apache.hadoop.hdds.protocol.proto
     .StorageContainerDatanodeProtocolProtos.ContainerReportsProto;
@@ -53,6 +55,8 @@ public class ReportPublisherFactory {
         ContainerReportPublisher.class);
     report2publisher.put(CommandStatusReportsProto.class,
         CommandStatusReportPublisher.class);
+    report2publisher.put(PipelineReportsProto.class,
+            PipelineReportPublisher.class);
   }
 
   /**

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c0956ee2/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/states/endpoint/RegisterEndpointTask.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/states/endpoint/RegisterEndpointTask.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/states/endpoint/RegisterEndpointTask.java
index ccab095..690aa01 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/states/endpoint/RegisterEndpointTask.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/states/endpoint/RegisterEndpointTask.java
@@ -21,6 +21,8 @@ import com.google.common.base.Preconditions;
 import org.apache.commons.lang3.StringUtils;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hdds.protocol.DatanodeDetails;
+import org.apache.hadoop.hdds.protocol.proto
+        .StorageContainerDatanodeProtocolProtos.PipelineReportsProto;
 import org.apache.hadoop.ozone.container.common.statemachine
     .EndpointStateMachine;
 import org.apache.hadoop.hdds.protocol.proto
@@ -108,13 +110,15 @@ public final class RegisterEndpointTask implements
     rpcEndPoint.lock();
     try {
 
-      ContainerReportsProto contianerReport = datanodeContainerManager
+      ContainerReportsProto containerReport = datanodeContainerManager
           .getContainerReport();
       NodeReportProto nodeReport = datanodeContainerManager.getNodeReport();
+      PipelineReportsProto pipelineReportsProto =
+              datanodeContainerManager.getPipelineReport();
       // TODO : Add responses to the command Queue.
       SCMRegisteredResponseProto response = rpcEndPoint.getEndPoint()
           .register(datanodeDetails.getProtoBufMessage(), nodeReport,
-              contianerReport);
+                  containerReport, pipelineReportsProto);
       Preconditions.checkState(UUID.fromString(response.getDatanodeUUID())
               .equals(datanodeDetails.getUuid()),
           "Unexpected datanode ID in the response.");

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c0956ee2/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/XceiverServerGrpc.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/XceiverServerGrpc.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/XceiverServerGrpc.java
index 4a90144..83e742c 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/XceiverServerGrpc.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/XceiverServerGrpc.java
@@ -24,6 +24,9 @@ import org.apache.hadoop.hdds.protocol.DatanodeDetails;
 import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos
     .ContainerCommandRequestProto;
 import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
+import org.apache.hadoop.hdds.protocol.proto
+        .StorageContainerDatanodeProtocolProtos.PipelineReport;
+import org.apache.hadoop.hdds.scm.container.common.helpers.PipelineID;
 import org.apache.hadoop.ozone.OzoneConfigKeys;
 import org.apache.hadoop.ozone.container.common.interfaces.ContainerDispatcher;
 
@@ -38,6 +41,9 @@ import java.io.IOException;
 import java.net.InetSocketAddress;
 import java.net.ServerSocket;
 import java.net.SocketAddress;
+import java.util.Collections;
+import java.util.List;
+import java.util.UUID;
 
 /**
  * Creates a Grpc server endpoint that acts as the communication layer for
@@ -47,6 +53,7 @@ public final class XceiverServerGrpc implements XceiverServerSpi {
   private static final Logger
       LOG = LoggerFactory.getLogger(XceiverServerGrpc.class);
   private int port;
+  private UUID id;
   private Server server;
   private final ContainerDispatcher storageContainer;
 
@@ -59,6 +66,7 @@ public final class XceiverServerGrpc implements XceiverServerSpi {
       ContainerDispatcher dispatcher, BindableService... additionalServices) {
     Preconditions.checkNotNull(conf);
 
+    this.id = datanodeDetails.getUuid();
     this.port = conf.getInt(OzoneConfigKeys.DFS_CONTAINER_IPC_PORT,
         OzoneConfigKeys.DFS_CONTAINER_IPC_PORT_DEFAULT);
     // Get an available port on current node and
@@ -123,4 +131,12 @@ public final class XceiverServerGrpc implements XceiverServerSpi {
       HddsProtos.PipelineID pipelineID) {
     storageContainer.dispatch(request);
   }
+
+  @Override
+  public List<PipelineReport> getPipelineReport() {
+    return Collections.singletonList(
+            PipelineReport.newBuilder()
+                    .setPipelineID(PipelineID.valueOf(id).getProtobuf())
+                    .build());
+  }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c0956ee2/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/XceiverServerSpi.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/XceiverServerSpi.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/XceiverServerSpi.java
index 1863f6d..8c3fa5c 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/XceiverServerSpi.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/XceiverServerSpi.java
@@ -21,8 +21,11 @@ package org.apache.hadoop.ozone.container.common.transport.server;
 import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos
     .ContainerCommandRequestProto;
 import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
+import org.apache.hadoop.hdds.protocol.proto
+        .StorageContainerDatanodeProtocolProtos.PipelineReport;
 
 import java.io.IOException;
+import java.util.List;
 
 /** A server endpoint that acts as the communication layer for Ozone
  * containers. */
@@ -49,4 +52,10 @@ public interface XceiverServerSpi {
   void submitRequest(ContainerCommandRequestProto request,
       HddsProtos.PipelineID pipelineID)
       throws IOException;
+
+  /**
+   * Get pipeline report for the XceiverServer instance.
+   * @return list of reports, one for each pipeline.
+   */
+  List<PipelineReport> getPipelineReport();
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c0956ee2/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/XceiverServerRatis.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/XceiverServerRatis.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/XceiverServerRatis.java
index 24ea0b9..d88995b 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/XceiverServerRatis.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/XceiverServerRatis.java
@@ -19,17 +19,18 @@
 package org.apache.hadoop.ozone.container.common.transport.server.ratis;
 
 import com.google.common.annotations.VisibleForTesting;
-import com.google.common.base.Preconditions;
-import com.google.common.base.Strings;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hdds.protocol.DatanodeDetails;
 import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos
     .ContainerCommandRequestProto;
 import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
 import org.apache.hadoop.hdds.protocol.proto
+        .StorageContainerDatanodeProtocolProtos.PipelineReport;
+import org.apache.hadoop.hdds.protocol.proto
     .StorageContainerDatanodeProtocolProtos.ClosePipelineInfo;
 import org.apache.hadoop.hdds.protocol.proto
     .StorageContainerDatanodeProtocolProtos.PipelineAction;
+import org.apache.hadoop.hdds.scm.HddsServerUtil;
 import org.apache.hadoop.hdds.scm.container.common.helpers.PipelineID;
 import org.apache.hadoop.ozone.OzoneConfigKeys;
 import org.apache.hadoop.ozone.container.common.interfaces.ContainerDispatcher;
@@ -68,6 +69,8 @@ import java.io.IOException;
 import java.net.InetSocketAddress;
 import java.net.ServerSocket;
 import java.net.SocketAddress;
+import java.util.ArrayList;
+import java.util.List;
 import java.util.Objects;
 import java.util.UUID;
 import java.util.concurrent.ArrayBlockingQueue;
@@ -96,12 +99,12 @@ public final class XceiverServerRatis implements XceiverServerSpi {
   private final ReplicationLevel replicationLevel;
   private long nodeFailureTimeoutMs;
 
-  private XceiverServerRatis(DatanodeDetails dd, int port, String storageDir,
+  private XceiverServerRatis(DatanodeDetails dd, int port,
       ContainerDispatcher dispatcher, Configuration conf, StateContext context)
       throws IOException {
     Objects.requireNonNull(dd, "id == null");
     this.port = port;
-    RaftProperties serverProperties = newRaftProperties(conf, storageDir);
+    RaftProperties serverProperties = newRaftProperties(conf);
     final int numWriteChunkThreads = conf.getInt(
         OzoneConfigKeys.DFS_CONTAINER_RATIS_NUM_WRITE_CHUNK_THREADS_KEY,
         OzoneConfigKeys.DFS_CONTAINER_RATIS_NUM_WRITE_CHUNK_THREADS_DEFAULT);
@@ -118,15 +121,13 @@ public final class XceiverServerRatis implements XceiverServerSpi {
         new ContainerStateMachine(dispatcher, chunkExecutor, this);
     this.server = RaftServer.newBuilder()
         .setServerId(RatisHelper.toRaftPeerId(dd))
-        .setGroup(RatisHelper.emptyRaftGroup())
         .setProperties(serverProperties)
         .setStateMachine(stateMachine)
         .build();
   }
 
 
-  private RaftProperties newRaftProperties(Configuration conf,
-      String storageDir) {
+  private RaftProperties newRaftProperties(Configuration conf) {
     final RaftProperties properties = new RaftProperties();
 
     // Set rpc type
@@ -235,6 +236,7 @@ public final class XceiverServerRatis implements XceiverServerSpi {
     nodeFailureTimeoutMs = nodeFailureTimeout.toLong(TimeUnit.MILLISECONDS);
 
     // Set the ratis storage directory
+    String storageDir = HddsServerUtil.getOzoneDatanodeRatisDirectory(conf);
     RaftServerConfigKeys.setStorageDir(properties, new File(storageDir));
 
     // For grpc set the maximum message size
@@ -253,23 +255,9 @@ public final class XceiverServerRatis implements XceiverServerSpi {
   public static XceiverServerRatis newXceiverServerRatis(
       DatanodeDetails datanodeDetails, Configuration ozoneConf,
       ContainerDispatcher dispatcher, StateContext context) throws IOException {
-    final String ratisDir = File.separator + "ratis";
     int localPort = ozoneConf.getInt(
         OzoneConfigKeys.DFS_CONTAINER_RATIS_IPC_PORT,
         OzoneConfigKeys.DFS_CONTAINER_RATIS_IPC_PORT_DEFAULT);
-    String storageDir = ozoneConf.get(
-        OzoneConfigKeys.DFS_CONTAINER_RATIS_DATANODE_STORAGE_DIR);
-
-    if (Strings.isNullOrEmpty(storageDir)) {
-      storageDir = ozoneConf.get(OzoneConfigKeys
-          .OZONE_METADATA_DIRS);
-      Preconditions.checkNotNull(storageDir, "ozone.metadata.dirs " +
-          "cannot be null, Please check your configs.");
-      storageDir = storageDir.concat(ratisDir);
-      LOG.warn("Storage directory for Ratis is not configured. Mapping Ratis " +
-              "storage under {}. It is a good idea to map this to an SSD disk.",
-          storageDir);
-    }
 
     // Get an available port on current node and
     // use that as the container port
@@ -282,13 +270,6 @@ public final class XceiverServerRatis implements XceiverServerSpi {
         socket.bind(address);
         localPort = socket.getLocalPort();
         LOG.info("Found a free port for the server : {}", localPort);
-        // If we have random local ports configured this means that it
-        // probably running under MiniOzoneCluster. Ratis locks the storage
-        // directories, so we need to pass different local directory for each
-        // local instance. So we map ratis directories under datanode ID.
-        storageDir =
-            storageDir.concat(File.separator +
-                datanodeDetails.getUuidString());
       } catch (IOException e) {
         LOG.error("Unable find a random free port for the server, "
             + "fallback to use default port {}", localPort, e);
@@ -296,7 +277,7 @@ public final class XceiverServerRatis implements XceiverServerSpi {
     }
     datanodeDetails.setPort(
         DatanodeDetails.newPort(DatanodeDetails.Port.Name.RATIS, localPort));
-    return new XceiverServerRatis(datanodeDetails, localPort, storageDir,
+    return new XceiverServerRatis(datanodeDetails, localPort,
         dispatcher, ozoneConf, context);
   }
 
@@ -363,7 +344,7 @@ public final class XceiverServerRatis implements XceiverServerSpi {
   public void submitRequest(
       ContainerCommandRequestProto request, HddsProtos.PipelineID pipelineID)
       throws IOException {
-    // ReplicationLevel.ALL ensures the transactions corresponding to
+    // ReplicationLevel.MAJORITY ensures the transactions corresponding to
     // the request here are applied on all the raft servers.
     RaftClientRequest raftClientRequest =
         createRaftClientRequest(request, pipelineID,
@@ -427,13 +408,27 @@ public final class XceiverServerRatis implements XceiverServerSpi {
             + ".Reason : " + action.getClosePipeline().getDetailedReason());
   }
 
-  void handleNodeSlowness(
-      RaftGroup group, RoleInfoProto roleInfoProto) {
+  @Override
+  public List<PipelineReport> getPipelineReport() {
+    try {
+      Iterable<RaftGroupId> gids = server.getGroupIds();
+      List<PipelineReport> reports = new ArrayList<>();
+      for (RaftGroupId groupId : gids) {
+        reports.add(PipelineReport.newBuilder()
+                .setPipelineID(PipelineID.valueOf(groupId).getProtobuf())
+                .build());
+      }
+      return reports;
+    } catch (Exception e) {
+      return null;
+    }
+  }
+
+  void handleNodeSlowness(RaftGroup group, RoleInfoProto roleInfoProto) {
     handlePipelineFailure(group.getGroupId(), roleInfoProto);
   }
 
-  void handleNoLeader(
-      RaftGroup group, RoleInfoProto roleInfoProto) {
+  void handleNoLeader(RaftGroup group, RoleInfoProto roleInfoProto) {
     handlePipelineFailure(group.getGroupId(), roleInfoProto);
   }
 }
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c0956ee2/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ozoneimpl/OzoneContainer.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ozoneimpl/OzoneContainer.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ozoneimpl/OzoneContainer.java
index 72a5804..ebacf75 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ozoneimpl/OzoneContainer.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ozoneimpl/OzoneContainer.java
@@ -25,6 +25,8 @@ import org.apache.hadoop.hdds.protocol.DatanodeDetails;
 import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos;
 import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
 import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos;
+import org.apache.hadoop.hdds.protocol.proto
+        .StorageContainerDatanodeProtocolProtos.PipelineReportsProto;
 import org.apache.hadoop.ozone.container.common.impl.ContainerSet;
 import org.apache.hadoop.ozone.container.common.impl.HddsDispatcher;
 import org.apache.hadoop.ozone.container.common.interfaces.ContainerDispatcher;
@@ -164,6 +166,16 @@ public class OzoneContainer {
     return this.containerSet.getContainerReport();
   }
 
+  public PipelineReportsProto getPipelineReport() {
+    PipelineReportsProto.Builder pipelineReportsProto =
+            PipelineReportsProto.newBuilder();
+    for (XceiverServerSpi serverInstance : server) {
+      pipelineReportsProto
+              .addAllPipelineReport(serverInstance.getPipelineReport());
+    }
+    return pipelineReportsProto.build();
+  }
+
   /**
    * Submit ContainerRequest.
    * @param request

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c0956ee2/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/protocol/StorageContainerDatanodeProtocol.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/protocol/StorageContainerDatanodeProtocol.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/protocol/StorageContainerDatanodeProtocol.java
index a950a31..9296524 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/protocol/StorageContainerDatanodeProtocol.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/protocol/StorageContainerDatanodeProtocol.java
@@ -19,6 +19,8 @@ package org.apache.hadoop.ozone.protocol;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.hdds.protocol.proto.HddsProtos.DatanodeDetailsProto;
 import org.apache.hadoop.hdds.protocol.proto
+        .StorageContainerDatanodeProtocolProtos.PipelineReportsProto;
+import org.apache.hadoop.hdds.protocol.proto
     .StorageContainerDatanodeProtocolProtos.SCMHeartbeatRequestProto;
 import org.apache.hadoop.hdds.protocol.proto
     .StorageContainerDatanodeProtocolProtos.ContainerBlocksDeletionACKProto;
@@ -69,9 +71,11 @@ public interface StorageContainerDatanodeProtocol {
    * @param containerReportsRequestProto - Container Reports.
    * @return SCM Command.
    */
-  SCMRegisteredResponseProto register(DatanodeDetailsProto datanodeDetails,
-      NodeReportProto nodeReport, ContainerReportsProto
-      containerReportsRequestProto) throws IOException;
+  SCMRegisteredResponseProto register(
+          DatanodeDetailsProto datanodeDetails,
+          NodeReportProto nodeReport,
+          ContainerReportsProto containerReportsRequestProto,
+          PipelineReportsProto pipelineReports) throws IOException;
 
   /**
    * Used by datanode to send block deletion ACK to SCM.

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c0956ee2/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/protocol/StorageContainerNodeProtocol.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/protocol/StorageContainerNodeProtocol.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/protocol/StorageContainerNodeProtocol.java
index c9ef43f..b3c3eb3 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/protocol/StorageContainerNodeProtocol.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/protocol/StorageContainerNodeProtocol.java
@@ -20,6 +20,8 @@ package org.apache.hadoop.ozone.protocol;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.hdds.protocol.DatanodeDetails;
 import org.apache.hadoop.hdds.protocol.proto
+        .StorageContainerDatanodeProtocolProtos.PipelineReportsProto;
+import org.apache.hadoop.hdds.protocol.proto
     .StorageContainerDatanodeProtocolProtos.NodeReportProto;
 import org.apache.hadoop.hdds.protocol.proto
     .StorageContainerDatanodeProtocolProtos.SCMVersionRequestProto;
@@ -51,10 +53,12 @@ public interface StorageContainerNodeProtocol {
    * Register the node if the node finds that it is not registered with any SCM.
    * @param datanodeDetails DatanodeDetails
    * @param nodeReport NodeReportProto
+   * @param pipelineReport PipelineReportsProto
    * @return  SCMHeartbeatResponseProto
    */
   RegisteredCommand register(DatanodeDetails datanodeDetails,
-                             NodeReportProto nodeReport);
+                             NodeReportProto nodeReport,
+                             PipelineReportsProto pipelineReport);
 
   /**
    * Send heartbeat to indicate the datanode is alive and doing well.

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c0956ee2/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/protocolPB/StorageContainerDatanodeProtocolClientSideTranslatorPB.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/protocolPB/StorageContainerDatanodeProtocolClientSideTranslatorPB.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/protocolPB/StorageContainerDatanodeProtocolClientSideTranslatorPB.java
index 40fe189..b9cf6f9 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/protocolPB/StorageContainerDatanodeProtocolClientSideTranslatorPB.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/protocolPB/StorageContainerDatanodeProtocolClientSideTranslatorPB.java
@@ -20,6 +20,8 @@ import com.google.protobuf.RpcController;
 import com.google.protobuf.ServiceException;
 import org.apache.hadoop.hdds.protocol.proto.HddsProtos.DatanodeDetailsProto;
 import org.apache.hadoop.hdds.protocol.proto
+        .StorageContainerDatanodeProtocolProtos.PipelineReportsProto;
+import org.apache.hadoop.hdds.protocol.proto
     .StorageContainerDatanodeProtocolProtos.ContainerReportsProto;
 import org.apache.hadoop.hdds.protocol.proto
     .StorageContainerDatanodeProtocolProtos.ContainerBlocksDeletionACKProto;
@@ -149,12 +151,14 @@ public class StorageContainerDatanodeProtocolClientSideTranslatorPB
   @Override
   public SCMRegisteredResponseProto register(
       DatanodeDetailsProto datanodeDetailsProto, NodeReportProto nodeReport,
-      ContainerReportsProto containerReportsRequestProto)
+      ContainerReportsProto containerReportsRequestProto,
+      PipelineReportsProto pipelineReportsProto)
       throws IOException {
     SCMRegisterRequestProto.Builder req =
         SCMRegisterRequestProto.newBuilder();
     req.setDatanodeDetails(datanodeDetailsProto);
     req.setContainerReport(containerReportsRequestProto);
+    req.setPipelineReports(pipelineReportsProto);
     req.setNodeReport(nodeReport);
     final SCMRegisteredResponseProto response;
     try {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c0956ee2/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/protocolPB/StorageContainerDatanodeProtocolServerSideTranslatorPB.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/protocolPB/StorageContainerDatanodeProtocolServerSideTranslatorPB.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/protocolPB/StorageContainerDatanodeProtocolServerSideTranslatorPB.java
index 7e8bd8a..ed01822 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/protocolPB/StorageContainerDatanodeProtocolServerSideTranslatorPB.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/protocolPB/StorageContainerDatanodeProtocolServerSideTranslatorPB.java
@@ -19,6 +19,8 @@ package org.apache.hadoop.ozone.protocolPB;
 import com.google.protobuf.RpcController;
 import com.google.protobuf.ServiceException;
 import org.apache.hadoop.hdds.protocol.proto
+    .StorageContainerDatanodeProtocolProtos.PipelineReportsProto;
+import org.apache.hadoop.hdds.protocol.proto
     .StorageContainerDatanodeProtocolProtos.NodeReportProto;
 import org.apache.hadoop.hdds.protocol.proto
     .StorageContainerDatanodeProtocolProtos.SCMRegisterRequestProto;
@@ -76,8 +78,9 @@ public class StorageContainerDatanodeProtocolServerSideTranslatorPB
       ContainerReportsProto containerRequestProto = request
           .getContainerReport();
       NodeReportProto dnNodeReport = request.getNodeReport();
+      PipelineReportsProto pipelineReport = request.getPipelineReports();
       return impl.register(request.getDatanodeDetails(), dnNodeReport,
-          containerRequestProto);
+          containerRequestProto, pipelineReport);
     } catch (IOException e) {
       throw new ServiceException(e);
     }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c0956ee2/hadoop-hdds/container-service/src/main/proto/StorageContainerDatanodeProtocol.proto
----------------------------------------------------------------------
diff --git a/hadoop-hdds/container-service/src/main/proto/StorageContainerDatanodeProtocol.proto b/hadoop-hdds/container-service/src/main/proto/StorageContainerDatanodeProtocol.proto
index 0a69343..78758cb 100644
--- a/hadoop-hdds/container-service/src/main/proto/StorageContainerDatanodeProtocol.proto
+++ b/hadoop-hdds/container-service/src/main/proto/StorageContainerDatanodeProtocol.proto
@@ -52,6 +52,7 @@ message SCMRegisterRequestProto {
   required DatanodeDetailsProto datanodeDetails = 1;
   required NodeReportProto nodeReport = 2;
   required ContainerReportsProto containerReport = 3;
+  required PipelineReportsProto pipelineReports = 4;
 }
 
 /**
@@ -82,6 +83,7 @@ message SCMHeartbeatRequestProto {
   optional CommandStatusReportsProto commandStatusReport = 4;
   optional ContainerActionsProto containerActions = 5;
   optional PipelineActionsProto pipelineActions = 6;
+  optional PipelineReportsProto pipelineReports = 7;
 }
 
 /*
@@ -163,6 +165,14 @@ message ContainerAction {
   optional Reason reason = 3;
 }
 
+message PipelineReport {
+  required PipelineID pipelineID = 1;
+}
+
+message PipelineReportsProto {
+  repeated PipelineReport pipelineReport = 1;
+}
+
 message PipelineActionsProto {
   repeated PipelineAction pipelineActions = 1;
 }

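For reference, building one of these messages from Java uses the generated builders already exercised elsewhere in this patch (setPipelineID, addPipelineReport/addAllPipelineReport). A minimal sketch:

    import java.util.UUID;
    import org.apache.hadoop.hdds.protocol.proto
            .StorageContainerDatanodeProtocolProtos.PipelineReport;
    import org.apache.hadoop.hdds.protocol.proto
            .StorageContainerDatanodeProtocolProtos.PipelineReportsProto;
    import org.apache.hadoop.hdds.scm.container.common.helpers.PipelineID;

    public class PipelineReportSketch {
      public static void main(String[] args) {
        PipelineID pid = PipelineID.valueOf(UUID.randomUUID());
        PipelineReportsProto reports = PipelineReportsProto.newBuilder()
            .addPipelineReport(PipelineReport.newBuilder()
                .setPipelineID(pid.getProtobuf())
                .build())
            .build();
        System.out.println(reports.getPipelineReportCount()); // 1
      }
    }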
http://git-wip-us.apache.org/repos/asf/hadoop/blob/c0956ee2/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/ScmTestMock.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/ScmTestMock.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/ScmTestMock.java
index 751775f..27b6272 100644
--- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/ScmTestMock.java
+++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/ScmTestMock.java
@@ -18,6 +18,10 @@ package org.apache.hadoop.ozone.container.common;
 
 import com.google.common.base.Preconditions;
 import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos;
+import org.apache.hadoop.hdds.protocol.proto
+        .StorageContainerDatanodeProtocolProtos.ContainerReportsProto;
+import org.apache.hadoop.hdds.protocol.proto
+        .StorageContainerDatanodeProtocolProtos.PipelineReportsProto;
 import org.apache.hadoop.hdds.protocol.proto.
     StorageContainerDatanodeProtocolProtos.CommandStatus;
 import org.apache.hadoop.hdds.scm.VersionInfo;
@@ -214,8 +218,8 @@ public class ScmTestMock implements StorageContainerDatanodeProtocol {
   public StorageContainerDatanodeProtocolProtos
       .SCMRegisteredResponseProto register(
           DatanodeDetailsProto datanodeDetailsProto, NodeReportProto nodeReport,
-          StorageContainerDatanodeProtocolProtos.ContainerReportsProto
-              containerReportsRequestProto)
+          ContainerReportsProto containerReportsRequestProto,
+          PipelineReportsProto pipelineReportsProto)
       throws IOException {
     rpcCount.incrementAndGet();
     updateNodeReport(datanodeDetailsProto, nodeReport);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c0956ee2/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerMapping.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerMapping.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerMapping.java
index 206e24b..eb0a0b4 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerMapping.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerMapping.java
@@ -466,24 +466,6 @@ public class ContainerMapping implements Mapping {
     return new ContainerWithPipeline(containerInfo, pipeline);
   }
 
-  public void handlePipelineClose(PipelineID pipelineID) {
-    try {
-      Pipeline pipeline = pipelineSelector.getPipeline(pipelineID);
-      if (pipeline != null) {
-        pipelineSelector.finalizePipeline(pipeline);
-      } else {
-        LOG.debug("pipeline:{} not found", pipelineID);
-      }
-    } catch (Exception e) {
-      LOG.info("failed to close pipeline:{}", pipelineID, e);
-    }
-  }
-
-  public Set<PipelineID> getPipelineOnDatanode(
-      DatanodeDetails datanodeDetails) {
-    return pipelineSelector.getPipelineId(datanodeDetails.getUuid());
-  }
-
   /**
    * Process container report from Datanode.
    * <p>
@@ -710,7 +692,6 @@ public class ContainerMapping implements Mapping {
     return containerStore;
   }
 
-  @VisibleForTesting
   public PipelineSelector getPipelineSelector() {
     return pipelineSelector;
   }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c0956ee2/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerReportHandler.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerReportHandler.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerReportHandler.java
index dcbd49c..3f156de 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerReportHandler.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerReportHandler.java
@@ -89,20 +89,20 @@ public class ContainerReportHandler implements
           .map(ContainerID::new)
           .collect(Collectors.toSet());
 
-      ReportResult reportResult = node2ContainerMap
+      ReportResult<ContainerID> reportResult = node2ContainerMap
           .processReport(datanodeOrigin.getUuid(), containerIds);
 
       //we have the report, so we can update the states for the next iteration.
       node2ContainerMap
           .setContainersForDatanode(datanodeOrigin.getUuid(), containerIds);
 
-      for (ContainerID containerID : reportResult.getMissingContainers()) {
+      for (ContainerID containerID : reportResult.getMissingEntries()) {
         containerStateManager
             .removeContainerReplica(containerID, datanodeOrigin);
         checkReplicationState(containerID, publisher);
       }
 
-      for (ContainerID containerID : reportResult.getNewContainers()) {
+      for (ContainerID containerID : reportResult.getNewEntries()) {
         containerStateManager.addContainerReplica(containerID, datanodeOrigin);
         checkReplicationState(containerID, publisher);
       }

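The generic ReportResult<T> pattern used here boils down to a set difference in both directions. A standalone mirror of its semantics with plain JDK sets (container IDs replaced by strings for brevity):

    import java.util.Arrays;
    import java.util.HashSet;
    import java.util.Set;

    public class ReportDiffSketch {
      public static void main(String[] args) {
        Set<String> known = new HashSet<>(Arrays.asList("c1", "c2"));
        Set<String> reported = new HashSet<>(Arrays.asList("c2", "c3"));

        // getMissingEntries(): known before, absent from this report.
        Set<String> missing = new HashSet<>(known);
        missing.removeAll(reported);
        // getNewEntries(): present in this report, not known before.
        Set<String> added = new HashSet<>(reported);
        added.removeAll(known);

        System.out.println("missing=" + missing + " new=" + added);
      }
    }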
http://git-wip-us.apache.org/repos/asf/hadoop/blob/c0956ee2/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/Mapping.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/Mapping.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/Mapping.java
index 1b0c57c..5ed80cb 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/Mapping.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/Mapping.java
@@ -25,13 +25,12 @@ import org.apache.hadoop.hdds.scm.container.common.helpers.ContainerInfo;
 import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
 import org.apache.hadoop.hdds.protocol.proto
     .StorageContainerDatanodeProtocolProtos.ContainerReportsProto;
-import org.apache.hadoop.hdds.scm.container.common.helpers.PipelineID;
+import org.apache.hadoop.hdds.scm.pipelines.PipelineSelector;
 
 import java.io.Closeable;
 import java.io.IOException;
 import java.util.List;
 import java.util.Map;
-import java.util.Set;
 
 /**
  * Mapping class contains the mapping from a name to a pipeline mapping. This is
@@ -138,15 +137,5 @@ public interface Mapping extends Closeable {
       String owner, ReplicationType type, ReplicationFactor factor,
       LifeCycleState state) throws IOException;
 
-  /**
-   * Handle a pipeline close event.
-   * @param pipelineID pipeline id
-   */
-  void handlePipelineClose(PipelineID pipelineID);
-
-  /**
-   * Get set of pipeline for a specific datanode.
-   * @param datanodeDetails datanode for which pipelines needs to be fetched.
-   */
-  Set<PipelineID> getPipelineOnDatanode(DatanodeDetails datanodeDetails);
+  PipelineSelector getPipelineSelector();
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c0956ee2/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/events/SCMEvents.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/events/SCMEvents.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/events/SCMEvents.java
index 9d72eb1..745e052 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/events/SCMEvents.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/events/SCMEvents.java
@@ -27,10 +27,13 @@ import org.apache.hadoop.hdds.scm.command.CommandStatusReportHandler
     .DeleteBlockCommandStatus;
 import org.apache.hadoop.hdds.scm.command.CommandStatusReportHandler
     .ReplicationStatus;
-import org.apache.hadoop.hdds.scm.container.CloseContainerEventHandler.CloseContainerRetryableReq;
+import org.apache.hadoop.hdds.scm.container.CloseContainerEventHandler
+        .CloseContainerRetryableReq;
 import org.apache.hadoop.hdds.scm.container.ContainerID;
 import org.apache.hadoop.hdds.scm.container.common.helpers.PipelineID;
 import org.apache.hadoop.hdds.scm.server.SCMDatanodeHeartbeatDispatcher
+        .PipelineReportFromDatanode;
+import org.apache.hadoop.hdds.scm.server.SCMDatanodeHeartbeatDispatcher
     .PipelineActionsFromDatanode;
 import org.apache.hadoop.hdds.scm.server.SCMDatanodeHeartbeatDispatcher
     .ContainerActionsFromDatanode;
@@ -72,8 +75,7 @@ public final class SCMEvents {
 
   /**
    * ContainerReports are send out by Datanodes. This report is received by
-   * SCMDatanodeHeartbeatDispatcher and Container_Report Event
-   * isTestSCMDatanodeHeartbeatDispatcher generated.
+   * SCMDatanodeHeartbeatDispatcher and Container_Report Event is generated.
    */
   public static final TypedEvent<ContainerReportFromDatanode> CONTAINER_REPORT =
       new TypedEvent<>(ContainerReportFromDatanode.class, "Container_Report");
@@ -87,6 +89,13 @@ public final class SCMEvents {
       "Container_Actions");
 
   /**
+   * PipelineReports are send out by Datanodes. This report is received by
+   * SCMDatanodeHeartbeatDispatcher and Pipeline_Report Event is generated.
+   */
+  public static final TypedEvent<PipelineReportFromDatanode> PIPELINE_REPORT =
+          new TypedEvent<>(PipelineReportFromDatanode.class, "Pipeline_Report");
+
+  /**
    * PipelineActions are sent by Datanode. This event is received by
    * SCMDatanodeHeartbeatDispatcher and PIPELINE_ACTIONS event is generated.
    */

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c0956ee2/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/SCMNodeManager.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/SCMNodeManager.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/SCMNodeManager.java
index fca08bd..58da1cc 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/SCMNodeManager.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/SCMNodeManager.java
@@ -19,6 +19,8 @@ package org.apache.hadoop.hdds.scm.node;
 
 import com.google.common.annotations.VisibleForTesting;
 import com.google.common.base.Preconditions;
+import org.apache.hadoop.hdds.protocol.proto
+        .StorageContainerDatanodeProtocolProtos.PipelineReportsProto;
 import org.apache.hadoop.hdds.scm.node.states.NodeAlreadyExistsException;
 import org.apache.hadoop.hdds.scm.node.states.NodeNotFoundException;
 import org.apache.hadoop.hdds.scm.server.StorageContainerManager;
@@ -363,7 +365,8 @@ public class SCMNodeManager
    */
   @Override
   public RegisteredCommand register(
-      DatanodeDetails datanodeDetails, NodeReportProto nodeReport) {
+      DatanodeDetails datanodeDetails, NodeReportProto nodeReport,
+      PipelineReportsProto pipelineReportsProto) {
 
     InetAddress dnAddress = Server.getRemoteIp();
     if (dnAddress != null) {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c0956ee2/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/StaleNodeHandler.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/StaleNodeHandler.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/StaleNodeHandler.java
index b435e77..ddbba82 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/StaleNodeHandler.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/StaleNodeHandler.java
@@ -19,17 +19,13 @@
 package org.apache.hadoop.hdds.scm.node;
 
 import org.apache.hadoop.hdds.protocol.DatanodeDetails;
-import org.apache.hadoop.hdds.scm.container.Mapping;
-import org.apache.hadoop.hdds.scm.container.common.helpers.PipelineID;
-import org.apache.hadoop.hdds.scm.events.SCMEvents;
 import org.apache.hadoop.hdds.scm.node.states.Node2ContainerMap;
+import org.apache.hadoop.hdds.scm.pipelines.PipelineSelector;
 import org.apache.hadoop.hdds.server.events.EventHandler;
 import org.apache.hadoop.hdds.server.events.EventPublisher;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
-import java.util.Set;
-
 /**
  * Handles Stale node event.
  */
@@ -37,22 +33,17 @@ public class StaleNodeHandler implements EventHandler<DatanodeDetails> {
   static final Logger LOG = LoggerFactory.getLogger(StaleNodeHandler.class);
 
   private final Node2ContainerMap node2ContainerMap;
-  private final Mapping containerManager;
+  private final PipelineSelector pipelineSelector;
 
   public StaleNodeHandler(Node2ContainerMap node2ContainerMap,
-      Mapping containerManager) {
+      PipelineSelector pipelineSelector) {
     this.node2ContainerMap = node2ContainerMap;
-    this.containerManager = containerManager;
+    this.pipelineSelector = pipelineSelector;
   }
 
   @Override
   public void onMessage(DatanodeDetails datanodeDetails,
                         EventPublisher publisher) {
-    Set<PipelineID> pipelineIDs =
-        containerManager.getPipelineOnDatanode(datanodeDetails);
-    for (PipelineID id : pipelineIDs) {
-      LOG.info("closing pipeline {}.", id);
-      publisher.fireEvent(SCMEvents.PIPELINE_CLOSE, id);
-    }
+    pipelineSelector.handleStaleNode(datanodeDetails);
   }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c0956ee2/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/states/Node2ContainerMap.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/states/Node2ContainerMap.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/states/Node2ContainerMap.java
index 97c254b..549080a 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/states/Node2ContainerMap.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/states/Node2ContainerMap.java
@@ -18,13 +18,9 @@
 
 package org.apache.hadoop.hdds.scm.node.states;
 
-import java.util.Collections;
 import java.util.HashSet;
-import java.util.Map;
 import java.util.Set;
-import java.util.TreeSet;
 import java.util.UUID;
-import java.util.concurrent.ConcurrentHashMap;
 
 import org.apache.hadoop.hdds.scm.container.ContainerID;
 import org.apache.hadoop.hdds.scm.exceptions.SCMException;
@@ -32,34 +28,29 @@ import org.apache.hadoop.hdds.scm.exceptions.SCMException;
 import com.google.common.annotations.VisibleForTesting;
 import com.google.common.base.Preconditions;
 import static org.apache.hadoop.hdds.scm.exceptions.SCMException.ResultCodes
-    .DUPLICATE_DATANODE;
-import static org.apache.hadoop.hdds.scm.exceptions.SCMException.ResultCodes
     .NO_SUCH_DATANODE;
 
 /**
 * This data structure maintains the list of containers that are on a datanode.
  * This information is built from the DN container reports.
  */
-public class Node2ContainerMap {
-  private final Map<UUID, Set<ContainerID>> dn2ContainerMap;
+public class Node2ContainerMap extends Node2ObjectsMap<ContainerID> {
 
   /**
    * Constructs a Node2ContainerMap Object.
    */
   public Node2ContainerMap() {
-    dn2ContainerMap = new ConcurrentHashMap<>();
+    super();
   }
 
   /**
-   * Returns true if this a datanode that is already tracked by
-   * Node2ContainerMap.
+   * Returns the containers associated with this datanode ID (empty if none).
    *
-   * @param datanodeID - UUID of the Datanode.
-   * @return True if this is tracked, false if this map does not know about it.
+   * @param datanode - UUID
+   * @return Set of containers, or an empty set.
    */
-  public boolean isKnownDatanode(UUID datanodeID) {
-    Preconditions.checkNotNull(datanodeID);
-    return dn2ContainerMap.containsKey(datanodeID);
+  public Set<ContainerID> getContainers(UUID datanode) {
+    return getObjects(datanode);
   }
 
   /**
@@ -70,13 +61,7 @@ public class Node2ContainerMap {
    */
   public void insertNewDatanode(UUID datanodeID, Set<ContainerID> containerIDs)
       throws SCMException {
-    Preconditions.checkNotNull(containerIDs);
-    Preconditions.checkNotNull(datanodeID);
-    if (dn2ContainerMap.putIfAbsent(datanodeID, new HashSet<>(containerIDs))
-        != null) {
-      throw new SCMException("Node already exists in the map",
-          DUPLICATE_DATANODE);
-    }
+    super.insertNewDatanode(datanodeID, containerIDs);
   }
 
   /**
@@ -91,103 +76,15 @@ public class Node2ContainerMap {
       Set<ContainerID> containers) throws SCMException {
     Preconditions.checkNotNull(datanodeID);
     Preconditions.checkNotNull(containers);
-    if (dn2ContainerMap
+    if (dn2ObjectMap
         .computeIfPresent(datanodeID, (k, v) -> new HashSet<>(containers))
         == null) {
       throw new SCMException("No such datanode", NO_SUCH_DATANODE);
     }
   }
 
-  /**
-   * Removes datanode Entry from the map.
-   *
-   * @param datanodeID - Datanode ID.
-   */
-  public void removeDatanode(UUID datanodeID) {
-    Preconditions.checkNotNull(datanodeID);
-    dn2ContainerMap.computeIfPresent(datanodeID, (k, v) -> null);
-  }
-
-  /**
-   * Returns null if there no containers associated with this datanode ID.
-   *
-   * @param datanode - UUID
-   * @return Set of containers or Null.
-   */
-  public Set<ContainerID> getContainers(UUID datanode) {
-    Preconditions.checkNotNull(datanode);
-    return dn2ContainerMap.computeIfPresent(datanode, (k, v) ->
-        Collections.unmodifiableSet(v));
-  }
-
-  public ReportResult processReport(UUID datanodeID, Set<ContainerID>
-      containers) {
-    Preconditions.checkNotNull(datanodeID);
-    Preconditions.checkNotNull(containers);
-
-    if (!isKnownDatanode(datanodeID)) {
-      return ReportResult.ReportResultBuilder.newBuilder()
-          .setStatus(ReportStatus.NEW_DATANODE_FOUND)
-          .setNewContainers(containers)
-          .build();
-    }
-
-    // Conditions like Zero length containers should be handled by removeAll.
-    Set<ContainerID> currentSet = dn2ContainerMap.get(datanodeID);
-    TreeSet<ContainerID> newContainers = new TreeSet<>(containers);
-    newContainers.removeAll(currentSet);
-
-    TreeSet<ContainerID> missingContainers = new TreeSet<>(currentSet);
-    missingContainers.removeAll(containers);
-
-    if (newContainers.isEmpty() && missingContainers.isEmpty()) {
-      return ReportResult.ReportResultBuilder.newBuilder()
-          .setStatus(ReportStatus.ALL_IS_WELL)
-          .build();
-    }
-
-    if (newContainers.isEmpty() && !missingContainers.isEmpty()) {
-      return ReportResult.ReportResultBuilder.newBuilder()
-          .setStatus(ReportStatus.MISSING_CONTAINERS)
-          .setMissingContainers(missingContainers)
-          .build();
-    }
-
-    if (!newContainers.isEmpty() && missingContainers.isEmpty()) {
-      return ReportResult.ReportResultBuilder.newBuilder()
-          .setStatus(ReportStatus.NEW_CONTAINERS_FOUND)
-          .setNewContainers(newContainers)
-          .build();
-    }
-
-    if (!newContainers.isEmpty() && !missingContainers.isEmpty()) {
-      return ReportResult.ReportResultBuilder.newBuilder()
-          .setStatus(ReportStatus.MISSING_AND_NEW_CONTAINERS_FOUND)
-          .setNewContainers(newContainers)
-          .setMissingContainers(missingContainers)
-          .build();
-    }
-
-    // default status & Make compiler happy
-    return ReportResult.ReportResultBuilder.newBuilder()
-        .setStatus(ReportStatus.ALL_IS_WELL)
-        .build();
-  }
-
-  /**
-   * Results possible from processing a container report by
-   * Node2ContainerMapper.
-   */
-  public enum ReportStatus {
-    ALL_IS_WELL,
-    MISSING_CONTAINERS,
-    NEW_CONTAINERS_FOUND,
-    MISSING_AND_NEW_CONTAINERS_FOUND,
-    NEW_DATANODE_FOUND
-  }
-
   @VisibleForTesting
   public int size() {
-    return dn2ContainerMap.size();
+    return dn2ObjectMap.size();
   }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c0956ee2/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/states/Node2ObjectsMap.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/states/Node2ObjectsMap.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/states/Node2ObjectsMap.java
new file mode 100644
index 0000000..e49a79c
--- /dev/null
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/states/Node2ObjectsMap.java
@@ -0,0 +1,162 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with this
+ * work for additional information regarding copyright ownership.  The ASF
+ * licenses this file to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations under
+ * the License.
+ *
+ */
+
+package org.apache.hadoop.hdds.scm.node.states;
+
+import com.google.common.annotations.VisibleForTesting;
+import com.google.common.base.Preconditions;
+import org.apache.hadoop.hdds.scm.exceptions.SCMException;
+
+import java.util.UUID;
+import java.util.Set;
+import java.util.Map;
+import java.util.TreeSet;
+import java.util.HashSet;
+import java.util.Collections;
+
+import java.util.concurrent.ConcurrentHashMap;
+
+import static org.apache.hadoop.hdds.scm.exceptions.SCMException.ResultCodes.DUPLICATE_DATANODE;
+
+/**
+ * This data structure maintains the set of objects (such as containers or
+ * pipelines) that live on a datanode. The information is built from DN reports.
+ */
+public class Node2ObjectsMap<T> {
+  protected final Map<UUID, Set<T>> dn2ObjectMap;
+
+  /**
+   * Constructs a Node2ObjectsMap object.
+   */
+  public Node2ObjectsMap() {
+    dn2ObjectMap = new ConcurrentHashMap<>();
+  }
+
+  /**
+   * Returns true if this is a datanode that is already tracked by
+   * Node2ObjectsMap.
+   *
+   * @param datanodeID - UUID of the Datanode.
+   * @return True if this is tracked, false if this map does not know about it.
+   */
+  public boolean isKnownDatanode(UUID datanodeID) {
+    Preconditions.checkNotNull(datanodeID);
+    return dn2ObjectMap.containsKey(datanodeID);
+  }
+
+  /**
+   * Inserts a new datanode into the map.
+   *
+   * @param datanodeID   -- Datanode UUID
+   * @param containerIDs - Set of objects to associate with the datanode.
+   */
+  public void insertNewDatanode(UUID datanodeID, Set<T> containerIDs)
+      throws SCMException {
+    Preconditions.checkNotNull(containerIDs);
+    Preconditions.checkNotNull(datanodeID);
+    if (dn2ObjectMap.putIfAbsent(datanodeID, new HashSet<>(containerIDs))
+        != null) {
+      throw new SCMException("Node already exists in the map",
+          DUPLICATE_DATANODE);
+    }
+  }
+
+  /**
+   * Removes datanode Entry from the map.
+   *
+   * @param datanodeID - Datanode ID.
+   */
+  void removeDatanode(UUID datanodeID) {
+    Preconditions.checkNotNull(datanodeID);
+    dn2ObjectMap.computeIfPresent(datanodeID, (k, v) -> null);
+  }
+
+  /**
+   * Returns the objects associated with this datanode ID, or an empty set if
+   * there are none.
+   *
+   * @param datanode - UUID
+   * @return Set of objects, or an empty set.
+   */
+  Set<T> getObjects(UUID datanode) {
+    Preconditions.checkNotNull(datanode);
+    final Set<T> s = dn2ObjectMap.get(datanode);
+    return s != null ? Collections.unmodifiableSet(s) : Collections.emptySet();
+  }
+
+  public ReportResult.ReportResultBuilder<T> newBuilder() {
+    return new ReportResult.ReportResultBuilder<>();
+  }
+
+  public ReportResult<T> processReport(UUID datanodeID, Set<T> objects) {
+    Preconditions.checkNotNull(datanodeID);
+    Preconditions.checkNotNull(objects);
+
+    if (!isKnownDatanode(datanodeID)) {
+      return newBuilder()
+          .setStatus(ReportResult.ReportStatus.NEW_DATANODE_FOUND)
+          .setNewEntries(objects)
+          .build();
+    }
+
+    // Conditions like zero-length object sets are handled by removeAll.
+    Set<T> currentSet = dn2ObjectMap.get(datanodeID);
+    TreeSet<T> newObjects = new TreeSet<>(objects);
+    newObjects.removeAll(currentSet);
+
+    TreeSet<T> missingObjects = new TreeSet<>(currentSet);
+    missingObjects.removeAll(objects);
+
+    if (newObjects.isEmpty() && missingObjects.isEmpty()) {
+      return newBuilder()
+          .setStatus(ReportResult.ReportStatus.ALL_IS_WELL)
+          .build();
+    }
+
+    if (newObjects.isEmpty() && !missingObjects.isEmpty()) {
+      return newBuilder()
+          .setStatus(ReportResult.ReportStatus.MISSING_ENTRIES)
+          .setMissingEntries(missingObjects)
+          .build();
+    }
+
+    if (!newObjects.isEmpty() && missingObjects.isEmpty()) {
+      return newBuilder()
+          .setStatus(ReportResult.ReportStatus.NEW_ENTRIES_FOUND)
+          .setNewEntries(newObjects)
+          .build();
+    }
+
+    if (!newObjects.isEmpty() && !missingObjects.isEmpty()) {
+      return newBuilder()
+          .setStatus(ReportResult.ReportStatus.MISSING_AND_NEW_ENTRIES_FOUND)
+          .setNewEntries(newObjects)
+          .setMissingEntries(missingObjects)
+          .build();
+    }
+
+    // Default status; also keeps the compiler happy.
+    return newBuilder()
+        .setStatus(ReportResult.ReportStatus.ALL_IS_WELL)
+        .build();
+  }
+
+  @VisibleForTesting
+  public int size() {
+    return dn2ObjectMap.size();
+  }
+}
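
To make the generic refactoring concrete, a small usage sketch of processReport; the datanode ID and container IDs below are made up for illustration, imports from java.util are assumed, and the checked SCMException thrown by insertNewDatanode is elided:

    // Sketch: diffing a datanode report against the tracked state.
    Node2ObjectsMap<ContainerID> map = new Node2ObjectsMap<>();
    UUID dnId = UUID.randomUUID();
    map.insertNewDatanode(dnId, new HashSet<>(
        Arrays.asList(ContainerID.valueof(1), ContainerID.valueof(2))));

    // A later report that drops container 2 and adds container 3:
    ReportResult<ContainerID> result = map.processReport(dnId,
        new HashSet<>(Arrays.asList(ContainerID.valueof(1), ContainerID.valueof(3))));
    // result.getStatus()         -> MISSING_AND_NEW_ENTRIES_FOUND
    // result.getNewEntries()     -> {3}
    // result.getMissingEntries() -> {2}

Note that T must be Comparable, since processReport builds TreeSets; both ContainerID and PipelineID satisfy that.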

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c0956ee2/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/states/ReportResult.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/states/ReportResult.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/states/ReportResult.java
index 9bb6cf1..0c7610f 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/states/ReportResult.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/states/ReportResult.java
@@ -19,83 +19,92 @@
 
 package org.apache.hadoop.hdds.scm.node.states;
 
-import org.apache.hadoop.hdds.scm.container.ContainerID;
-
 import java.util.Collections;
 import java.util.Set;
 
 import com.google.common.base.Preconditions;
 
 /**
- * A Container Report gets processsed by the Node2Container and returns
- * Report Result class.
+ * A Container/Pipeline report gets processed by the
+ * Node2Container/Node2Pipeline map, which returns a ReportResult.
  */
-public class ReportResult {
-  private Node2ContainerMap.ReportStatus status;
-  private Set<ContainerID> missingContainers;
-  private Set<ContainerID> newContainers;
-
-  ReportResult(Node2ContainerMap.ReportStatus status,
-      Set<ContainerID> missingContainers,
-      Set<ContainerID> newContainers) {
+public final class ReportResult<T> {
+  private ReportStatus status;
+  private Set<T> missingEntries;
+  private Set<T> newEntries;
+
+  private ReportResult(ReportStatus status,
+      Set<T> missingEntries,
+      Set<T> newEntries) {
     this.status = status;
-    Preconditions.checkNotNull(missingContainers);
-    Preconditions.checkNotNull(newContainers);
-    this.missingContainers = missingContainers;
-    this.newContainers = newContainers;
+    Preconditions.checkNotNull(missingEntries);
+    Preconditions.checkNotNull(newEntries);
+    this.missingEntries = missingEntries;
+    this.newEntries = newEntries;
   }
 
-  public Node2ContainerMap.ReportStatus getStatus() {
+  public ReportStatus getStatus() {
     return status;
   }
 
-  public Set<ContainerID> getMissingContainers() {
-    return missingContainers;
+  public Set<T> getMissingEntries() {
+    return missingEntries;
   }
 
-  public Set<ContainerID> getNewContainers() {
-    return newContainers;
+  public Set<T> getNewEntries() {
+    return newEntries;
   }
 
-  static class ReportResultBuilder {
-    private Node2ContainerMap.ReportStatus status;
-    private Set<ContainerID> missingContainers;
-    private Set<ContainerID> newContainers;
-
-    static ReportResultBuilder newBuilder() {
-      return new ReportResultBuilder();
-    }
-
-    public ReportResultBuilder setStatus(
-        Node2ContainerMap.ReportStatus newstatus) {
-      this.status = newstatus;
+  /**
+   * Builder for the result of processing a report against a node2Object map.
+   * @param <T> type of the entries tracked by the map.
+   */
+  public static class ReportResultBuilder<T> {
+    private ReportStatus status;
+    private Set<T> missingEntries;
+    private Set<T> newEntries;
+
+    public ReportResultBuilder<T> setStatus(
+        ReportStatus newStatus) {
+      this.status = newStatus;
       return this;
     }
 
-    public ReportResultBuilder setMissingContainers(
-        Set<ContainerID> missingContainersLit) {
-      this.missingContainers = missingContainersLit;
+    public ReportResultBuilder<T> setMissingEntries(
+        Set<T> missingEntriesList) {
+      this.missingEntries = missingEntriesList;
       return this;
     }
 
-    public ReportResultBuilder setNewContainers(
-        Set<ContainerID> newContainersList) {
-      this.newContainers = newContainersList;
+    public ReportResultBuilder<T> setNewEntries(
+        Set<T> newEntriesList) {
+      this.newEntries = newEntriesList;
       return this;
     }
 
-    ReportResult build() {
+    public ReportResult<T> build() {
 
-      Set<ContainerID> nullSafeMissingContainers = this.missingContainers;
-      Set<ContainerID> nullSafeNewContainers = this.newContainers;
-      if (nullSafeNewContainers == null) {
-        nullSafeNewContainers = Collections.emptySet();
+      Set<T> nullSafeMissingEntries = this.missingEntries;
+      Set<T> nullSafeNewEntries = this.newEntries;
+      if (nullSafeNewEntries == null) {
+        nullSafeNewEntries = Collections.emptySet();
       }
-      if (nullSafeMissingContainers == null) {
-        nullSafeMissingContainers = Collections.emptySet();
+      if (nullSafeMissingEntries == null) {
+        nullSafeMissingEntries = Collections.emptySet();
       }
-      return new ReportResult(status, nullSafeMissingContainers,
-          nullSafeNewContainers);
+      return new ReportResult<T>(status, nullSafeMissingEntries,
+              nullSafeNewEntries);
     }
   }
+
+  /**
+   * Results possible from processing a report.
+   */
+  public enum ReportStatus {
+    ALL_IS_WELL,
+    MISSING_ENTRIES,
+    NEW_ENTRIES_FOUND,
+    MISSING_AND_NEW_ENTRIES_FOUND,
+    NEW_DATANODE_FOUND,
+  }
 }
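
A short sketch of how the reworked builder reads at a call site, mirroring the calls now made from Node2ObjectsMap; newPipelines is a placeholder Set<PipelineID>:

    // Sketch: building a typed result; unset entry sets default to empty.
    ReportResult<PipelineID> result =
        new ReportResult.ReportResultBuilder<PipelineID>()
            .setStatus(ReportResult.ReportStatus.NEW_ENTRIES_FOUND)
            .setNewEntries(newPipelines)
            .build();   // missing entries were never set; build() substitutes an empty set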

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c0956ee2/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipelines/Node2PipelineMap.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipelines/Node2PipelineMap.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipelines/Node2PipelineMap.java
index 363ce71..87f2222 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipelines/Node2PipelineMap.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipelines/Node2PipelineMap.java
@@ -16,19 +16,15 @@
  *
  */
 
-package org.apache.hadoop.hdds.scm.pipelines;
+package org.apache.hadoop.hdds.scm.node.states;
 
-import com.google.common.base.Preconditions;
 import org.apache.hadoop.hdds.protocol.DatanodeDetails;
 import org.apache.hadoop.hdds.scm.container.common.helpers.Pipeline;
 import org.apache.hadoop.hdds.scm.container.common.helpers.PipelineID;
 
-import java.util.Collections;
 import java.util.HashSet;
-import java.util.Map;
 import java.util.Set;
 import java.util.UUID;
-import java.util.concurrent.ConcurrentHashMap;
 
 /**
  * This data structure maintains the list of pipelines which the given datanode is a part of. This
@@ -36,33 +32,11 @@ import java.util.concurrent.ConcurrentHashMap;
  *
  * <p>TODO: this information needs to be regenerated from pipeline reports on SCM restart
  */
-public class Node2PipelineMap {
-  private final Map<UUID, Set<PipelineID>> dn2PipelineMap;
+public class Node2PipelineMap extends Node2ObjectsMap<PipelineID> {
 
   /** Constructs a Node2PipelineMap Object. */
   public Node2PipelineMap() {
-    dn2PipelineMap = new ConcurrentHashMap<>();
-  }
-
-  /**
-   * Returns true if this a datanode that is already tracked by Node2PipelineMap.
-   *
-   * @param datanodeID - UUID of the Datanode.
-   * @return True if this is tracked, false if this map does not know about it.
-   */
-  private boolean isKnownDatanode(UUID datanodeID) {
-    Preconditions.checkNotNull(datanodeID);
-    return dn2PipelineMap.containsKey(datanodeID);
-  }
-
-  /**
-   * Removes datanode Entry from the map.
-   *
-   * @param datanodeID - Datanode ID.
-   */
-  public synchronized void removeDatanode(UUID datanodeID) {
-    Preconditions.checkNotNull(datanodeID);
-    dn2PipelineMap.computeIfPresent(datanodeID, (k, v) -> null);
+    super();
   }
 
   /**
@@ -72,9 +46,7 @@ public class Node2PipelineMap {
    * @return Set of pipelines or Null.
    */
   public Set<PipelineID> getPipelines(UUID datanode) {
-    Preconditions.checkNotNull(datanode);
-    final Set<PipelineID> s = dn2PipelineMap.get(datanode);
-    return s != null? Collections.unmodifiableSet(s): Collections.emptySet();
+    return getObjects(datanode);
   }
 
   /**
@@ -85,7 +57,7 @@ public class Node2PipelineMap {
   public synchronized void addPipeline(Pipeline pipeline) {
     for (DatanodeDetails details : pipeline.getDatanodes().values()) {
       UUID dnId = details.getUuid();
-      dn2PipelineMap.computeIfAbsent(dnId, k -> new HashSet<>())
+      dn2ObjectMap.computeIfAbsent(dnId, k -> new HashSet<>())
           .add(pipeline.getId());
     }
   }
@@ -93,16 +65,11 @@ public class Node2PipelineMap {
   public synchronized void removePipeline(Pipeline pipeline) {
     for (DatanodeDetails details : pipeline.getDatanodes().values()) {
       UUID dnId = details.getUuid();
-      dn2PipelineMap.computeIfPresent(
-          dnId,
+      dn2ObjectMap.computeIfPresent(dnId,
           (k, v) -> {
             v.remove(pipeline.getId());
             return v;
           });
     }
   }
-
-  public Map<UUID, Set<PipelineID>> getDn2PipelineMap() {
-    return Collections.unmodifiableMap(dn2PipelineMap);
-  }
 }
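
From a caller's point of view the slimmed-down subclass behaves as before; a small sketch, assuming a Pipeline instance and a datanode UUID are in scope:

    // Sketch: tracking pipeline membership per datanode.
    Node2PipelineMap node2PipelineMap = new Node2PipelineMap();
    node2PipelineMap.addPipeline(pipeline);       // indexes every member datanode
    Set<PipelineID> onDn = node2PipelineMap.getPipelines(dnUuid);
    // Returns an unmodifiable (possibly empty) set, never null,
    // courtesy of the inherited Node2ObjectsMap#getObjects.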

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c0956ee2/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipelines/PipelineCloseHandler.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipelines/PipelineCloseHandler.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipelines/PipelineCloseHandler.java
index 733dec5..e49678f 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipelines/PipelineCloseHandler.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipelines/PipelineCloseHandler.java
@@ -17,22 +17,36 @@
 
 package org.apache.hadoop.hdds.scm.pipelines;
 
-import org.apache.hadoop.hdds.scm.container.Mapping;
+import org.apache.hadoop.hdds.scm.container.common.helpers.Pipeline;
 import org.apache.hadoop.hdds.scm.container.common.helpers.PipelineID;
 import org.apache.hadoop.hdds.server.events.EventHandler;
 import org.apache.hadoop.hdds.server.events.EventPublisher;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 /**
  * Handles pipeline close event.
  */
 public class PipelineCloseHandler implements EventHandler<PipelineID> {
-  private final Mapping mapping;
-  public PipelineCloseHandler(Mapping mapping) {
-    this.mapping = mapping;
+  private static final Logger LOG = LoggerFactory
+          .getLogger(PipelineCloseHandler.class);
+
+  private final PipelineSelector pipelineSelector;
+  public PipelineCloseHandler(PipelineSelector pipelineSelector) {
+    this.pipelineSelector = pipelineSelector;
   }
 
   @Override
   public void onMessage(PipelineID pipelineID, EventPublisher publisher) {
-    mapping.handlePipelineClose(pipelineID);
+    Pipeline pipeline = pipelineSelector.getPipeline(pipelineID);
+    try {
+      if (pipeline != null) {
+        pipelineSelector.finalizePipeline(pipeline);
+      } else {
+        LOG.debug("pipeline:{} not found", pipelineID);
+      }
+    } catch (Exception e) {
+      LOG.info("failed to close pipeline:{}", pipelineID, e);
+    }
   }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c0956ee2/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipelines/PipelineManager.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipelines/PipelineManager.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipelines/PipelineManager.java
index 07ff2b0..ca2e878 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipelines/PipelineManager.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipelines/PipelineManager.java
@@ -18,6 +18,8 @@ package org.apache.hadoop.hdds.scm.pipelines;
 
 import java.util.ArrayList;
 import java.util.LinkedList;
+
+import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
 import org.apache.hadoop.hdds.scm.container.common.helpers.Pipeline;
 import org.apache.hadoop.hdds.protocol.DatanodeDetails;
 import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor;
@@ -36,7 +38,7 @@ import java.util.concurrent.atomic.AtomicInteger;
 public abstract class PipelineManager {
   private static final Logger LOG =
       LoggerFactory.getLogger(PipelineManager.class);
-  private final ArrayList<ActivePipelines> activePipelines;
+  protected final ArrayList<ActivePipelines> activePipelines;
 
   public PipelineManager() {
     activePipelines = new ArrayList<>();
@@ -45,7 +47,10 @@ public abstract class PipelineManager {
     }
   }
 
-  private static class ActivePipelines {
+  /**
+   * List of active pipelines.
+   */
+  public static class ActivePipelines {
     private final List<PipelineID> activePipelines;
     private final AtomicInteger pipelineIndex;
 
@@ -55,10 +60,12 @@ public abstract class PipelineManager {
     }
 
     void addPipeline(PipelineID pipelineID) {
-      activePipelines.add(pipelineID);
+      if (!activePipelines.contains(pipelineID)) {
+        activePipelines.add(pipelineID);
+      }
     }
 
-    void removePipeline(PipelineID pipelineID) {
+    public void removePipeline(PipelineID pipelineID) {
       activePipelines.remove(pipelineID);
     }
 
@@ -117,17 +124,6 @@ public abstract class PipelineManager {
             .addPipeline(pipeline.getId());
   }
 
-  protected static int getReplicationCount(ReplicationFactor factor) {
-    switch (factor) {
-    case ONE:
-      return 1;
-    case THREE:
-      return 3;
-    default:
-      throw new IllegalArgumentException("Unexpected replication count");
-    }
-  }
-
   public abstract Pipeline allocatePipeline(
       ReplicationFactor replicationFactor);
 
@@ -137,6 +133,14 @@ public abstract class PipelineManager {
    */
   public abstract void initializePipeline(Pipeline pipeline) throws IOException;
 
+  public void processPipelineReport(Pipeline pipeline, DatanodeDetails dn) {
+    if (pipeline.addMember(dn)
+        && (pipeline.getDatanodes().size() == pipeline.getFactor().getNumber())
+        && pipeline.getLifeCycleState() == HddsProtos.LifeCycleState.OPEN) {
+      addOpenPipeline(pipeline);
+    }
+  }
+
   /**
    * Creates a pipeline with a specified replication factor and type.
    * @param replicationFactor - Replication Factor.
@@ -157,27 +161,11 @@ public abstract class PipelineManager {
    * Remove the pipeline from active allocation.
    * @param pipeline pipeline to be finalized
    */
-  public synchronized void finalizePipeline(Pipeline pipeline) {
-    activePipelines.get(pipeline.getFactor().ordinal())
-            .removePipeline(pipeline.getId());
-  }
+  public abstract boolean finalizePipeline(Pipeline pipeline);
 
   /**
    *
    * @param pipeline
    */
   public abstract void closePipeline(Pipeline pipeline) throws IOException;
-
-  /**
-   * list members in the pipeline.
-   * @return the datanode
-   */
-  public abstract List<DatanodeDetails> getMembers(PipelineID pipelineID)
-      throws IOException;
-
-  /**
-   * Update the datanode list of the pipeline.
-   */
-  public abstract void updatePipeline(PipelineID pipelineID,
-      List<DatanodeDetails> newDatanodes) throws IOException;
 }
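
The intent of the new processPipelineReport hook, restated as a sketch: a pipeline re-enters active allocation only once every expected member has reported and the pipeline is OPEN. With ReplicationFactor.THREE, roughly:

    // Sketch: three datanodes report the same pipeline after an SCM restart.
    manager.processPipelineReport(pipeline, dn1); // 1 of 3 members known - no-op
    manager.processPipelineReport(pipeline, dn2); // 2 of 3 - still inactive
    manager.processPipelineReport(pipeline, dn3); // 3 of 3 and state is OPEN,
                                                  // so addOpenPipeline() fires

addMember must report a new member (return true) on that final call for addOpenPipeline to run, which guards against re-adding an already-active pipeline.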

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c0956ee2/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipelines/PipelineReportHandler.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipelines/PipelineReportHandler.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipelines/PipelineReportHandler.java
new file mode 100644
index 0000000..933792b
--- /dev/null
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipelines/PipelineReportHandler.java
@@ -0,0 +1,59 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hdds.scm.pipelines;
+
+import com.google.common.base.Preconditions;
+import org.apache.hadoop.hdds.protocol.DatanodeDetails;
+import org.apache.hadoop.hdds.protocol.proto
+        .StorageContainerDatanodeProtocolProtos.PipelineReportsProto;
+import org.apache.hadoop.hdds.scm.server
+        .SCMDatanodeHeartbeatDispatcher.PipelineReportFromDatanode;
+import org.apache.hadoop.hdds.server.events.EventHandler;
+import org.apache.hadoop.hdds.server.events.EventPublisher;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+/**
+ * Handles pipeline reports from datanodes.
+ */
+public class PipelineReportHandler implements
+        EventHandler<PipelineReportFromDatanode> {
+
+  private static final Logger LOGGER = LoggerFactory
+      .getLogger(PipelineReportHandler.class);
+  private final PipelineSelector pipelineSelector;
+
+  public PipelineReportHandler(PipelineSelector pipelineSelector) {
+    Preconditions.checkNotNull(pipelineSelector);
+    this.pipelineSelector = pipelineSelector;
+  }
+
+  @Override
+  public void onMessage(PipelineReportFromDatanode pipelineReportFromDatanode,
+      EventPublisher publisher) {
+    Preconditions.checkNotNull(pipelineReportFromDatanode);
+    DatanodeDetails dn = pipelineReportFromDatanode.getDatanodeDetails();
+    PipelineReportsProto pipelineReport =
+            pipelineReportFromDatanode.getReport();
+    Preconditions.checkNotNull(dn, "Pipeline Report is "
+        + "missing DatanodeDetails.");
+    LOGGER.trace("Processing pipeline report for dn: {}", dn);
+    pipelineSelector.processPipelineReport(dn, pipelineReport);
+  }
+}
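
On the consumer side this handler is subscribed to the PIPELINE_REPORT event introduced above; a minimal wiring sketch, assuming the EventQueue implementation of EventPublisher from hadoop-hdds:

    // Sketch: subscribing the handler during SCM startup.
    EventQueue eventQueue = new EventQueue();
    eventQueue.addHandler(SCMEvents.PIPELINE_REPORT,
        new PipelineReportHandler(pipelineSelector));
    // Every PIPELINE_REPORT fired afterwards is routed to
    // PipelineReportHandler#onMessage.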


---------------------------------------------------------------------
To unsubscribe, e-mail: common-commits-unsubscribe@hadoop.apache.org
For additional commands, e-mail: common-commits-help@hadoop.apache.org


[42/50] [abbrv] hadoop git commit: HDDS-514. Clean Unregister JMX upon SCMConnectionManager#close. Contributed by Xiaoyu Yao.

Posted by xy...@apache.org.
HDDS-514. Clean Unregister JMX upon SCMConnectionManager#close.
Contributed by Xiaoyu Yao.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/524f7cd3
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/524f7cd3
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/524f7cd3

Branch: refs/heads/HDDS-4
Commit: 524f7cd354e0683c9ec61fdbce344ef79b841728
Parents: 096a716
Author: Anu Engineer <ae...@apache.org>
Authored: Thu Sep 20 12:21:34 2018 -0700
Committer: Anu Engineer <ae...@apache.org>
Committed: Thu Sep 20 12:21:34 2018 -0700

----------------------------------------------------------------------
 .../container/common/statemachine/SCMConnectionManager.java   | 7 +++++--
 1 file changed, 5 insertions(+), 2 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/524f7cd3/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/SCMConnectionManager.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/SCMConnectionManager.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/SCMConnectionManager.java
index 85fb580..775a91a 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/SCMConnectionManager.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/SCMConnectionManager.java
@@ -59,7 +59,7 @@ public class SCMConnectionManager
 
   private final int rpcTimeout;
   private final Configuration conf;
-  private final ObjectName jmxBean;
+  private ObjectName jmxBean;
 
   public SCMConnectionManager(Configuration conf) {
     this.mapLock = new ReentrantReadWriteLock();
@@ -191,7 +191,10 @@ public class SCMConnectionManager
   public void close() throws IOException {
     getValues().forEach(endpointStateMachine
         -> IOUtils.cleanupWithLogger(LOG, endpointStateMachine));
-    MBeans.unregister(jmxBean);
+    if (jmxBean != null) {
+      MBeans.unregister(jmxBean);
+      jmxBean = null;
+    }
   }
 
   @Override
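
The fix follows the usual idempotent-close pattern: drop the final modifier, null the JMX handle after the first unregister, and guard later calls. The same shape in isolation, as a sketch rather than the committed class:

    // Sketch: a close() that tolerates being called more than once.
    private ObjectName jmxBean;   // no longer final

    public void close() throws IOException {
      if (jmxBean != null) {
        MBeans.unregister(jmxBean);  // unregister exactly once
        jmxBean = null;              // subsequent close() calls become no-ops
      }
    }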


---------------------------------------------------------------------
To unsubscribe, e-mail: common-commits-unsubscribe@hadoop.apache.org
For additional commands, e-mail: common-commits-help@hadoop.apache.org


[23/50] [abbrv] hadoop git commit: YARN-8757. [Submarine] Add Tensorboard component when --tensorboard is specified. Contributed by Wangda Tan.

Posted by xy...@apache.org.
YARN-8757. [Submarine] Add Tensorboard component when --tensorboard is specified. Contributed by Wangda Tan.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/1824d5d1
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/1824d5d1
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/1824d5d1

Branch: refs/heads/HDDS-4
Commit: 1824d5d1c49c16db6341141fa204d4a4c02d0944
Parents: 56e0d63
Author: Sunil G <su...@apache.org>
Authored: Wed Sep 19 22:18:55 2018 +0530
Committer: Sunil G <su...@apache.org>
Committed: Wed Sep 19 22:18:55 2018 +0530

----------------------------------------------------------------------
 .../yarn/submarine/client/cli/CliConstants.java |   6 +
 .../yarn/submarine/client/cli/CliUtils.java     |  10 +-
 .../yarn/submarine/client/cli/RunJobCli.java    |  43 ++-
 .../client/cli/param/RunJobParameters.java      |  52 +++-
 .../fs/DefaultRemoteDirectoryManager.java       |  21 +-
 .../common/fs/RemoteDirectoryManager.java       |   4 +-
 .../common/FSBasedSubmarineStorageImpl.java     |   4 +-
 .../yarnservice/YarnServiceJobSubmitter.java    | 126 ++++++---
 .../runtimes/yarnservice/YarnServiceUtils.java  |  26 +-
 .../yarnservice/TestYarnServiceRunJobCli.java   | 268 ++++++++++++++++---
 .../common/fs/MockRemoteDirectoryManager.java   |   7 +-
 11 files changed, 463 insertions(+), 104 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/1824d5d1/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-submarine/src/main/java/org/apache/hadoop/yarn/submarine/client/cli/CliConstants.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-submarine/src/main/java/org/apache/hadoop/yarn/submarine/client/cli/CliConstants.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-submarine/src/main/java/org/apache/hadoop/yarn/submarine/client/cli/CliConstants.java
index d0958a8..d51ffc7 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-submarine/src/main/java/org/apache/hadoop/yarn/submarine/client/cli/CliConstants.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-submarine/src/main/java/org/apache/hadoop/yarn/submarine/client/cli/CliConstants.java
@@ -35,6 +35,10 @@ public class CliConstants {
   public static final String DOCKER_IMAGE = "docker_image";
   public static final String QUEUE = "queue";
   public static final String TENSORBOARD = "tensorboard";
+  public static final String TENSORBOARD_RESOURCES = "tensorboard_resources";
+  public static final String TENSORBOARD_DEFAULT_RESOURCES =
+      "memory=4G,vcores=1";
+
   public static final String WORKER_LAUNCH_CMD = "worker_launch_cmd";
   public static final String SERVING_LAUNCH_CMD = "serving_launch_cmd";
   public static final String PS_LAUNCH_CMD = "ps_launch_cmd";
@@ -45,4 +49,6 @@ public class CliConstants {
   public static final String WAIT_JOB_FINISH = "wait_job_finish";
   public static final String PS_DOCKER_IMAGE = "ps_docker_image";
   public static final String WORKER_DOCKER_IMAGE = "worker_docker_image";
+  public static final String TENSORBOARD_DOCKER_IMAGE =
+      "tensorboard_docker_image";
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/1824d5d1/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-submarine/src/main/java/org/apache/hadoop/yarn/submarine/client/cli/CliUtils.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-submarine/src/main/java/org/apache/hadoop/yarn/submarine/client/cli/CliUtils.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-submarine/src/main/java/org/apache/hadoop/yarn/submarine/client/cli/CliUtils.java
index 6dd3e4d..546c6eb 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-submarine/src/main/java/org/apache/hadoop/yarn/submarine/client/cli/CliUtils.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-submarine/src/main/java/org/apache/hadoop/yarn/submarine/client/cli/CliUtils.java
@@ -39,17 +39,9 @@ public class CliUtils {
   public static String replacePatternsInLaunchCommand(String specifiedCli,
       RunJobParameters jobRunParameters,
       RemoteDirectoryManager directoryManager) throws IOException {
-    String jobDir = jobRunParameters.getCheckpointPath();
-    if (null == jobDir) {
-      jobDir = directoryManager.getJobCheckpointDir(jobRunParameters.getName(),
-          true).toString();
-    }
-
     String input = jobRunParameters.getInputPath();
+    String jobDir = jobRunParameters.getCheckpointPath();
     String savedModelDir = jobRunParameters.getSavedModelPath();
-    if (null == savedModelDir) {
-      savedModelDir = jobDir;
-    }
 
     Map<String, String> replacePattern = new HashMap<>();
     if (jobDir != null) {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/1824d5d1/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-submarine/src/main/java/org/apache/hadoop/yarn/submarine/client/cli/RunJobCli.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-submarine/src/main/java/org/apache/hadoop/yarn/submarine/client/cli/RunJobCli.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-submarine/src/main/java/org/apache/hadoop/yarn/submarine/client/cli/RunJobCli.java
index d7dfc0d..faa22d3 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-submarine/src/main/java/org/apache/hadoop/yarn/submarine/client/cli/RunJobCli.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-submarine/src/main/java/org/apache/hadoop/yarn/submarine/client/cli/RunJobCli.java
@@ -89,8 +89,16 @@ public class RunJobCli extends AbstractCli {
     options.addOption(CliConstants.DOCKER_IMAGE, true, "Docker image name/tag");
     options.addOption(CliConstants.QUEUE, true,
         "Name of queue to run the job, by default it uses default queue");
-    options.addOption(CliConstants.TENSORBOARD, true,
-        "Should we run TensorBoard" + " for this job? By default it's true");
+    options.addOption(CliConstants.TENSORBOARD, false,
+        "Should we run TensorBoard"
+            + " for this job? By default it's disabled");
+    options.addOption(CliConstants.TENSORBOARD_RESOURCES, true,
+        "Specify resources of Tensorboard, by default it is "
+            + CliConstants.TENSORBOARD_DEFAULT_RESOURCES);
+    options.addOption(CliConstants.TENSORBOARD_DOCKER_IMAGE, true,
+        "Specify Tensorboard docker image. When this is not "
+            + "specified, Tensorboard " + "uses --" + CliConstants.DOCKER_IMAGE
+            + " as default.");
     options.addOption(CliConstants.WORKER_LAUNCH_CMD, true,
         "Commandline of worker, arguments will be "
             + "directly used to launch the worker");
@@ -144,10 +152,39 @@ public class RunJobCli extends AbstractCli {
       throw e;
     }
 
+    // Set default job dir / saved model dir, etc.
+    setDefaultDirs();
+
     // replace patterns
     replacePatternsInParameters();
   }
 
+  private void setDefaultDirs() throws IOException {
+    // Create directories if needed
+    String jobDir = parameters.getCheckpointPath();
+    if (null == jobDir) {
+      if (parameters.getNumWorkers() > 0) {
+        jobDir = clientContext.getRemoteDirectoryManager().getJobCheckpointDir(
+            parameters.getName(), true).toString();
+      } else {
+        // when #workers == 0, it means we only launch TB. In that case,
+        // point job dir to root dir so all jobs' metrics will be shown.
+        jobDir = clientContext.getRemoteDirectoryManager().getUserRootFolder()
+            .toString();
+      }
+      parameters.setCheckpointPath(jobDir);
+    }
+
+    if (parameters.getNumWorkers() > 0) {
+      // Only do this when #worker > 0
+      String savedModelDir = parameters.getSavedModelPath();
+      if (null == savedModelDir) {
+        savedModelDir = jobDir;
+        parameters.setSavedModelPath(savedModelDir);
+      }
+    }
+  }
+
   private void storeJobInformation(String jobName, ApplicationId applicationId,
       String[] args) throws IOException {
     Map<String, String> jobInfo = new HashMap<>();
@@ -198,7 +235,7 @@ public class RunJobCli extends AbstractCli {
   }
 
   @VisibleForTesting
-  RunJobParameters getRunJobParameters() {
+  public RunJobParameters getRunJobParameters() {
     return parameters;
   }
 }
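
Put together, the new flags might be exercised like this; a hypothetical invocation, since the exact jar name and job syntax are defined elsewhere in the submarine docs:

    yarn jar hadoop-yarn-submarine-<version>.jar job run \
      --name tb-only-001 \
      --docker_image tf-1.8.0:latest \
      --num_workers 0 \
      --tensorboard \
      --tensorboard_resources memory=2G,vcores=1 \
      --tensorboard_docker_image tb-1.8.0:latest

With --num_workers 0, setDefaultDirs() points the checkpoint path at the user root folder, so a standalone Tensorboard shows metrics across all of the user's jobs.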

http://git-wip-us.apache.org/repos/asf/hadoop/blob/1824d5d1/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-submarine/src/main/java/org/apache/hadoop/yarn/submarine/client/cli/param/RunJobParameters.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-submarine/src/main/java/org/apache/hadoop/yarn/submarine/client/cli/param/RunJobParameters.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-submarine/src/main/java/org/apache/hadoop/yarn/submarine/client/cli/param/RunJobParameters.java
index 6cab9e3..4558f6a 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-submarine/src/main/java/org/apache/hadoop/yarn/submarine/client/cli/param/RunJobParameters.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-submarine/src/main/java/org/apache/hadoop/yarn/submarine/client/cli/param/RunJobParameters.java
@@ -37,6 +37,8 @@ public class RunJobParameters extends RunParameters {
   private Resource workerResource;
   private Resource psResource;
   private boolean tensorboardEnabled;
+  private Resource tensorboardResource;
+  private String tensorboardDockerImage;
   private String workerLaunchCmd;
   private String psLaunchCmd;
 
@@ -69,19 +71,23 @@ public class RunJobParameters extends RunParameters {
     // When distributed training is required
     if (nWorkers >= 2 && nPS > 0) {
       distributed = true;
-    } else if (nWorkers == 1 && nPS > 0) {
+    } else if (nWorkers <= 1 && nPS > 0) {
       throw new ParseException("Only specified one worker but non-zero PS, "
           + "please double check.");
     }
 
-    String workerResourceStr = parsedCommandLine.getOptionValue(
-        CliConstants.WORKER_RES);
-    if (workerResourceStr == null) {
-      throw new ParseException("--" + CliConstants.WORKER_RES + " is absent.");
+    workerResource = null;
+    if (nWorkers > 0) {
+      String workerResourceStr = parsedCommandLine.getOptionValue(
+          CliConstants.WORKER_RES);
+      if (workerResourceStr == null) {
+        throw new ParseException(
+            "--" + CliConstants.WORKER_RES + " is absent.");
+      }
+      workerResource = CliUtils.createResourceFromString(
+          workerResourceStr,
+          clientContext.getOrCreateYarnClient().getResourceTypeInfo());
     }
-    Resource workerResource = CliUtils.createResourceFromString(
-        workerResourceStr,
-        clientContext.getOrCreateYarnClient().getResourceTypeInfo());
 
     Resource psResource = null;
     if (nPS > 0) {
@@ -94,9 +100,19 @@ public class RunJobParameters extends RunParameters {
     }
 
     boolean tensorboard = false;
-    if (parsedCommandLine.getOptionValue(CliConstants.TENSORBOARD) != null) {
-      tensorboard = Boolean.parseBoolean(
-          parsedCommandLine.getOptionValue(CliConstants.TENSORBOARD));
+    if (parsedCommandLine.hasOption(CliConstants.TENSORBOARD)) {
+      tensorboard = true;
+      String tensorboardResourceStr = parsedCommandLine.getOptionValue(
+          CliConstants.TENSORBOARD_RESOURCES);
+      if (tensorboardResourceStr == null || tensorboardResourceStr.isEmpty()) {
+        tensorboardResourceStr = CliConstants.TENSORBOARD_DEFAULT_RESOURCES;
+      }
+      tensorboardResource = CliUtils.createResourceFromString(
+          tensorboardResourceStr,
+          clientContext.getOrCreateYarnClient().getResourceTypeInfo());
+      tensorboardDockerImage = parsedCommandLine.getOptionValue(
+          CliConstants.TENSORBOARD_DOCKER_IMAGE);
+      this.setTensorboardResource(tensorboardResource);
     }
 
     if (parsedCommandLine.hasOption(CliConstants.WAIT_JOB_FINISH)) {
@@ -115,7 +131,7 @@ public class RunJobParameters extends RunParameters {
 
     this.setInputPath(input).setCheckpointPath(jobDir).setNumPS(nPS).setNumWorkers(nWorkers)
         .setPSLaunchCmd(psLaunchCommand).setWorkerLaunchCmd(workerLaunchCmd)
-        .setPsResource(psResource).setWorkerResource(workerResource)
+        .setPsResource(psResource)
         .setTensorboardEnabled(tensorboard);
 
     super.updateParametersByParsedCommandline(parsedCommandLine,
@@ -219,4 +235,16 @@ public class RunJobParameters extends RunParameters {
   public boolean isDistributed() {
     return distributed;
   }
+
+  public Resource getTensorboardResource() {
+    return tensorboardResource;
+  }
+
+  public void setTensorboardResource(Resource tensorboardResource) {
+    this.tensorboardResource = tensorboardResource;
+  }
+
+  public String getTensorboardDockerImage() {
+    return tensorboardDockerImage;
+  }
 }
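
The flag-parsing shape used above, reduced to a standalone commons-cli sketch with shortened option names; the resource string is left unparsed, ParseException handling is elided, and GnuParser is used for compatibility with the commons-cli version Hadoop bundles:

    // Sketch: a boolean flag with an optional companion value.
    Options options = new Options();
    options.addOption("tensorboard", false, "run TensorBoard for this job");
    options.addOption("tensorboard_resources", true, "TensorBoard resources");

    CommandLine cmd = new GnuParser().parse(options, args);
    if (cmd.hasOption("tensorboard")) {
      String res = cmd.getOptionValue("tensorboard_resources");
      if (res == null || res.isEmpty()) {
        res = "memory=4G,vcores=1";  // mirrors TENSORBOARD_DEFAULT_RESOURCES
      }
      // hand 'res' to the resource parser...
    }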

http://git-wip-us.apache.org/repos/asf/hadoop/blob/1824d5d1/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-submarine/src/main/java/org/apache/hadoop/yarn/submarine/common/fs/DefaultRemoteDirectoryManager.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-submarine/src/main/java/org/apache/hadoop/yarn/submarine/common/fs/DefaultRemoteDirectoryManager.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-submarine/src/main/java/org/apache/hadoop/yarn/submarine/common/fs/DefaultRemoteDirectoryManager.java
index fe8956a..b2e2b41 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-submarine/src/main/java/org/apache/hadoop/yarn/submarine/common/fs/DefaultRemoteDirectoryManager.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-submarine/src/main/java/org/apache/hadoop/yarn/submarine/common/fs/DefaultRemoteDirectoryManager.java
@@ -14,6 +14,7 @@
 
 package org.apache.hadoop.yarn.submarine.common.fs;
 
+import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.yarn.submarine.client.cli.CliConstants;
@@ -42,7 +43,10 @@ public class DefaultRemoteDirectoryManager implements RemoteDirectoryManager {
     if (create) {
       createFolderIfNotExist(staging);
     }
-    return staging;
+
+    // Get a file status to make sure it is an absolute path.
+    FileStatus fStatus = fs.getFileStatus(staging);
+    return fStatus.getPath();
   }
 
   @Override
@@ -70,8 +74,21 @@ public class DefaultRemoteDirectoryManager implements RemoteDirectoryManager {
     return fs;
   }
 
+  @Override
+  public Path getUserRootFolder() throws IOException {
+    Path rootPath = new Path("submarine", "jobs");
+    createFolderIfNotExist(rootPath);
+    // Get a file status to make sure it is an absolute path.
+    FileStatus fStatus = fs.getFileStatus(rootPath);
+    return fStatus.getPath();
+  }
+
   private Path getJobRootFolder(String jobName) throws IOException {
-    return new Path(new Path("submarine", "jobs"), jobName);
+    Path jobRootPath = getUserRootFolder();
+    createFolderIfNotExist(jobRootPath);
+    // Get a file status to make sure it is an absolute path.
+    FileStatus fStatus = fs.getFileStatus(jobRootPath);
+    return fStatus.getPath();
   }
 
   private void createFolderIfNotExist(Path path) throws IOException {
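
The repeated getFileStatus calls exist to turn a relative Path such as submarine/jobs into a fully qualified one; the same trick in isolation, assuming a live FileSystem handle fs:

    // Sketch: normalizing a relative Path to its fully qualified form.
    Path relative = new Path("submarine", "jobs");   // relative to the user's home dir
    FileStatus status = fs.getFileStatus(relative);  // resolves scheme, authority, home
    Path absolute = status.getPath();                // e.g. hdfs://nn:8020/user/alice/submarine/jobs

fs.makeQualified(relative) would arguably achieve the same without a namenode round trip, at the cost of not verifying that the directory exists.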

http://git-wip-us.apache.org/repos/asf/hadoop/blob/1824d5d1/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-submarine/src/main/java/org/apache/hadoop/yarn/submarine/common/fs/RemoteDirectoryManager.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-submarine/src/main/java/org/apache/hadoop/yarn/submarine/common/fs/RemoteDirectoryManager.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-submarine/src/main/java/org/apache/hadoop/yarn/submarine/common/fs/RemoteDirectoryManager.java
index 132b314..ad0d428 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-submarine/src/main/java/org/apache/hadoop/yarn/submarine/common/fs/RemoteDirectoryManager.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-submarine/src/main/java/org/apache/hadoop/yarn/submarine/common/fs/RemoteDirectoryManager.java
@@ -27,4 +27,6 @@ public interface RemoteDirectoryManager {
   Path getModelDir(String modelName, boolean create) throws IOException;
 
   FileSystem getFileSystem() throws IOException;
-}
+
+  Path getUserRootFolder() throws IOException;
+}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hadoop/blob/1824d5d1/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-submarine/src/main/java/org/apache/hadoop/yarn/submarine/runtimes/common/FSBasedSubmarineStorageImpl.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-submarine/src/main/java/org/apache/hadoop/yarn/submarine/runtimes/common/FSBasedSubmarineStorageImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-submarine/src/main/java/org/apache/hadoop/yarn/submarine/runtimes/common/FSBasedSubmarineStorageImpl.java
index ebf9581..767fe78 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-submarine/src/main/java/org/apache/hadoop/yarn/submarine/runtimes/common/FSBasedSubmarineStorageImpl.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-submarine/src/main/java/org/apache/hadoop/yarn/submarine/runtimes/common/FSBasedSubmarineStorageImpl.java
@@ -32,11 +32,9 @@ import java.util.Map;
  * A super naive FS-based storage.
  */
 public class FSBasedSubmarineStorageImpl extends SubmarineStorage {
-  ClientContext clientContext;
   RemoteDirectoryManager rdm;
 
   public FSBasedSubmarineStorageImpl(ClientContext clientContext) {
-    this.clientContext = clientContext;
     rdm = clientContext.getRemoteDirectoryManager();
   }
 
@@ -89,7 +87,7 @@ public class FSBasedSubmarineStorageImpl extends SubmarineStorage {
   private Map<String, String> deserializeMap(FSDataInputStream fis)
       throws IOException {
     ObjectInput oi = new ObjectInputStream(fis);
-    Map<String, String> newMap = null;
+    Map<String, String> newMap;
     try {
       newMap = (Map<String, String>) oi.readObject();
     } catch (ClassNotFoundException e) {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/1824d5d1/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-submarine/src/main/java/org/apache/hadoop/yarn/submarine/runtimes/yarnservice/YarnServiceJobSubmitter.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-submarine/src/main/java/org/apache/hadoop/yarn/submarine/runtimes/yarnservice/YarnServiceJobSubmitter.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-submarine/src/main/java/org/apache/hadoop/yarn/submarine/runtimes/yarnservice/YarnServiceJobSubmitter.java
index a2a2067..8fb213f 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-submarine/src/main/java/org/apache/hadoop/yarn/submarine/runtimes/yarnservice/YarnServiceJobSubmitter.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-submarine/src/main/java/org/apache/hadoop/yarn/submarine/runtimes/yarnservice/YarnServiceJobSubmitter.java
@@ -15,6 +15,7 @@
 package org.apache.hadoop.yarn.submarine.runtimes.yarnservice;
 
 import com.google.common.annotations.VisibleForTesting;
+import com.google.common.collect.ImmutableMap;
 import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
@@ -59,6 +60,10 @@ public class YarnServiceJobSubmitter implements JobSubmitter {
   Service serviceSpec;
   private Set<Path> uploadedFiles = new HashSet<>();
 
+  // Used by testing
+  private Map<String, String> componentToLocalLaunchScriptPath =
+      new HashMap<>();
+
   public YarnServiceJobSubmitter(ClientContext clientContext) {
     this.clientContext = clientContext;
   }
@@ -186,6 +191,14 @@ public class YarnServiceJobSubmitter implements JobSubmitter {
     envs.put(Envs.TASK_TYPE_ENV, taskType.name());
   }
 
+  private String getUserName() {
+    return System.getProperty("user.name");
+  }
+
+  private String getDNSDomain() {
+    return clientContext.getYarnConfig().get("hadoop.registry.dns.domain-name");
+  }
+
   /*
   * Generate a command launch script on local disk, returns path to the script
    */
@@ -194,50 +207,48 @@ public class YarnServiceJobSubmitter implements JobSubmitter {
     File file = File.createTempFile(taskType.name() + "-launch-script", ".sh");
     FileWriter fw = new FileWriter(file);
 
-    fw.append("#!/bin/bash\n");
-
-    addHdfsClassPathIfNeeded(parameters, fw, comp);
-
-    // For primary_worker
-    if (taskType == TaskType.PRIMARY_WORKER) {
-      // Do we need tensorboard?
-      if (parameters.isTensorboardEnabled()) {
-        int tensorboardPort = 6006;
-        // Run tensorboard at the background
-        fw.append(
-            "tensorboard --port " + tensorboardPort + " --logdir " + parameters
-                .getCheckpointPath() + " &\n");
-      }
-    }
-
-    // When distributed training is required
-    if (parameters.isDistributed()) {
-      // Generated TF_CONFIG
-      String tfConfigEnv = YarnServiceUtils.getTFConfigEnv(
-          taskType.getComponentName(), parameters.getNumWorkers(),
-          parameters.getNumPS(), parameters.getName(),
-          System.getProperty("user.name"),
-          clientContext.getYarnConfig().get("hadoop.registry.dns.domain-name"));
-      fw.append("export TF_CONFIG=\"" + tfConfigEnv + "\"\n");
-    }
+    try {
+      fw.append("#!/bin/bash\n");
 
-    // Print launch command
-    if (taskType.equals(TaskType.WORKER) || taskType.equals(
-        TaskType.PRIMARY_WORKER)) {
-      fw.append(parameters.getWorkerLaunchCmd() + '\n');
+      addHdfsClassPathIfNeeded(parameters, fw, comp);
 
-      if (SubmarineLogs.isVerbose()) {
-        LOG.info("Worker command =[" + parameters.getWorkerLaunchCmd() + "]");
-      }
-    } else if (taskType.equals(TaskType.PS)) {
-      fw.append(parameters.getPSLaunchCmd() + '\n');
+      if (taskType.equals(TaskType.TENSORBOARD)) {
+        String tbCommand =
+            "export LC_ALL=C && tensorboard --logdir=" + parameters
+                .getCheckpointPath();
+        fw.append(tbCommand + "\n");
+        LOG.info("Tensorboard command=" + tbCommand);
+      } else {
+        // When distributed training is required
+        if (parameters.isDistributed()) {
+          // Generated TF_CONFIG
+          String tfConfigEnv = YarnServiceUtils.getTFConfigEnv(
+              taskType.getComponentName(), parameters.getNumWorkers(),
+              parameters.getNumPS(), parameters.getName(), getUserName(),
+              getDNSDomain());
+          fw.append("export TF_CONFIG=\"" + tfConfigEnv + "\"\n");
+        }
 
-      if (SubmarineLogs.isVerbose()) {
-        LOG.info("PS command =[" + parameters.getPSLaunchCmd() + "]");
+        // Print launch command
+        if (taskType.equals(TaskType.WORKER) || taskType.equals(
+            TaskType.PRIMARY_WORKER)) {
+          fw.append(parameters.getWorkerLaunchCmd() + '\n');
+
+          if (SubmarineLogs.isVerbose()) {
+            LOG.info(
+                "Worker command =[" + parameters.getWorkerLaunchCmd() + "]");
+          }
+        } else if (taskType.equals(TaskType.PS)) {
+          fw.append(parameters.getPSLaunchCmd() + '\n');
+
+          if (SubmarineLogs.isVerbose()) {
+            LOG.info("PS command =[" + parameters.getPSLaunchCmd() + "]");
+          }
+        }
       }
+    } finally {
+      fw.close();
     }
-
-    fw.close();
     return file.getAbsolutePath();
   }
 
@@ -320,6 +331,8 @@ public class YarnServiceJobSubmitter implements JobSubmitter {
         destScriptFileName, component);
 
     component.setLaunchCommand("./" + destScriptFileName);
+    componentToLocalLaunchScriptPath.put(taskType.getComponentName(),
+        localScriptFile);
   }
 
   private void addWorkerComponent(Service service,
@@ -410,6 +423,7 @@ public class YarnServiceJobSubmitter implements JobSubmitter {
 
   private Service createServiceByParameters(RunJobParameters parameters)
       throws IOException {
+    componentToLocalLaunchScriptPath.clear();
     Service service = new Service();
     service.setName(parameters.getName());
     service.setVersion(String.valueOf(System.currentTimeMillis()));
@@ -417,7 +431,9 @@ public class YarnServiceJobSubmitter implements JobSubmitter {
 
     handleServiceEnvs(service, parameters);
 
-    addWorkerComponents(service, parameters);
+    if (parameters.getNumWorkers() > 0) {
+      addWorkerComponents(service, parameters);
+    }
 
     if (parameters.getNumPS() > 0) {
       Component psComponent = new Component();
@@ -436,6 +452,31 @@ public class YarnServiceJobSubmitter implements JobSubmitter {
       handleLaunchCommand(parameters, TaskType.PS, psComponent);
       service.addComponent(psComponent);
     }
+
+    if (parameters.isTensorboardEnabled()) {
+      Component tbComponent = new Component();
+      tbComponent.setName(TaskType.TENSORBOARD.getComponentName());
+      addCommonEnvironments(tbComponent, TaskType.TENSORBOARD);
+      tbComponent.setNumberOfContainers(1L);
+      tbComponent.setRestartPolicy(Component.RestartPolicyEnum.NEVER);
+      tbComponent.setResource(getServiceResourceFromYarnResource(
+          parameters.getTensorboardResource()));
+      if (parameters.getTensorboardDockerImage() != null) {
+        tbComponent.setArtifact(
+            getDockerArtifact(parameters.getTensorboardDockerImage()));
+      }
+
+      handleLaunchCommand(parameters, TaskType.TENSORBOARD, tbComponent);
+
+      // Add tensorboard to quicklink
+      String tensorboardLink = "http://" + YarnServiceUtils.getDNSName(
+          parameters.getName(), TaskType.TENSORBOARD.getComponentName(), 0,
+          getUserName(), getDNSDomain(), 6006);
+      LOG.info("Link to tensorboard:" + tensorboardLink);
+      service.addComponent(tbComponent);
+      service.setQuicklinks(ImmutableMap.of("Tensorboard", tensorboardLink));
+    }
+
     return service;
   }
 
@@ -458,4 +499,9 @@ public class YarnServiceJobSubmitter implements JobSubmitter {
   public Service getServiceSpec() {
     return serviceSpec;
   }
+
+  @VisibleForTesting
+  public Map<String, String> getComponentToLocalLaunchScriptPath() {
+    return componentToLocalLaunchScriptPath;
+  }
 }

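One note on the launch-script writer change above: wrapping the body in try/finally guarantees the FileWriter is closed even when script generation throws partway through. A try-with-resources form gives the same guarantee; a minimal sketch with a hypothetical helper (writeLaunchScript is illustrative only, not part of the patch):

    import java.io.File;
    import java.io.FileWriter;
    import java.io.IOException;

    final class LaunchScripts {
      // Hypothetical helper: same close-on-all-paths behavior as the
      // try/finally in the patch, expressed with try-with-resources.
      static String writeLaunchScript(File file, String body)
          throws IOException {
        try (FileWriter fw = new FileWriter(file)) {
          fw.append("#!/bin/bash\n");
          fw.append(body);
        } // fw is closed here even if append() throws
        return file.getAbsolutePath();
      }
    }
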
http://git-wip-us.apache.org/repos/asf/hadoop/blob/1824d5d1/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-submarine/src/main/java/org/apache/hadoop/yarn/submarine/runtimes/yarnservice/YarnServiceUtils.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-submarine/src/main/java/org/apache/hadoop/yarn/submarine/runtimes/yarnservice/YarnServiceUtils.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-submarine/src/main/java/org/apache/hadoop/yarn/submarine/runtimes/yarnservice/YarnServiceUtils.java
index f7ecc97..9238a67 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-submarine/src/main/java/org/apache/hadoop/yarn/submarine/runtimes/yarnservice/YarnServiceUtils.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-submarine/src/main/java/org/apache/hadoop/yarn/submarine/runtimes/yarnservice/YarnServiceUtils.java
@@ -40,10 +40,23 @@ public class YarnServiceUtils {
     YarnServiceUtils.stubServiceClient = stubServiceClient;
   }
 
+  public static String getDNSName(String serviceName, String componentName,
+      int index, String userName, String domain, int port) {
+    return componentName + "-" + index + getDNSNameCommonSuffix(serviceName,
+        userName, domain, port);
+  }
+
+  private static String getDNSNameCommonSuffix(String serviceName,
+      String userName, String domain, int port) {
+    String commonEndpointSuffix =
+        "." + serviceName + "." + userName + "." + domain + ":" + port;
+    return commonEndpointSuffix;
+  }
+
   public static String getTFConfigEnv(String curCommponentName, int nWorkers,
       int nPs, String serviceName, String userName, String domain) {
-    String commonEndpointSuffix =
-        "." + serviceName + "." + userName + "." + domain + ":8000";
+    String commonEndpointSuffix = getDNSNameCommonSuffix(serviceName, userName,
+        domain, 8000);
 
     String json = "{\\\"cluster\\\":{";
 
@@ -58,7 +71,14 @@ public class YarnServiceUtils {
             + " \\\"index\\\":" + '$' + Envs.TASK_INDEX_ENV + "},";
     String environment = "\\\"environment\\\":\\\"cloud\\\"}";
 
-    return json + master + worker + ps + task + environment;
+    StringBuilder sb = new StringBuilder();
+    sb.append(json);
+    sb.append(master);
+    sb.append(worker);
+    sb.append(ps);
+    sb.append(task);
+    sb.append(environment);
+    return sb.toString();
   }
 
   private static String getComponentArrayJson(String componentName, int count,

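The new getDNSName helper shares its suffix logic with getTFConfigEnv, so the Tensorboard quicklink and the TF_CONFIG endpoints are built the same way. For orientation, a minimal sketch of the resulting name (service, user, and domain values are made up; the real component name comes from TaskType.TENSORBOARD.getComponentName()):

    // Hypothetical values for illustration only.
    String link = "http://" + YarnServiceUtils.getDNSName(
        "my-job", "tensorboard", 0, "alice", "example.com", 6006);
    // link == "http://tensorboard-0.my-job.alice.example.com:6006"
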
http://git-wip-us.apache.org/repos/asf/hadoop/blob/1824d5d1/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-submarine/src/test/java/org/apache/hadoop/yarn/submarine/client/cli/yarnservice/TestYarnServiceRunJobCli.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-submarine/src/test/java/org/apache/hadoop/yarn/submarine/client/cli/yarnservice/TestYarnServiceRunJobCli.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-submarine/src/test/java/org/apache/hadoop/yarn/submarine/client/cli/yarnservice/TestYarnServiceRunJobCli.java
index e1756b8..a88d673 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-submarine/src/test/java/org/apache/hadoop/yarn/submarine/client/cli/yarnservice/TestYarnServiceRunJobCli.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-submarine/src/test/java/org/apache/hadoop/yarn/submarine/client/cli/yarnservice/TestYarnServiceRunJobCli.java
@@ -19,6 +19,7 @@
 package org.apache.hadoop.yarn.submarine.client.cli.yarnservice;
 
 import org.apache.hadoop.yarn.api.records.ApplicationId;
+import org.apache.hadoop.yarn.api.records.Resource;
 import org.apache.hadoop.yarn.exceptions.YarnException;
 import org.apache.hadoop.yarn.service.api.records.Component;
 import org.apache.hadoop.yarn.service.api.records.Service;
@@ -32,11 +33,15 @@ import org.apache.hadoop.yarn.submarine.runtimes.common.StorageKeyConstants;
 import org.apache.hadoop.yarn.submarine.runtimes.common.SubmarineStorage;
 import org.apache.hadoop.yarn.submarine.runtimes.yarnservice.YarnServiceJobSubmitter;
 import org.apache.hadoop.yarn.submarine.runtimes.yarnservice.YarnServiceUtils;
+import org.apache.hadoop.yarn.util.resource.Resources;
 import org.junit.Assert;
 import org.junit.Before;
 import org.junit.Test;
 
 import java.io.IOException;
+import java.nio.charset.Charset;
+import java.nio.file.Files;
+import java.nio.file.Paths;
 import java.util.Map;
 
 import static org.mockito.Matchers.any;
@@ -65,25 +70,8 @@ public class TestYarnServiceRunJobCli {
     return ((YarnServiceJobSubmitter) jobSubmitter).getServiceSpec();
   }
 
-  @Test
-  public void testBasicRunJobForDistributedTraining() throws Exception {
-    MockClientContext mockClientContext =
-        YarnServiceCliTestUtils.getMockClientContext();
-    RunJobCli runJobCli = new RunJobCli(mockClientContext);
-    Assert.assertFalse(SubmarineLogs.isVerbose());
-
-    runJobCli.run(
-        new String[] { "--name", "my-job", "--docker_image", "tf-docker:1.1.0",
-            "--input_path", "s3://input", "--checkpoint_path",
-            "s3://output", "--num_workers", "3", "--num_ps", "2",
-            "--worker_launch_cmd", "python run-job.py", "--worker_resources",
-            "memory=2048M,vcores=2", "--ps_resources", "memory=4096M,vcores=4",
-            "--tensorboard", "true", "--ps_docker_image", "ps.image",
-            "--worker_docker_image", "worker.image",
-            "--ps_launch_cmd", "python run-ps.py", "--verbose" });
-    Service serviceSpec = getServiceSpecFromJobSubmitter(
-        runJobCli.getJobSubmitter());
-    Assert.assertEquals(3, serviceSpec.getComponents().size());
+  private void commonVerifyDistributedTrainingSpec(Service serviceSpec)
+      throws Exception {
     Assert.assertTrue(
         serviceSpec.getComponent(TaskType.WORKER.getComponentName()) != null);
     Assert.assertTrue(
@@ -98,7 +86,7 @@ public class TestYarnServiceRunJobCli {
         primaryWorkerComp.getResource().getCpus().intValue());
 
     Component workerComp = serviceSpec.getComponent(
-       TaskType.WORKER.getComponentName());
+        TaskType.WORKER.getComponentName());
     Assert.assertEquals(2048, workerComp.getResource().calcMemoryMB());
     Assert.assertEquals(2, workerComp.getResource().getCpus().intValue());
 
@@ -110,8 +98,55 @@ public class TestYarnServiceRunJobCli {
     Assert.assertEquals("ps.image", psComp.getArtifact().getId());
 
     Assert.assertTrue(SubmarineLogs.isVerbose());
+  }
+
+  @Test
+  public void testBasicRunJobForDistributedTraining() throws Exception {
+    MockClientContext mockClientContext =
+        YarnServiceCliTestUtils.getMockClientContext();
+    RunJobCli runJobCli = new RunJobCli(mockClientContext);
+    Assert.assertFalse(SubmarineLogs.isVerbose());
 
-    // TODO, ADD TEST TO USE SERVICE CLIENT TO VALIDATE THE JSON SPEC
+    runJobCli.run(
+        new String[] { "--name", "my-job", "--docker_image", "tf-docker:1.1.0",
+            "--input_path", "s3://input", "--checkpoint_path", "s3://output",
+            "--num_workers", "3", "--num_ps", "2", "--worker_launch_cmd",
+            "python run-job.py", "--worker_resources", "memory=2048M,vcores=2",
+            "--ps_resources", "memory=4096M,vcores=4", "--ps_docker_image",
+            "ps.image", "--worker_docker_image", "worker.image",
+            "--ps_launch_cmd", "python run-ps.py", "--verbose" });
+    Service serviceSpec = getServiceSpecFromJobSubmitter(
+        runJobCli.getJobSubmitter());
+    Assert.assertEquals(3, serviceSpec.getComponents().size());
+
+    commonVerifyDistributedTrainingSpec(serviceSpec);
+  }
+
+  @Test
+  public void testBasicRunJobForDistributedTrainingWithTensorboard()
+      throws Exception {
+    MockClientContext mockClientContext =
+        YarnServiceCliTestUtils.getMockClientContext();
+    RunJobCli runJobCli = new RunJobCli(mockClientContext);
+    Assert.assertFalse(SubmarineLogs.isVerbose());
+
+    runJobCli.run(
+        new String[] { "--name", "my-job", "--docker_image", "tf-docker:1.1.0",
+            "--input_path", "s3://input", "--checkpoint_path", "s3://output",
+            "--num_workers", "3", "--num_ps", "2", "--worker_launch_cmd",
+            "python run-job.py", "--worker_resources", "memory=2048M,vcores=2",
+            "--ps_resources", "memory=4096M,vcores=4", "--ps_docker_image",
+            "ps.image", "--worker_docker_image", "worker.image",
+            "--tensorboard", "--ps_launch_cmd", "python run-ps.py",
+            "--verbose" });
+    Service serviceSpec = getServiceSpecFromJobSubmitter(
+        runJobCli.getJobSubmitter());
+    Assert.assertEquals(4, serviceSpec.getComponents().size());
+
+    commonVerifyDistributedTrainingSpec(serviceSpec);
+
+    verifyTensorboardComponent(runJobCli, serviceSpec,
+        Resources.createResource(4096, 1));
   }
 
   @Test
@@ -123,13 +158,84 @@ public class TestYarnServiceRunJobCli {
 
     runJobCli.run(
         new String[] { "--name", "my-job", "--docker_image", "tf-docker:1.1.0",
-            "--input_path", "s3://input", "--checkpoint_path",
-            "s3://output", "--num_workers", "1", "--worker_launch_cmd",
-            "python run-job.py", "--worker_resources", "memory=2G,vcores=2",
-            "--tensorboard", "true", "--verbose" });
+            "--input_path", "s3://input", "--checkpoint_path", "s3://output",
+            "--num_workers", "1", "--worker_launch_cmd", "python run-job.py",
+            "--worker_resources", "memory=2G,vcores=2", "--verbose" });
+
+    Service serviceSpec = getServiceSpecFromJobSubmitter(
+        runJobCli.getJobSubmitter());
+    Assert.assertEquals(1, serviceSpec.getComponents().size());
+
+    commonTestSingleNodeTraining(serviceSpec);
+  }
+
+  @Test
+  public void testTensorboardOnlyService() throws Exception {
+    MockClientContext mockClientContext =
+        YarnServiceCliTestUtils.getMockClientContext();
+    RunJobCli runJobCli = new RunJobCli(mockClientContext);
+    Assert.assertFalse(SubmarineLogs.isVerbose());
+
+    runJobCli.run(
+        new String[] { "--name", "my-job", "--docker_image", "tf-docker:1.1.0",
+            "--input_path", "s3://input", "--checkpoint_path", "s3://output",
+            "--num_workers", "0", "--tensorboard", "--verbose" });
+
+    Service serviceSpec = getServiceSpecFromJobSubmitter(
+        runJobCli.getJobSubmitter());
+    Assert.assertEquals(1, serviceSpec.getComponents().size());
+
+    verifyTensorboardComponent(runJobCli, serviceSpec,
+        Resources.createResource(4096, 1));
+  }
+
+  @Test
+  public void testTensorboardOnlyServiceWithCustomizedDockerImageAndResourceCkptPath()
+      throws Exception {
+    MockClientContext mockClientContext =
+        YarnServiceCliTestUtils.getMockClientContext();
+    RunJobCli runJobCli = new RunJobCli(mockClientContext);
+    Assert.assertFalse(SubmarineLogs.isVerbose());
+
+    runJobCli.run(
+        new String[] { "--name", "my-job", "--docker_image", "tf-docker:1.1.0",
+            "--input_path", "s3://input", "--checkpoint_path", "s3://output",
+            "--num_workers", "0", "--tensorboard", "--verbose",
+            "--tensorboard_resources", "memory=2G,vcores=2",
+            "--tensorboard_docker_image", "tb_docker_image:001" });
+
+    Service serviceSpec = getServiceSpecFromJobSubmitter(
+        runJobCli.getJobSubmitter());
+    Assert.assertEquals(1, serviceSpec.getComponents().size());
+
+    verifyTensorboardComponent(runJobCli, serviceSpec,
+        Resources.createResource(2048, 2));
+  }
+
+  @Test
+  public void testTensorboardOnlyServiceWithCustomizedDockerImageAndResource()
+      throws Exception {
+    MockClientContext mockClientContext =
+        YarnServiceCliTestUtils.getMockClientContext();
+    RunJobCli runJobCli = new RunJobCli(mockClientContext);
+    Assert.assertFalse(SubmarineLogs.isVerbose());
+
+    runJobCli.run(
+        new String[] { "--name", "my-job", "--docker_image", "tf-docker:1.1.0",
+            "--num_workers", "0", "--tensorboard", "--verbose",
+            "--tensorboard_resources", "memory=2G,vcores=2",
+            "--tensorboard_docker_image", "tb_docker_image:001" });
+
     Service serviceSpec = getServiceSpecFromJobSubmitter(
         runJobCli.getJobSubmitter());
     Assert.assertEquals(1, serviceSpec.getComponents().size());
+
+    verifyTensorboardComponent(runJobCli, serviceSpec,
+        Resources.createResource(2048, 2));
+  }
+
+  private void commonTestSingleNodeTraining(Service serviceSpec)
+      throws Exception {
     Assert.assertTrue(
         serviceSpec.getComponent(TaskType.PRIMARY_WORKER.getComponentName())
             != null);
@@ -140,8 +246,110 @@ public class TestYarnServiceRunJobCli {
         primaryWorkerComp.getResource().getCpus().intValue());
 
     Assert.assertTrue(SubmarineLogs.isVerbose());
+  }
+
+  private void verifyTensorboardComponent(RunJobCli runJobCli,
+      Service serviceSpec, Resource resource) throws Exception {
+    Assert.assertTrue(
+        serviceSpec.getComponent(TaskType.TENSORBOARD.getComponentName())
+            != null);
+    Component tensorboardComp = serviceSpec.getComponent(
+        TaskType.TENSORBOARD.getComponentName());
+    Assert.assertEquals(1, tensorboardComp.getNumberOfContainers().intValue());
+    Assert.assertEquals(resource.getMemorySize(),
+        tensorboardComp.getResource().calcMemoryMB());
+    Assert.assertEquals(resource.getVirtualCores(),
+        tensorboardComp.getResource().getCpus().intValue());
+
+    Assert.assertEquals("./run-TENSORBOARD.sh",
+        tensorboardComp.getLaunchCommand());
+
+    // Check docker image
+    if (runJobCli.getRunJobParameters().getTensorboardDockerImage() != null) {
+      Assert.assertEquals(
+          runJobCli.getRunJobParameters().getTensorboardDockerImage(),
+          tensorboardComp.getArtifact().getId());
+    } else {
+      Assert.assertNull(tensorboardComp.getArtifact());
+    }
+
+    YarnServiceJobSubmitter yarnServiceJobSubmitter =
+        (YarnServiceJobSubmitter) runJobCli.getJobSubmitter();
+
+    String expectedLaunchScript =
+        "#!/bin/bash\n" + "echo \"CLASSPATH:$CLASSPATH\"\n"
+            + "echo \"HADOOP_CONF_DIR:$HADOOP_CONF_DIR\"\n"
+            + "echo \"HADOOP_TOKEN_FILE_LOCATION:$HADOOP_TOKEN_FILE_LOCATION\"\n"
+            + "echo \"JAVA_HOME:$JAVA_HOME\"\n"
+            + "echo \"LD_LIBRARY_PATH:$LD_LIBRARY_PATH\"\n"
+            + "echo \"HADOOP_HDFS_HOME:$HADOOP_HDFS_HOME\"\n"
+            + "export LC_ALL=C && tensorboard --logdir=" + runJobCli
+            .getRunJobParameters().getCheckpointPath() + "\n";
+
+    verifyLaunchScriptForComponent(yarnServiceJobSubmitter, serviceSpec,
+        TaskType.TENSORBOARD, expectedLaunchScript);
+  }
+
+  private void verifyLaunchScriptForComponent(
+      YarnServiceJobSubmitter yarnServiceJobSubmitter, Service serviceSpec,
+      TaskType taskType, String expectedLaunchScriptContent) throws Exception {
+    Map<String, String> componentToLocalLaunchScriptMap =
+        yarnServiceJobSubmitter.getComponentToLocalLaunchScriptPath();
+
+    String path = componentToLocalLaunchScriptMap.get(
+        taskType.getComponentName());
+
+    byte[] encoded = Files.readAllBytes(Paths.get(path));
+    String scriptContent = new String(encoded, Charset.defaultCharset());
+
+    Assert.assertEquals(expectedLaunchScriptContent, scriptContent);
+  }
+
+  @Test
+  public void testBasicRunJobForSingleNodeTrainingWithTensorboard()
+      throws Exception {
+    MockClientContext mockClientContext =
+        YarnServiceCliTestUtils.getMockClientContext();
+    RunJobCli runJobCli = new RunJobCli(mockClientContext);
+    Assert.assertFalse(SubmarineLogs.isVerbose());
+
+    runJobCli.run(
+        new String[] { "--name", "my-job", "--docker_image", "tf-docker:1.1.0",
+            "--input_path", "s3://input", "--checkpoint_path", "s3://output",
+            "--num_workers", "1", "--worker_launch_cmd", "python run-job.py",
+            "--worker_resources", "memory=2G,vcores=2", "--tensorboard",
+            "--verbose" });
+    Service serviceSpec = getServiceSpecFromJobSubmitter(
+        runJobCli.getJobSubmitter());
+
+    Assert.assertEquals(2, serviceSpec.getComponents().size());
+
+    commonTestSingleNodeTraining(serviceSpec);
+    verifyTensorboardComponent(runJobCli, serviceSpec,
+        Resources.createResource(4096, 1));
+  }
+
+  @Test
+  public void testBasicRunJobForSingleNodeTrainingWithGeneratedCheckpoint()
+      throws Exception {
+    MockClientContext mockClientContext =
+        YarnServiceCliTestUtils.getMockClientContext();
+    RunJobCli runJobCli = new RunJobCli(mockClientContext);
+    Assert.assertFalse(SubmarineLogs.isVerbose());
+
+    runJobCli.run(
+        new String[] { "--name", "my-job", "--docker_image", "tf-docker:1.1.0",
+            "--input_path", "s3://input", "--num_workers", "1",
+            "--worker_launch_cmd", "python run-job.py", "--worker_resources",
+            "memory=2G,vcores=2", "--tensorboard", "--verbose" });
+    Service serviceSpec = getServiceSpecFromJobSubmitter(
+        runJobCli.getJobSubmitter());
+
+    Assert.assertEquals(2, serviceSpec.getComponents().size());
 
-    // TODO, ADD TEST TO USE SERVICE CLIENT TO VALIDATE THE JSON SPEC
+    commonTestSingleNodeTraining(serviceSpec);
+    verifyTensorboardComponent(runJobCli, serviceSpec,
+        Resources.createResource(4096, 1));
   }
 
   @Test
@@ -153,10 +361,10 @@ public class TestYarnServiceRunJobCli {
 
     runJobCli.run(
         new String[] { "--name", "my-job", "--docker_image", "tf-docker:1.1.0",
-            "--input_path", "s3://input", "--checkpoint_path",
-            "s3://output", "--num_workers", "1", "--worker_launch_cmd",
-            "python run-job.py", "--worker_resources", "memory=2G,vcores=2",
-            "--tensorboard", "true", "--verbose" });
+            "--input_path", "s3://input", "--checkpoint_path", "s3://output",
+            "--num_workers", "1", "--worker_launch_cmd", "python run-job.py",
+            "--worker_resources", "memory=2G,vcores=2", "--tensorboard", "true",
+            "--verbose" });
     SubmarineStorage storage =
         mockClientContext.getRuntimeFactory().getSubmarineStorage();
     Map<String, String> jobInfo = storage.getJobInfoByName("my-job");

http://git-wip-us.apache.org/repos/asf/hadoop/blob/1824d5d1/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-submarine/src/test/java/org/apache/hadoop/yarn/submarine/common/fs/MockRemoteDirectoryManager.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-submarine/src/test/java/org/apache/hadoop/yarn/submarine/common/fs/MockRemoteDirectoryManager.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-submarine/src/test/java/org/apache/hadoop/yarn/submarine/common/fs/MockRemoteDirectoryManager.java
index a195b59..b637036 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-submarine/src/test/java/org/apache/hadoop/yarn/submarine/common/fs/MockRemoteDirectoryManager.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-submarine/src/test/java/org/apache/hadoop/yarn/submarine/common/fs/MockRemoteDirectoryManager.java
@@ -53,7 +53,7 @@ public class MockRemoteDirectoryManager implements RemoteDirectoryManager {
   @Override
   public Path getJobCheckpointDir(String jobName, boolean create)
       throws IOException {
-    return null;
+    return new Path("s3://generated_checkpoint_dir");
   }
 
   @Override
@@ -80,4 +80,9 @@ public class MockRemoteDirectoryManager implements RemoteDirectoryManager {
   public FileSystem getFileSystem() throws IOException {
     return FileSystem.getLocal(new Configuration());
   }
+
+  @Override
+  public Path getUserRootFolder() throws IOException {
+    return new Path("s3://generated_root_dir");
+  }
 }


---------------------------------------------------------------------
To unsubscribe, e-mail: common-commits-unsubscribe@hadoop.apache.org
For additional commands, e-mail: common-commits-help@hadoop.apache.org


[48/50] [abbrv] hadoop git commit: HDDS-100. SCM CA: generate public/private key pair for SCM/OM/DNs. Contributed by Ajay Kumar.

Posted by xy...@apache.org.
HDDS-100. SCM CA: generate public/private key pair for SCM/OM/DNs. Contributed by Ajay Kumar.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/f8f02f28
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/f8f02f28
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/f8f02f28

Branch: refs/heads/HDDS-4
Commit: f8f02f2866728e8113dbe366f3fcf674a8630d11
Parents: 249df9a
Author: Xiaoyu Yao <xy...@apache.org>
Authored: Fri Jun 8 08:33:58 2018 -0700
Committer: Xiaoyu Yao <xy...@apache.org>
Committed: Thu Sep 20 15:13:07 2018 -0700

----------------------------------------------------------------------
 hadoop-hdds/common/pom.xml                      |   6 +-
 .../org/apache/hadoop/hdds/HddsConfigKeys.java  |  19 ++
 .../hdds/security/x509/HDDSKeyGenerator.java    |  99 ++++++++
 .../hdds/security/x509/HDDSKeyPEMWriter.java    | 254 +++++++++++++++++++
 .../hdds/security/x509/SecurityConfig.java      | 190 ++++++++++++++
 .../hadoop/hdds/security/x509/package-info.java |  25 ++
 .../common/src/main/resources/ozone-default.xml |  40 +++
 .../security/x509/TestHDDSKeyGenerator.java     |  81 ++++++
 .../security/x509/TestHDDSKeyPEMWriter.java     | 213 ++++++++++++++++
 .../ozone/TestOzoneConfigurationFields.java     |   6 +
 10 files changed, 932 insertions(+), 1 deletion(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/f8f02f28/hadoop-hdds/common/pom.xml
----------------------------------------------------------------------
diff --git a/hadoop-hdds/common/pom.xml b/hadoop-hdds/common/pom.xml
index d08a5a9..9336196 100644
--- a/hadoop-hdds/common/pom.xml
+++ b/hadoop-hdds/common/pom.xml
@@ -77,7 +77,6 @@ http://maven.apache.org/xsd/maven-4.0.0.xsd">
       <artifactId>rocksdbjni</artifactId>
       <version>5.14.2</version>
     </dependency>
-
     <dependency>
       <groupId>org.apache.hadoop</groupId>
       <artifactId>hadoop-common</artifactId>
@@ -106,6 +105,11 @@ http://maven.apache.org/xsd/maven-4.0.0.xsd">
       <version>2.6.0</version>
     </dependency>
 
+    <dependency>
+      <groupId>org.bouncycastle</groupId>
+      <artifactId>bcprov-jdk15on</artifactId>
+      <version>1.49</version>
+    </dependency>
   </dependencies>
 
   <build>

http://git-wip-us.apache.org/repos/asf/hadoop/blob/f8f02f28/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/HddsConfigKeys.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/HddsConfigKeys.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/HddsConfigKeys.java
index 856d113..adadcd2 100644
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/HddsConfigKeys.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/HddsConfigKeys.java
@@ -94,4 +94,22 @@ public final class HddsConfigKeys {
       "hdds.lock.max.concurrency";
   public static final int HDDS_LOCK_MAX_CONCURRENCY_DEFAULT = 100;
 
+  public static final String HDDS_KEY_LEN = "hdds.key.len";
+  public static final int HDDS_DEFAULT_KEY_LEN = 2048;
+  public static final String HDDS_KEY_ALGORITHM = "hdds.key.algo";
+  public static final String HDDS_DEFAULT_KEY_ALGORITHM = "RSA";
+  public static final String HDDS_SECURITY_PROVIDER = "hdds.security.provider";
+  public static final String HDDS_DEFAULT_SECURITY_PROVIDER = "BC";
+  public static final String HDDS_KEY_DIR_NAME = "hdds.key.dir.name";
+  public static final String HDDS_KEY_DIR_NAME_DEFAULT = "keys";
+
+  // TODO : Talk to StorageIO classes and see if they can return a secure
+  // storage location for each node.
+  public static final String HDDS_METADATA_DIR_NAME = "hdds.metadata.dir";
+  public static final String HDDS_PRIVATE_KEY_FILE_NAME =
+      "hdds.priv.key.file.name";
+  public static final String HDDS_PRIVATE_KEY_FILE_NAME_DEFAULT = "private.pem";
+  public static final String HDDS_PUBLIC_KEY_FILE_NAME = "hdds.public.key.file"
+      + ".name";
+  public static final String HDDS_PUBLIC_KEY_FILE_NAME_DEFAULT = "public.pem";
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/f8f02f28/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/security/x509/HDDSKeyGenerator.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/security/x509/HDDSKeyGenerator.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/security/x509/HDDSKeyGenerator.java
new file mode 100644
index 0000000..cb411b2
--- /dev/null
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/security/x509/HDDSKeyGenerator.java
@@ -0,0 +1,99 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+package org.apache.hadoop.hdds.security.x509;
+
+import org.apache.hadoop.conf.Configuration;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.security.KeyPair;
+import java.security.KeyPairGenerator;
+import java.security.NoSuchAlgorithmException;
+import java.security.NoSuchProviderException;
+
+/** A class to generate key pairs for use with certificates. */
+public class HDDSKeyGenerator {
+  private static final Logger LOG =
+      LoggerFactory.getLogger(HDDSKeyGenerator.class);
+  private final SecurityConfig securityConfig;
+
+  /**
+   * Constructor for HDDSKeyGenerator.
+   *
+   * @param configuration - config
+   */
+  public HDDSKeyGenerator(Configuration configuration) {
+    this.securityConfig = new SecurityConfig(configuration);
+  }
+
+  /**
+   * Returns the Security config used for this object.
+   * @return SecurityConfig
+   */
+  public SecurityConfig getSecurityConfig() {
+    return securityConfig;
+  }
+
+  /**
+   * Use Config to generate key.
+   *
+   * @return KeyPair
+   * @throws NoSuchProviderException
+   * @throws NoSuchAlgorithmException
+   */
+  public KeyPair generateKey() throws NoSuchProviderException,
+      NoSuchAlgorithmException {
+    return generateKey(securityConfig.getSize(),
+        securityConfig.getAlgo(), securityConfig.getProvider());
+  }
+
+  /**
+   * Specify the size -- all other parameters are used from config.
+   *
+   * @param size - int, valid key sizes.
+   * @return KeyPair
+   * @throws NoSuchProviderException
+   * @throws NoSuchAlgorithmException
+   */
+  public KeyPair generateKey(int size) throws
+      NoSuchProviderException, NoSuchAlgorithmException {
+    return generateKey(size,
+        securityConfig.getAlgo(), securityConfig.getProvider());
+  }
+
+  /**
+   * Custom Key Generation, all values are user provided.
+   *
+   * @param size - Key Size
+   * @param algorithm - Algorithm to use
+   * @param provider - Security provider.
+   * @return KeyPair.
+   * @throws NoSuchProviderException
+   * @throws NoSuchAlgorithmException
+   */
+  public KeyPair generateKey(int size, String algorithm, String provider)
+      throws NoSuchProviderException, NoSuchAlgorithmException {
+    LOG.info("Generating key pair using size:{}, Algorithm:{}, Provider:{}",
+        size, algorithm, provider);
+    KeyPairGenerator generator = KeyPairGenerator
+        .getInstance(algorithm, provider);
+    generator.initialize(size);
+    return generator.generateKeyPair();
+  }
+}

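For orientation, a minimal usage sketch of the generator (the path is made up and checked-exception handling is elided; the metadata dir must be set because SecurityConfig rejects a null one):

    // Sketch only: defaults resolve to RSA, 2048 bits, provider "BC",
    // per the HddsConfigKeys defaults above.
    OzoneConfiguration conf = new OzoneConfiguration();
    conf.set(OzoneConfigKeys.OZONE_METADATA_DIRS, "/tmp/hdds-meta"); // made-up path
    HDDSKeyGenerator keyGen = new HDDSKeyGenerator(conf);
    KeyPair keyPair = keyGen.generateKey();       // size/algo/provider from config
    KeyPair largePair = keyGen.generateKey(4096); // override just the key size
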
http://git-wip-us.apache.org/repos/asf/hadoop/blob/f8f02f28/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/security/x509/HDDSKeyPEMWriter.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/security/x509/HDDSKeyPEMWriter.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/security/x509/HDDSKeyPEMWriter.java
new file mode 100644
index 0000000..6ca7584
--- /dev/null
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/security/x509/HDDSKeyPEMWriter.java
@@ -0,0 +1,254 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+package org.apache.hadoop.hdds.security.x509;
+
+import com.google.common.annotations.VisibleForTesting;
+import com.google.common.base.Preconditions;
+import java.nio.charset.Charset;
+import java.nio.charset.StandardCharsets;
+import org.apache.commons.io.output.FileWriterWithEncoding;
+import org.apache.hadoop.conf.Configuration;
+import org.bouncycastle.util.io.pem.PemObject;
+import org.bouncycastle.util.io.pem.PemWriter;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.io.File;
+import java.io.IOException;
+import java.nio.file.FileSystems;
+import java.nio.file.Files;
+import java.nio.file.Path;
+import java.nio.file.Paths;
+import java.nio.file.attribute.PosixFilePermission;
+import java.security.KeyPair;
+import java.util.Set;
+import java.util.function.Supplier;
+import java.util.stream.Collectors;
+import java.util.stream.Stream;
+
+import static java.nio.file.attribute.PosixFilePermission.OWNER_EXECUTE;
+import static java.nio.file.attribute.PosixFilePermission.OWNER_READ;
+import static java.nio.file.attribute.PosixFilePermission.OWNER_WRITE;
+
+/**
+ * We store all Key material in good old PEM files.
+ * This helps avoid dealing with persistent
+ * Java KeyStore issues. Also when debugging,
+ * general tools like OpenSSL can be used to read and
+ * decode these files.
+ */
+public class HDDSKeyPEMWriter {
+  private static final Logger LOG =
+      LoggerFactory.getLogger(HDDSKeyPEMWriter.class);
+  private final Path location;
+  private final SecurityConfig securityConfig;
+  private Set<PosixFilePermission> permissionSet =
+      Stream.of(OWNER_READ, OWNER_WRITE,  OWNER_EXECUTE)
+          .collect(Collectors.toSet());
+  private Supplier<Boolean> isPosixFileSystem;
+  public static final String PRIVATE_KEY = "PRIVATE KEY";
+  public static final String PUBLIC_KEY = "PUBLIC KEY";
+  public static final Charset DEFAULT_CHARSET = StandardCharsets.UTF_8;
+  /**
+   * Creates an HDDS Key PEM Writer.
+   *
+   * @param configuration - Configuration
+   */
+  public HDDSKeyPEMWriter(Configuration configuration) throws IOException {
+    Preconditions.checkNotNull(configuration, "Config cannot be null");
+    this.securityConfig = new SecurityConfig(configuration);
+    isPosixFileSystem = HDDSKeyPEMWriter::isPosix;
+    this.location = securityConfig.getKeyLocation();
+  }
+
+  /**
+   * Checks if File System supports posix style security permissions.
+   *
+   * @return True if it supports posix.
+   */
+  private static Boolean isPosix() {
+    return FileSystems.getDefault().supportedFileAttributeViews()
+        .contains("posix");
+  }
+
+  /**
+   * Returns the Permission set.
+   * @return Set
+   */
+  @VisibleForTesting
+  public Set<PosixFilePermission> getPermissionSet() {
+    return permissionSet;
+  }
+
+  /**
+   * Returns the Security config used for this object.
+   * @return SecurityConfig
+   */
+  public SecurityConfig getSecurityConfig() {
+    return securityConfig;
+  }
+
+  /**
+   * This function is used only for testing.
+   *
+   * @param isPosixFileSystem - Sets a boolean function for mimicking
+   * files systems that are not posix.
+   */
+  @VisibleForTesting
+  public void setIsPosixFileSystem(Supplier<Boolean> isPosixFileSystem) {
+    this.isPosixFileSystem = isPosixFileSystem;
+  }
+
+  /**
+   * Writes a given key using the default config options.
+   *
+   * @param keyPair - Key Pair to write to file.
+   * @throws IOException
+   */
+  public void writeKey(KeyPair keyPair) throws IOException {
+    writeKey(location, keyPair, securityConfig.getPrivateKeyName(),
+        securityConfig.getPublicKeyName(), false);
+  }
+
+  /**
+   * Writes a given key using default config options.
+   *
+   * @param keyPair - Key pair to write
+   * @param overwrite - Overwrites the keys if they already exist.
+   * @throws IOException
+   */
+  public void writeKey(KeyPair keyPair, boolean overwrite) throws IOException {
+    writeKey(location, keyPair, securityConfig.getPrivateKeyName(),
+        securityConfig.getPublicKeyName(), overwrite);
+  }
+
+  /**
+   * Writes a given key using default config options.
+   *
+   * @param basePath - The location to write to, override the config values.
+   * @param keyPair - Key pair to write
+   * @param overwrite - Overwrites the keys if they already exist.
+   * @throws IOException
+   */
+  public void writeKey(Path basePath, KeyPair keyPair, boolean overwrite)
+      throws IOException {
+    writeKey(basePath, keyPair, securityConfig.getPrivateKeyName(),
+        securityConfig.getPublicKeyName(), overwrite);
+  }
+
+  /**
+   * Helper function that actually writes data to the files.
+   *
+   * @param basePath - base path to write key
+   * @param keyPair - Key pair to write to file.
+   * @param privateKeyFileName - private key file name.
+   * @param publicKeyFileName - public key file name.
+   * @param force - forces overwriting the keys.
+   * @throws IOException
+   */
+  private synchronized void writeKey(Path basePath, KeyPair keyPair,
+      String privateKeyFileName, String publicKeyFileName, boolean force)
+      throws IOException {
+    checkPreconditions(basePath);
+
+    File privateKeyFile =
+        Paths.get(location.toString(), privateKeyFileName).toFile();
+    File publicKeyFile =
+        Paths.get(location.toString(), publicKeyFileName).toFile();
+    checkKeyFile(privateKeyFile, force, publicKeyFile);
+
+    try (PemWriter privateKeyWriter = new PemWriter(new
+        FileWriterWithEncoding(privateKeyFile, DEFAULT_CHARSET))) {
+      privateKeyWriter.writeObject(
+          new PemObject(PRIVATE_KEY, keyPair.getPrivate().getEncoded()));
+    }
+
+    try (PemWriter publicKeyWriter = new PemWriter(new
+        FileWriterWithEncoding(publicKeyFile, DEFAULT_CHARSET))) {
+      publicKeyWriter.writeObject(
+          new PemObject(PUBLIC_KEY, keyPair.getPublic().getEncoded()));
+    }
+    Files.setPosixFilePermissions(privateKeyFile.toPath(), permissionSet);
+    Files.setPosixFilePermissions(publicKeyFile.toPath(), permissionSet);
+  }
+
+  /**
+   * Checks if private and public key file already exists. Throws IOException
+   * if file exists and force flag is set to false, else will delete the
+   * existing file.
+   *
+   * @param privateKeyFile - Private key file.
+   * @param force - forces overwriting the keys.
+   * @param publicKeyFile - public key file.
+   * @throws IOException
+   */
+  private void checkKeyFile(File privateKeyFile, boolean force,
+      File publicKeyFile) throws IOException {
+    if (privateKeyFile.exists() && force) {
+      if (!privateKeyFile.delete()) {
+        throw new IOException("Unable to delete private key file.");
+      }
+    }
+
+    if (publicKeyFile.exists() && force) {
+      if (!publicKeyFile.delete()) {
+        throw new IOException("Unable to delete public key file.");
+      }
+    }
+
+    if (privateKeyFile.exists()) {
+      throw new IOException("Private Key file already exists.");
+    }
+
+    if (publicKeyFile.exists()) {
+      throw new IOException("Public Key file already exists.");
+    }
+  }
+
+  /**
+   * Checks if base path exists and sets file permissions.
+   *
+   * @param basePath - base path to write key
+   * @throws IOException
+   */
+  private void checkPreconditions(Path basePath) throws IOException {
+    Preconditions.checkNotNull(basePath, "Base path cannot be null");
+    if (!isPosixFileSystem.get()) {
+      LOG.error("Keys cannot be stored securely without POSIX file system "
+          + "support for now.");
+      throw new IOException("Unsupported File System for pem file.");
+    }
+
+    if (Files.exists(basePath)) {
+      // Not the end of the world if we reset the permissions on an existing
+      // directory.
+      Files.setPosixFilePermissions(basePath, permissionSet);
+    } else {
+      boolean success = basePath.toFile().mkdirs();
+      if (!success) {
+        LOG.error("Unable to create the directory for the "
+            + "location. Location: {}", basePath);
+        throw new IOException("Unable to create the directory for the "
+            + "location. Location:" + basePath);
+      }
+      Files.setPosixFilePermissions(basePath, permissionSet);
+    }
+  }
+
+}

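Continuing the sketch above, the PEM writer persists that pair under <metadata dir>/keys with owner-only POSIX permissions (IOException handling elided):

    // Sketch only: writes private.pem / public.pem (the default names)
    // and refuses to overwrite existing files unless asked to.
    HDDSKeyPEMWriter pemWriter = new HDDSKeyPEMWriter(conf);
    pemWriter.writeKey(keyPair);        // throws IOException if files exist
    pemWriter.writeKey(keyPair, true);  // force-overwrite variant
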
http://git-wip-us.apache.org/repos/asf/hadoop/blob/f8f02f28/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/security/x509/SecurityConfig.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/security/x509/SecurityConfig.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/security/x509/SecurityConfig.java
new file mode 100644
index 0000000..896a379
--- /dev/null
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/security/x509/SecurityConfig.java
@@ -0,0 +1,190 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package org.apache.hadoop.hdds.security.x509;
+
+import com.google.common.base.Preconditions;
+import org.apache.hadoop.conf.Configuration;
+import org.bouncycastle.jce.provider.BouncyCastleProvider;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.nio.file.Path;
+import java.nio.file.Paths;
+import java.security.Provider;
+import java.security.Security;
+
+import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_DEFAULT_KEY_LEN;
+import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_DEFAULT_KEY_ALGORITHM;
+import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_DEFAULT_SECURITY_PROVIDER;
+import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_KEY_ALGORITHM;
+import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_KEY_DIR_NAME;
+import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_KEY_DIR_NAME_DEFAULT;
+import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_KEY_LEN;
+import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_METADATA_DIR_NAME;
+import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_PRIVATE_KEY_FILE_NAME;
+import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_PRIVATE_KEY_FILE_NAME_DEFAULT;
+import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_PUBLIC_KEY_FILE_NAME;
+import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_PUBLIC_KEY_FILE_NAME_DEFAULT;
+import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_SECURITY_PROVIDER;
+import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_METADATA_DIRS;
+
+/**
+ * A class that deals with all security-related configs in HDDS.
+ * It is easier to have all Java code related to config in a single place.
+ */
+public class SecurityConfig {
+  private static final Logger LOG =
+      LoggerFactory.getLogger(SecurityConfig.class);
+  private static volatile Provider provider;
+  private final Configuration configuration;
+  private final int size;
+  private final String algo;
+  private final String providerString;
+  private final String metadataDir;
+  private final String keyDir;
+  private final String privateKeyName;
+  private final String publicKeyName;
+
+  /**
+   * Constructs a HDDSKeyGenerator.
+   *
+   * @param configuration - HDDS Configuration
+   */
+  public SecurityConfig(Configuration configuration) {
+    Preconditions.checkNotNull(configuration, "Configuration cannot be null");
+    this.configuration = configuration;
+    this.size = this.configuration.getInt(HDDS_KEY_LEN, HDDS_DEFAULT_KEY_LEN);
+    this.algo = this.configuration.get(HDDS_KEY_ALGORITHM,
+        HDDS_DEFAULT_KEY_ALGORITHM);
+    this.providerString = this.configuration.get(HDDS_SECURITY_PROVIDER,
+            HDDS_DEFAULT_SECURITY_PROVIDER);
+
+    // Please Note: To make it easy for our customers we will attempt to read
+    // HDDS metadata dir and if that is not set, we will use Ozone directory.
+    // TODO: We might want to fix this later.
+    this.metadataDir = this.configuration.get(HDDS_METADATA_DIR_NAME,
+        configuration.get(OZONE_METADATA_DIRS));
+
+    Preconditions.checkNotNull(this.metadataDir, "Metadata directory can't be"
+        + " null. Please check configs.");
+    this.keyDir = this.configuration.get(HDDS_KEY_DIR_NAME,
+        HDDS_KEY_DIR_NAME_DEFAULT);
+    this.privateKeyName = this.configuration.get(HDDS_PRIVATE_KEY_FILE_NAME,
+        HDDS_PRIVATE_KEY_FILE_NAME_DEFAULT);
+    this.publicKeyName =  this.configuration.get(HDDS_PUBLIC_KEY_FILE_NAME,
+        HDDS_PUBLIC_KEY_FILE_NAME_DEFAULT);
+
+    // First Startup -- if the provider is null, check for the provider.
+    if (SecurityConfig.provider == null) {
+      synchronized (SecurityConfig.class) {
+        provider = Security.getProvider(this.providerString);
+        if (SecurityConfig.provider == null) {
+          // Provider not found, let us try to Dynamically initialize the
+          // provider.
+          provider = initSecurityProvider(this.providerString);
+        }
+      }
+    }
+  }
+
+  /**
+   * Returns the Provider name.
+   * @return String Provider name.
+   */
+  public String getProviderString() {
+    return providerString;
+  }
+
+  /**
+   * Returns the public key file name.
+   * @return String, File name used for public keys.
+   */
+  public String getPublicKeyName() {
+    return publicKeyName;
+  }
+
+  /**
+   * Returns the private key file name.
+   * @return String, File name used for private keys.
+   */
+  public String getPrivateKeyName() {
+    return privateKeyName;
+  }
+
+  /**
+   * Returns the File path to where keys are stored.
+   * @return  String Key location.
+   */
+  public Path getKeyLocation() {
+    return Paths.get(metadataDir, keyDir);
+  }
+
+  /**
+   * Gets the Key Size.
+   *
+   * @return key size.
+   */
+  public int getSize() {
+    return size;
+  }
+
+  /**
+   * Gets provider.
+   *
+   * @return String Provider name.
+   */
+  public String getProvider() {
+    return providerString;
+  }
+
+  /**
+   * Returns the Key generation Algorithm used.
+   *
+   * @return String Algo.
+   */
+  public String getAlgo() {
+    return algo;
+  }
+
+  /**
+   * Returns the Configuration used for initializing this SecurityConfig.
+   * @return  Configuration
+   */
+  public Configuration getConfiguration() {
+    return configuration;
+  }
+
+
+  /**
+   * Adds a security provider dynamically if it is not loaded already.
+   *
+   * @param providerName - name of the provider.
+   */
+  private Provider initSecurityProvider(String providerName) {
+    switch (providerName) {
+    case "BC":
+      Security.addProvider(new BouncyCastleProvider());
+      return Security.getProvider(providerName);
+    default:
+      LOG.error("Security Provider:{} is unknown", provider);
+      throw new SecurityException("Unknown security provider:" + provider);
+    }
+  }
+}

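SecurityConfig is the single place the classes above read their settings from; a short sketch of the values it resolves for the sketch configuration used earlier:

    // Sketch only: defaults per ozone-default.xml below.
    SecurityConfig secConf = new SecurityConfig(conf);
    secConf.getAlgo();        // "RSA"  (hdds.key.algo)
    secConf.getSize();        // 2048   (hdds.key.len)
    secConf.getProvider();    // "BC"   (hdds.security.provider)
    secConf.getKeyLocation(); // <metadata dir>/keys (hdds.key.dir.name)
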
http://git-wip-us.apache.org/repos/asf/hadoop/blob/f8f02f28/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/security/x509/package-info.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/security/x509/package-info.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/security/x509/package-info.java
new file mode 100644
index 0000000..89d5d51
--- /dev/null
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/security/x509/package-info.java
@@ -0,0 +1,25 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+
+/**
+ * This package contains common routines used in creating an x509 based
+ * identity framework for HDDS.
+ */
+package org.apache.hadoop.hdds.security.x509;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/f8f02f28/hadoop-hdds/common/src/main/resources/ozone-default.xml
----------------------------------------------------------------------
diff --git a/hadoop-hdds/common/src/main/resources/ozone-default.xml b/hadoop-hdds/common/src/main/resources/ozone-default.xml
index d450b37..dd0aca3 100644
--- a/hadoop-hdds/common/src/main/resources/ozone-default.xml
+++ b/hadoop-hdds/common/src/main/resources/ozone-default.xml
@@ -1291,4 +1291,44 @@
       OzoneManager http server kerberos keytab.
     </description>
   </property>
+  <property>
+    <name>hdds.key.len</name>
+    <value>2048</value>
+    <tag>SCM, HDDS, X509, SECURITY</tag>
+    <description>
+      SCM CA key length. This is an algorithm-specific metric, such as modulus length, specified in number of bits.
+    </description>
+  </property>
+  <property>
+    <name>hdds.key.dir.name</name>
+    <value>keys</value>
+    <tag>SCM, HDDS, X509, SECURITY</tag>
+    <description>
+      Directory to store the public/private key pair for SCM CA. This is relative to the ozone/hdds metadata dir.
+    </description>
+  </property>
+  <property>
+    <name>hdds.metadata.dir</name>
+    <value/>
+    <tag>X509, SECURITY</tag>
+    <description>
+      Absolute path to HDDS metadata dir.
+    </description>
+  </property>
+  <property>
+    <name>hdds.priv.key.file.name</name>
+    <value>private.pem</value>
+    <tag>X509, SECURITY</tag>
+    <description>
+      Name of file which stores private key generated for SCM CA.
+    </description>
+  </property>
+  <property>
+    <name>hdds.public.key.file.name</name>
+    <value>public.pem</value>
+    <tag>X509, SECURITY</tag>
+    <description>
+      Name of file which stores public key generated for SCM CA.
+    </description>
+  </property>
 </configuration>
\ No newline at end of file

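These defaults can also be overridden programmatically rather than via ozone-site.xml; a sketch using the HddsConfigKeys constants introduced above (all values illustrative):

    // Sketch only: equivalent to setting the properties in ozone-site.xml.
    OzoneConfiguration conf = new OzoneConfiguration();
    conf.setInt(HddsConfigKeys.HDDS_KEY_LEN, 4096);
    conf.set(HddsConfigKeys.HDDS_KEY_ALGORITHM, "RSA");
    conf.set(HddsConfigKeys.HDDS_METADATA_DIR_NAME, "/var/lib/hdds"); // made-up path
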
http://git-wip-us.apache.org/repos/asf/hadoop/blob/f8f02f28/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/security/x509/TestHDDSKeyGenerator.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/security/x509/TestHDDSKeyGenerator.java b/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/security/x509/TestHDDSKeyGenerator.java
new file mode 100644
index 0000000..2ddf59c
--- /dev/null
+++ b/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/security/x509/TestHDDSKeyGenerator.java
@@ -0,0 +1,81 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package org.apache.hadoop.hdds.security.x509;
+
+import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_METADATA_DIRS;
+import java.security.KeyPair;
+import java.security.NoSuchAlgorithmException;
+import java.security.NoSuchProviderException;
+import java.security.PublicKey;
+import java.security.interfaces.RSAPublicKey;
+import java.security.spec.PKCS8EncodedKeySpec;
+import org.apache.hadoop.hdds.conf.OzoneConfiguration;
+import org.apache.hadoop.test.GenericTestUtils;
+import org.junit.Assert;
+import org.junit.Before;
+import org.junit.Test;
+
+/**
+ * Test class for HDDS Key Generator.
+ */
+public class TestHDDSKeyGenerator {
+  private static SecurityConfig config;
+
+  @Before
+  public void init() {
+    OzoneConfiguration conf = new OzoneConfiguration();
+    conf.set(OZONE_METADATA_DIRS,  GenericTestUtils.getTempPath("testpath"));
+    config = new SecurityConfig(conf);
+  }
+  /**
+   * In this test we verify that we can create a key pair, then take its encoded
+   * bytes and use an ASN.1 parser to parse them back into a private key.
+   * @throws NoSuchProviderException
+   * @throws NoSuchAlgorithmException
+   */
+  @Test
+  public void testGenerateKey()
+      throws NoSuchProviderException, NoSuchAlgorithmException {
+    HDDSKeyGenerator keyGen = new HDDSKeyGenerator(config.getConfiguration());
+    KeyPair keyPair = keyGen.generateKey();
+    Assert.assertEquals(config.getAlgo(), keyPair.getPrivate().getAlgorithm());
+    PKCS8EncodedKeySpec keySpec =
+        new PKCS8EncodedKeySpec(keyPair.getPrivate().getEncoded());
+    Assert.assertEquals("PKCS#8", keySpec.getFormat());
+  }
+
+  /**
+   * In this test we assert that the size we specified is used for key
+   * generation.
+   * @throws NoSuchProviderException
+   * @throws NoSuchAlgorithmException
+   */
+  @Test
+  public void testGenerateKeyWithSize() throws NoSuchProviderException,
+      NoSuchAlgorithmException {
+    HDDSKeyGenerator keyGen = new HDDSKeyGenerator(config.getConfiguration());
+    KeyPair keyPair = keyGen.generateKey(4096);
+    PublicKey publicKey = keyPair.getPublic();
+    if(publicKey instanceof RSAPublicKey) {
+      Assert.assertEquals(4096,
+          ((RSAPublicKey)(publicKey)).getModulus().bitLength());
+    }
+  }
+}
\ No newline at end of file
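
HDDSKeyGenerator itself is not shown in this excerpt; judging from what the test
exercises, a minimal JCA-only equivalent (assuming the RSA algorithm that the
4096-bit modulus assertion implies) would look roughly like:

    import java.security.KeyPair;
    import java.security.KeyPairGenerator;
    import java.security.NoSuchAlgorithmException;

    public final class KeyGenSketch {
      // Generate an RSA key pair of the requested size, as the test does
      // through HDDSKeyGenerator.generateKey(4096).
      static KeyPair generate(int sizeInBits) throws NoSuchAlgorithmException {
        KeyPairGenerator generator = KeyPairGenerator.getInstance("RSA");
        generator.initialize(sizeInBits);
        return generator.generateKeyPair();
      }
    }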

http://git-wip-us.apache.org/repos/asf/hadoop/blob/f8f02f28/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/security/x509/TestHDDSKeyPEMWriter.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/security/x509/TestHDDSKeyPEMWriter.java b/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/security/x509/TestHDDSKeyPEMWriter.java
new file mode 100644
index 0000000..68ff9e6
--- /dev/null
+++ b/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/security/x509/TestHDDSKeyPEMWriter.java
@@ -0,0 +1,213 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package org.apache.hadoop.hdds.security.x509;
+
+import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_METADATA_DIR_NAME;
+
+import java.io.IOException;
+import java.nio.charset.StandardCharsets;
+import java.nio.file.Files;
+import java.nio.file.Path;
+import java.nio.file.Paths;
+import java.nio.file.attribute.PosixFilePermission;
+import java.security.KeyFactory;
+import java.security.KeyPair;
+import java.security.NoSuchAlgorithmException;
+import java.security.NoSuchProviderException;
+import java.security.PrivateKey;
+import java.security.PublicKey;
+import java.security.spec.InvalidKeySpecException;
+import java.security.spec.PKCS8EncodedKeySpec;
+import java.security.spec.X509EncodedKeySpec;
+import java.util.Set;
+import org.apache.commons.codec.binary.Base64;
+import org.apache.commons.io.FileUtils;
+import org.apache.hadoop.hdds.conf.OzoneConfiguration;
+import org.apache.hadoop.test.LambdaTestUtils;
+import org.junit.Assert;
+import org.junit.Before;
+import org.junit.Rule;
+import org.junit.Test;
+import org.junit.rules.TemporaryFolder;
+
+/**
+ * Test class for HDDS pem writer.
+ */
+public class TestHDDSKeyPEMWriter {
+
+  @Rule
+  public TemporaryFolder temporaryFolder = new TemporaryFolder();
+  private OzoneConfiguration configuration;
+  private HDDSKeyGenerator keyGenerator;
+  private String prefix;
+
+  @Before
+  public void init() throws IOException {
+    configuration = new OzoneConfiguration();
+    prefix = temporaryFolder.newFolder().toString();
+    configuration.set(HDDS_METADATA_DIR_NAME, prefix);
+    keyGenerator = new HDDSKeyGenerator(configuration);
+  }
+
+  /**
+   * Assert basic behavior: that we are able to create the key files and
+   * that their names are in the expected format.
+   *
+   * @throws NoSuchProviderException
+   * @throws NoSuchAlgorithmException
+   * @throws IOException
+   */
+  @Test
+  public void testWriteKey()
+      throws NoSuchProviderException, NoSuchAlgorithmException,
+      IOException, InvalidKeySpecException {
+    KeyPair keys = keyGenerator.generateKey();
+    HDDSKeyPEMWriter pemWriter = new HDDSKeyPEMWriter(configuration);
+    pemWriter.writeKey(keys);
+
+    // Assert that the key location has been created.
+    Path keyLocation = pemWriter.getSecurityConfig().getKeyLocation();
+    Assert.assertTrue(keyLocation.toFile().exists());
+
+    // Assert that the files are created under the location that we
+    // specified using the config.
+    Assert.assertTrue(keyLocation.toString().startsWith(prefix));
+    Path privateKeyPath = Paths.get(keyLocation.toString(),
+        pemWriter.getSecurityConfig().getPrivateKeyName());
+    Assert.assertTrue(privateKeyPath.toFile().exists());
+    Path publicKeyPath = Paths.get(keyLocation.toString(),
+        pemWriter.getSecurityConfig().getPublicKeyName());
+    Assert.assertTrue(publicKeyPath.toFile().exists());
+
+    // Read the private key and verify that the expected PEM-format marker
+    // string exists.
+    byte[] privateKey = Files.readAllBytes(privateKeyPath);
+    String privateKeydata = new String(privateKey, StandardCharsets.UTF_8);
+    Assert.assertTrue(privateKeydata.contains("PRIVATE KEY"));
+
+    // Read the public key and verify that the expected PEM-format marker
+    // string exists.
+    byte[] publicKey = Files.readAllBytes(publicKeyPath);
+    String publicKeydata = new String(publicKey, StandardCharsets.UTF_8);
+    Assert.assertTrue(publicKeydata.contains("PUBLIC KEY"));
+
+    // Let us decode the PEM file and parse it back into binary.
+    KeyFactory kf = KeyFactory.getInstance(
+        pemWriter.getSecurityConfig().getAlgo());
+
+    // Strip the human-readable PEM guard lines.
+    privateKeydata =
+        privateKeydata.replace("-----BEGIN PRIVATE KEY-----\n", "");
+    privateKeydata =
+        privateKeydata.replace("-----END PRIVATE KEY-----", "");
+
+    // Decode the base64 to binary format and then use an ASN.1 parser to
+    // parse the binary format.
+
+    byte[] keyBytes = Base64.decodeBase64(privateKeydata);
+    PKCS8EncodedKeySpec spec = new PKCS8EncodedKeySpec(keyBytes);
+    PrivateKey privateKeyDecoded = kf.generatePrivate(spec);
+    Assert.assertNotNull("Private Key should not be null",
+        privateKeyDecoded);
+
+    // Let us decode the public key and verify that we can parse it back into
+    // binary.
+    publicKeydata =
+        publicKeydata.replace("-----BEGIN PUBLIC KEY-----\n", "");
+    publicKeydata =
+        publicKeydata.replace("-----END PUBLIC KEY-----", "");
+
+    keyBytes = Base64.decodeBase64(publicKeydata);
+    X509EncodedKeySpec pubKeyspec = new X509EncodedKeySpec(keyBytes);
+    PublicKey publicKeyDecoded = kf.generatePublic(pubKeyspec);
+    Assert.assertNotNull("Public Key should not be null",
+        publicKeyDecoded);
+
+    // Now let us assert that the permissions on the directories and files
+    // are as expected.
+    Set<PosixFilePermission> expectedSet = pemWriter.getPermissionSet();
+    Set<PosixFilePermission> currentSet =
+        Files.getPosixFilePermissions(privateKeyPath);
+    currentSet.removeAll(expectedSet);
+    Assert.assertEquals(0, currentSet.size());
+
+    currentSet =
+        Files.getPosixFilePermissions(publicKeyPath);
+    currentSet.removeAll(expectedSet);
+    Assert.assertEquals(0, currentSet.size());
+
+    currentSet =
+        Files.getPosixFilePermissions(keyLocation);
+    currentSet.removeAll(expectedSet);
+    Assert.assertEquals(0, currentSet.size());
+  }
+
+  /**
+   * Assert that key rewrite fails without the force option.
+   *
+   * @throws IOException
+   */
+  @Test
+  public void testReWriteKey()
+      throws Exception {
+    KeyPair kp = keyGenerator.generateKey();
+    HDDSKeyPEMWriter pemWriter = new HDDSKeyPEMWriter(configuration);
+    SecurityConfig secConfig = pemWriter.getSecurityConfig();
+    pemWriter.writeKey(kp);
+
+    // Assert that rewriting the keys throws an exception with the expected messages.
+    LambdaTestUtils
+        .intercept(IOException.class, "Private Key file already exists.",
+            () -> pemWriter.writeKey(kp));
+    FileUtils.deleteQuietly(Paths.get(
+        secConfig.getKeyLocation().toString() + "/" + secConfig
+            .getPrivateKeyName()).toFile());
+    LambdaTestUtils
+        .intercept(IOException.class, "Public Key file already exists.",
+            () -> pemWriter.writeKey(kp));
+    FileUtils.deleteQuietly(Paths.get(
+        secConfig.getKeyLocation().toString() + "/" + secConfig
+            .getPublicKeyName()).toFile());
+
+    // Should succeed now as both the public and private keys are deleted.
+    pemWriter.writeKey(kp);
+    // Should succeed with overwrite flag as true.
+    pemWriter.writeKey(kp, true);
+
+  }
+
+  /**
+   * Assert that key write fails on a non-POSIX file system.
+   *
+   * @throws IOException
+   */
+  @Test
+  public void testWriteKeyInNonPosixFS()
+      throws Exception {
+    KeyPair kp = keyGenerator.generateKey();
+    HDDSKeyPEMWriter pemWriter = new HDDSKeyPEMWriter(configuration);
+    pemWriter.setIsPosixFileSystem(() -> false);
+
+    // Assert that the key write fails on a non-POSIX file system.
+    LambdaTestUtils
+        .intercept(IOException.class, "Unsupported File System for pem file.",
+            () -> pemWriter.writeKey(kp));
+  }
+}
\ No newline at end of file
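
The PEM round trip in testWriteKey can be captured in a small helper; the sketch
below mirrors the test's steps (strip the guard lines, Base64-decode, rebuild the
key through KeyFactory) and assumes an RSA PKCS#8 private key as written above:

    import java.security.KeyFactory;
    import java.security.PrivateKey;
    import java.security.spec.PKCS8EncodedKeySpec;
    import org.apache.commons.codec.binary.Base64;

    public final class PemDecodeSketch {
      // Parse a PKCS#8 private key back out of its PEM representation.
      static PrivateKey decodePrivateKey(String pem) throws Exception {
        String body = pem
            .replace("-----BEGIN PRIVATE KEY-----\n", "")
            .replace("-----END PRIVATE KEY-----", "");
        byte[] der = Base64.decodeBase64(body);
        return KeyFactory.getInstance("RSA")
            .generatePrivate(new PKCS8EncodedKeySpec(der));
      }
    }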

http://git-wip-us.apache.org/repos/asf/hadoop/blob/f8f02f28/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestOzoneConfigurationFields.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestOzoneConfigurationFields.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestOzoneConfigurationFields.java
index 909cddf..8cc9c57 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestOzoneConfigurationFields.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestOzoneConfigurationFields.java
@@ -36,5 +36,11 @@ public class TestOzoneConfigurationFields extends TestConfigurationFieldsBase {
     errorIfMissingConfigProps = true;
     errorIfMissingXmlProps = true;
     xmlPropsToSkipCompare.add("hadoop.tags.custom");
+    addPropertiesNotInXml();
+  }
+
+  private void addPropertiesNotInXml() {
+    configurationPropsToSkipCompare.add(HddsConfigKeys.HDDS_KEY_ALGORITHM);
+    configurationPropsToSkipCompare.add(HddsConfigKeys.HDDS_SECURITY_PROVIDER);
   }
 }
\ No newline at end of file




[38/50] [abbrv] hadoop git commit: YARN-8801. Fixed header comments for docker utility functions. Contributed by Zian Chen

Posted by xy...@apache.org.
YARN-8801.  Fixed header comments for docker utility functions.
            Contributed by Zian Chen


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/aa4bd493
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/aa4bd493
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/aa4bd493

Branch: refs/heads/HDDS-4
Commit: aa4bd493c309f09f8f2ea7449aa33c8b641fb8d2
Parents: 429a07e
Author: Eric Yang <ey...@apache.org>
Authored: Thu Sep 20 13:08:59 2018 -0400
Committer: Eric Yang <ey...@apache.org>
Committed: Thu Sep 20 13:08:59 2018 -0400

----------------------------------------------------------------------
 .../container-executor/impl/utils/docker-util.h | 30 +++++++-------------
 1 file changed, 10 insertions(+), 20 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/aa4bd493/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/utils/docker-util.h
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/utils/docker-util.h b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/utils/docker-util.h
index 278dc53..7b7322d 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/utils/docker-util.h
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/utils/docker-util.h
@@ -81,8 +81,7 @@ char *get_docker_binary(const struct configuration *conf);
  * Get the Docker command line string. The function will inspect the params file to determine the command to be run.
  * @param command_file File containing the params for the Docker command
  * @param conf Configuration struct containing the container-executor.cfg details
- * @param out Buffer to fill with the Docker command
- * @param outlen Size of the output buffer
+ * @param args Buffer to construct argv
  * @return Return code with 0 indicating success and non-zero codes indicating error
  */
 int get_docker_command(const char* command_file, const struct configuration* conf, args *args);
@@ -98,8 +97,7 @@ int get_use_entry_point_flag();
  * inspect command.
  * @param command_file File containing the params for the Docker inspect command
  * @param conf Configuration struct containing the container-executor.cfg details
- * @param out Buffer to fill with the inspect command
- * @param outlen Size of the output buffer
+ * @param args Buffer to construct argv
  * @return Return code with 0 indicating success and non-zero codes indicating error
  */
 int get_docker_inspect_command(const char* command_file, const struct configuration* conf, args *args);
@@ -108,8 +106,7 @@ int get_docker_inspect_command(const char* command_file, const struct configurat
  * Get the Docker load command line string. The function will verify that the params file is meant for the load command.
  * @param command_file File containing the params for the Docker load command
  * @param conf Configuration struct containing the container-executor.cfg details
- * @param out Buffer to fill with the load command
- * @param outlen Size of the output buffer
+ * @param args Buffer to construct argv
  * @return Return code with 0 indicating success and non-zero codes indicating error
  */
 int get_docker_load_command(const char* command_file, const struct configuration* conf, args *args);
@@ -118,8 +115,7 @@ int get_docker_load_command(const char* command_file, const struct configuration
  * Get the Docker pull command line string. The function will verify that the params file is meant for the pull command.
  * @param command_file File containing the params for the Docker pull command
  * @param conf Configuration struct containing the container-executor.cfg details
- * @param out Buffer to fill with the pull command
- * @param outlen Size of the output buffer
+ * @param args Buffer to construct argv
  * @return Return code with 0 indicating success and non-zero codes indicating error
  */
 int get_docker_pull_command(const char* command_file, const struct configuration* conf, args *args);
@@ -128,8 +124,7 @@ int get_docker_pull_command(const char* command_file, const struct configuration
  * Get the Docker rm command line string. The function will verify that the params file is meant for the rm command.
  * @param command_file File containing the params for the Docker rm command
  * @param conf Configuration struct containing the container-executor.cfg details
- * @param out Buffer to fill with the rm command
- * @param outlen Size of the output buffer
+ * @param args Buffer to construct argv
  * @return Return code with 0 indicating success and non-zero codes indicating error
  */
 int get_docker_rm_command(const char* command_file, const struct configuration* conf, args *args);
@@ -138,8 +133,7 @@ int get_docker_rm_command(const char* command_file, const struct configuration*
  * Get the Docker run command line string. The function will verify that the params file is meant for the run command.
  * @param command_file File containing the params for the Docker run command
  * @param conf Configuration struct containing the container-executor.cfg details
- * @param out Buffer to fill with the run command
- * @param outlen Size of the output buffer
+ * @param args Buffer to construct argv
  * @return Return code with 0 indicating success and non-zero codes indicating error
  */
 int get_docker_run_command(const char* command_file, const struct configuration* conf, args *args);
@@ -148,8 +142,7 @@ int get_docker_run_command(const char* command_file, const struct configuration*
  * Get the Docker stop command line string. The function will verify that the params file is meant for the stop command.
  * @param command_file File containing the params for the Docker stop command
  * @param conf Configuration struct containing the container-executor.cfg details
- * @param out Buffer to fill with the stop command
- * @param outlen Size of the output buffer
+ * @param args Buffer to construct argv
  * @return Return code with 0 indicating success and non-zero codes indicating error
  */
 int get_docker_stop_command(const char* command_file, const struct configuration* conf, args *args);
@@ -158,8 +151,7 @@ int get_docker_stop_command(const char* command_file, const struct configuration
  * Get the Docker kill command line string. The function will verify that the params file is meant for the kill command.
  * @param command_file File containing the params for the Docker kill command
  * @param conf Configuration struct containing the container-executor.cfg details
- * @param out Buffer to fill with the kill command
- * @param outlen Size of the output buffer
+ * @param args Buffer to construct argv
  * @return Return code with 0 indicating success and non-zero codes indicating error
  */
 int get_docker_kill_command(const char* command_file, const struct configuration* conf, args *args);
@@ -169,8 +161,7 @@ int get_docker_kill_command(const char* command_file, const struct configuration
  * params file is meant for the volume command.
  * @param command_file File containing the params for the Docker volume command
  * @param conf Configuration struct containing the container-executor.cfg details
- * @param out Buffer to fill with the volume command
- * @param outlen Size of the output buffer
+ * @param args Buffer to construct argv
  * @return Return code with 0 indicating success and non-zero codes indicating error
  */
 int get_docker_volume_command(const char *command_file, const struct configuration *conf, args *args);
@@ -179,8 +170,7 @@ int get_docker_volume_command(const char *command_file, const struct configurati
  * Get the Docker start command line string. The function will verify that the params file is meant for the start command.
  * @param command_file File containing the params for the Docker start command
  * @param conf Configuration struct containing the container-executor.cfg details
- * @param out Buffer to fill with the start command
- * @param outlen Size of the output buffer
+ * @param args Buffer to construct argv
  * @return Return code with 0 indicating success and non-zero codes indicating error
  */
 int get_docker_start_command(const char* command_file, const struct configuration* conf, args *args);




[47/50] [abbrv] hadoop git commit: Fix merge conflicts

Posted by xy...@apache.org.
Fix merge conflicts


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/5641534e
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/5641534e
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/5641534e

Branch: refs/heads/HDDS-4
Commit: 5641534e44d137482a7e5e0fc078ee0d7b684fe0
Parents: f8f02f2
Author: Xiaoyu Yao <xy...@apache.org>
Authored: Tue Jul 31 18:17:29 2018 -0700
Committer: Xiaoyu Yao <xy...@apache.org>
Committed: Thu Sep 20 15:13:07 2018 -0700

----------------------------------------------------------------------
 .../conf/TestConfigurationFieldsBase.java       |  2 -
 .../org/apache/hadoop/hdds/HddsConfigKeys.java  |  1 -
 .../apache/hadoop/hdds/scm/ScmConfigKeys.java   | 10 ++-
 .../apache/hadoop/ozone/OzoneConfigKeys.java    |  5 --
 .../common/src/main/resources/ozone-default.xml | 33 +++-----
 .../scm/server/StorageContainerManager.java     |  8 +-
 .../StorageContainerManagerHttpServer.java      |  4 +-
 .../hadoop/hdds/scm/block/TestBlockManager.java |  3 +-
 .../src/test/compose/compose-secure/.env        |  2 +-
 .../test/compose/compose-secure/docker-config   | 55 ++++++++++---
 .../apache/hadoop/ozone/ksm/KSMConfigKeys.java  | 84 --------------------
 .../apache/hadoop/ozone/MiniOzoneCluster.java   |  3 +-
 .../hadoop/ozone/MiniOzoneClusterImpl.java      |  2 +-
 .../hadoop/ozone/TestSecureOzoneCluster.java    | 22 ++---
 14 files changed, 87 insertions(+), 147 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/5641534e/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestConfigurationFieldsBase.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestConfigurationFieldsBase.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestConfigurationFieldsBase.java
index bce1cd5..152159b 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestConfigurationFieldsBase.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestConfigurationFieldsBase.java
@@ -436,8 +436,6 @@ public abstract class TestConfigurationFieldsBase {
     // Create XML key/value map
     LOG_XML.debug("Reading XML property files\n");
     xmlKeyValueMap = extractPropertiesFromXml(xmlFilename);
-    // Remove hadoop property set in ozone-default.xml
-    xmlKeyValueMap.remove("hadoop.custom.tags");
     LOG_XML.debug("\n=====\n");
 
     // Create default configuration variable key/value map

http://git-wip-us.apache.org/repos/asf/hadoop/blob/5641534e/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/HddsConfigKeys.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/HddsConfigKeys.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/HddsConfigKeys.java
index adadcd2..6524d4a 100644
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/HddsConfigKeys.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/HddsConfigKeys.java
@@ -112,5 +112,4 @@ public final class HddsConfigKeys {
   public static final String HDDS_PUBLIC_KEY_FILE_NAME = "hdds.public.key.file"
       + ".name";
   public static final String HDDS_PUBLIC_KEY_FILE_NAME_DEFAULT = "public.pem";
->>>>>>> HDDS-100. SCM CA: generate public/private key pair for SCM/OM/DNs. Contributed by Ajay Kumar.
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/5641534e/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/ScmConfigKeys.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/ScmConfigKeys.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/ScmConfigKeys.java
index 4b0e7c8..bc90c1e 100644
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/ScmConfigKeys.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/ScmConfigKeys.java
@@ -279,10 +279,12 @@ public final class ScmConfigKeys {
   public static final String HDDS_SCM_WATCHER_TIMEOUT_DEFAULT =
       "10m";
 
-  public static final String SCM_WEB_AUTHENTICATION_KERBEROS_PRINCIPAL_KEY =
-      "ozone.scm.web.authentication.kerberos.principal";
-  public static final String SCM_WEB_AUTHENTICATION_KERBEROS_KEYTAB_FILE_KEY =
-      "ozone.scm.web.authentication.kerberos.keytab";
+  public static final String
+      HDDS_SCM_WEB_AUTHENTICATION_KERBEROS_PRINCIPAL_KEY =
+      "hdds.scm.web.authentication.kerberos.principal";
+  public static final String
+      HDDS_SCM_WEB_AUTHENTICATION_KERBEROS_KEYTAB_FILE_KEY =
+      "hdds.scm.web.authentication.kerberos.keytab";
   /**
    * Never constructed.
    */

http://git-wip-us.apache.org/repos/asf/hadoop/blob/5641534e/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConfigKeys.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConfigKeys.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConfigKeys.java
index 944605b..cea23cb 100644
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConfigKeys.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConfigKeys.java
@@ -302,11 +302,6 @@ public final class OzoneConfigKeys {
   public static final String OZONE_CONTAINER_COPY_WORKDIR =
       "hdds.datanode.replication.work.dir";
 
-  public static final String OZONE_OM_KERBEROS_KEYTAB_FILE_KEY = "ozone.om."
-      + "kerberos.keytab.file";
-  public static final String OZONE_OM_KERBEROS_PRINCIPAL_KEY = "ozone.om"
-      + ".kerberos.principal";
-
   /**
    * There is no need to instantiate this class.
    */

http://git-wip-us.apache.org/repos/asf/hadoop/blob/5641534e/hadoop-hdds/common/src/main/resources/ozone-default.xml
----------------------------------------------------------------------
diff --git a/hadoop-hdds/common/src/main/resources/ozone-default.xml b/hadoop-hdds/common/src/main/resources/ozone-default.xml
index dd0aca3..18450a9 100644
--- a/hadoop-hdds/common/src/main/resources/ozone-default.xml
+++ b/hadoop-hdds/common/src/main/resources/ozone-default.xml
@@ -433,7 +433,6 @@
     </description>
   </property>
   <property>
-<<<<<<< HEAD
     <name>ozone.om.keytab.file</name>
     <value/>
     <tag>OM, SECURITY</tag>
@@ -443,9 +442,6 @@
   </property>
   <property>
     <name>ozone.om.db.cache.size.mb</name>
-=======
-    <name>ozone.ksm.db.cache.size.mb</name>
->>>>>>> HDDS-70. Fix config names for secure ksm and scm. Contributed by Ajay Kumar.
     <value>128</value>
     <tag>OM, PERFORMANCE</tag>
     <description>
@@ -933,7 +929,7 @@
   </property>
 
   <property>
-    <name>hdds.scm.container.creation.lease.timeout</name>
+    <name>ozone.scm.container.creation.lease.timeout</name>
     <value>60s</value>
     <tag>OZONE, SCM</tag>
     <description>
@@ -987,11 +983,7 @@
     </description>
   </property>
   <property>
-<<<<<<< HEAD
     <name>hdds.container.close.threshold</name>
-=======
-    <name>hdds.scm.container.close.threshold</name>
->>>>>>> HDDS-70. Fix config names for secure ksm and scm. Contributed by Ajay Kumar.
     <value>0.9f</value>
     <tag>OZONE, DATANODE</tag>
     <description>
@@ -1123,6 +1115,7 @@
       to occur.
     </description>
   </property>
+
   <property>
     <name>hdds.lock.suppress.warning.interval.ms</name>
     <value>10000</value>
@@ -1132,13 +1125,7 @@
       consecutive warnings within this interval.
     </description>
   </property>
-  <property>
-    <name>ozone.security.enabled</name>
-    <value>false</value>
-    <tag> OZONE, SECURITY, FLAG</tag>
-    <description>True if security is enabled for ozone. When this property is true, hadoop.security.authentication should be Kerberos.
-    </description>
-  </property>
+
   <property>
     <name>hdds.command.status.report.interval</name>
     <value>30s</value>
@@ -1206,8 +1193,8 @@
       datanode unless the datanode confirms the completion.
     </description>
   </property>
+
   <property>
-<<<<<<< HEAD
     <name>hdds.db.profile</name>
     <value>SSD</value>
     <tag>OZONE, OM, PERFORMANCE, REQUIRED</tag>
@@ -1215,6 +1202,7 @@
     that tunes the RocksDB settings for the hardware it is running
     on. Right now, we have SSD and DISK as profile options.</description>
   </property>
+
   <property>
     <name>hdds.datanode.replication.work.dir</name>
     <tag>DATANODE</tag>
@@ -1235,10 +1223,15 @@
   </property>
 
   <property>
-    <name>ozone.scm.kerberos.keytab.file</name>
-=======
+    <name>ozone.security.enabled</name>
+    <value>false</value>
+    <tag> OZONE, SECURITY, FLAG</tag>
+    <description>True if security is enabled for ozone. When this property is true, hadoop.security.authentication should be Kerberos.
+    </description>
+  </property>
+
+  <property>
     <name>hdds.scm.kerberos.keytab.file</name>
->>>>>>> HDDS-70. Fix config names for secure ksm and scm. Contributed by Ajay Kumar.
     <value></value>
     <tag> OZONE, SECURITY</tag>
     <description> The keytab file used by each SCM daemon to login as its

http://git-wip-us.apache.org/repos/asf/hadoop/blob/5641534e/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/StorageContainerManager.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/StorageContainerManager.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/StorageContainerManager.java
index 47830e6..03ca9ab 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/StorageContainerManager.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/StorageContainerManager.java
@@ -433,7 +433,6 @@ public final class StorageContainerManager extends ServiceRuntimeInfoImpl
     out.println(USAGE + "\n");
   }
 
-<<<<<<< HEAD
   /**
    * Create an SCM instance based on the supplied command-line arguments.
    *
@@ -444,11 +443,12 @@ public final class StorageContainerManager extends ServiceRuntimeInfoImpl
    * @param args command line arguments.
    * @param conf HDDS configuration
    * @return SCM instance
-   * @throws IOException
+   * @throws IOException, AuthenticationException
    */
   @VisibleForTesting
   public static StorageContainerManager createSCM(
-      String[] args, OzoneConfiguration conf) throws IOException {
+      String[] args, OzoneConfiguration conf)
+      throws IOException, AuthenticationException {
     return createSCM(args, conf, false);
   }
 
@@ -459,7 +459,7 @@ public final class StorageContainerManager extends ServiceRuntimeInfoImpl
    * @param conf HDDS configuration
    * @param printBanner if true, then log a verbose startup message.
    * @return SCM instance
-   * @throws IOException
+   * @throws IOException, AuthenticationException
    */
   private static StorageContainerManager createSCM(
       String[] args,
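
With this change, callers of createSCM have to handle AuthenticationException in
addition to IOException. A minimal sketch of the calling pattern (mirroring how
MiniOzoneClusterImpl invokes it further down in this commit):

    import java.io.IOException;
    import org.apache.hadoop.hdds.conf.OzoneConfiguration;
    import org.apache.hadoop.hdds.scm.server.StorageContainerManager;
    import org.apache.hadoop.security.authentication.client.AuthenticationException;

    public class CreateScmSketch {
      static StorageContainerManager create(OzoneConfiguration conf)
          throws IOException, AuthenticationException {
        // Both exceptions now propagate from createSCM.
        return StorageContainerManager.createSCM(null, conf);
      }
    }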

http://git-wip-us.apache.org/repos/asf/hadoop/blob/5641534e/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/StorageContainerManagerHttpServer.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/StorageContainerManagerHttpServer.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/StorageContainerManagerHttpServer.java
index da936ad..41dd89a 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/StorageContainerManagerHttpServer.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/StorageContainerManagerHttpServer.java
@@ -62,11 +62,11 @@ public class StorageContainerManagerHttpServer extends BaseHttpServer {
   }
 
   @Override protected String getKeytabFile() {
-    return ScmConfigKeys.SCM_WEB_AUTHENTICATION_KERBEROS_KEYTAB_FILE_KEY;
+    return ScmConfigKeys.HDDS_SCM_WEB_AUTHENTICATION_KERBEROS_KEYTAB_FILE_KEY;
   }
 
   @Override protected String getSpnegoPrincipal() {
-    return ScmConfigKeys.SCM_WEB_AUTHENTICATION_KERBEROS_PRINCIPAL_KEY;
+    return ScmConfigKeys.HDDS_SCM_WEB_AUTHENTICATION_KERBEROS_PRINCIPAL_KEY;
   }
 
   @Override protected String getEnabledKey() {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/5641534e/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/block/TestBlockManager.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/block/TestBlockManager.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/block/TestBlockManager.java
index e70e444..d1a1235 100644
--- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/block/TestBlockManager.java
+++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/block/TestBlockManager.java
@@ -35,6 +35,7 @@ import org.apache.hadoop.hdds.server.events.EventQueue;
 import org.apache.hadoop.ozone.OzoneConfigKeys;
 import org.apache.hadoop.ozone.common.Storage.StorageState;
 import org.apache.hadoop.ozone.container.common.SCMTestUtils;
+import org.apache.hadoop.security.authentication.client.AuthenticationException;
 import org.apache.hadoop.test.GenericTestUtils;
 import org.junit.After;
 import org.junit.Assert;
@@ -106,7 +107,7 @@ public class TestBlockManager implements EventHandler<Boolean> {
   }
 
   private static StorageContainerManager getScm(OzoneConfiguration conf)
-      throws IOException {
+      throws IOException, AuthenticationException {
     conf.setBoolean(OZONE_ENABLED, true);
     SCMStorage scmStore = new SCMStorage(conf);
     if(scmStore.getState() != StorageState.INITIALIZED) {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/5641534e/hadoop-ozone/acceptance-test/src/test/compose/compose-secure/.env
----------------------------------------------------------------------
diff --git a/hadoop-ozone/acceptance-test/src/test/compose/compose-secure/.env b/hadoop-ozone/acceptance-test/src/test/compose/compose-secure/.env
index 3254735..199d52c 100644
--- a/hadoop-ozone/acceptance-test/src/test/compose/compose-secure/.env
+++ b/hadoop-ozone/acceptance-test/src/test/compose/compose-secure/.env
@@ -14,4 +14,4 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-OZONEDIR=../../../hadoop-dist/target/ozone
\ No newline at end of file
+OZONEDIR=../../../../../../hadoop-dist/target/ozone-0.3.0-SNAPSHOT

http://git-wip-us.apache.org/repos/asf/hadoop/blob/5641534e/hadoop-ozone/acceptance-test/src/test/compose/compose-secure/docker-config
----------------------------------------------------------------------
diff --git a/hadoop-ozone/acceptance-test/src/test/compose/compose-secure/docker-config b/hadoop-ozone/acceptance-test/src/test/compose/compose-secure/docker-config
index 360b69a..cdf72ce 100644
--- a/hadoop-ozone/acceptance-test/src/test/compose/compose-secure/docker-config
+++ b/hadoop-ozone/acceptance-test/src/test/compose/compose-secure/docker-config
@@ -22,7 +22,7 @@ OZONE-SITE.XML_hdds.scm.block.client.address=scm
 OZONE-SITE.XML_ozone.metadata.dirs=/data/metadata
 OZONE-SITE.XML_ozone.handler.type=distributed
 OZONE-SITE.XML_hdds.scm.client.address=scm
-OZONE-SITE.XML_hdds.datanode.plugins=org.apache.hadoop.ozone.web.OzoneHddsDatanodeService
+OZONE-SITE.XML_ozone.replication=1
 OZONE-SITE.XML_hdds.scm.kerberos.principal=scm/scm@EXAMPLE.COM
 OZONE-SITE.XML_hdds.scm.kerberos.keytab.file=/etc/security/keytabs/scm.keytab
 OZONE-SITE.XML_ozone.om.kerberos.principal=om/om@EXAMPLE.COM
@@ -32,21 +32,12 @@ OZONE-SITE.XML_hdds.scm.web.authentication.kerberos.principal=HTTP/scm@EXAMPLE.C
 OZONE-SITE.XML_hdds.scm.web.authentication.kerberos.keytab=/etc/security/keytabs/HTTP.keytab
 OZONE-SITE.XML_ozone.om.web.authentication.kerberos.principal=HTTP/om@EXAMPLE.COM
 OZONE-SITE.XML_ozone.om.web.authentication.kerberos.keytab=/etc/security/keytabs/HTTP.keytab
-OZONE-SITE.XML_ozone.scm.block.client.address=scm
-OZONE-SITE.XML_ozone.scm.client.address=scm
-HDFS-SITE.XML_dfs.namenode.name.dir=/data/namenode
-HDFS-SITE.XML_dfs.datanode.plugins=org.apache.hadoop.ozone.HddsDatanodeService
-HDFS-SITE.XML_dfs.block.access.token.enable=true
-HDFS-SITE.XML_dfs.namenode.kerberos.principal=nn/namenode@EXAMPLE.COM
-HDFS-SITE.XML_dfs.namenode.keytab.file=/etc/security/keytabs/nn.keytab
 HDFS-SITE.XML_dfs.datanode.kerberos.principal=dn/datanode@EXAMPLE.COM
 HDFS-SITE.XML_dfs.datanode.keytab.file=/etc/security/keytabs/dn.keytab
-HDFS-SITE.XML_dfs.namenode.kerberos.internal.spnego.principal=HTTP/namenode@EXAMPLE.COM
 HDFS-SITE.XML_dfs.web.authentication.kerberos.principal=HTTP/_HOST@EXAMPLE.COM
 HDFS-SITE.XML_dfs.web.authentication.kerberos.keytab=/etc/security/keytabs/HTTP.keytab
 HDFS-SITE.XML_dfs.datanode.address=0.0.0.0:1019
 HDFS-SITE.XML_dfs.datanode.http.address=0.0.0.0:1012
-HDFS-SITE.XML_dfs.namenode.rpc-address=namenode:9000
 CORE-SITE.XML_dfs.data.transfer.protection=authentication
 CORE-SITE.XML_hadoop.security.authentication=kerberos
 CORE-SITE.XML_hadoop.security.auth_to_local=RULE:[2:$1@$0](.*)s/.*/root/
@@ -55,9 +46,51 @@ LOG4J.PROPERTIES_log4j.appender.stdout=org.apache.log4j.ConsoleAppender
 LOG4J.PROPERTIES_log4j.appender.stdout.layout=org.apache.log4j.PatternLayout
 LOG4J.PROPERTIES_log4j.appender.stdout.layout.ConversionPattern=%d{yyyy-MM-dd HH:mm:ss} %-5p %c{1}:%L - %m%n
 
+
+#Enable this variable to print out all hadoop rpc traffic to the stdout. See http://byteman.jboss.org/ to define your own instrumentation.
+#BYTEMAN_SCRIPT_URL=https://raw.githubusercontent.com/apache/hadoop/trunk/dev-support/byteman/hadooprpc.btm
+
+#LOG4J2.PROPERTIES_* are for Ozone Audit Logging
+LOG4J2.PROPERTIES_monitorInterval=30
+LOG4J2.PROPERTIES_filter=read,write
+LOG4J2.PROPERTIES_filter.read.type=MarkerFilter
+LOG4J2.PROPERTIES_filter.read.marker=READ
+LOG4J2.PROPERTIES_filter.read.onMatch=DENY
+LOG4J2.PROPERTIES_filter.read.onMismatch=NEUTRAL
+LOG4J2.PROPERTIES_filter.write.type=MarkerFilter
+LOG4J2.PROPERTIES_filter.write.marker=WRITE
+LOG4J2.PROPERTIES_filter.write.onMatch=NEUTRAL
+LOG4J2.PROPERTIES_filter.write.onMismatch=NEUTRAL
+LOG4J2.PROPERTIES_appenders=console, rolling
+LOG4J2.PROPERTIES_appender.console.type=Console
+LOG4J2.PROPERTIES_appender.console.name=STDOUT
+LOG4J2.PROPERTIES_appender.console.layout.type=PatternLayout
+LOG4J2.PROPERTIES_appender.console.layout.pattern=%d{DEFAULT} | %-5level | %c{1} | %msg | %throwable{3} %n
+LOG4J2.PROPERTIES_appender.rolling.type=RollingFile
+LOG4J2.PROPERTIES_appender.rolling.name=RollingFile
+LOG4J2.PROPERTIES_appender.rolling.fileName =${sys:hadoop.log.dir}/om-audit-${hostName}.log
+LOG4J2.PROPERTIES_appender.rolling.filePattern=${sys:hadoop.log.dir}/om-audit-${hostName}-%d{yyyy-MM-dd-HH-mm-ss}-%i.log.gz
+LOG4J2.PROPERTIES_appender.rolling.layout.type=PatternLayout
+LOG4J2.PROPERTIES_appender.rolling.layout.pattern=%d{DEFAULT} | %-5level | %c{1} | %msg | %throwable{3} %n
+LOG4J2.PROPERTIES_appender.rolling.policies.type=Policies
+LOG4J2.PROPERTIES_appender.rolling.policies.time.type=TimeBasedTriggeringPolicy
+LOG4J2.PROPERTIES_appender.rolling.policies.time.interval=86400
+LOG4J2.PROPERTIES_appender.rolling.policies.size.type=SizeBasedTriggeringPolicy
+LOG4J2.PROPERTIES_appender.rolling.policies.size.size=64MB
+LOG4J2.PROPERTIES_loggers=audit
+LOG4J2.PROPERTIES_logger.audit.type=AsyncLogger
+LOG4J2.PROPERTIES_logger.audit.name=OMAudit
+LOG4J2.PROPERTIES_logger.audit.level=INFO
+LOG4J2.PROPERTIES_logger.audit.appenderRefs=rolling
+LOG4J2.PROPERTIES_logger.audit.appenderRef.file.ref=RollingFile
+LOG4J2.PROPERTIES_rootLogger.level=INFO
+LOG4J2.PROPERTIES_rootLogger.appenderRefs=stdout
+LOG4J2.PROPERTIES_rootLogger.appenderRef.stdout.ref=STDOUT
+
+
 OZONE_DATANODE_SECURE_USER=root
 CONF_DIR=/etc/security/keytabs
-KERBEROS_KEYTABS=dn nn om scm HTTP testuser
+KERBEROS_KEYTABS=dn om scm HTTP testuser
 KERBEROS_KEYSTORES=hadoop
 KERBEROS_SERVER=ozone.kdc
 JAVA_HOME=/usr/lib/jvm/java-8-openjdk-amd64/

http://git-wip-us.apache.org/repos/asf/hadoop/blob/5641534e/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/ksm/KSMConfigKeys.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/ksm/KSMConfigKeys.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/ksm/KSMConfigKeys.java
deleted file mode 100644
index cc25dbe..0000000
--- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/ksm/KSMConfigKeys.java
+++ /dev/null
@@ -1,84 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with this
- * work for additional information regarding copyright ownership.  The ASF
- * licenses this file to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations under
- * the License.
- */
-
-package org.apache.hadoop.ozone.ksm;
-
-import org.apache.hadoop.ozone.OzoneAcl;
-/**
- * KSM Constants.
- */
-public final class KSMConfigKeys {
-  /**
-   * Never constructed.
-   */
-  private KSMConfigKeys() {
-  }
-
-
-  public static final String OZONE_KSM_HANDLER_COUNT_KEY =
-      "ozone.ksm.handler.count.key";
-  public static final int OZONE_KSM_HANDLER_COUNT_DEFAULT = 20;
-
-  public static final String OZONE_KSM_ADDRESS_KEY =
-      "ozone.ksm.address";
-  public static final String OZONE_KSM_BIND_HOST_DEFAULT =
-      "0.0.0.0";
-  public static final int OZONE_KSM_PORT_DEFAULT = 9862;
-
-  public static final String OZONE_KSM_HTTP_ENABLED_KEY =
-      "ozone.ksm.http.enabled";
-  public static final String OZONE_KSM_HTTP_BIND_HOST_KEY =
-      "ozone.ksm.http-bind-host";
-  public static final String OZONE_KSM_HTTPS_BIND_HOST_KEY =
-      "ozone.ksm.https-bind-host";
-  public static final String OZONE_KSM_HTTP_ADDRESS_KEY =
-      "ozone.ksm.http-address";
-  public static final String OZONE_KSM_HTTPS_ADDRESS_KEY =
-      "ozone.ksm.https-address";
-  public static final String OZONE_KSM_HTTP_BIND_HOST_DEFAULT = "0.0.0.0";
-  public static final int OZONE_KSM_HTTP_BIND_PORT_DEFAULT = 9874;
-  public static final int OZONE_KSM_HTTPS_BIND_PORT_DEFAULT = 9875;
-
-  // LevelDB cache file uses an off-heap cache in LevelDB of 128 MB.
-  public static final String OZONE_KSM_DB_CACHE_SIZE_MB =
-      "ozone.ksm.db.cache.size.mb";
-  public static final int OZONE_KSM_DB_CACHE_SIZE_DEFAULT = 128;
-
-  public static final String OZONE_KSM_USER_MAX_VOLUME =
-      "ozone.ksm.user.max.volume";
-  public static final int OZONE_KSM_USER_MAX_VOLUME_DEFAULT = 1024;
-
-  // KSM Default user/group permissions
-  public static final String OZONE_KSM_USER_RIGHTS =
-      "ozone.ksm.user.rights";
-  public static final OzoneAcl.OzoneACLRights OZONE_KSM_USER_RIGHTS_DEFAULT =
-      OzoneAcl.OzoneACLRights.READ_WRITE;
-
-  public static final String OZONE_KSM_GROUP_RIGHTS =
-      "ozone.ksm.group.rights";
-  public static final OzoneAcl.OzoneACLRights OZONE_KSM_GROUP_RIGHTS_DEFAULT =
-      OzoneAcl.OzoneACLRights.READ_WRITE;
-
-  public static final String OZONE_KEY_DELETING_LIMIT_PER_TASK =
-      "ozone.key.deleting.limit.per.task";
-  public static final int OZONE_KEY_DELETING_LIMIT_PER_TASK_DEFAULT = 1000;
-
-  public static final String OZONE_OM_WEB_AUTHENTICATION_KERBEROS_PRINCIPAL =
-      "ozone.om.web.authentication.kerberos.principal";
-  public static final String OZONE_OM_WEB_AUTHENTICATION_KERBEROS_KEYTAB_FILE =
-      "ozone.om.web.authentication.kerberos.keytab";
-}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/5641534e/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/MiniOzoneCluster.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/MiniOzoneCluster.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/MiniOzoneCluster.java
index d13efb4..e2da410 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/MiniOzoneCluster.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/MiniOzoneCluster.java
@@ -25,6 +25,7 @@ import org.apache.hadoop.ozone.client.OzoneClient;
 import org.apache.hadoop.ozone.om.OzoneManager;
 import org.apache.hadoop.hdds.scm.protocolPB
     .StorageContainerLocationProtocolClientSideTranslatorPB;
+import org.apache.hadoop.security.authentication.client.AuthenticationException;
 import org.apache.hadoop.test.GenericTestUtils;
 
 import java.io.IOException;
@@ -142,7 +143,7 @@ public interface MiniOzoneCluster {
    * @throws InterruptedException
    */
   void restartStorageContainerManager() throws InterruptedException,
-      TimeoutException, IOException;
+      TimeoutException, IOException, AuthenticationException;
 
   /**
    * Restarts OzoneManager instance.

http://git-wip-us.apache.org/repos/asf/hadoop/blob/5641534e/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/MiniOzoneClusterImpl.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/MiniOzoneClusterImpl.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/MiniOzoneClusterImpl.java
index 113fea8..2fe499e 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/MiniOzoneClusterImpl.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/MiniOzoneClusterImpl.java
@@ -217,7 +217,7 @@ public final class MiniOzoneClusterImpl implements MiniOzoneCluster {
 
   @Override
   public void restartStorageContainerManager()
-      throws TimeoutException, InterruptedException, IOException {
+      throws TimeoutException, InterruptedException, IOException, AuthenticationException {
     scm.stop();
     scm.join();
     scm = StorageContainerManager.createSCM(null, conf);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/5641534e/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestSecureOzoneCluster.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestSecureOzoneCluster.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestSecureOzoneCluster.java
index 3fb5499..14c6800 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestSecureOzoneCluster.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestSecureOzoneCluster.java
@@ -89,10 +89,6 @@ public final class TestSecureOzoneCluster {
       startMiniKdc();
       setSecureConfig(conf);
       createCredentialsInKDC(conf, miniKdc);
-
-      clusterId = UUID.randomUUID().toString();
-      scmId = UUID.randomUUID().toString();
-      omId = UUID.randomUUID().toString();
     } catch (IOException e) {
       LOGGER.error("Failed to initialize TestSecureOzoneCluster", e);
     } catch (Exception e) {
@@ -120,11 +116,12 @@ public final class TestSecureOzoneCluster {
     createPrincipal(scmKeytab,
         conf.get(ScmConfigKeys.HDDS_SCM_KERBEROS_PRINCIPAL_KEY));
      createPrincipal(spnegoKeytab,
-         conf.get(ScmConfigKeys.SCM_WEB_AUTHENTICATION_KERBEROS_PRINCIPAL_KEY),
-         conf.get(OMConfigKeys.OZONE_OM_WEB_AUTHENTICATION_KERBEROS_PRINCIPAL_KEY));
-    createPrincipal(omKeyTab,
+         conf.get(ScmConfigKeys
+             .HDDS_SCM_WEB_AUTHENTICATION_KERBEROS_PRINCIPAL_KEY));
         conf.get(OMConfigKeys
-            .OZONE_OM_WEB_AUTHENTICATION_KERBEROS_PRINCIPAL_KEY));
+            .OZONE_OM_WEB_AUTHENTICATION_KERBEROS_PRINCIPAL_KEY);
+    createPrincipal(omKeyTab,
+        conf.get(OMConfigKeys.OZONE_OM_KERBEROS_PRINCIPAL_KEY));
   }
 
   private void createPrincipal(File keytab, String... principal)
@@ -156,7 +153,7 @@ public final class TestSecureOzoneCluster {
 
     conf.set(ScmConfigKeys.HDDS_SCM_KERBEROS_PRINCIPAL_KEY,
         "scm/" + host + "@" + realm);
-    conf.set(ScmConfigKeys.SCM_WEB_AUTHENTICATION_KERBEROS_PRINCIPAL_KEY,
+    conf.set(ScmConfigKeys.HDDS_SCM_WEB_AUTHENTICATION_KERBEROS_PRINCIPAL_KEY,
         "HTTP_SCM/" + host + "@" + realm);
 
     conf.set(OMConfigKeys.OZONE_OM_KERBEROS_PRINCIPAL_KEY,
@@ -170,7 +167,7 @@ public final class TestSecureOzoneCluster {
 
     conf.set(ScmConfigKeys.HDDS_SCM_KERBEROS_KEYTAB_FILE_KEY,
         scmKeytab.getAbsolutePath());
-    conf.set(ScmConfigKeys.SCM_WEB_AUTHENTICATION_KERBEROS_KEYTAB_FILE_KEY,
+    conf.set(ScmConfigKeys.HDDS_SCM_WEB_AUTHENTICATION_KERBEROS_KEYTAB_FILE_KEY,
         spnegoKeytab.getAbsolutePath());
     conf.set(OMConfigKeys.OZONE_OM_KERBEROS_KEYTAB_FILE_KEY,
         omKeyTab.getAbsolutePath());
@@ -189,6 +186,11 @@ public final class TestSecureOzoneCluster {
 
   private void initSCM()
       throws IOException, AuthenticationException {
+
+    clusterId = UUID.randomUUID().toString();
+    scmId = UUID.randomUUID().toString();
+    omId = UUID.randomUUID().toString();
+
     final String path = GenericTestUtils
         .getTempPath(UUID.randomUUID().toString());
     Path scmPath = Paths.get(path, "scm-meta");




[45/50] [abbrv] hadoop git commit: HDDS-70. Fix config names for secure ksm and scm. Contributed by Ajay Kumar.

Posted by xy...@apache.org.
HDDS-70. Fix config names for secure ksm and scm. Contributed by Ajay Kumar.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/249df9a6
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/249df9a6
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/249df9a6

Branch: refs/heads/HDDS-4
Commit: 249df9a6d8939b692b2306ff204be8c20ed41fdb
Parents: dd5b958
Author: Xiaoyu Yao <xy...@apache.org>
Authored: Tue May 22 13:32:28 2018 -0700
Committer: Xiaoyu Yao <xy...@apache.org>
Committed: Thu Sep 20 15:13:07 2018 -0700

----------------------------------------------------------------------
 .../apache/hadoop/hdds/scm/ScmConfigKeys.java   |  6 +-
 .../scm/protocol/ScmBlockLocationProtocol.java  |  2 +-
 .../StorageContainerLocationProtocol.java       |  3 +-
 .../protocolPB/ScmBlockLocationProtocolPB.java  |  4 +-
 .../StorageContainerLocationProtocolPB.java     |  2 +-
 .../apache/hadoop/ozone/OzoneConfigKeys.java    |  6 +-
 .../common/src/main/resources/ozone-default.xml | 36 ++++++---
 .../StorageContainerDatanodeProtocol.java       |  2 +-
 .../StorageContainerDatanodeProtocolPB.java     |  2 +-
 .../scm/server/StorageContainerManager.java     | 12 +--
 .../compose/compose-secure/docker-compose.yaml  |  6 +-
 .../test/compose/compose-secure/docker-config   | 12 +--
 .../acceptance/ozone-secure.robot               | 12 +--
 .../ozone/client/protocol/ClientProtocol.java   |  2 +-
 .../apache/hadoop/ozone/ksm/KSMConfigKeys.java  | 84 ++++++++++++++++++++
 .../ozone/om/protocol/OzoneManagerProtocol.java |  4 +-
 .../hadoop/ozone/TestSecureOzoneCluster.java    | 21 +++--
 .../apache/hadoop/ozone/om/OzoneManager.java    |  4 +-
 18 files changed, 157 insertions(+), 63 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/249df9a6/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/ScmConfigKeys.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/ScmConfigKeys.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/ScmConfigKeys.java
index e8b0930..4b0e7c8 100644
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/ScmConfigKeys.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/ScmConfigKeys.java
@@ -158,9 +158,9 @@ public final class ScmConfigKeys {
       "ozone.scm.http-address";
   public static final String OZONE_SCM_HTTPS_ADDRESS_KEY =
       "ozone.scm.https-address";
-  public static final String OZONE_SCM_KERBEROS_KEYTAB_FILE_KEY =
-      "ozone.scm.kerberos.keytab.file";
-  public static final String OZONE_SCM_KERBEROS_PRINCIPAL_KEY = "ozone.scm.kerberos.principal";
+  public static final String HDDS_SCM_KERBEROS_KEYTAB_FILE_KEY =
+      "hdds.scm.kerberos.keytab.file";
+  public static final String HDDS_SCM_KERBEROS_PRINCIPAL_KEY = "hdds.scm.kerberos.principal";
   public static final String OZONE_SCM_HTTP_BIND_HOST_DEFAULT = "0.0.0.0";
   public static final int OZONE_SCM_HTTP_BIND_PORT_DEFAULT = 9876;
   public static final int OZONE_SCM_HTTPS_BIND_PORT_DEFAULT = 9877;
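
To illustrate the rename, a minimal configuration sketch using the new hdds.scm
prefixed constants (the principal and keytab values are placeholders, following
the docker-config updated elsewhere in this commit):

    import org.apache.hadoop.hdds.conf.OzoneConfiguration;
    import org.apache.hadoop.hdds.scm.ScmConfigKeys;

    public class ScmKerberosSketch {
      static OzoneConfiguration secureScmConf() {
        OzoneConfiguration conf = new OzoneConfiguration();
        // Renamed from ozone.scm.kerberos.* to hdds.scm.kerberos.* here.
        conf.set(ScmConfigKeys.HDDS_SCM_KERBEROS_PRINCIPAL_KEY,
            "scm/scm@EXAMPLE.COM");
        conf.set(ScmConfigKeys.HDDS_SCM_KERBEROS_KEYTAB_FILE_KEY,
            "/etc/security/keytabs/scm.keytab");
        return conf;
      }
    }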

http://git-wip-us.apache.org/repos/asf/hadoop/blob/249df9a6/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/protocol/ScmBlockLocationProtocol.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/protocol/ScmBlockLocationProtocol.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/protocol/ScmBlockLocationProtocol.java
index e17f1c2..2d46ae0 100644
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/protocol/ScmBlockLocationProtocol.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/protocol/ScmBlockLocationProtocol.java
@@ -33,7 +33,7 @@ import java.util.List;
  * ScmBlockLocationProtocol is used by an HDFS node to find the set of nodes
  * to read/write a block.
  */
-@KerberosInfo(serverPrincipal = ScmConfigKeys.OZONE_SCM_KERBEROS_PRINCIPAL_KEY)
+@KerberosInfo(serverPrincipal = ScmConfigKeys.HDDS_SCM_KERBEROS_PRINCIPAL_KEY)
 public interface ScmBlockLocationProtocol {
 
   /**

http://git-wip-us.apache.org/repos/asf/hadoop/blob/249df9a6/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/protocol/StorageContainerLocationProtocol.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/protocol/StorageContainerLocationProtocol.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/protocol/StorageContainerLocationProtocol.java
index 454b9f3..33a453c 100644
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/protocol/StorageContainerLocationProtocol.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/protocol/StorageContainerLocationProtocol.java
@@ -17,7 +17,6 @@
 
 package org.apache.hadoop.hdds.scm.protocol;
 
-import org.apache.hadoop.hdds.HddsConfigKeys;
 import org.apache.hadoop.hdds.scm.ScmConfigKeys;
 import org.apache.hadoop.hdds.scm.ScmInfo;
 import org.apache.hadoop.hdds.scm.container.common.helpers.ContainerWithPipeline;
@@ -35,7 +34,7 @@ import org.apache.hadoop.security.KerberosInfo;
  * ContainerLocationProtocol is used by an HDFS node to find the set of nodes
  * that currently host a container.
  */
-@KerberosInfo(serverPrincipal = ScmConfigKeys.OZONE_SCM_KERBEROS_PRINCIPAL_KEY)
+@KerberosInfo(serverPrincipal = ScmConfigKeys.HDDS_SCM_KERBEROS_PRINCIPAL_KEY)
 public interface StorageContainerLocationProtocol {
   /**
    * Asks SCM where a container should be allocated. SCM responds with the

http://git-wip-us.apache.org/repos/asf/hadoop/blob/249df9a6/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/protocolPB/ScmBlockLocationProtocolPB.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/protocolPB/ScmBlockLocationProtocolPB.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/protocolPB/ScmBlockLocationProtocolPB.java
index 89bb066..06bbd05 100644
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/protocolPB/ScmBlockLocationProtocolPB.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/protocolPB/ScmBlockLocationProtocolPB.java
@@ -18,11 +18,9 @@
 package org.apache.hadoop.hdds.scm.protocolPB;
 
 import org.apache.hadoop.classification.InterfaceAudience;
-import org.apache.hadoop.hdds.HddsConfigKeys;
 import org.apache.hadoop.hdds.protocol.proto.ScmBlockLocationProtocolProtos
     .ScmBlockLocationProtocolService;
 import org.apache.hadoop.hdds.scm.ScmConfigKeys;
-import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.ipc.ProtocolInfo;
 import org.apache.hadoop.security.KerberosInfo;
 
@@ -35,7 +33,7 @@ import org.apache.hadoop.security.KerberosInfo;
     protocolVersion = 1)
 @InterfaceAudience.Private
 @KerberosInfo(
-    serverPrincipal = ScmConfigKeys.OZONE_SCM_KERBEROS_PRINCIPAL_KEY)
+    serverPrincipal = ScmConfigKeys.HDDS_SCM_KERBEROS_PRINCIPAL_KEY)
 public interface ScmBlockLocationProtocolPB
     extends ScmBlockLocationProtocolService.BlockingInterface {
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/249df9a6/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/protocolPB/StorageContainerLocationProtocolPB.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/protocolPB/StorageContainerLocationProtocolPB.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/protocolPB/StorageContainerLocationProtocolPB.java
index 3bd83f9..f80ba20 100644
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/protocolPB/StorageContainerLocationProtocolPB.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/protocolPB/StorageContainerLocationProtocolPB.java
@@ -33,7 +33,7 @@ import org.apache.hadoop.security.KerberosInfo;
     "org.apache.hadoop.ozone.protocol.StorageContainerLocationProtocol",
     protocolVersion = 1)
 @KerberosInfo(
-    serverPrincipal = ScmConfigKeys.OZONE_SCM_KERBEROS_PRINCIPAL_KEY)
+    serverPrincipal = ScmConfigKeys.HDDS_SCM_KERBEROS_PRINCIPAL_KEY)
 @InterfaceAudience.Private
 public interface StorageContainerLocationProtocolPB
     extends StorageContainerLocationProtocolService.BlockingInterface {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/249df9a6/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConfigKeys.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConfigKeys.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConfigKeys.java
index fb83052..944605b 100644
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConfigKeys.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConfigKeys.java
@@ -297,12 +297,16 @@ public final class OzoneConfigKeys {
   public static final long
       HDDS_LOCK_SUPPRESS_WARNING_INTERVAL_MS_DEAFULT = 10000L;
   public static final String OZONE_SECURITY_ENABLED_KEY = "ozone.security.enabled";
-  public static final String OZONE_SYSTEM_TAGS_KEY = "ozone.system.tags";
   public static final boolean OZONE_SECURITY_ENABLED_DEFAULT = false;
 
   public static final String OZONE_CONTAINER_COPY_WORKDIR =
       "hdds.datanode.replication.work.dir";
 
+  public static final String OZONE_OM_KERBEROS_KEYTAB_FILE_KEY = "ozone.om."
+      + "kerberos.keytab.file";
+  public static final String OZONE_OM_KERBEROS_PRINCIPAL_KEY = "ozone.om"
+      + ".kerberos.principal";
+
   /**
    * There is no need to instantiate this class.
    */

http://git-wip-us.apache.org/repos/asf/hadoop/blob/249df9a6/hadoop-hdds/common/src/main/resources/ozone-default.xml
----------------------------------------------------------------------
diff --git a/hadoop-hdds/common/src/main/resources/ozone-default.xml b/hadoop-hdds/common/src/main/resources/ozone-default.xml
index efb5aa6..d450b37 100644
--- a/hadoop-hdds/common/src/main/resources/ozone-default.xml
+++ b/hadoop-hdds/common/src/main/resources/ozone-default.xml
@@ -433,6 +433,7 @@
     </description>
   </property>
   <property>
     <name>ozone.om.keytab.file</name>
     <value/>
     <tag>OM, SECURITY</tag>
@@ -442,6 +443,9 @@
   </property>
   <property>
     <name>ozone.om.db.cache.size.mb</name>
     <value>128</value>
     <tag>OM, PERFORMANCE</tag>
     <description>
@@ -929,7 +933,7 @@
   </property>
 
   <property>
-    <name>ozone.scm.container.creation.lease.timeout</name>
+    <name>hdds.scm.container.creation.lease.timeout</name>
     <value>60s</value>
     <tag>OZONE, SCM</tag>
     <description>
@@ -983,7 +987,11 @@
     </description>
   </property>
   <property>
     <name>hdds.container.close.threshold</name>
     <value>0.9f</value>
     <tag>OZONE, DATANODE</tag>
     <description>
@@ -1199,6 +1207,7 @@
     </description>
   </property>
   <property>
     <name>hdds.db.profile</name>
     <value>SSD</value>
     <tag>OZONE, OM, PERFORMANCE, REQUIRED</tag>
@@ -1227,15 +1236,18 @@
 
   <property>
-    <name>ozone.scm.kerberos.keytab.file</name>
+    <name>hdds.scm.kerberos.keytab.file</name>
     <value></value>
     <tag> OZONE, SECURITY</tag>
     <description> The keytab file used by each SCM daemon to login as its
       service principal. The principal name is configured with
-      ozone.scm.kerberos.principal.
+      hdds.scm.kerberos.principal.
     </description>
   </property>
   <property>
-    <name>ozone.scm.kerberos.principal</name>
+    <name>hdds.scm.kerberos.principal</name>
     <value></value>
     <tag> OZONE, SECURITY</tag>
     <description>The SCM service principal. Ex scm/_HOST@REALM.COM</description>
@@ -1245,38 +1257,38 @@
     <name>ozone.om.kerberos.keytab.file</name>
     <value></value>
     <tag> OZONE, SECURITY</tag>
-    <description> The keytab file used by KSM daemon to login as its
+    <description> The keytab file used by OzoneManager daemon to login as its
       service principal. The principal name is configured with
-      hdds.ksm.kerberos.principal.
+      ozone.om.kerberos.principal.
     </description>
   </property>
   <property>
     <name>ozone.om.kerberos.principal</name>
     <value></value>
     <tag> OZONE, SECURITY</tag>
-    <description>The KSM service principal. Ex ksm/_HOST@REALM.COM</description>
+    <description>The OzoneManager service principal. Ex om/_HOST@REALM.COM</description>
   </property>
 
   <property>
-    <name>ozone.scm.web.authentication.kerberos.principal</name>
+    <name>hdds.scm.web.authentication.kerberos.principal</name>
     <value>HTTP/_HOST@EXAMPLE.COM</value>
   </property>
   <property>
-    <name>ozone.scm.web.authentication.kerberos.keytab</name>
+    <name>hdds.scm.web.authentication.kerberos.keytab</name>
     <value>/etc/security/keytabs/HTTP.keytab</value>
   </property>
   <property>
-    <name>hdds.ksm.web.authentication.kerberos.principal</name>
+    <name>ozone.om.web.authentication.kerberos.principal</name>
     <value>HTTP/_HOST@EXAMPLE.COM</value>
     <description>
-      KSM http server kerberos principal.
+      OzoneManager http server kerberos principal.
     </description>
   </property>
   <property>
-    <name>hdds.ksm.web.authentication.kerberos.keytab</name>
+    <name>ozone.om.web.authentication.kerberos.keytab</name>
     <value>/etc/security/keytabs/HTTP.keytab</value>
     <description>
-      KSM http server kerberos keytab.
+      OzoneManager http server kerberos keytab.
     </description>
   </property>
 </configuration>
\ No newline at end of file
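
Pulling the renamed keys together, a secure deployment touched by this
change would be wired roughly as follows; a sketch only, with placeholder
principals and keytab paths rather than defaults shipped by Ozone:

import org.apache.hadoop.hdds.conf.OzoneConfiguration;

public final class SecureOzoneConfExample {
  private SecureOzoneConfExample() { }

  public static OzoneConfiguration secureConf() {
    OzoneConfiguration conf = new OzoneConfiguration();
    conf.setBoolean("ozone.security.enabled", true);
    // SCM keys now live under hdds.scm.*
    conf.set("hdds.scm.kerberos.principal", "scm/_HOST@EXAMPLE.COM");
    conf.set("hdds.scm.kerberos.keytab.file",
        "/etc/security/keytabs/scm.keytab");
    // OM keys stay under ozone.om.*
    conf.set("ozone.om.kerberos.principal", "om/_HOST@EXAMPLE.COM");
    conf.set("ozone.om.kerberos.keytab.file",
        "/etc/security/keytabs/om.keytab");
    return conf;
  }
}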

http://git-wip-us.apache.org/repos/asf/hadoop/blob/249df9a6/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/protocol/StorageContainerDatanodeProtocol.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/protocol/StorageContainerDatanodeProtocol.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/protocol/StorageContainerDatanodeProtocol.java
index b723d28..48f7d8a 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/protocol/StorageContainerDatanodeProtocol.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/protocol/StorageContainerDatanodeProtocol.java
@@ -49,7 +49,7 @@ import org.apache.hadoop.security.KerberosInfo;
  * Protoc file that defines this protocol.
  */
 @KerberosInfo(
-    serverPrincipal = ScmConfigKeys.OZONE_SCM_KERBEROS_PRINCIPAL_KEY)
+    serverPrincipal = ScmConfigKeys.HDDS_SCM_KERBEROS_PRINCIPAL_KEY)
 @InterfaceAudience.Private
 public interface StorageContainerDatanodeProtocol {
   /**

http://git-wip-us.apache.org/repos/asf/hadoop/blob/249df9a6/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/protocolPB/StorageContainerDatanodeProtocolPB.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/protocolPB/StorageContainerDatanodeProtocolPB.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/protocolPB/StorageContainerDatanodeProtocolPB.java
index 9c32ef8..9006e91 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/protocolPB/StorageContainerDatanodeProtocolPB.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/protocolPB/StorageContainerDatanodeProtocolPB.java
@@ -33,7 +33,7 @@ import org.apache.hadoop.security.KerberosInfo;
     "org.apache.hadoop.ozone.protocol.StorageContainerDatanodeProtocol",
     protocolVersion = 1)
 @KerberosInfo(
-    serverPrincipal = ScmConfigKeys.OZONE_SCM_KERBEROS_PRINCIPAL_KEY,
+    serverPrincipal = ScmConfigKeys.HDDS_SCM_KERBEROS_PRINCIPAL_KEY,
     clientPrincipal = DFSConfigKeys.DFS_DATANODE_KERBEROS_PRINCIPAL_KEY)
 public interface StorageContainerDatanodeProtocolPB extends
     StorageContainerDatanodeProtocolService.BlockingInterface {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/249df9a6/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/StorageContainerManager.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/StorageContainerManager.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/StorageContainerManager.java
index 7d1ebb4..47830e6 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/StorageContainerManager.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/StorageContainerManager.java
@@ -107,8 +107,8 @@ import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_DB_CACHE_SIZE_M
 import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_ENABLED;
 import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_SECURITY_ENABLED_DEFAULT;
 import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_SECURITY_ENABLED_KEY;
-import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_KERBEROS_PRINCIPAL_KEY;
-import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_KERBEROS_KEYTAB_FILE_KEY;
+import static org.apache.hadoop.hdds.scm.ScmConfigKeys.HDDS_SCM_KERBEROS_PRINCIPAL_KEY;
+import static org.apache.hadoop.hdds.scm.ScmConfigKeys.HDDS_SCM_KERBEROS_KEYTAB_FILE_KEY;
 import static org.apache.hadoop.util.ExitUtil.terminate;
 
 /**
@@ -334,16 +334,16 @@ public final class StorageContainerManager extends ServiceRuntimeInfoImpl
       throws IOException, AuthenticationException {
     LOG.debug("Ozone security is enabled. Attempting login for SCM user. "
             + "Principal: {}, keytab: {}", this.scmConf.get
-            (OZONE_SCM_KERBEROS_PRINCIPAL_KEY),
-        this.scmConf.get(OZONE_SCM_KERBEROS_KEYTAB_FILE_KEY));
+            (HDDS_SCM_KERBEROS_PRINCIPAL_KEY),
+        this.scmConf.get(HDDS_SCM_KERBEROS_KEYTAB_FILE_KEY));
 
     if (SecurityUtil.getAuthenticationMethod(conf).equals
         (AuthenticationMethod.KERBEROS)) {
       UserGroupInformation.setConfiguration(this.scmConf);
       InetSocketAddress socAddr = HddsServerUtil
           .getScmBlockClientBindAddress(conf);
-      SecurityUtil.login(conf, OZONE_SCM_KERBEROS_KEYTAB_FILE_KEY,
-          OZONE_SCM_KERBEROS_PRINCIPAL_KEY, socAddr.getHostName());
+      SecurityUtil.login(conf, HDDS_SCM_KERBEROS_KEYTAB_FILE_KEY,
+          HDDS_SCM_KERBEROS_PRINCIPAL_KEY, socAddr.getHostName());
     } else {
       throw new AuthenticationException(SecurityUtil.getAuthenticationMethod
           (conf) + " authentication method not support. "

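For context, SecurityUtil.login with the two renamed keys expands roughly
to the following; this is a sketch of the long-standing Hadoop utility's
behavior, not code added by the patch:

// _HOST in the configured principal is replaced with the RPC bind host,
// then the SCM process logs in from the keytab via UGI.
String principal = SecurityUtil.getServerPrincipal(
    conf.get(HDDS_SCM_KERBEROS_PRINCIPAL_KEY), socAddr.getHostName());
UserGroupInformation.loginUserFromKeytab(principal,
    conf.get(HDDS_SCM_KERBEROS_KEYTAB_FILE_KEY));
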
http://git-wip-us.apache.org/repos/asf/hadoop/blob/249df9a6/hadoop-ozone/acceptance-test/src/test/compose/compose-secure/docker-compose.yaml
----------------------------------------------------------------------
diff --git a/hadoop-ozone/acceptance-test/src/test/compose/compose-secure/docker-compose.yaml b/hadoop-ozone/acceptance-test/src/test/compose/compose-secure/docker-compose.yaml
index 2661163..db211bc 100644
--- a/hadoop-ozone/acceptance-test/src/test/compose/compose-secure/docker-compose.yaml
+++ b/hadoop-ozone/acceptance-test/src/test/compose/compose-secure/docker-compose.yaml
@@ -40,15 +40,15 @@ services:
       env_file:
         - ./docker-config
       command: ["/opt/hadoop/bin/ozone","datanode"]
-   ksm:
+   om:
       image: ahadoop/ozone:v1
-      hostname: ksm
+      hostname: om
       volumes:
          - ${OZONEDIR}:/opt/hadoop
       ports:
          - 9874:9874
       environment:
-         ENSURE_KSM_INITIALIZED: /data/metadata/ksm/current/VERSION
+         ENSURE_KSM_INITIALIZED: /data/metadata/om/current/VERSION
       env_file:
           - ./docker-config
       command: ["/opt/hadoop/bin/ozone","ksm"]

http://git-wip-us.apache.org/repos/asf/hadoop/blob/249df9a6/hadoop-ozone/acceptance-test/src/test/compose/compose-secure/docker-config
----------------------------------------------------------------------
diff --git a/hadoop-ozone/acceptance-test/src/test/compose/compose-secure/docker-config b/hadoop-ozone/acceptance-test/src/test/compose/compose-secure/docker-config
index 678c75a..360b69a 100644
--- a/hadoop-ozone/acceptance-test/src/test/compose/compose-secure/docker-config
+++ b/hadoop-ozone/acceptance-test/src/test/compose/compose-secure/docker-config
@@ -14,7 +14,7 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-OZONE-SITE.XML_ozone.ksm.address=ksm
+OZONE-SITE.XML_ozone.ksm.address=om
 OZONE-SITE.XML_ozone.scm.names=scm
 OZONE-SITE.XML_ozone.enabled=True
 OZONE-SITE.XML_hdds.scm.datanode.id=/data/datanode.id
@@ -25,13 +25,13 @@ OZONE-SITE.XML_hdds.scm.client.address=scm
 OZONE-SITE.XML_hdds.datanode.plugins=org.apache.hadoop.ozone.web.OzoneHddsDatanodeService
 OZONE-SITE.XML_hdds.scm.kerberos.principal=scm/scm@EXAMPLE.COM
 OZONE-SITE.XML_hdds.scm.kerberos.keytab.file=/etc/security/keytabs/scm.keytab
-OZONE-SITE.XML_ozone.ksm.kerberos.principal=ksm/ksm@EXAMPLE.COM
-OZONE-SITE.XML_ozone.ksm.kerberos.keytab.file=/etc/security/keytabs/ksm.keytab
+OZONE-SITE.XML_ozone.om.kerberos.principal=om/om@EXAMPLE.COM
+OZONE-SITE.XML_ozone.om.kerberos.keytab.file=/etc/security/keytabs/om.keytab
 OZONE-SITE.XML_ozone.security.enabled=true
 OZONE-SITE.XML_hdds.scm.web.authentication.kerberos.principal=HTTP/scm@EXAMPLE.COM
 OZONE-SITE.XML_hdds.scm.web.authentication.kerberos.keytab=/etc/security/keytabs/HTTP.keytab
-OZONE-SITE.XML_ozone.ksm.web.authentication.kerberos.principal=HTTP/ksm@EXAMPLE.COM
-OZONE-SITE.XML_ozone.ksm.web.authentication.kerberos.keytab=/etc/security/keytabs/HTTP.keytab
+OZONE-SITE.XML_ozone.om.web.authentication.kerberos.principal=HTTP/om@EXAMPLE.COM
+OZONE-SITE.XML_ozone.om.web.authentication.kerberos.keytab=/etc/security/keytabs/HTTP.keytab
 OZONE-SITE.XML_ozone.scm.block.client.address=scm
 OZONE-SITE.XML_ozone.scm.client.address=scm
 HDFS-SITE.XML_dfs.namenode.name.dir=/data/namenode
@@ -57,7 +57,7 @@ LOG4J.PROPERTIES_log4j.appender.stdout.layout.ConversionPattern=%d{yyyy-MM-dd HH
 
 OZONE_DATANODE_SECURE_USER=root
 CONF_DIR=/etc/security/keytabs
-KERBEROS_KEYTABS=dn nn ksm scm HTTP testuser
+KERBEROS_KEYTABS=dn nn om scm HTTP testuser
 KERBEROS_KEYSTORES=hadoop
 KERBEROS_SERVER=ozone.kdc
 JAVA_HOME=/usr/lib/jvm/java-8-openjdk-amd64/

http://git-wip-us.apache.org/repos/asf/hadoop/blob/249df9a6/hadoop-ozone/acceptance-test/src/test/robotframework/acceptance/ozone-secure.robot
----------------------------------------------------------------------
diff --git a/hadoop-ozone/acceptance-test/src/test/robotframework/acceptance/ozone-secure.robot b/hadoop-ozone/acceptance-test/src/test/robotframework/acceptance/ozone-secure.robot
index 4a78980..7fc1088 100644
--- a/hadoop-ozone/acceptance-test/src/test/robotframework/acceptance/ozone-secure.robot
+++ b/hadoop-ozone/acceptance-test/src/test/robotframework/acceptance/ozone-secure.robot
@@ -26,7 +26,7 @@ ${version}
 *** Test Cases ***
 
 Daemons are running
-    Is daemon running           ksm
+    Is daemon running           om
     Is daemon running           scm
     Is daemon running           datanode
     Is daemon running           ozone.kdc
@@ -45,15 +45,15 @@ Test rest interface
                     Should contain      ${result}       200 OK
 
 Test ozone cli
-    ${result} =     Execute on      1   datanode        ozone oz -createVolume o3://ksm/hive -user bilbo -quota 100TB -root
+    ${result} =     Execute on      1   datanode        ozone oz -createVolume o3://om/hive -user bilbo -quota 100TB -root
                     Should contain      ${result}       Client cannot authenticate via
                     # Authenticate testuser
                     Execute on      0   datanode        kinit -k testuser/datanode@EXAMPLE.COM -t /etc/security/keytabs/testuser.keytab
-                    Execute on      0   datanode        ozone oz -createVolume o3://ksm/hive -user bilbo -quota 100TB -root
-    ${result} =     Execute on      0   datanode        ozone oz -listVolume o3://ksm/ -user bilbo | grep -Ev 'Removed|WARN|DEBUG|ERROR|INFO|TRACE' | jq -r '.[] | select(.volumeName=="hive")'
+                    Execute on      0   datanode        ozone oz -createVolume o3://om/hive -user bilbo -quota 100TB -root
+    ${result} =     Execute on      0   datanode        ozone oz -listVolume o3://om/ -user bilbo | grep -Ev 'Removed|WARN|DEBUG|ERROR|INFO|TRACE' | jq -r '.[] | select(.volumeName=="hive")'
                     Should contain      ${result}       createdOn
-                    Execute on      0   datanode        ozone oz -updateVolume o3://ksm/hive -user bill -quota 10TB
-    ${result} =     Execute on      0   datanode        ozone oz -infoVolume o3://ksm/hive | grep -Ev 'Removed|WARN|DEBUG|ERROR|INFO|TRACE' | jq -r '. | select(.volumeName=="hive") | .owner | .name'
+                    Execute on      0   datanode        ozone oz -updateVolume o3://om/hive -user bill -quota 10TB
+    ${result} =     Execute on      0   datanode        ozone oz -infoVolume o3://om/hive | grep -Ev 'Removed|WARN|DEBUG|ERROR|INFO|TRACE' | jq -r '. | select(.volumeName=="hive") | .owner | .name'
                     Should Be Equal     ${result}       bill
 
 *** Keywords ***

http://git-wip-us.apache.org/repos/asf/hadoop/blob/249df9a6/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/protocol/ClientProtocol.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/protocol/ClientProtocol.java b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/protocol/ClientProtocol.java
index f3c710b..6183b8b 100644
--- a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/protocol/ClientProtocol.java
+++ b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/protocol/ClientProtocol.java
@@ -40,7 +40,7 @@ import org.apache.hadoop.security.KerberosInfo;
  * includes: {@link org.apache.hadoop.ozone.client.rpc.RpcClient} for RPC and
  * {@link  org.apache.hadoop.ozone.client.rest.RestClient} for REST.
  */
-@KerberosInfo(serverPrincipal = ScmConfigKeys.OZONE_SCM_KERBEROS_PRINCIPAL_KEY)
+@KerberosInfo(serverPrincipal = ScmConfigKeys.HDDS_SCM_KERBEROS_PRINCIPAL_KEY)
 public interface ClientProtocol {
 
   /**

http://git-wip-us.apache.org/repos/asf/hadoop/blob/249df9a6/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/ksm/KSMConfigKeys.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/ksm/KSMConfigKeys.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/ksm/KSMConfigKeys.java
new file mode 100644
index 0000000..cc25dbe
--- /dev/null
+++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/ksm/KSMConfigKeys.java
@@ -0,0 +1,84 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with this
+ * work for additional information regarding copyright ownership.  The ASF
+ * licenses this file to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations under
+ * the License.
+ */
+
+package org.apache.hadoop.ozone.ksm;
+
+import org.apache.hadoop.ozone.OzoneAcl;
+/**
+ * KSM Constants.
+ */
+public final class KSMConfigKeys {
+  /**
+   * Never constructed.
+   */
+  private KSMConfigKeys() {
+  }
+
+
+  public static final String OZONE_KSM_HANDLER_COUNT_KEY =
+      "ozone.ksm.handler.count.key";
+  public static final int OZONE_KSM_HANDLER_COUNT_DEFAULT = 20;
+
+  public static final String OZONE_KSM_ADDRESS_KEY =
+      "ozone.ksm.address";
+  public static final String OZONE_KSM_BIND_HOST_DEFAULT =
+      "0.0.0.0";
+  public static final int OZONE_KSM_PORT_DEFAULT = 9862;
+
+  public static final String OZONE_KSM_HTTP_ENABLED_KEY =
+      "ozone.ksm.http.enabled";
+  public static final String OZONE_KSM_HTTP_BIND_HOST_KEY =
+      "ozone.ksm.http-bind-host";
+  public static final String OZONE_KSM_HTTPS_BIND_HOST_KEY =
+      "ozone.ksm.https-bind-host";
+  public static final String OZONE_KSM_HTTP_ADDRESS_KEY =
+      "ozone.ksm.http-address";
+  public static final String OZONE_KSM_HTTPS_ADDRESS_KEY =
+      "ozone.ksm.https-address";
+  public static final String OZONE_KSM_HTTP_BIND_HOST_DEFAULT = "0.0.0.0";
+  public static final int OZONE_KSM_HTTP_BIND_PORT_DEFAULT = 9874;
+  public static final int OZONE_KSM_HTTPS_BIND_PORT_DEFAULT = 9875;
+
+  // LevelDB cache file uses an off-heap cache in LevelDB of 128 MB.
+  public static final String OZONE_KSM_DB_CACHE_SIZE_MB =
+      "ozone.ksm.db.cache.size.mb";
+  public static final int OZONE_KSM_DB_CACHE_SIZE_DEFAULT = 128;
+
+  public static final String OZONE_KSM_USER_MAX_VOLUME =
+      "ozone.ksm.user.max.volume";
+  public static final int OZONE_KSM_USER_MAX_VOLUME_DEFAULT = 1024;
+
+  // KSM Default user/group permissions
+  public static final String OZONE_KSM_USER_RIGHTS =
+      "ozone.ksm.user.rights";
+  public static final OzoneAcl.OzoneACLRights OZONE_KSM_USER_RIGHTS_DEFAULT =
+      OzoneAcl.OzoneACLRights.READ_WRITE;
+
+  public static final String OZONE_KSM_GROUP_RIGHTS =
+      "ozone.ksm.group.rights";
+  public static final OzoneAcl.OzoneACLRights OZONE_KSM_GROUP_RIGHTS_DEFAULT =
+      OzoneAcl.OzoneACLRights.READ_WRITE;
+
+  public static final String OZONE_KEY_DELETING_LIMIT_PER_TASK =
+      "ozone.key.deleting.limit.per.task";
+  public static final int OZONE_KEY_DELETING_LIMIT_PER_TASK_DEFAULT = 1000;
+
+  public static final String OZONE_OM_WEB_AUTHENTICATION_KERBEROS_PRINCIPAL =
+      "ozone.om.web.authentication.kerberos.principal";
+  public static final String OZONE_OM_WEB_AUTHENTICATION_KERBEROS_KEYTAB_FILE =
+      "ozone.om.web.authentication.kerberos.keytab";
+}
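
This file reappears under the legacy ozone.ksm.* names, apparently as an
artifact of the forced-update rebase; consumers would read the constants
through the usual Configuration accessors, e.g. (a sketch, not code in
this patch):

int cacheMb = conf.getInt(KSMConfigKeys.OZONE_KSM_DB_CACHE_SIZE_MB,
    KSMConfigKeys.OZONE_KSM_DB_CACHE_SIZE_DEFAULT);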

http://git-wip-us.apache.org/repos/asf/hadoop/blob/249df9a6/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/protocol/OzoneManagerProtocol.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/protocol/OzoneManagerProtocol.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/protocol/OzoneManagerProtocol.java
index 6f11e08..ac19b05 100644
--- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/protocol/OzoneManagerProtocol.java
+++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/protocol/OzoneManagerProtocol.java
@@ -26,10 +26,8 @@ import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfo;
 import org.apache.hadoop.ozone.om.helpers.OmVolumeArgs;
 import org.apache.hadoop.ozone.om.helpers.OpenKeySession;
 import org.apache.hadoop.ozone.om.helpers.ServiceInfo;
+import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OzoneAclInfo;
 
-
-import org.apache.hadoop.ozone.protocol.proto
-    .OzoneManagerProtocolProtos.OzoneAclInfo;
 import java.io.IOException;
 import java.util.List;
 import org.apache.hadoop.security.KerberosInfo;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/249df9a6/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestSecureOzoneCluster.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestSecureOzoneCluster.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestSecureOzoneCluster.java
index 5c96067..3fb5499 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestSecureOzoneCluster.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestSecureOzoneCluster.java
@@ -118,10 +118,10 @@ public final class TestSecureOzoneCluster {
   private void createCredentialsInKDC(Configuration conf, MiniKdc miniKdc)
       throws Exception {
     createPrincipal(scmKeytab,
-        conf.get(ScmConfigKeys.OZONE_SCM_KERBEROS_PRINCIPAL_KEY));
-    createPrincipal(spnegoKeytab,
-        conf.get(ScmConfigKeys.SCM_WEB_AUTHENTICATION_KERBEROS_PRINCIPAL_KEY),
-        conf.get(OMConfigKeys.OZONE_OM_WEB_AUTHENTICATION_KERBEROS_PRINCIPAL_KEY));
+        conf.get(ScmConfigKeys.HDDS_SCM_KERBEROS_PRINCIPAL_KEY));
+    createPrincipal(spnegoKeytab,
+        conf.get(ScmConfigKeys.SCM_WEB_AUTHENTICATION_KERBEROS_PRINCIPAL_KEY),
+        conf.get(OMConfigKeys.OZONE_OM_WEB_AUTHENTICATION_KERBEROS_PRINCIPAL_KEY));
     createPrincipal(omKeyTab,
         conf.get(OMConfigKeys
             .OZONE_OM_WEB_AUTHENTICATION_KERBEROS_PRINCIPAL_KEY));
@@ -154,7 +154,7 @@ public final class TestSecureOzoneCluster {
         "kerberos");
     conf.set(OZONE_ADMINISTRATORS, curUser);
 
-    conf.set(ScmConfigKeys.OZONE_SCM_KERBEROS_PRINCIPAL_KEY,
+    conf.set(ScmConfigKeys.HDDS_SCM_KERBEROS_PRINCIPAL_KEY,
         "scm/" + host + "@" + realm);
     conf.set(ScmConfigKeys.SCM_WEB_AUTHENTICATION_KERBEROS_PRINCIPAL_KEY,
         "HTTP_SCM/" + host + "@" + realm);
@@ -162,19 +162,18 @@ public final class TestSecureOzoneCluster {
     conf.set(OMConfigKeys.OZONE_OM_KERBEROS_PRINCIPAL_KEY,
         "om/" + host + "@" + realm);
     conf.set(OMConfigKeys.OZONE_OM_WEB_AUTHENTICATION_KERBEROS_PRINCIPAL_KEY,
-        "HTTP_KSM/" + host + "@" + realm);
+        "HTTP_OM/" + host + "@" + realm);
 
     scmKeytab = new File(workDir, "scm.keytab");
     spnegoKeytab = new File(workDir, "http.keytab");
     omKeyTab = new File(workDir, "om.keytab");
 
-    conf.set(ScmConfigKeys.OZONE_SCM_KERBEROS_KEYTAB_FILE_KEY,
+    conf.set(ScmConfigKeys.HDDS_SCM_KERBEROS_KEYTAB_FILE_KEY,
         scmKeytab.getAbsolutePath());
     conf.set(ScmConfigKeys.SCM_WEB_AUTHENTICATION_KERBEROS_KEYTAB_FILE_KEY,
         spnegoKeytab.getAbsolutePath());
     conf.set(OMConfigKeys.OZONE_OM_KERBEROS_KEYTAB_FILE_KEY,
         omKeyTab.getAbsolutePath());
-
   }
 
   @Test
@@ -205,7 +204,7 @@ public final class TestSecureOzoneCluster {
   @Test
   public void testSecureScmStartupFailure() throws Exception {
     initSCM();
-    conf.set(ScmConfigKeys.OZONE_SCM_KERBEROS_KEYTAB_FILE_KEY, "");
+    conf.set(ScmConfigKeys.HDDS_SCM_KERBEROS_KEYTAB_FILE_KEY, "");
     conf.set(CommonConfigurationKeysPublic.HADOOP_SECURITY_AUTHENTICATION,
         "kerberos");
 
@@ -215,9 +214,9 @@ public final class TestSecureOzoneCluster {
           StorageContainerManager.createSCM(null, conf);
         });
 
-    conf.set(ScmConfigKeys.OZONE_SCM_KERBEROS_PRINCIPAL_KEY,
+    conf.set(ScmConfigKeys.HDDS_SCM_KERBEROS_PRINCIPAL_KEY,
         "scm/_HOST@EXAMPLE.com");
-    conf.set(ScmConfigKeys.OZONE_SCM_KERBEROS_KEYTAB_FILE_KEY,
+    conf.set(ScmConfigKeys.HDDS_SCM_KERBEROS_KEYTAB_FILE_KEY,
         "/etc/security/keytabs/scm.keytab");
 
     testCommonKerberosFailures(

http://git-wip-us.apache.org/repos/asf/hadoop/blob/249df9a6/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OzoneManager.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OzoneManager.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OzoneManager.java
index db14063d..8f80678 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OzoneManager.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OzoneManager.java
@@ -188,8 +188,8 @@ public final class OzoneManager extends ServiceRuntimeInfoImpl
     if (SecurityUtil.getAuthenticationMethod(conf).equals
         (AuthenticationMethod.KERBEROS)) {
       LOG.debug("Ozone security is enabled. Attempting login for KSM user. "
-              + "Principal: {},keytab: {}", conf.get
-              (OZONE_OM_KERBEROS_PRINCIPAL_KEY),
+              + "Principal: {},keytab: {}", conf.get(
+          OZONE_OM_KERBEROS_PRINCIPAL_KEY),
           conf.get(OZONE_OM_KERBEROS_KEYTAB_FILE_KEY));
 
       UserGroupInformation.setConfiguration(conf);


---------------------------------------------------------------------
To unsubscribe, e-mail: common-commits-unsubscribe@hadoop.apache.org
For additional commands, e-mail: common-commits-help@hadoop.apache.org


[28/50] [abbrv] hadoop git commit: HDDS-509. TestStorageContainerManager is flaky. Contributed by Xiaoyu Yao and Ajay Kumar.

Posted by xy...@apache.org.
HDDS-509. TestStorageContainerManager is flaky.
Contributed by Xiaoyu Yao and Ajay Kumar.

The chill-mode assertion now runs before the CHILL_MODE_STATUS event is
fired: the event is handled asynchronously on the event-queue thread, so
asserting the pre-event state after firing raced with the handler.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/98c9bc4a
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/98c9bc4a
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/98c9bc4a

Branch: refs/heads/HDDS-4
Commit: 98c9bc4aa0ef6358e1ea6ca2f18cec7ae24158dd
Parents: 090272d
Author: Anu Engineer <ae...@apache.org>
Authored: Wed Sep 19 12:57:44 2018 -0700
Committer: Anu Engineer <ae...@apache.org>
Committed: Wed Sep 19 13:03:19 2018 -0700

----------------------------------------------------------------------
 .../java/org/apache/hadoop/ozone/TestStorageContainerManager.java | 3 +--
 1 file changed, 1 insertion(+), 2 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/98c9bc4a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestStorageContainerManager.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestStorageContainerManager.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestStorageContainerManager.java
index 3d9a043..94ab6c8 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestStorageContainerManager.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestStorageContainerManager.java
@@ -588,11 +588,10 @@ public class TestStorageContainerManager {
     helper.createKeys(10, 4096);
     SCMClientProtocolServer clientProtocolServer = cluster
         .getStorageContainerManager().getClientProtocolServer();
-
+    assertFalse((scm.getClientProtocolServer()).getChillModeStatus());
     final List<ContainerInfo> containers = scm.getScmContainerManager()
         .getStateManager().getAllContainers();
     scm.getEventQueue().fireEvent(SCMEvents.CHILL_MODE_STATUS, true);
-    assertFalse((scm.getClientProtocolServer()).getChillModeStatus());
     GenericTestUtils.waitFor(() -> {
       return clientProtocolServer.getChillModeStatus();
     }, 50, 1000 * 5);

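The reordering matters because SCMEvents.CHILL_MODE_STATUS is delivered
asynchronously: the pre-event state must be asserted before the event is
fired, and the post-event state can only be polled. A sketch mirroring the
test's own idiom:

scm.getEventQueue().fireEvent(SCMEvents.CHILL_MODE_STATUS, true);
// Do not assert immediately -- the handler may not have run yet.
GenericTestUtils.waitFor(
    () -> clientProtocolServer.getChillModeStatus(), 50, 1000 * 5);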

---------------------------------------------------------------------
To unsubscribe, e-mail: common-commits-unsubscribe@hadoop.apache.org
For additional commands, e-mail: common-commits-help@hadoop.apache.org


[11/50] [abbrv] hadoop git commit: HDDS-488. Handle chill mode exception from SCM in OzoneManager. Contributed by Ajay Kumar.

Posted by xy...@apache.org.
HDDS-488. Handle chill mode exception from SCM in OzoneManager. Contributed by Ajay Kumar.

SCM's chill-mode SCMException is now translated into an OMException with
result code SCM_IN_CHILL_MODE, so OM clients see a meaningful error
instead of an opaque SCM failure while the cluster is still in chill mode.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/39296537
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/39296537
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/39296537

Branch: refs/heads/HDDS-4
Commit: 39296537076d19b3713a75f8453d056884c49be6
Parents: 17f5651
Author: Xiaoyu Yao <xy...@apache.org>
Authored: Tue Sep 18 19:58:09 2018 -0700
Committer: Xiaoyu Yao <xy...@apache.org>
Committed: Tue Sep 18 20:05:25 2018 -0700

----------------------------------------------------------------------
 .../hadoop/ozone/om/helpers/OmBucketInfo.java   |   2 +-
 .../hadoop/ozone/om/helpers/OmVolumeArgs.java   |   2 +-
 .../hadoop/ozone/om/TestScmChillMode.java       | 171 +++++++++++++++++++
 .../apache/hadoop/ozone/om/KeyManagerImpl.java  |  32 +++-
 .../hadoop/ozone/om/exceptions/OMException.java |   3 +-
 .../hadoop/ozone/om/TestKeyManagerImpl.java     | 165 ++++++++++++++++++
 6 files changed, 367 insertions(+), 8 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/39296537/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmBucketInfo.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmBucketInfo.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmBucketInfo.java
index 0a136a7..5199ce3 100644
--- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmBucketInfo.java
+++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmBucketInfo.java
@@ -167,7 +167,7 @@ public final class OmBucketInfo implements Auditable {
     private StorageType storageType;
     private long creationTime;
 
-    Builder() {
+    public Builder() {
       //Default values
       this.acls = new LinkedList<>();
       this.isVersionEnabled = false;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/39296537/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmVolumeArgs.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmVolumeArgs.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmVolumeArgs.java
index 27e25f9..165d9ab 100644
--- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmVolumeArgs.java
+++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmVolumeArgs.java
@@ -151,7 +151,7 @@ public final class OmVolumeArgs implements Auditable{
     /**
      * Constructs a builder.
      */
-    Builder() {
+    public Builder() {
       keyValueMap = new HashMap<>();
       aclMap = new OmOzoneAclMap();
     }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/39296537/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestScmChillMode.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestScmChillMode.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestScmChillMode.java
new file mode 100644
index 0000000..954fa0f
--- /dev/null
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestScmChillMode.java
@@ -0,0 +1,171 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ *  with the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.ozone.om;
+
+import java.io.IOException;
+import java.util.List;
+import java.util.Map;
+import java.util.concurrent.atomic.AtomicReference;
+import org.apache.commons.lang3.RandomStringUtils;
+import org.apache.hadoop.hdds.conf.OzoneConfiguration;
+import org.apache.hadoop.hdds.scm.container.common.helpers.ContainerInfo;
+import org.apache.hadoop.hdds.scm.server.StorageContainerManager;
+import org.apache.hadoop.ozone.MiniOzoneCluster;
+import org.apache.hadoop.ozone.TestStorageContainerManagerHelper;
+import org.apache.hadoop.ozone.om.exceptions.OMException;
+import org.apache.hadoop.ozone.om.helpers.OmBucketInfo;
+import org.apache.hadoop.ozone.om.helpers.OmKeyArgs;
+import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;
+import org.apache.hadoop.ozone.om.helpers.OmVolumeArgs;
+import org.apache.hadoop.test.GenericTestUtils;
+import org.apache.hadoop.test.LambdaTestUtils;
+import org.apache.hadoop.util.Time;
+import org.junit.After;
+import org.junit.Assert;
+import org.junit.Before;
+import org.junit.Rule;
+import org.junit.Test;
+import org.junit.rules.Timeout;
+
+import static org.junit.Assert.fail;
+
+/**
+ * Test Ozone Manager operations when SCM is in chill mode.
+ */
+public class TestScmChillMode {
+
+  private static MiniOzoneCluster cluster = null;
+  private static MiniOzoneCluster.Builder builder = null;
+  private static OzoneConfiguration conf;
+  private static OzoneManager om;
+
+
+  @Rule
+  public Timeout timeout = new Timeout(1000 * 200);
+
+  /**
+   * Create a MiniOzoneCluster for testing.
+   * <p>
+   * Ozone is made active by setting OZONE_ENABLED = true in the
+   * cluster configuration.
+   *
+   * @throws Exception
+   */
+  @Before
+  public void init() throws Exception {
+    conf = new OzoneConfiguration();
+    builder = MiniOzoneCluster.newBuilder(conf)
+        .setHbInterval(1000)
+        .setHbProcessorInterval(500)
+        .setStartDataNodes(false);
+    cluster = builder.build();
+    cluster.startHddsDatanodes();
+    cluster.waitForClusterToBeReady();
+    om = cluster.getOzoneManager();
+  }
+
+  /**
+   * Shutdown the MiniOzoneCluster.
+   */
+  @After
+  public void shutdown() {
+    if (cluster != null) {
+      cluster.shutdown();
+    }
+  }
+
+  @Test
+  public void testChillModeOperations() throws Exception {
+    final AtomicReference<MiniOzoneCluster> miniCluster =
+        new AtomicReference<>();
+
+    try {
+      // Create {numKeys} keys with random names.
+      TestStorageContainerManagerHelper helper =
+          new TestStorageContainerManagerHelper(cluster, conf);
+      Map<String, OmKeyInfo> keyLocations = helper.createKeys(100, 4096);
+      final List<ContainerInfo> containers = cluster
+          .getStorageContainerManager()
+          .getScmContainerManager().getStateManager().getAllContainers();
+      GenericTestUtils.waitFor(() -> {
+        return containers.size() > 10;
+      }, 100, 1000);
+
+      String volumeName = "volume" + RandomStringUtils.randomNumeric(5);
+      String bucketName = "bucket" + RandomStringUtils.randomNumeric(5);
+      String keyName = "key" + RandomStringUtils.randomNumeric(5);
+      String userName = "user" + RandomStringUtils.randomNumeric(5);
+      String adminName = "admin" + RandomStringUtils.randomNumeric(5);
+      OmKeyArgs keyArgs = new OmKeyArgs.Builder()
+          .setVolumeName(volumeName)
+          .setBucketName(bucketName)
+          .setKeyName(keyName)
+          .setDataSize(1000)
+          .build();
+      OmVolumeArgs volArgs = new OmVolumeArgs.Builder()
+          .setAdminName(adminName)
+          .setCreationTime(Time.monotonicNow())
+          .setQuotaInBytes(10000)
+          .setVolume(volumeName)
+          .setOwnerName(userName)
+          .build();
+      OmBucketInfo bucketInfo = new OmBucketInfo.Builder()
+          .setBucketName(bucketName)
+          .setIsVersionEnabled(false)
+          .setVolumeName(volumeName)
+          .build();
+      om.createVolume(volArgs);
+      om.createBucket(bucketInfo);
+      om.openKey(keyArgs);
+      //om.commitKey(keyArgs, 1);
+
+      cluster.stop();
+
+      new Thread(() -> {
+        try {
+          miniCluster.set(builder.build());
+        } catch (IOException e) {
+          fail("failed");
+        }
+      }).start();
+
+      StorageContainerManager scm;
+      GenericTestUtils.waitFor(() -> {
+        return miniCluster.get() != null;
+      }, 100, 1000 * 3);
+
+      scm = miniCluster.get().getStorageContainerManager();
+      Assert.assertTrue(scm.isInChillMode());
+
+      om = miniCluster.get().getOzoneManager();
+
+      LambdaTestUtils.intercept(OMException.class,
+          "ChillModePrecheck failed for allocateBlock",
+          () -> om.openKey(keyArgs));
+
+    } finally {
+      if (miniCluster.get() != null) {
+        try {
+          miniCluster.get().shutdown();
+        } catch (Exception e) {
+          // do nothing.
+        }
+      }
+    }
+  }
+}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/39296537/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/KeyManagerImpl.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/KeyManagerImpl.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/KeyManagerImpl.java
index c14d0d8..41b391a 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/KeyManagerImpl.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/KeyManagerImpl.java
@@ -21,6 +21,7 @@ import org.apache.hadoop.hdds.conf.OzoneConfiguration;
 import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor;
 import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationType;
 import org.apache.hadoop.hdds.scm.container.common.helpers.AllocatedBlock;
+import org.apache.hadoop.hdds.scm.exceptions.SCMException;
 import org.apache.hadoop.hdds.scm.protocol.ScmBlockLocationProtocol;
 import org.apache.hadoop.hdfs.DFSUtil;
 import org.apache.hadoop.ozone.OzoneConsts;
@@ -155,9 +156,18 @@ public class KeyManagerImpl implements KeyManager {
     }
     OmKeyInfo keyInfo =
         OmKeyInfo.getFromProtobuf(KeyInfo.parseFrom(keyData));
-    AllocatedBlock allocatedBlock =
-        scmBlockClient.allocateBlock(scmBlockSize, keyInfo.getType(),
-            keyInfo.getFactor(), omId);
+    AllocatedBlock allocatedBlock;
+    try {
+      allocatedBlock =
+          scmBlockClient.allocateBlock(scmBlockSize, keyInfo.getType(),
+              keyInfo.getFactor(), omId);
+    } catch (SCMException ex) {
+      if (ex.getResult()
+          .equals(SCMException.ResultCodes.CHILL_MODE_EXCEPTION)) {
+        throw new OMException(ex.getMessage(), ResultCodes.SCM_IN_CHILL_MODE);
+      }
+      throw ex;
+    }
     OmKeyLocationInfo info = new OmKeyLocationInfo.Builder()
         .setBlockID(allocatedBlock.getBlockID())
         .setShouldCreateContainer(allocatedBlock.getCreateContainer())
@@ -208,8 +218,20 @@ public class KeyManagerImpl implements KeyManager {
       // some blocks and piggyback to client, to save RPC calls.
       while (requestedSize > 0) {
         long allocateSize = Math.min(scmBlockSize, requestedSize);
-        AllocatedBlock allocatedBlock =
-            scmBlockClient.allocateBlock(allocateSize, type, factor, omId);
+        AllocatedBlock allocatedBlock;
+        try {
+          allocatedBlock = scmBlockClient
+              .allocateBlock(allocateSize, type, factor, omId);
+        } catch (IOException ex) {
+          if (ex instanceof SCMException) {
+            if (((SCMException) ex).getResult()
+                .equals(SCMException.ResultCodes.CHILL_MODE_EXCEPTION)) {
+              throw new OMException(ex.getMessage(),
+                  ResultCodes.SCM_IN_CHILL_MODE);
+            }
+          }
+          throw ex;
+        }
         OmKeyLocationInfo subKeyInfo = new OmKeyLocationInfo.Builder()
             .setBlockID(allocatedBlock.getBlockID())
             .setShouldCreateContainer(allocatedBlock.getCreateContainer())

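The two allocate sites above translate the same chill-mode failure in
slightly different shapes (one catches SCMException directly, the other
instanceof-checks an IOException). A follow-up could consolidate them
along these lines; a sketch only, using a helper name that does not exist
in the patch:

private static void throwIfChillMode(IOException ex) throws OMException {
  if (ex instanceof SCMException
      && ((SCMException) ex).getResult()
          .equals(SCMException.ResultCodes.CHILL_MODE_EXCEPTION)) {
    throw new OMException(ex.getMessage(),
        OMException.ResultCodes.SCM_IN_CHILL_MODE);
  }
}

// at each allocate site:
// try { allocatedBlock = scmBlockClient.allocateBlock(...); }
// catch (IOException ex) { throwIfChillMode(ex); throw ex; }
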
http://git-wip-us.apache.org/repos/asf/hadoop/blob/39296537/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/exceptions/OMException.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/exceptions/OMException.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/exceptions/OMException.java
index 55cef97..393ac91 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/exceptions/OMException.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/exceptions/OMException.java
@@ -113,6 +113,7 @@ public class OMException extends IOException {
     FAILED_METADATA_ERROR,
     FAILED_INTERNAL_ERROR,
     OM_NOT_INITIALIZED,
-    SCM_VERSION_MISMATCH_ERROR
+    SCM_VERSION_MISMATCH_ERROR,
+    SCM_IN_CHILL_MODE
   }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/39296537/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestKeyManagerImpl.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestKeyManagerImpl.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestKeyManagerImpl.java
new file mode 100644
index 0000000..d722155
--- /dev/null
+++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestKeyManagerImpl.java
@@ -0,0 +1,165 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ *  with the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.ozone.om;
+
+import java.nio.charset.StandardCharsets;
+import java.util.Arrays;
+import java.util.HashSet;
+import java.util.Set;
+import org.apache.commons.lang3.RandomStringUtils;
+import org.apache.hadoop.hdds.conf.OzoneConfiguration;
+import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor;
+import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationType;
+import org.apache.hadoop.hdds.scm.exceptions.SCMException;
+import org.apache.hadoop.hdds.scm.exceptions.SCMException.ResultCodes;
+import org.apache.hadoop.hdds.scm.protocol.ScmBlockLocationProtocol;
+import org.apache.hadoop.hdfs.DFSUtil;
+import org.apache.hadoop.ozone.om.exceptions.OMException;
+import org.apache.hadoop.ozone.om.helpers.OmKeyArgs;
+import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.KeyInfo;
+import org.apache.hadoop.test.LambdaTestUtils;
+import org.apache.hadoop.util.Time;
+import org.apache.hadoop.utils.db.RDBStore;
+import org.apache.hadoop.utils.db.Table;
+import org.apache.hadoop.utils.db.TableConfig;
+import org.junit.Before;
+import org.junit.Rule;
+import org.junit.Test;
+import org.junit.rules.TemporaryFolder;
+import org.mockito.Mockito;
+import org.rocksdb.ColumnFamilyOptions;
+import org.rocksdb.DBOptions;
+import org.rocksdb.RocksDB;
+import org.rocksdb.Statistics;
+import org.rocksdb.StatsLevel;
+
+/**
+ * Test class for {@link KeyManagerImpl}.
+ */
+public class TestKeyManagerImpl {
+
+  private static KeyManagerImpl keyManager;
+  private static ScmBlockLocationProtocol scmBlockLocationProtocol;
+  private static OzoneConfiguration conf;
+  private static OMMetadataManager metadataManager;
+  private static long blockSize = 1000;
+  private static final String KEY_NAME = "key1";
+  private static final String BUCKET_NAME = "bucket1";
+  private static final String VOLUME_NAME = "vol1";
+  private static RDBStore rdbStore = null;
+  private static Table rdbTable = null;
+  private static DBOptions options = null;
+  private KeyInfo keyData;
+  @Rule
+  public TemporaryFolder folder = new TemporaryFolder();
+
+  @Before
+  public void setUp() throws Exception {
+    conf = new OzoneConfiguration();
+    scmBlockLocationProtocol = Mockito.mock(ScmBlockLocationProtocol.class);
+    metadataManager = Mockito.mock(OMMetadataManager.class);
+    keyManager = new KeyManagerImpl(scmBlockLocationProtocol, metadataManager,
+        conf, "om1");
+    setupMocks();
+  }
+
+  private void setupMocks() throws Exception {
+    Mockito.when(scmBlockLocationProtocol
+        .allocateBlock(Mockito.anyLong(), Mockito.any(ReplicationType.class),
+            Mockito.any(ReplicationFactor.class), Mockito.anyString()))
+        .thenThrow(
+            new SCMException("ChillModePrecheck failed for allocateBlock",
+                ResultCodes.CHILL_MODE_EXCEPTION));
+    setupRocksDb();
+    Mockito.when(metadataManager.getVolumeTable()).thenReturn(rdbTable);
+    Mockito.when(metadataManager.getBucketTable()).thenReturn(rdbTable);
+    Mockito.when(metadataManager.getOpenKeyTable()).thenReturn(rdbTable);
+    Mockito.when(metadataManager.getLock())
+        .thenReturn(new OzoneManagerLock(conf));
+    Mockito.when(metadataManager.getVolumeKey(VOLUME_NAME))
+        .thenReturn(VOLUME_NAME.getBytes());
+    Mockito.when(metadataManager.getBucketKey(VOLUME_NAME, BUCKET_NAME))
+        .thenReturn(BUCKET_NAME.getBytes());
+    Mockito.when(metadataManager.getOpenKeyBytes(VOLUME_NAME, BUCKET_NAME,
+        KEY_NAME, 1)).thenReturn(KEY_NAME.getBytes());
+  }
+
+  private void setupRocksDb() throws Exception {
+    options = new DBOptions();
+    options.setCreateIfMissing(true);
+    options.setCreateMissingColumnFamilies(true);
+
+    Statistics statistics = new Statistics();
+    statistics.setStatsLevel(StatsLevel.ALL);
+    options = options.setStatistics(statistics);
+
+    Set<TableConfig> configSet = new HashSet<>();
+    for (String name : Arrays
+        .asList(DFSUtil.bytes2String(RocksDB.DEFAULT_COLUMN_FAMILY),
+            "testTable")) {
+      TableConfig newConfig = new TableConfig(name, new ColumnFamilyOptions());
+      configSet.add(newConfig);
+    }
+    keyData = KeyInfo.newBuilder()
+        .setKeyName(KEY_NAME)
+        .setBucketName(BUCKET_NAME)
+        .setVolumeName(VOLUME_NAME)
+        .setDataSize(blockSize)
+        .setType(ReplicationType.STAND_ALONE)
+        .setFactor(ReplicationFactor.ONE)
+        .setCreationTime(Time.now())
+        .setModificationTime(Time.now())
+        .build();
+
+    rdbStore = new RDBStore(folder.newFolder(), options, configSet);
+    rdbTable = rdbStore.getTable("testTable");
+    rdbTable.put(VOLUME_NAME.getBytes(),
+        RandomStringUtils.random(10).getBytes(StandardCharsets.UTF_8));
+    rdbTable.put(BUCKET_NAME.getBytes(),
+        RandomStringUtils.random(10).getBytes(StandardCharsets.UTF_8));
+    rdbTable.put(KEY_NAME.getBytes(), keyData.toByteArray());
+  }
+
+  @Test
+  public void allocateBlockFailureInChillMode() throws Exception {
+    OmKeyArgs keyArgs = new OmKeyArgs.Builder().setKeyName(KEY_NAME)
+        .setBucketName(BUCKET_NAME)
+        .setFactor(ReplicationFactor.ONE)
+        .setType(ReplicationType.STAND_ALONE)
+        .setVolumeName(VOLUME_NAME).build();
+    LambdaTestUtils.intercept(OMException.class,
+        "ChillModePrecheck failed for allocateBlock", () -> {
+          keyManager.allocateBlock(keyArgs, 1);
+        });
+  }
+
+  @Test
+  public void openKeyFailureInChillMode() throws Exception {
+    OmKeyArgs keyArgs = new OmKeyArgs.Builder().setKeyName(KEY_NAME)
+        .setBucketName(BUCKET_NAME)
+        .setFactor(ReplicationFactor.ONE)
+        .setDataSize(1000)
+        .setType(ReplicationType.STAND_ALONE)
+        .setVolumeName(VOLUME_NAME).build();
+    LambdaTestUtils.intercept(OMException.class,
+        "ChillModePrecheck failed for allocateBlock", () -> {
+          keyManager.openKey(keyArgs);
+        });
+  }
+}
\ No newline at end of file


---------------------------------------------------------------------
To unsubscribe, e-mail: common-commits-unsubscribe@hadoop.apache.org
For additional commands, e-mail: common-commits-help@hadoop.apache.org


[16/50] [abbrv] hadoop git commit: YARN-8771. CapacityScheduler fails to unreserve when cluster resource contains empty resource type. Contributed by Tao Yang.

Posted by xy...@apache.org.
YARN-8771. CapacityScheduler fails to unreserve when cluster resource contains empty resource type. Contributed by Tao Yang.

When the cluster resource includes a resource type whose total is zero,
the calculator-based greaterThan comparison against Resources.none() could
evaluate to false even though memory or vcores still needed to be
unreserved, so the container was never unreserved; testing each resource
value directly avoids the degenerate comparison.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/0712537e
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/0712537e
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/0712537e

Branch: refs/heads/HDDS-4
Commit: 0712537e799bc03855d548d1f4bd690dd478b871
Parents: 28ceb34
Author: Weiwei Yang <ww...@apache.org>
Authored: Wed Sep 19 19:31:07 2018 +0800
Committer: Weiwei Yang <ww...@apache.org>
Committed: Wed Sep 19 19:31:07 2018 +0800

----------------------------------------------------------------------
 .../allocator/RegularContainerAllocator.java    |  3 +-
 .../capacity/TestContainerAllocation.java       | 68 ++++++++++++++++++++
 2 files changed, 69 insertions(+), 2 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/0712537e/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/allocator/RegularContainerAllocator.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/allocator/RegularContainerAllocator.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/allocator/RegularContainerAllocator.java
index 3e337ef..c0a11a0 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/allocator/RegularContainerAllocator.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/allocator/RegularContainerAllocator.java
@@ -543,8 +543,7 @@ public class RegularContainerAllocator extends AbstractContainerAllocator {
             currentResoureLimits.getAmountNeededUnreserve());
 
     boolean needToUnreserve =
-        Resources.greaterThan(rc, clusterResource,
-            resourceNeedToUnReserve, Resources.none());
+        rc.isAnyMajorResourceAboveZero(resourceNeedToUnReserve);
 
     RMContainer unreservedContainer = null;
     boolean reservationsContinueLooking =

http://git-wip-us.apache.org/repos/asf/hadoop/blob/0712537e/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestContainerAllocation.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestContainerAllocation.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestContainerAllocation.java
index b9bfc2a..3d028ee 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestContainerAllocation.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestContainerAllocation.java
@@ -47,6 +47,7 @@ import org.apache.hadoop.yarn.server.resourcemanager.RMContextImpl;
 import org.apache.hadoop.yarn.server.resourcemanager.RMSecretManagerService;
 import org.apache.hadoop.yarn.server.resourcemanager.nodelabels.NullRMNodeLabelsManager;
 import org.apache.hadoop.yarn.server.resourcemanager.nodelabels.RMNodeLabelsManager;
+import org.apache.hadoop.yarn.server.resourcemanager.resource.TestResourceProfiles;
 import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMApp;
 import org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.RMAppAttempt;
 import org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.RMAppAttemptState;
@@ -59,7 +60,10 @@ import org.apache.hadoop.yarn.server.resourcemanager.scheduler.event.NodeRemoved
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.event.NodeUpdateSchedulerEvent;
 import org.apache.hadoop.yarn.server.resourcemanager.security.RMContainerTokenSecretManager;
 import org.apache.hadoop.yarn.server.utils.BuilderUtils;
+import org.apache.hadoop.yarn.util.resource.DominantResourceCalculator;
+import org.apache.hadoop.yarn.util.resource.ResourceCalculator;
 import org.apache.hadoop.yarn.util.resource.Resources;
+import org.apache.hadoop.yarn.util.resource.TestResourceUtils;
 import org.junit.Assert;
 import org.junit.Before;
 import org.junit.Test;
@@ -984,4 +988,68 @@ public class TestContainerAllocation {
     Assert.assertEquals(2, lq.getMetrics().getAppsPending());
     rm1.close();
   }
+
+  @Test(timeout = 60000)
+  public void testUnreserveWhenClusterResourceHasEmptyResourceType()
+      throws Exception {
+    /**
+     * Test case:
+     * Create a cluster with two nodes whose node resource both are
+     * <8GB, 8core, 0>, create queue "a" whose max-resource is <8GB, 8 core, 0>,
+     * submit app1 to queue "a" whose am use <1GB, 1 core, 0> and launch on nm1,
+     * submit app2 to queue "b" whose am use <1GB, 1 core, 0> and launch on nm1,
+     * app1 asks two <7GB, 1core> containers and nm1 do 1 heartbeat,
+     * then scheduler reserves one container on nm1.
+     *
+     * After nm2 do next node heartbeat, scheduler should unreserve the reserved
+     * container on nm1 then allocate a container on nm2.
+     */
+    TestResourceUtils.addNewTypesToResources("resource1");
+    CapacitySchedulerConfiguration newConf =
+        (CapacitySchedulerConfiguration) TestUtils
+            .getConfigurationWithMultipleQueues(conf);
+    newConf.setClass(CapacitySchedulerConfiguration.RESOURCE_CALCULATOR_CLASS,
+        DominantResourceCalculator.class, ResourceCalculator.class);
+    newConf
+        .setBoolean(TestResourceProfiles.TEST_CONF_RESET_RESOURCE_TYPES, false);
+    // Set maximum capacity of queue "a" to 50
+    newConf.setMaximumCapacity(CapacitySchedulerConfiguration.ROOT + ".a", 50);
+    MockRM rm1 = new MockRM(newConf);
+
+    RMNodeLabelsManager nodeLabelsManager = new NullRMNodeLabelsManager();
+    nodeLabelsManager.init(newConf);
+    rm1.getRMContext().setNodeLabelManager(nodeLabelsManager);
+    rm1.start();
+    MockNM nm1 = rm1.registerNode("h1:1234", 8 * GB);
+    MockNM nm2 = rm1.registerNode("h2:1234", 8 * GB);
+
+    // launch an app to queue "a", AM container should be launched on nm1
+    RMApp app1 = rm1.submitApp(1 * GB, "app", "user", null, "a");
+    MockAM am1 = MockRM.launchAndRegisterAM(app1, rm1, nm1);
+
+    // launch another app to queue "b", AM container should be launched on nm1
+    RMApp app2 = rm1.submitApp(1 * GB, "app", "user", null, "b");
+    MockRM.launchAndRegisterAM(app2, rm1, nm1);
+
+    am1.allocate("*", 7 * GB, 2, new ArrayList<ContainerId>());
+
+    CapacityScheduler cs = (CapacityScheduler) rm1.getResourceScheduler();
+    RMNode rmNode1 = rm1.getRMContext().getRMNodes().get(nm1.getNodeId());
+    RMNode rmNode2 = rm1.getRMContext().getRMNodes().get(nm2.getNodeId());
+    FiCaSchedulerApp schedulerApp1 =
+        cs.getApplicationAttempt(am1.getApplicationAttemptId());
+
+    // Do nm1 heartbeats 1 times, will reserve a container on nm1 for app1
+    cs.handle(new NodeUpdateSchedulerEvent(rmNode1));
+    Assert.assertEquals(1, schedulerApp1.getLiveContainers().size());
+    Assert.assertEquals(1, schedulerApp1.getReservedContainers().size());
+
+    // Do nm2 heartbeats 1 times, will unreserve a container on nm1
+    // and allocate a container on nm2 for app1
+    cs.handle(new NodeUpdateSchedulerEvent(rmNode2));
+    Assert.assertEquals(2, schedulerApp1.getLiveContainers().size());
+    Assert.assertEquals(0, schedulerApp1.getReservedContainers().size());
+
+    rm1.close();
+  }
 }
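
The one-line fix above replaces a dominant-share comparison against Resources.none() with a direct per-resource check. Below is a simplified, self-contained model of why the original check can report false when the cluster registers a resource type with zero capacity; the arithmetic mimics the share computation and is not the actual YARN calculator code:

// Simplified model of the comparison bug; illustrative only.
public final class UnreserveCheckDemo {
  // memory (MB), vcores, resource1; resource1 is registered but empty (0).
  static final long[] CLUSTER = {16384L, 16L, 0L};
  static final long[] NEEDED  = {7168L, 1L, 0L};

  // Dominant-share style check: "needed > none" iff its dominant share over
  // the cluster is positive. A zero-capacity type yields 0/0 = NaN, and NaN
  // propagates through Math.max and fails every comparison, masking the need.
  static boolean greaterThanNoneByShare(long[] needed, long[] cluster) {
    double dominant = Double.NEGATIVE_INFINITY;
    for (int i = 0; i < needed.length; i++) {
      dominant = Math.max(dominant, (double) needed[i] / cluster[i]);
    }
    return dominant > 0; // NaN > 0 is false
  }

  // The fixed approach: inspect each major resource value directly.
  static boolean isAnyMajorResourceAboveZero(long[] needed) {
    for (long v : needed) {
      if (v > 0) {
        return true;
      }
    }
    return false;
  }

  public static void main(String[] args) {
    System.out.println(greaterThanNoneByShare(NEEDED, CLUSTER)); // false
    System.out.println(isAnyMajorResourceAboveZero(NEEDED));     // true
  }
}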


---------------------------------------------------------------------
To unsubscribe, e-mail: common-commits-unsubscribe@hadoop.apache.org
For additional commands, e-mail: common-commits-help@hadoop.apache.org


[46/50] [abbrv] hadoop git commit: HDDS-6. Enable SCM kerberos auth. Contributed by Ajay Kumar.

Posted by xy...@apache.org.
HDDS-6. Enable SCM kerberos auth. Contributed by Ajay Kumar.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/0c93793d
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/0c93793d
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/0c93793d

Branch: refs/heads/HDDS-4
Commit: 0c93793d3aac0ec7e7e5d86fde1493c02a723d9e
Parents: 47511e7
Author: Xiaoyu Yao <xy...@apache.org>
Authored: Wed May 9 15:56:03 2018 -0700
Committer: Xiaoyu Yao <xy...@apache.org>
Committed: Thu Sep 20 15:13:07 2018 -0700

----------------------------------------------------------------------
 hadoop-hdds/common/src/main/resources/ozone-default.xml | 4 ++++
 1 file changed, 4 insertions(+)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/0c93793d/hadoop-hdds/common/src/main/resources/ozone-default.xml
----------------------------------------------------------------------
diff --git a/hadoop-hdds/common/src/main/resources/ozone-default.xml b/hadoop-hdds/common/src/main/resources/ozone-default.xml
index efb5aa6..bd4dbfd 100644
--- a/hadoop-hdds/common/src/main/resources/ozone-default.xml
+++ b/hadoop-hdds/common/src/main/resources/ozone-default.xml
@@ -152,6 +152,7 @@
     </description>
   </property>
   <property>
+<<<<<<< HEAD
     <name>dfs.ratis.client.request.timeout.duration</name>
     <value>3s</value>
     <tag>OZONE, RATIS, MANAGEMENT</tag>
@@ -209,6 +210,9 @@
   </property>
   <property>
     <name>hdds.container.report.interval</name>
+=======
+    <name>ozone.container.report.interval</name>
+>>>>>>> HDDS-6. Enable SCM kerberos auth. Contributed by Ajay Kumar.
     <value>60000ms</value>
     <tag>OZONE, CONTAINER, MANAGEMENT</tag>
     <description>Time interval of the datanode to send container report. Each
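
The hunk above shows only leftover merge-conflict markers around the container report interval and none of the security wiring named in the subject. For orientation, here is a minimal sketch of Hadoop's standard Kerberos service-login sequence that such a change would plug SCM into; both configuration key names below are placeholders, not actual HDDS keys:

import java.io.IOException;
import java.net.InetSocketAddress;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.security.SecurityUtil;
import org.apache.hadoop.security.UserGroupInformation;

// Minimal sketch of the usual Hadoop service login; key names are assumptions.
public final class ScmLoginSketch {
  private static final String KEYTAB_KEY = "example.scm.keytab.file";           // placeholder
  private static final String PRINCIPAL_KEY = "example.scm.kerberos.principal"; // placeholder

  public static void login(Configuration conf, InetSocketAddress addr)
      throws IOException {
    if (UserGroupInformation.isSecurityEnabled()) {
      UserGroupInformation.setConfiguration(conf);
      // Resolves _HOST in the principal against the bind address host name.
      SecurityUtil.login(conf, KEYTAB_KEY, PRINCIPAL_KEY, addr.getHostName());
    }
  }
}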


---------------------------------------------------------------------
To unsubscribe, e-mail: common-commits-unsubscribe@hadoop.apache.org
For additional commands, e-mail: common-commits-help@hadoop.apache.org


[06/50] [abbrv] hadoop git commit: MAPREDUCE-7138. ThrottledContainerAllocator in MRAppBenchmark should implement RMHeartbeatHandler. Contributed by Oleksandr Shevchenko

Posted by xy...@apache.org.
MAPREDUCE-7138. ThrottledContainerAllocator in MRAppBenchmark should implement RMHeartbeatHandler. Contributed by Oleksandr Shevchenko


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/8382b860
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/8382b860
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/8382b860

Branch: refs/heads/HDDS-4
Commit: 8382b860d4ef4f20d000537ded42a88e98bd2190
Parents: 34b2237
Author: Jason Lowe <jl...@apache.org>
Authored: Tue Sep 18 17:06:32 2018 -0500
Committer: Jason Lowe <jl...@apache.org>
Committed: Tue Sep 18 17:06:32 2018 -0500

----------------------------------------------------------------------
 .../hadoop/mapreduce/v2/app/MRAppBenchmark.java      | 15 +++++++++++++--
 1 file changed, 13 insertions(+), 2 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/8382b860/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/MRAppBenchmark.java
----------------------------------------------------------------------
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/MRAppBenchmark.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/MRAppBenchmark.java
index 5e6697b..efe150f 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/MRAppBenchmark.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/MRAppBenchmark.java
@@ -33,10 +33,12 @@ import org.apache.hadoop.mapreduce.v2.app.job.event.TaskAttemptContainerAssigned
 import org.apache.hadoop.mapreduce.v2.app.rm.ContainerAllocator;
 import org.apache.hadoop.mapreduce.v2.app.rm.ContainerAllocatorEvent;
 import org.apache.hadoop.mapreduce.v2.app.rm.RMContainerAllocator;
+import org.apache.hadoop.mapreduce.v2.app.rm.RMHeartbeatHandler;
 import org.apache.hadoop.mapreduce.v2.app.rm.preemption.AMPreemptionPolicy;
 import org.apache.hadoop.mapreduce.v2.app.rm.preemption.NoopAMPreemptionPolicy;
 import org.apache.hadoop.service.AbstractService;
 import org.apache.hadoop.test.GenericTestUtils;
+import org.apache.hadoop.util.Time;
 import org.apache.hadoop.yarn.api.ApplicationMasterProtocol;
 import org.apache.hadoop.yarn.api.protocolrecords.AllocateRequest;
 import org.apache.hadoop.yarn.api.protocolrecords.AllocateResponse;
@@ -116,7 +118,7 @@ public class MRAppBenchmark {
     }
     
     class ThrottledContainerAllocator extends AbstractService 
-        implements ContainerAllocator {
+        implements ContainerAllocator, RMHeartbeatHandler {
       private int containerCount;
       private Thread thread;
       private BlockingQueue<ContainerAllocatorEvent> eventQueue =
@@ -182,6 +184,15 @@ public class MRAppBenchmark {
         }
         super.serviceStop();
       }
+
+      @Override
+      public long getLastHeartbeatTime() {
+        return Time.now();
+      }
+
+      @Override
+      public void runOnNextHeartbeat(Runnable callback) {
+      }
     }
   }
 
@@ -264,7 +275,7 @@ public class MRAppBenchmark {
     });
   }
 
-  @Test
+  @Test(timeout = 60000)
   public void benchmark2() throws Exception {
     int maps = 100; // Adjust for benchmarking, start with a couple of thousands
     int reduces = 50;
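
For context, the surrounding framework expects to use the allocator as a heartbeat handler as well, which is why the benchmark's allocator must implement both interfaces. A hypothetical, self-contained sketch of that dual-role contract (the interfaces below are local stand-ins, not the real MapReduce types):

// Minimal stand-ins for the interfaces involved; illustrative only.
public final class HeartbeatCastDemo {
  interface ContainerAllocator { }

  interface RMHeartbeatHandler {
    long getLastHeartbeatTime();
    void runOnNextHeartbeat(Runnable callback);
  }

  // A benchmark allocator that also satisfies the heartbeat contract, as the
  // patch above makes ThrottledContainerAllocator do.
  static final class ThrottledAllocator
      implements ContainerAllocator, RMHeartbeatHandler {
    @Override public long getLastHeartbeatTime() {
      return System.currentTimeMillis(); // pretend a heartbeat just happened
    }
    @Override public void runOnNextHeartbeat(Runnable callback) {
      callback.run(); // benchmarks have no real RM, so run immediately
    }
  }

  public static void main(String[] args) {
    ContainerAllocator allocator = new ThrottledAllocator();
    // In this sketch the host code discovers the handler via a cast; an
    // allocator missing the interface would fail here at runtime.
    RMHeartbeatHandler handler = (RMHeartbeatHandler) allocator;
    handler.runOnNextHeartbeat(
        () -> System.out.println("last beat: " + handler.getLastHeartbeatTime()));
  }
}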


---------------------------------------------------------------------
To unsubscribe, e-mail: common-commits-unsubscribe@hadoop.apache.org
For additional commands, e-mail: common-commits-help@hadoop.apache.org


[20/50] [abbrv] hadoop git commit: HDDS-458. numberofKeys is 0 for all containers even when keys are present. Contributed by LiXin Ge.

Posted by xy...@apache.org.
HDDS-458. numberofKeys is 0 for all containers even when keys are present. Contributed by LiXin Ge.

(cherry picked from commit 8600b049af184af04dfb93b6bc353740e3e821d3)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/b3c5221f
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/b3c5221f
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/b3c5221f

Branch: refs/heads/HDDS-4
Commit: b3c5221f3091d394ef8c227f6776af07f7116658
Parents: 15ed74f
Author: Márton Elek <el...@apache.org>
Authored: Wed Sep 19 14:51:03 2018 +0200
Committer: Márton Elek <el...@apache.org>
Committed: Wed Sep 19 17:11:05 2018 +0200

----------------------------------------------------------------------
 .../container/keyvalue/KeyValueContainer.java   |  1 +
 .../scm/container/TestContainerMapping.java     | 46 ++++++++++++++++++++
 2 files changed, 47 insertions(+)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/b3c5221f/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueContainer.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueContainer.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueContainer.java
index b893a38..0870c76 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueContainer.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueContainer.java
@@ -521,6 +521,7 @@ public class KeyValueContainer implements Container<KeyValueContainerData> {
         .setWriteCount(containerData.getWriteCount())
         .setReadBytes(containerData.getReadBytes())
         .setWriteBytes(containerData.getWriteBytes())
+        .setKeyCount(containerData.getKeyCount())
         .setUsed(containerData.getBytesUsed())
         .setState(getHddsState())
         .setDeleteTransactionId(containerData.getDeleteTransactionId());

http://git-wip-us.apache.org/repos/asf/hadoop/blob/b3c5221f/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/TestContainerMapping.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/TestContainerMapping.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/TestContainerMapping.java
index 224f6ddd..f9a881e 100644
--- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/TestContainerMapping.java
+++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/TestContainerMapping.java
@@ -281,6 +281,52 @@ public class TestContainerMapping {
   }
 
   @Test
+  public void testListContainerAfterReport() throws Exception {
+    ContainerInfo info1 = createContainer();
+    ContainerInfo info2 = createContainer();
+    DatanodeDetails datanodeDetails = TestUtils.randomDatanodeDetails();
+    List<StorageContainerDatanodeProtocolProtos.ContainerInfo> reports =
+        new ArrayList<>();
+    StorageContainerDatanodeProtocolProtos.ContainerInfo.Builder ciBuilder =
+        StorageContainerDatanodeProtocolProtos.ContainerInfo.newBuilder();
+    long cID1 = info1.getContainerID();
+    long cID2 = info2.getContainerID();
+    ciBuilder.setFinalhash("e16cc9d6024365750ed8dbd194ea46d2")
+        .setSize(1000000000L)
+        .setUsed(987654321L)
+        .setKeyCount(100000000L)
+        .setReadBytes(1000000000L)
+        .setWriteBytes(1000000000L)
+        .setContainerID(cID1);
+    reports.add(ciBuilder.build());
+
+    ciBuilder.setFinalhash("e16cc9d6024365750ed8dbd194ea54a9")
+        .setSize(1000000000L)
+        .setUsed(123456789L)
+        .setKeyCount(200000000L)
+        .setReadBytes(3000000000L)
+        .setWriteBytes(4000000000L)
+        .setContainerID(cID2);
+    reports.add(ciBuilder.build());
+
+    ContainerReportsProto.Builder crBuilder = ContainerReportsProto
+        .newBuilder();
+    crBuilder.addAllReports(reports);
+
+    mapping.processContainerReports(datanodeDetails, crBuilder.build(), false);
+
+    List<ContainerInfo> list = mapping.listContainer(0, 50);
+    Assert.assertEquals(2, list.stream().filter(
+        x -> x.getContainerID() == cID1 || x.getContainerID() == cID2).count());
+    Assert.assertEquals(300000000L, list.stream().filter(
+        x -> x.getContainerID() == cID1 || x.getContainerID() == cID2)
+        .mapToLong(x -> x.getNumberOfKeys()).sum());
+    Assert.assertEquals(1111111110L, list.stream().filter(
+        x -> x.getContainerID() == cID1 || x.getContainerID() == cID2)
+        .mapToLong(x -> x.getUsedBytes()).sum());
+  }
+
+  @Test
   public void testCloseContainer() throws IOException {
     ContainerInfo info = createContainer();
     mapping.updateContainerState(info.getContainerID(),
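
The container-side fix above is a single missing builder call. A tiny illustrative sketch of that failure class, where a builder field whose setter is never invoked silently reports its zero default (the builder below is hypothetical, not the real protobuf one):

// Any field whose setter is skipped keeps its zero default, which is how
// numberOfKeys stayed 0 in container reports before this patch.
public final class ReportBuilderDemo {
  static final class Report {
    final long used;
    final long keyCount;
    Report(long used, long keyCount) { this.used = used; this.keyCount = keyCount; }
  }

  static final class Builder {
    private long used;
    private long keyCount; // stays 0 unless setKeyCount is called

    Builder setUsed(long v) { this.used = v; return this; }
    Builder setKeyCount(long v) { this.keyCount = v; return this; }
    Report build() { return new Report(used, keyCount); }
  }

  public static void main(String[] args) {
    Report broken = new Builder().setUsed(987654321L).build();
    Report fixed  = new Builder().setUsed(987654321L).setKeyCount(100L).build();
    System.out.println(broken.keyCount + " vs " + fixed.keyCount); // 0 vs 100
  }
}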


---------------------------------------------------------------------
To unsubscribe, e-mail: common-commits-unsubscribe@hadoop.apache.org
For additional commands, e-mail: common-commits-help@hadoop.apache.org


[33/50] [abbrv] hadoop git commit: HDFS-13892. Disk Balancer: Make execute command documentation better. Contributed by Ranith Sardar.

Posted by xy...@apache.org.
HDFS-13892. Disk Balancer: Make execute command documentation better.
Contributed by Ranith Sardar.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/6fc293fe
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/6fc293fe
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/6fc293fe

Branch: refs/heads/HDDS-4
Commit: 6fc293fece935e3524ae59699aa3c3e3d98f6d86
Parents: 6b5838e
Author: Anu Engineer <ae...@apache.org>
Authored: Wed Sep 19 20:48:41 2018 -0700
Committer: Anu Engineer <ae...@apache.org>
Committed: Wed Sep 19 20:48:41 2018 -0700

----------------------------------------------------------------------
 .../hadoop-hdfs/src/site/markdown/HDFSDiskbalancer.md            | 4 +++-
 1 file changed, 3 insertions(+), 1 deletion(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/6fc293fe/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HDFSDiskbalancer.md
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HDFSDiskbalancer.md b/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HDFSDiskbalancer.md
index 5dd6ffc..955f179 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HDFSDiskbalancer.md
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HDFSDiskbalancer.md
@@ -78,7 +78,9 @@ Execute command takes a plan command executes it against the datanode that plan
 `hdfs diskbalancer -execute /system/diskbalancer/nodename.plan.json`
 
 This executes the plan by reading datanode’s address from the plan file.
-
+When DiskBalancer executes the plan, it is the beginning of an asynchronous process that can take a long time.
+So, query command can help to get the current status of execute command.
+ 
 | COMMAND\_OPTION    | Description |
 |:---- |:---- |
 | `-skipDateCheck` |  Skip date check and force execute the plan.|


---------------------------------------------------------------------
To unsubscribe, e-mail: common-commits-unsubscribe@hadoop.apache.org
For additional commands, e-mail: common-commits-help@hadoop.apache.org


[09/50] [abbrv] hadoop git commit: HDDS-500. TestErrorCode.java has wrong package name. Contributed by Anu Engineer.

Posted by xy...@apache.org.
HDDS-500. TestErrorCode.java has wrong package name. Contributed by Anu Engineer.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/7ff00f55
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/7ff00f55
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/7ff00f55

Branch: refs/heads/HDDS-4
Commit: 7ff00f558713f2f0755193b7d57ebdad8d8f349a
Parents: f176e8a3
Author: Arpit Agarwal <ar...@apache.org>
Authored: Tue Sep 18 15:52:00 2018 -0700
Committer: Arpit Agarwal <ar...@apache.org>
Committed: Tue Sep 18 15:52:00 2018 -0700

----------------------------------------------------------------------
 .../hadoop/ozone/web/client/TestKeys.java       |  1 +
 .../org/apache/hadoop/ozone/TestErrorCode.java  | 53 --------------------
 .../apache/hadoop/ozone/web/TestErrorCode.java  | 53 ++++++++++++++++++++
 3 files changed, 54 insertions(+), 53 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/7ff00f55/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/web/client/TestKeys.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/web/client/TestKeys.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/web/client/TestKeys.java
index 21887be..6377f11 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/web/client/TestKeys.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/web/client/TestKeys.java
@@ -328,6 +328,7 @@ public class TestKeys {
     cluster.restartHddsDatanode(datanodeIdx);
   }
 
+  @Ignore("Causes a JVm exit")
   @Test
   public void testPutAndGetKeyWithDnRestart() throws Exception {
     runTestPutAndGetKeyWithDnRestart(

http://git-wip-us.apache.org/repos/asf/hadoop/blob/7ff00f55/hadoop-ozone/objectstore-service/src/test/java/org/apache/hadoop/ozone/TestErrorCode.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/objectstore-service/src/test/java/org/apache/hadoop/ozone/TestErrorCode.java b/hadoop-ozone/objectstore-service/src/test/java/org/apache/hadoop/ozone/TestErrorCode.java
deleted file mode 100644
index abb61bb..0000000
--- a/hadoop-ozone/objectstore-service/src/test/java/org/apache/hadoop/ozone/TestErrorCode.java
+++ /dev/null
@@ -1,53 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- *  with the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- *  Unless required by applicable law or agreed to in writing, software
- *  distributed under the License is distributed on an "AS IS" BASIS,
- *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- *  See the License for the specific language governing permissions and
- *  limitations under the License.
- */
-
-package org.apache.hadoop.ozone.web;
-
-import org.apache.hadoop.ozone.web.exceptions.ErrorTable;
-import org.apache.hadoop.ozone.client.rest.OzoneException;
-import org.junit.Test;
-
-import static junit.framework.TestCase.assertEquals;
-import static org.apache.hadoop.ozone.web.utils.OzoneUtils.getRequestID;
-
-/**
- * Test Ozone Error Codes.
- */
-public class TestErrorCode {
-  /**
-   * Test Error Generator functions.
-   */
-  @Test
-  public void testErrorGen() {
-    OzoneException e = ErrorTable
-        .newError(ErrorTable.ACCESS_DENIED, getRequestID(), "/test/path",
-                  "localhost");
-    assertEquals(e.getHostID(), "localhost");
-    assertEquals(e.getShortMessage(),
-                 ErrorTable.ACCESS_DENIED.getShortMessage());
-  }
-
-  @Test
-  public void testErrorGenWithException() {
-    OzoneException e =
-        new OzoneException(ErrorTable.ACCESS_DENIED.getHttpCode(),
-                           "short message", new Exception("Hello"));
-    assertEquals("short message", e.getShortMessage());
-    assertEquals("Hello", e.getMessage());
-  }
-}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/7ff00f55/hadoop-ozone/objectstore-service/src/test/java/org/apache/hadoop/ozone/web/TestErrorCode.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/objectstore-service/src/test/java/org/apache/hadoop/ozone/web/TestErrorCode.java b/hadoop-ozone/objectstore-service/src/test/java/org/apache/hadoop/ozone/web/TestErrorCode.java
new file mode 100644
index 0000000..abb61bb
--- /dev/null
+++ b/hadoop-ozone/objectstore-service/src/test/java/org/apache/hadoop/ozone/web/TestErrorCode.java
@@ -0,0 +1,53 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ *  with the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ *  Unless required by applicable law or agreed to in writing, software
+ *  distributed under the License is distributed on an "AS IS" BASIS,
+ *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ *  See the License for the specific language governing permissions and
+ *  limitations under the License.
+ */
+
+package org.apache.hadoop.ozone.web;
+
+import org.apache.hadoop.ozone.web.exceptions.ErrorTable;
+import org.apache.hadoop.ozone.client.rest.OzoneException;
+import org.junit.Test;
+
+import static junit.framework.TestCase.assertEquals;
+import static org.apache.hadoop.ozone.web.utils.OzoneUtils.getRequestID;
+
+/**
+ * Test Ozone Error Codes.
+ */
+public class TestErrorCode {
+  /**
+   * Test Error Generator functions.
+   */
+  @Test
+  public void testErrorGen() {
+    OzoneException e = ErrorTable
+        .newError(ErrorTable.ACCESS_DENIED, getRequestID(), "/test/path",
+                  "localhost");
+    assertEquals(e.getHostID(), "localhost");
+    assertEquals(e.getShortMessage(),
+                 ErrorTable.ACCESS_DENIED.getShortMessage());
+  }
+
+  @Test
+  public void testErrorGenWithException() {
+    OzoneException e =
+        new OzoneException(ErrorTable.ACCESS_DENIED.getHttpCode(),
+                           "short message", new Exception("Hello"));
+    assertEquals("short message", e.getShortMessage());
+    assertEquals("Hello", e.getMessage());
+  }
+}


---------------------------------------------------------------------
To unsubscribe, e-mail: common-commits-unsubscribe@hadoop.apache.org
For additional commands, e-mail: common-commits-help@hadoop.apache.org


[10/50] [abbrv] hadoop git commit: HDDS-503. Suppress ShellBasedUnixGroupsMapping exception in tests. Contributed by Arpit Agarwal.

Posted by xy...@apache.org.
HDDS-503. Suppress ShellBasedUnixGroupsMapping exception in tests. Contributed by Arpit Agarwal.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/17f5651a
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/17f5651a
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/17f5651a

Branch: refs/heads/HDDS-4
Commit: 17f5651a5124c6d00fc990f252de7af5c226a314
Parents: 7ff00f5
Author: Arpit Agarwal <ar...@apache.org>
Authored: Tue Sep 18 16:06:21 2018 -0700
Committer: Arpit Agarwal <ar...@apache.org>
Committed: Tue Sep 18 16:06:21 2018 -0700

----------------------------------------------------------------------
 .../integration-test/src/test/resources/log4j.properties        | 5 ++++-
 hadoop-ozone/ozonefs/src/test/resources/log4j.properties        | 1 +
 2 files changed, 5 insertions(+), 1 deletion(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/17f5651a/hadoop-ozone/integration-test/src/test/resources/log4j.properties
----------------------------------------------------------------------
diff --git a/hadoop-ozone/integration-test/src/test/resources/log4j.properties b/hadoop-ozone/integration-test/src/test/resources/log4j.properties
index cad9dd1..9ec5a92 100644
--- a/hadoop-ozone/integration-test/src/test/resources/log4j.properties
+++ b/hadoop-ozone/integration-test/src/test/resources/log4j.properties
@@ -15,4 +15,7 @@ log4j.rootLogger=info,stdout
 log4j.threshold=ALL
 log4j.appender.stdout=org.apache.log4j.ConsoleAppender
 log4j.appender.stdout.layout=org.apache.log4j.PatternLayout
-log4j.appender.stdout.layout.ConversionPattern=%d{ISO8601} %-5p %c{2} (%F:%M(%L)) - %m%n
\ No newline at end of file
+log4j.appender.stdout.layout.ConversionPattern=%d{ISO8601} %-5p %c{2} (%F:%M(%L)) - %m%n
+
+log4j.logger.org.apache.hadoop.security.ShellBasedUnixGroupsMapping=ERROR
+log4j.logger.org.apache.hadoop.util.NativeCodeLoader=ERROR

http://git-wip-us.apache.org/repos/asf/hadoop/blob/17f5651a/hadoop-ozone/ozonefs/src/test/resources/log4j.properties
----------------------------------------------------------------------
diff --git a/hadoop-ozone/ozonefs/src/test/resources/log4j.properties b/hadoop-ozone/ozonefs/src/test/resources/log4j.properties
index 3bf1619..a7b5aa9 100644
--- a/hadoop-ozone/ozonefs/src/test/resources/log4j.properties
+++ b/hadoop-ozone/ozonefs/src/test/resources/log4j.properties
@@ -17,6 +17,7 @@ log4j.appender.stdout=org.apache.log4j.ConsoleAppender
 log4j.appender.stdout.layout=org.apache.log4j.PatternLayout
 log4j.appender.stdout.layout.ConversionPattern=%d{ISO8601} [%t] %-5p %c{2} (%F:%M(%L)) - %m%n
 
+log4j.logger.org.apache.hadoop.security.ShellBasedUnixGroupsMapping=ERROR
 log4j.logger.org.apache.hadoop.util.NativeCodeLoader=ERROR
 
 # for debugging low level Ozone operations, uncomment this line


---------------------------------------------------------------------
To unsubscribe, e-mail: common-commits-unsubscribe@hadoop.apache.org
For additional commands, e-mail: common-commits-help@hadoop.apache.org


[21/50] [abbrv] hadoop git commit: HDDS-461. Container remains in CLOSING state in SCM forever. Contributed by Shashikant Banerjee.

Posted by xy...@apache.org.
HDDS-461. Container remains in CLOSING state in SCM forever. Contributed by Shashikant Banerjee.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/61a4b07b
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/61a4b07b
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/61a4b07b

Branch: refs/heads/HDDS-4
Commit: 61a4b07bdaf68d45c7c0bba0485d95a1d908a73d
Parents: b3c5221
Author: Nanda kumar <na...@apache.org>
Authored: Wed Sep 19 21:43:44 2018 +0530
Committer: Nanda kumar <na...@apache.org>
Committed: Wed Sep 19 21:43:44 2018 +0530

----------------------------------------------------------------------
 .../common/statemachine/StateContext.java       |  42 +++++--
 .../CloseContainerCommandHandler.java           |  17 ++-
 .../transport/server/XceiverServerGrpc.java     |  12 +-
 .../server/ratis/XceiverServerRatis.java        |  31 +++--
 .../container/ozoneimpl/OzoneContainer.java     | 116 ++++++-------------
 .../commands/CloseContainerCommand.java         |  23 +---
 .../container/CloseContainerEventHandler.java   |  90 ++++++++------
 .../TestCloseContainerByPipeline.java           |   5 +-
 8 files changed, 175 insertions(+), 161 deletions(-)
----------------------------------------------------------------------
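
Among the changes below, StateContext stops taking parallel-stream snapshots of its queues and instead drains up to a limit while holding the lock, so returned items are also removed. A minimal generic sketch of that bounded-drain pattern, under the assumption of a single queue guarded by its own monitor:

import java.util.ArrayList;
import java.util.LinkedList;
import java.util.List;
import java.util.Queue;

// Bounded drain: take at most maxLimit items and remove them from the queue
// in the same synchronized section, unlike a stream().limit() snapshot that
// leaves the items queued for the next caller to see again.
public final class BoundedDrain<T> {
  private final Queue<T> queue = new LinkedList<>();

  public void add(T item) {
    synchronized (queue) {
      queue.add(item);
    }
  }

  public List<T> poll(int maxLimit) {
    List<T> drained = new ArrayList<>();
    synchronized (queue) {
      int limit = Math.min(maxLimit, queue.size());
      for (int i = 0; i < limit; i++) {
        drained.add(queue.poll());
      }
    }
    return drained;
  }

  public static void main(String[] args) {
    BoundedDrain<String> actions = new BoundedDrain<>();
    actions.add("closeContainer");
    actions.add("reportPipeline");
    System.out.println(actions.poll(1)); // [closeContainer]
    System.out.println(actions.poll(5)); // [reportPipeline]
  }
}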


http://git-wip-us.apache.org/repos/asf/hadoop/blob/61a4b07b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/StateContext.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/StateContext.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/StateContext.java
index 9d5a778..47c2492 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/StateContext.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/StateContext.java
@@ -16,6 +16,7 @@
  */
 package org.apache.hadoop.ozone.container.common.statemachine;
 
+import com.google.common.base.Preconditions;
 import com.google.protobuf.GeneratedMessage;
 import java.util.Map;
 import java.util.concurrent.ConcurrentHashMap;
@@ -43,6 +44,7 @@ import org.slf4j.LoggerFactory;
 import java.util.LinkedList;
 import java.util.List;
 import java.util.Queue;
+import java.util.ArrayList;
 import java.util.concurrent.ExecutionException;
 import java.util.concurrent.ExecutorService;
 import java.util.concurrent.TimeUnit;
@@ -50,7 +52,6 @@ import java.util.concurrent.TimeoutException;
 import java.util.concurrent.atomic.AtomicLong;
 import java.util.concurrent.locks.Lock;
 import java.util.concurrent.locks.ReentrantLock;
-import java.util.stream.Collectors;
 
 import static org.apache.hadoop.ozone.OzoneConsts.INVALID_PORT;
 
@@ -206,9 +207,18 @@ public class StateContext {
    * @return List<reports>
    */
   public List<GeneratedMessage> getReports(int maxLimit) {
+    List<GeneratedMessage> reportList = new ArrayList<>();
     synchronized (reports) {
-      return reports.parallelStream().limit(maxLimit)
-          .collect(Collectors.toList());
+      if (!reports.isEmpty()) {
+        int size = reports.size();
+        int limit = size > maxLimit ? maxLimit : size;
+        for (int count = 0; count < limit; count++) {
+          GeneratedMessage report = reports.poll();
+          Preconditions.checkNotNull(report);
+          reportList.add(report);
+        }
+      }
+      return reportList;
     }
   }
 
@@ -254,9 +264,20 @@ public class StateContext {
    * @return List<ContainerAction>
    */
   public List<ContainerAction> getPendingContainerAction(int maxLimit) {
+    List<ContainerAction> containerActionList = new ArrayList<>();
     synchronized (containerActions) {
-      return containerActions.parallelStream().limit(maxLimit)
-          .collect(Collectors.toList());
+      if (!containerActions.isEmpty()) {
+        int size = containerActions.size();
+        int limit = size > maxLimit ? maxLimit : size;
+        for (int count = 0; count < limit; count++) {
+          // we need to remove the action from the containerAction queue
+          // as well
+          ContainerAction action = containerActions.poll();
+          Preconditions.checkNotNull(action);
+          containerActionList.add(action);
+        }
+      }
+      return containerActionList;
     }
   }
 
@@ -295,9 +316,16 @@ public class StateContext {
    * @return List<ContainerAction>
    */
   public List<PipelineAction> getPendingPipelineAction(int maxLimit) {
+    List<PipelineAction> pipelineActionList = new ArrayList<>();
     synchronized (pipelineActions) {
-      return pipelineActions.parallelStream().limit(maxLimit)
-          .collect(Collectors.toList());
+      if (!pipelineActions.isEmpty()) {
+        int size = pipelineActions.size();
+        int limit = size > maxLimit ? maxLimit : size;
+        for (int count = 0; count < limit; count++) {
+          pipelineActionList.add(pipelineActions.poll());
+        }
+      }
+      return pipelineActionList;
     }
   }
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/61a4b07b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/CloseContainerCommandHandler.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/CloseContainerCommandHandler.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/CloseContainerCommandHandler.java
index 030a357..d4e13ee 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/CloseContainerCommandHandler.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/CloseContainerCommandHandler.java
@@ -28,6 +28,7 @@ import org.apache.hadoop.ozone.container.common.statemachine.StateContext;
 import org.apache.hadoop.ozone.container.ozoneimpl.OzoneContainer;
 import org.apache.hadoop.ozone.protocol.commands.SCMCommand;
 import org.apache.hadoop.util.Time;
+import org.apache.ratis.protocol.NotLeaderException;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
@@ -89,9 +90,21 @@ public class CloseContainerCommandHandler implements CommandHandler {
       // submit the close container request for the XceiverServer to handle
       container.submitContainerRequest(
           request.build(), replicationType, pipelineID);
-      cmdExecuted = true;
     } catch (Exception e) {
-      LOG.error("Can't close container " + containerID, e);
+      if (e instanceof NotLeaderException) {
+        // If the particular datanode is not the Ratis leader, the close
+        // container command will not be executed by the follower but will be
+        // executed by Ratis stateMachine transactions via leader to follower.
+        // There can also be case where the datanode is in candidate state.
+        // In these situations, NotLeaderException is thrown. Remove the status
+        // from cmdStatus Map here so that it will be retried only by SCM if the
+        // leader could not not close the container after a certain time.
+        context.removeCommandStatus(containerID);
+        LOG.info(e.getLocalizedMessage());
+      } else {
+        LOG.error("Can't close container " + containerID, e);
+        cmdExecuted = false;
+      }
     } finally {
       updateCommandStatus(context, command, cmdExecuted, LOG);
       long endTime = Time.monotonicNow();

http://git-wip-us.apache.org/repos/asf/hadoop/blob/61a4b07b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/XceiverServerGrpc.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/XceiverServerGrpc.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/XceiverServerGrpc.java
index 83e742c..c51da98 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/XceiverServerGrpc.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/XceiverServerGrpc.java
@@ -21,12 +21,15 @@ package org.apache.hadoop.ozone.container.common.transport.server;
 import com.google.common.base.Preconditions;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hdds.protocol.DatanodeDetails;
+import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos;
 import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos
     .ContainerCommandRequestProto;
 import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
 import org.apache.hadoop.hdds.protocol.proto
         .StorageContainerDatanodeProtocolProtos.PipelineReport;
 import org.apache.hadoop.hdds.scm.container.common.helpers.PipelineID;
+import org.apache.hadoop.hdds.scm.container.common.helpers.
+    StorageContainerException;
 import org.apache.hadoop.ozone.OzoneConfigKeys;
 import org.apache.hadoop.ozone.container.common.interfaces.ContainerDispatcher;
 
@@ -128,8 +131,13 @@ public final class XceiverServerGrpc implements XceiverServerSpi {
 
   @Override
   public void submitRequest(ContainerCommandRequestProto request,
-      HddsProtos.PipelineID pipelineID) {
-    storageContainer.dispatch(request);
+      HddsProtos.PipelineID pipelineID) throws IOException {
+    ContainerProtos.ContainerCommandResponseProto response =
+        storageContainer.dispatch(request);
+    if (response.getResult() != ContainerProtos.Result.SUCCESS) {
+      throw new StorageContainerException(response.getMessage(),
+          response.getResult());
+    }
   }
 
   @Override

http://git-wip-us.apache.org/repos/asf/hadoop/blob/61a4b07b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/XceiverServerRatis.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/XceiverServerRatis.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/XceiverServerRatis.java
index d88995b..c2ef504 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/XceiverServerRatis.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/XceiverServerRatis.java
@@ -74,7 +74,6 @@ import java.util.List;
 import java.util.Objects;
 import java.util.UUID;
 import java.util.concurrent.ArrayBlockingQueue;
-import java.util.concurrent.CompletableFuture;
 import java.util.concurrent.ThreadPoolExecutor;
 import java.util.concurrent.TimeUnit;
 import java.util.concurrent.atomic.AtomicLong;
@@ -319,39 +318,35 @@ public final class XceiverServerRatis implements XceiverServerSpi {
     return server;
   }
 
-  private void processReply(RaftClientReply reply) {
-
+  private void processReply(RaftClientReply reply) throws IOException {
     // NotLeader exception is thrown only when the raft server to which the
     // request is submitted is not the leader. The request will be rejected
-    // and will eventually be executed once the request comnes via the leader
+    // and will eventually be executed once the request comes via the leader
     // node.
     NotLeaderException notLeaderException = reply.getNotLeaderException();
     if (notLeaderException != null) {
-      LOG.info(reply.getNotLeaderException().getLocalizedMessage());
+      throw notLeaderException;
     }
     StateMachineException stateMachineException =
         reply.getStateMachineException();
     if (stateMachineException != null) {
-      // In case the request could not be completed, StateMachine Exception
-      // will be thrown. For now, Just log the message.
-      // If the container could not be closed, SCM will come to know
-      // via containerReports. CloseContainer should be re tried via SCM.
-      LOG.error(stateMachineException.getLocalizedMessage());
+      throw stateMachineException;
     }
   }
 
   @Override
-  public void submitRequest(
-      ContainerCommandRequestProto request, HddsProtos.PipelineID pipelineID)
-      throws IOException {
-    // ReplicationLevel.MAJORITY ensures the transactions corresponding to
-    // the request here are applied on all the raft servers.
+  public void submitRequest(ContainerCommandRequestProto request,
+      HddsProtos.PipelineID pipelineID) throws IOException {
+    RaftClientReply reply;
     RaftClientRequest raftClientRequest =
         createRaftClientRequest(request, pipelineID,
             RaftClientRequest.writeRequestType(replicationLevel));
-    CompletableFuture<RaftClientReply> reply =
-        server.submitClientRequestAsync(raftClientRequest);
-    reply.thenAccept(this::processReply);
+    try {
+      reply = server.submitClientRequestAsync(raftClientRequest).get();
+    } catch (Exception e) {
+      throw new IOException(e.getMessage(), e);
+    }
+    processReply(reply);
   }
 
   private RaftClientRequest createRaftClientRequest(
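
The XceiverServerRatis change above blocks on the asynchronous reply and rethrows its failures instead of logging them inside a callback, so close-container errors reach the caller. A condensed sketch of that sync-over-async pattern with simplified stand-in types:

import java.io.IOException;
import java.util.concurrent.CompletableFuture;

// Simplified stand-ins for the Ratis reply types; illustrative only.
public final class SyncOverAsync {
  static final class Reply {
    final IOException failure; // null on success
    Reply(IOException failure) { this.failure = failure; }
  }

  static CompletableFuture<Reply> submitAsync() {
    return CompletableFuture.completedFuture(new Reply(null));
  }

  // Block on the future, then rethrow any exception carried in the reply,
  // rather than logging it inside thenAccept where callers never see it.
  static void submit() throws IOException {
    Reply reply;
    try {
      reply = submitAsync().get();
    } catch (Exception e) {
      throw new IOException(e.getMessage(), e);
    }
    if (reply.failure != null) {
      throw reply.failure;
    }
  }

  public static void main(String[] args) throws IOException {
    submit();
    System.out.println("request applied");
  }
}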

http://git-wip-us.apache.org/repos/asf/hadoop/blob/61a4b07b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ozoneimpl/OzoneContainer.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ozoneimpl/OzoneContainer.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ozoneimpl/OzoneContainer.java
index ebacf75..da58772 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ozoneimpl/OzoneContainer.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ozoneimpl/OzoneContainer.java
@@ -23,7 +23,8 @@ import com.google.common.base.Preconditions;
 import org.apache.hadoop.hdds.conf.OzoneConfiguration;
 import org.apache.hadoop.hdds.protocol.DatanodeDetails;
 import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos;
-import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
+import org.apache.hadoop.hdds.protocol.proto.HddsProtos.PipelineID;
+import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationType;
 import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos;
 import org.apache.hadoop.hdds.protocol.proto
         .StorageContainerDatanodeProtocolProtos.PipelineReportsProto;
@@ -46,12 +47,14 @@ import org.slf4j.LoggerFactory;
 
 import java.io.*;
 import java.util.ArrayList;
+import java.util.HashMap;
 import java.util.Iterator;
+import java.util.Map;
 
 import static org.apache.hadoop.ozone.OzoneConsts.INVALID_PORT;
 
 /**
- * Ozone main class sets up the network server and initializes the container
+ * Ozone main class sets up the network servers and initializes the container
  * layer.
  */
 public class OzoneContainer {
@@ -64,7 +67,7 @@ public class OzoneContainer {
   private final OzoneConfiguration config;
   private final VolumeSet volumeSet;
   private final ContainerSet containerSet;
-  private final XceiverServerSpi[] server;
+  private final Map<ReplicationType, XceiverServerSpi> servers;
 
   /**
    * Construct OzoneContainer object.
@@ -82,14 +85,13 @@ public class OzoneContainer {
     buildContainerSet();
     hddsDispatcher = new HddsDispatcher(config, containerSet, volumeSet,
         context);
-    server = new XceiverServerSpi[]{
-        new XceiverServerGrpc(datanodeDetails, this.config, this
-            .hddsDispatcher, createReplicationService()),
-        XceiverServerRatis.newXceiverServerRatis(datanodeDetails, this
-            .config, hddsDispatcher, context)
-    };
-
-
+    servers = new HashMap<>();
+    servers.put(ReplicationType.STAND_ALONE,
+        new XceiverServerGrpc(datanodeDetails, config, hddsDispatcher,
+            createReplicationService()));
+    servers.put(ReplicationType.RATIS, XceiverServerRatis
+        .newXceiverServerRatis(datanodeDetails, config, hddsDispatcher,
+            context));
   }
 
   private GrpcReplicationService createReplicationService() {
@@ -133,7 +135,7 @@ public class OzoneContainer {
    */
   public void start() throws IOException {
     LOG.info("Attempting to start container services.");
-    for (XceiverServerSpi serverinstance : server) {
+    for (XceiverServerSpi serverinstance : servers.values()) {
       serverinstance.start();
     }
     hddsDispatcher.init();
@@ -145,7 +147,7 @@ public class OzoneContainer {
   public void stop() {
     //TODO: at end of container IO integration work.
     LOG.info("Attempting to stop container services.");
-    for(XceiverServerSpi serverinstance: server) {
+    for(XceiverServerSpi serverinstance: servers.values()) {
       serverinstance.stop();
     }
     hddsDispatcher.shutdown();
@@ -169,7 +171,7 @@ public class OzoneContainer {
   public PipelineReportsProto getPipelineReport() {
     PipelineReportsProto.Builder pipelineReportsProto =
             PipelineReportsProto.newBuilder();
-    for (XceiverServerSpi serverInstance : server) {
+    for (XceiverServerSpi serverInstance : servers.values()) {
       pipelineReportsProto
               .addAllPipelineReport(serverInstance.getPipelineReport());
     }
@@ -181,82 +183,38 @@ public class OzoneContainer {
    * @param request
    * @param replicationType
    * @param pipelineID
-   * @throws IOException
    */
   public void submitContainerRequest(
       ContainerProtos.ContainerCommandRequestProto request,
-      HddsProtos.ReplicationType replicationType,
-      HddsProtos.PipelineID pipelineID) throws IOException {
-    XceiverServerSpi serverInstance;
-    long containerId = getContainerIdForCmd(request);
-    if (replicationType == HddsProtos.ReplicationType.RATIS) {
-      serverInstance = getRatisSerer();
-      Preconditions.checkNotNull(serverInstance);
-      serverInstance.submitRequest(request, pipelineID);
-      LOG.info("submitting {} request over RATIS server for container {}",
-          request.getCmdType(), containerId);
-    } else {
-      serverInstance = getStandaAloneSerer();
-      Preconditions.checkNotNull(serverInstance);
-      getStandaAloneSerer().submitRequest(request, pipelineID);
-      LOG.info(
-          "submitting {} request over STAND_ALONE server for container {}",
-          request.getCmdType(), containerId);
+      ReplicationType replicationType,
+      PipelineID pipelineID) throws IOException {
+    if (containerSet.getContainer(request.getContainerID())
+        .getContainerData().isClosed()) {
+      LOG.debug("Container {} is already closed", request.getContainerID());
+      // It might happen that the where the first attempt of closing the
+      // container failed with NOT_LEADER_EXCEPTION. In such cases, SCM will
+      // retry to check the container got really closed via Ratis.
+      // In such cases of the retry attempt, if the container is already closed
+      // via Ratis, we should just return.
     }
-
+    LOG.info("submitting {} request over {} server for container {}",
+        request.getCmdType(), replicationType, request.getContainerID());
+    Preconditions.checkState(servers.containsKey(replicationType));
+    servers.get(replicationType).submitRequest(request, pipelineID);
   }
 
-  private long getContainerIdForCmd(
-      ContainerProtos.ContainerCommandRequestProto request)
-      throws IllegalArgumentException {
-    ContainerProtos.Type type = request.getCmdType();
-    switch (type) {
-    case CloseContainer:
-      return request.getContainerID();
-      // Right now, we handle only closeContainer via queuing it over the
-      // over the XceiVerServer. For all other commands we throw Illegal
-      // argument exception here. Will need to extend the switch cases
-      // in case we want add another commands here.
-    default:
-      throw new IllegalArgumentException("Cmd " + request.getCmdType()
-          + " not supported over HearBeat Response");
-    }
-  }
-
-  private XceiverServerSpi getRatisSerer() {
-    for (XceiverServerSpi serverInstance : server) {
-      if (serverInstance instanceof XceiverServerRatis) {
-        return serverInstance;
-      }
-    }
-    return null;
-  }
-
-  private XceiverServerSpi getStandaAloneSerer() {
-    for (XceiverServerSpi serverInstance : server) {
-      if (!(serverInstance instanceof XceiverServerRatis)) {
-        return serverInstance;
-      }
-    }
-    return null;
-  }
-
-  private int getPortbyType(HddsProtos.ReplicationType replicationType) {
-    for (XceiverServerSpi serverinstance : server) {
-      if (serverinstance.getServerType() == replicationType) {
-        return serverinstance.getIPCPort();
-      }
-    }
-    return INVALID_PORT;
+  private int getPortByType(ReplicationType replicationType) {
+    return servers.containsKey(replicationType) ?
+        servers.get(replicationType).getIPCPort() : INVALID_PORT;
   }
 
   /**
-   * Returns the container server IPC port.
+   * Returns the container servers IPC port.
    *
-   * @return Container server IPC port.
+   * @return Container servers IPC port.
    */
   public int getContainerServerPort() {
-    return getPortbyType(HddsProtos.ReplicationType.STAND_ALONE);
+    return getPortByType(ReplicationType.STAND_ALONE);
   }
 
   /**
@@ -265,7 +223,7 @@ public class OzoneContainer {
    * @return Ratis port.
    */
   public int getRatisContainerServerPort() {
-    return getPortbyType(HddsProtos.ReplicationType.RATIS);
+    return getPortByType(ReplicationType.RATIS);
   }
 
   /**

http://git-wip-us.apache.org/repos/asf/hadoop/blob/61a4b07b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/protocol/commands/CloseContainerCommand.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/protocol/commands/CloseContainerCommand.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/protocol/commands/CloseContainerCommand.java
index aaa5f11..c2c20a4 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/protocol/commands/CloseContainerCommand.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/protocol/commands/CloseContainerCommand.java
@@ -30,25 +30,13 @@ import org.apache.hadoop.hdds.scm.container.common.helpers.PipelineID;
 public class CloseContainerCommand
     extends SCMCommand<CloseContainerCommandProto> {
 
-  private long containerID;
   private HddsProtos.ReplicationType replicationType;
   private PipelineID pipelineID;
 
   public CloseContainerCommand(long containerID,
       HddsProtos.ReplicationType replicationType,
       PipelineID pipelineID) {
-    super();
-    this.containerID = containerID;
-    this.replicationType = replicationType;
-    this.pipelineID = pipelineID;
-  }
-
-  // Should be called only for protobuf conversion
-  private CloseContainerCommand(long containerID,
-      HddsProtos.ReplicationType replicationType,
-      PipelineID pipelineID, long id) {
-    super(id);
-    this.containerID = containerID;
+    super(containerID);
     this.replicationType = replicationType;
     this.pipelineID = pipelineID;
   }
@@ -75,7 +63,7 @@ public class CloseContainerCommand
 
   public CloseContainerCommandProto getProto() {
     return CloseContainerCommandProto.newBuilder()
-        .setContainerID(containerID)
+        .setContainerID(getId())
         .setCmdId(getId())
         .setReplicationType(replicationType)
         .setPipelineID(pipelineID.getProtobuf())
@@ -85,13 +73,12 @@ public class CloseContainerCommand
   public static CloseContainerCommand getFromProtobuf(
       CloseContainerCommandProto closeContainerProto) {
     Preconditions.checkNotNull(closeContainerProto);
-    return new CloseContainerCommand(closeContainerProto.getContainerID(),
+    return new CloseContainerCommand(closeContainerProto.getCmdId(),
         closeContainerProto.getReplicationType(),
-        PipelineID.getFromProtobuf(closeContainerProto.getPipelineID()),
-        closeContainerProto.getCmdId());
+        PipelineID.getFromProtobuf(closeContainerProto.getPipelineID()));
   }
 
   public long getContainerID() {
-    return containerID;
+    return getId();
   }
 }
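
The shape of this refactor, as a minimal sketch with hypothetical simplified classes (the real SCMCommand carries protobuf plumbing omitted here): the container id is handed to the base class, so the command id and the container id become the same value, and both the duplicate field and the protobuf-only constructor disappear.

// Hypothetical, simplified mirror of the refactored classes.
abstract class SCMCommandSketch {
  private final long id;

  SCMCommandSketch(long id) {
    this.id = id;
  }

  long getId() {
    return id;
  }
}

class CloseContainerCommandSketch extends SCMCommandSketch {
  CloseContainerCommandSketch(long containerID) {
    super(containerID);    // command id == container id
  }

  long getContainerID() {
    return getId();        // no separate field to keep in sync
  }
}

Note that getProto() above still populates both setContainerID(getId()) and setCmdId(getId()), so the wire message keeps carrying both fields.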

http://git-wip-us.apache.org/repos/asf/hadoop/blob/61a4b07b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/CloseContainerEventHandler.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/CloseContainerEventHandler.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/CloseContainerEventHandler.java
index b94ce4f..7baecc4 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/CloseContainerEventHandler.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/CloseContainerEventHandler.java
@@ -17,10 +17,10 @@
 package org.apache.hadoop.hdds.scm.container;
 
 import java.io.IOException;
-import org.apache.hadoop.hdds.protocol.DatanodeDetails;
 import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
 import org.apache.hadoop.hdds.scm.container.common.helpers.ContainerInfo;
 import org.apache.hadoop.hdds.scm.container.common.helpers.ContainerWithPipeline;
+import org.apache.hadoop.hdds.scm.container.common.helpers.Pipeline;
 import org.apache.hadoop.hdds.server.events.EventHandler;
 import org.apache.hadoop.hdds.server.events.EventPublisher;
 import org.apache.hadoop.hdds.server.events.IdentifiableEventPayload;
@@ -57,7 +57,7 @@ public class CloseContainerEventHandler implements EventHandler<ContainerID> {
 
     LOG.info("Close container Event triggered for container : {}",
         containerID.getId());
-    ContainerWithPipeline containerWithPipeline = null;
+    ContainerWithPipeline containerWithPipeline;
     ContainerInfo info;
     try {
       containerWithPipeline =
@@ -74,42 +74,66 @@ public class CloseContainerEventHandler implements EventHandler<ContainerID> {
       return;
     }
 
-    if (info.getState() == HddsProtos.LifeCycleState.OPEN) {
-      for (DatanodeDetails datanode :
-          containerWithPipeline.getPipeline().getMachines()) {
-        CommandForDatanode closeContainerCommand = new CommandForDatanode<>(
-            datanode.getUuid(),
-            new CloseContainerCommand(containerID.getId(),
-                info.getReplicationType(), info.getPipelineID()));
-        publisher.fireEvent(DATANODE_COMMAND, closeContainerCommand);
-        publisher.fireEvent(CLOSE_CONTAINER_RETRYABLE_REQ, new
-            CloseContainerRetryableReq(containerID));
-      }
-      try {
-        // Finalize event will make sure the state of the container transitions
-        // from OPEN to CLOSING in containerStateManager.
-        containerManager.updateContainerState(containerID.getId(),
-            HddsProtos.LifeCycleEvent.FINALIZE);
-      } catch (IOException ex) {
-        LOG.error("Failed to update the container state to FINALIZE for"
-            + "container : {}" + containerID, ex);
-      }
-    } else if (info.getState() == HddsProtos.LifeCycleState.ALLOCATED) {
-      try {
-        // Create event will make sure the state of the container transitions
-        // from OPEN to CREATING in containerStateManager, this will move
-        // the container out of active allocation path.
+    HddsProtos.LifeCycleState state = info.getState();
+    try {
+      switch (state) {
+      case ALLOCATED:
+        // We cannot close a container in ALLOCATED state; move it to
+        // CREATING state instead. That should eventually time out and
+        // the container will be moved to DELETING state.
+        LOG.debug("Closing container {} in {} state", containerID, state);
         containerManager.updateContainerState(containerID.getId(),
             HddsProtos.LifeCycleEvent.CREATE);
-      } catch (IOException ex) {
-        LOG.error("Failed to update the container state to CREATE for"
-            + "container:{}" + containerID, ex);
+        break;
+      case CREATING:
+        // We cannot close a container in CREATING state; it will
+        // eventually time out and be moved to DELETING state.
+        LOG.debug("Closing container {} in {} state", containerID, state);
+        break;
+      case OPEN:
+        containerManager.updateContainerState(containerID.getId(),
+            HddsProtos.LifeCycleEvent.FINALIZE);
+        fireCloseContainerEvents(containerWithPipeline, info, publisher);
+        break;
+      case CLOSING:
+        fireCloseContainerEvents(containerWithPipeline, info, publisher);
+        break;
+      case CLOSED:
+      case DELETING:
+      case DELETED:
+        LOG.info(
+            "container with id : {} is in {} state and need not be closed.",
+            containerID.getId(), info.getState());
+        break;
+      default:
+        throw new IOException(
+            "Invalid container state for container " + containerID);
       }
-    } else {
-      LOG.info("container with id : {} is in {} state and need not be closed.",
-          containerID.getId(), info.getState());
+    } catch (IOException ex) {
+      LOG.error("Failed to update the container state for" + "container : {}"
+          + containerID, ex);
     }
+  }
+
+  private void fireCloseContainerEvents(
+      ContainerWithPipeline containerWithPipeline, ContainerInfo info,
+      EventPublisher publisher) {
+    ContainerID containerID = info.containerID();
+    // fire events.
+    CloseContainerCommand closeContainerCommand =
+        new CloseContainerCommand(containerID.getId(),
+            info.getReplicationType(), info.getPipelineID());
 
+    Pipeline pipeline = containerWithPipeline.getPipeline();
+    pipeline.getMachines().stream().map(
+        datanode -> new CommandForDatanode<>(datanode.getUuid(),
+            closeContainerCommand)).forEach((command) -> {
+              publisher.fireEvent(DATANODE_COMMAND, command);
+            });
+    publisher.fireEvent(CLOSE_CONTAINER_RETRYABLE_REQ,
+        new CloseContainerRetryableReq(containerID));
+    LOG.trace("Issuing {} on Pipeline {} for container", closeContainerCommand,
+        pipeline, containerID);
   }
 
   /**

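Stripped of the Ozone types, fireCloseContainerEvents() is a fan-out: build the command once, wrap it per datanode, fire each wrapped command. A self-contained sketch with hypothetical minimal types:

import java.util.Arrays;
import java.util.List;
import java.util.UUID;
import java.util.function.BiConsumer;

class FanOutSketch {

  // Hypothetical minimal stand-ins for CloseContainerCommand and
  // CommandForDatanode.
  static final class Command {
    final long containerId;
    Command(long containerId) { this.containerId = containerId; }
  }

  static final class DatanodeCommand {
    final UUID datanode;
    final Command cmd;
    DatanodeCommand(UUID datanode, Command cmd) {
      this.datanode = datanode;
      this.cmd = cmd;
    }
  }

  // Build the command once, wrap it per datanode, fire each wrapper.
  static void fire(List<UUID> datanodes, Command cmd,
      BiConsumer<String, DatanodeCommand> publisher) {
    datanodes.stream()
        .map(dn -> new DatanodeCommand(dn, cmd))
        .forEach(dc -> publisher.accept("DATANODE_COMMAND", dc));
  }

  public static void main(String[] args) {
    fire(Arrays.asList(UUID.randomUUID(), UUID.randomUUID()),
        new Command(42L),
        (event, dc) -> System.out.println(event + " -> " + dc.datanode));
  }
}
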
http://git-wip-us.apache.org/repos/asf/hadoop/blob/61a4b07b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestCloseContainerByPipeline.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestCloseContainerByPipeline.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestCloseContainerByPipeline.java
index ed9c54d..8c52847 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestCloseContainerByPipeline.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestCloseContainerByPipeline.java
@@ -132,7 +132,6 @@ public class TestCloseContainerByPipeline {
     // Make sure the closeContainerCommandHandler is Invoked
     Assert.assertTrue(
         closeContainerHandler.getInvocationCount() > lastInvocationCount);
-
   }
 
   @Test
@@ -190,6 +189,7 @@ public class TestCloseContainerByPipeline {
     Assert.assertFalse((logCapturer.getOutput().contains(
         "submitting CloseContainer request over RATIS server for container "
             + containerID)));
+    logCapturer.stopCapturing();
   }
 
   @Test
@@ -239,13 +239,14 @@ public class TestCloseContainerByPipeline {
       Assert.assertTrue(isContainerClosed(cluster,
           containerID, datanodeDetails));
     }
+    // Make sure it was really closed via Ratis, not the STAND_ALONE server
     Assert.assertFalse(logCapturer.getOutput().contains(
         "submitting CloseContainer request over STAND_ALONE "
             + "server for container " + containerID));
-    // Make sure it was really closed via StandAlone not Ratis server
     Assert.assertTrue((logCapturer.getOutput().contains(
         "submitting CloseContainer request over RATIS server for container "
             + containerID)));
+    logCapturer.stopCapturing();
   }
 
   private Boolean isContainerClosed(MiniOzoneCluster cluster, long containerID,


---------------------------------------------------------------------
To unsubscribe, e-mail: common-commits-unsubscribe@hadoop.apache.org
For additional commands, e-mail: common-commits-help@hadoop.apache.org


[04/50] [abbrv] hadoop git commit: HDDS-496. Ozone tools module is incorrectly classified as 'hdds' component. Contributed by Dinesh Chitlangia.

Posted by xy...@apache.org.
HDDS-496. Ozone tools module is incorrectly classified as 'hdds' component. Contributed by Dinesh Chitlangia.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/5c2ae7e4
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/5c2ae7e4
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/5c2ae7e4

Branch: refs/heads/HDDS-4
Commit: 5c2ae7e493892b6157f73e82ca89c39926623bb1
Parents: a968ea4
Author: Bharat Viswanadham <bh...@apache.org>
Authored: Tue Sep 18 14:50:52 2018 -0700
Committer: Bharat Viswanadham <bh...@apache.org>
Committed: Tue Sep 18 14:51:03 2018 -0700

----------------------------------------------------------------------
 hadoop-ozone/tools/pom.xml | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/5c2ae7e4/hadoop-ozone/tools/pom.xml
----------------------------------------------------------------------
diff --git a/hadoop-ozone/tools/pom.xml b/hadoop-ozone/tools/pom.xml
index 5296502..eeec595 100644
--- a/hadoop-ozone/tools/pom.xml
+++ b/hadoop-ozone/tools/pom.xml
@@ -29,7 +29,7 @@ http://maven.apache.org/xsd/maven-4.0.0.xsd">
   <packaging>jar</packaging>
 
   <properties>
-    <hadoop.component>hdds</hadoop.component>
+    <hadoop.component>ozone</hadoop.component>
     <is.hadoop.component>true</is.hadoop.component>
   </properties>
 


---------------------------------------------------------------------
To unsubscribe, e-mail: common-commits-unsubscribe@hadoop.apache.org
For additional commands, e-mail: common-commits-help@hadoop.apache.org


[32/50] [abbrv] hadoop git commit: YARN-8784. DockerLinuxContainerRuntime prevents access to distributed cache entries on a full disk. Contributed by Eric Badger

Posted by xy...@apache.org.
YARN-8784. DockerLinuxContainerRuntime prevents access to distributed cache entries on a full disk. Contributed by Eric Badger


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/6b5838ed
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/6b5838ed
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/6b5838ed

Branch: refs/heads/HDDS-4
Commit: 6b5838ed3220f992092c7348f92f1d9d0d4a3061
Parents: f6bb1ca
Author: Jason Lowe <jl...@apache.org>
Authored: Wed Sep 19 16:44:51 2018 -0500
Committer: Jason Lowe <jl...@apache.org>
Committed: Wed Sep 19 16:44:51 2018 -0500

----------------------------------------------------------------------
 .../launcher/ContainerLaunch.java               |  5 +-
 .../launcher/TestContainerLaunch.java           | 81 ++++++++++++++++++++
 2 files changed, 84 insertions(+), 2 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/6b5838ed/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/launcher/ContainerLaunch.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/launcher/ContainerLaunch.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/launcher/ContainerLaunch.java
index 2aca5f8..6347d4e 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/launcher/ContainerLaunch.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/launcher/ContainerLaunch.java
@@ -250,12 +250,13 @@ public class ContainerLaunch implements Callable<Integer> {
       // accessible by users
       pidFilePath = dirsHandler.getLocalPathForWrite(pidFileSubpath);
       List<String> localDirs = dirsHandler.getLocalDirs();
+      List<String> localDirsForRead = dirsHandler.getLocalDirsForRead();
       List<String> logDirs = dirsHandler.getLogDirs();
-      List<String> filecacheDirs = getNMFilecacheDirs(localDirs);
+      List<String> filecacheDirs = getNMFilecacheDirs(localDirsForRead);
       List<String> userLocalDirs = getUserLocalDirs(localDirs);
       List<String> containerLocalDirs = getContainerLocalDirs(localDirs);
       List<String> containerLogDirs = getContainerLogDirs(logDirs);
-      List<String> userFilecacheDirs = getUserFilecacheDirs(localDirs);
+      List<String> userFilecacheDirs = getUserFilecacheDirs(localDirsForRead);
       List<String> applicationLocalDirs = getApplicationLocalDirs(localDirs,
           appIdStr);
 

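For context on why this one-line change fixes the bug: LocalDirsHandlerService tracks disks that are merely full separately from disks that have failed. A full disk drops out of the writable dir list but, roughly speaking, stays in the readable one, and distributed cache entries localized there earlier must still be found and bind-mounted into the container. A sketch of that distinction, with hypothetical simplified bookkeeping:

import java.util.ArrayList;
import java.util.List;

// Hypothetical simplified bookkeeping; the real
// LocalDirsHandlerService/DirectoryCollection logic is richer.
class DirsHandlerSketch {
  private final List<String> allDirs;   // healthy disks
  private final List<String> fullDirs;  // over the utilization cutoff

  DirsHandlerSketch(List<String> allDirs, List<String> fullDirs) {
    this.allDirs = allDirs;
    this.fullDirs = fullDirs;
  }

  // Eligible for new writes: full disks are excluded.
  List<String> getLocalDirs() {
    List<String> writable = new ArrayList<>(allDirs);
    writable.removeAll(fullDirs);
    return writable;
  }

  // Full disks are still readable, so resources localized on them
  // earlier can still be found (and bind-mounted).
  List<String> getLocalDirsForRead() {
    return new ArrayList<>(allDirs);
  }
}
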
http://git-wip-us.apache.org/repos/asf/hadoop/blob/6b5838ed/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/launcher/TestContainerLaunch.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/launcher/TestContainerLaunch.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/launcher/TestContainerLaunch.java
index ddf46a6..5714a1c 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/launcher/TestContainerLaunch.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/launcher/TestContainerLaunch.java
@@ -96,6 +96,7 @@ import org.apache.hadoop.yarn.server.nodemanager.ContainerExecutor;
 import org.apache.hadoop.yarn.server.nodemanager.ContainerExecutor.ExitCode;
 import org.apache.hadoop.yarn.server.nodemanager.DefaultContainerExecutor;
 import org.apache.hadoop.yarn.server.nodemanager.LinuxContainerExecutor;
+import org.apache.hadoop.yarn.server.nodemanager.LocalDirsHandlerService;
 import org.apache.hadoop.yarn.server.nodemanager.NodeManager.NMContext;
 import org.apache.hadoop.yarn.server.nodemanager.NodeStatusUpdater;
 import org.apache.hadoop.yarn.server.nodemanager.containermanager.BaseContainerManagerTest;
@@ -107,6 +108,7 @@ import org.apache.hadoop.yarn.server.nodemanager.containermanager.launcher.Conta
 import org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.privileged.PrivilegedOperationExecutor;
 import org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.runtime.DockerLinuxContainerRuntime;
 import org.apache.hadoop.yarn.server.nodemanager.containermanager.localizer.ContainerLocalizer;
+import org.apache.hadoop.yarn.server.nodemanager.executor.ContainerStartContext;
 import org.apache.hadoop.yarn.server.nodemanager.recovery.NMNullStateStoreService;
 import org.apache.hadoop.yarn.server.nodemanager.security.NMContainerTokenSecretManager;
 import org.apache.hadoop.yarn.server.nodemanager.security.NMTokenSecretManagerInNM;
@@ -121,6 +123,7 @@ import org.junit.Assert;
 import org.junit.Assume;
 import org.junit.Before;
 import org.junit.Test;
+import org.mockito.ArgumentCaptor;
 
 public class TestContainerLaunch extends BaseContainerManagerTest {
 
@@ -2362,4 +2365,82 @@ public class TestContainerLaunch extends BaseContainerManagerTest {
       }
     }
   }
+
+  @Test
+  public void testDistributedCacheDirs() throws Exception {
+    Container container = mock(Container.class);
+    ApplicationId appId =
+        ApplicationId.newInstance(System.currentTimeMillis(), 1);
+    ContainerId containerId = ContainerId
+        .newContainerId(ApplicationAttemptId.newInstance(appId, 1), 1);
+    when(container.getContainerId()).thenReturn(containerId);
+    when(container.getUser()).thenReturn("test");
+
+    when(container.getLocalizedResources())
+        .thenReturn(Collections.<Path, List<String>> emptyMap());
+    Dispatcher dispatcher = mock(Dispatcher.class);
+
+    ContainerLaunchContext clc = mock(ContainerLaunchContext.class);
+    when(clc.getCommands()).thenReturn(Collections.<String>emptyList());
+    when(container.getLaunchContext()).thenReturn(clc);
+
+    @SuppressWarnings("rawtypes")
+    ContainerExitHandler eventHandler =
+        mock(ContainerExitHandler.class);
+    when(dispatcher.getEventHandler()).thenReturn(eventHandler);
+
+    Application app = mock(Application.class);
+    when(app.getAppId()).thenReturn(appId);
+    when(app.getUser()).thenReturn("test");
+
+    Credentials creds = mock(Credentials.class);
+    when(container.getCredentials()).thenReturn(creds);
+
+    ((NMContext) context).setNodeId(NodeId.newInstance("127.0.0.1", HTTP_PORT));
+    ContainerExecutor mockExecutor = mock(ContainerExecutor.class);
+
+    LocalDirsHandlerService mockDirsHandler =
+        mock(LocalDirsHandlerService.class);
+
+    List<String> localDirsForRead = new ArrayList<>();
+    String localDir1 =
+      new File("target", this.getClass().getSimpleName() + "-localDir1")
+        .getAbsoluteFile().toString();
+    String localDir2 =
+      new File("target", this.getClass().getSimpleName() + "-localDir2")
+        .getAbsoluteFile().toString();
+    localDirsForRead.add(localDir1);
+    localDirsForRead.add(localDir2);
+
+    List<String> localDirs = new ArrayList<>();
+    localDirs.add(localDir1);
+    Path logPathForWrite = new Path(localDirs.get(0));
+
+    when(mockDirsHandler.areDisksHealthy()).thenReturn(true);
+    when(mockDirsHandler.getLocalDirsForRead()).thenReturn(localDirsForRead);
+    when(mockDirsHandler.getLocalDirs()).thenReturn(localDirs);
+    when(mockDirsHandler.getLogDirs()).thenReturn(localDirs);
+    when(mockDirsHandler.getLogPathForWrite(anyString(),
+        anyBoolean())).thenReturn(logPathForWrite);
+    when(mockDirsHandler.getLocalPathForWrite(anyString()))
+        .thenReturn(logPathForWrite);
+    when(mockDirsHandler.getLocalPathForWrite(anyString(), anyLong(),
+      anyBoolean())).thenReturn(logPathForWrite);
+
+    ContainerLaunch launch = new ContainerLaunch(context, conf, dispatcher,
+        mockExecutor, app, container, mockDirsHandler, containerManager);
+    launch.call();
+
+    ArgumentCaptor<ContainerStartContext> ctxCaptor =
+        ArgumentCaptor.forClass(ContainerStartContext.class);
+    verify(mockExecutor, times(1)).launchContainer(ctxCaptor.capture());
+    ContainerStartContext ctx = ctxCaptor.getValue();
+
+    Assert.assertEquals(StringUtils.join(",",
+        launch.getNMFilecacheDirs(localDirsForRead)),
+        StringUtils.join(",", ctx.getFilecacheDirs()));
+    Assert.assertEquals(StringUtils.join(",",
+        launch.getUserFilecacheDirs(localDirsForRead)),
+        StringUtils.join(",", ctx.getUserFilecacheDirs()));
+  }
 }


---------------------------------------------------------------------
To unsubscribe, e-mail: common-commits-unsubscribe@hadoop.apache.org
For additional commands, e-mail: common-commits-help@hadoop.apache.org


[41/50] [abbrv] hadoop git commit: HDDS-394. Rename *Key Apis in DatanodeContainerProtocol to *Block apis. Contributed by Dinesh Chitlangia.

Posted by xy...@apache.org.
HDDS-394. Rename *Key Apis in DatanodeContainerProtocol to *Block apis.
Contributed by Dinesh Chitlangia.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/096a7160
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/096a7160
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/096a7160

Branch: refs/heads/HDDS-4
Commit: 096a7160803494219581c067dfcdb67d2bd0bcdb
Parents: aa4bd49
Author: Anu Engineer <ae...@apache.org>
Authored: Thu Sep 20 11:51:49 2018 -0700
Committer: Anu Engineer <ae...@apache.org>
Committed: Thu Sep 20 11:51:49 2018 -0700

----------------------------------------------------------------------
 .../hdds/scm/storage/ChunkOutputStream.java     |  13 +-
 .../java/org/apache/hadoop/hdds/HddsUtils.java  |   8 +-
 .../scm/storage/ContainerProtocolCalls.java     |  62 ++---
 .../container/common/helpers/BlockData.java     | 255 +++++++++++++++++++
 .../ozone/container/common/helpers/KeyData.java | 253 ------------------
 .../main/proto/DatanodeContainerProtocol.proto  |  74 +++---
 .../common/impl/OpenContainerBlockMap.java      |  46 ++--
 .../DeleteBlocksCommandHandler.java             |   4 +-
 .../server/ratis/ContainerStateMachine.java     |  28 +-
 .../keyvalue/KeyValueBlockIterator.java         |  16 +-
 .../container/keyvalue/KeyValueContainer.java   |   4 +-
 .../container/keyvalue/KeyValueHandler.java     | 124 ++++-----
 .../container/keyvalue/helpers/BlockUtils.java  | 199 +++++++++++++++
 .../container/keyvalue/helpers/KeyUtils.java    | 199 ---------------
 .../keyvalue/helpers/KeyValueContainerUtil.java |  12 +-
 .../keyvalue/helpers/SmallFileUtils.java        |   2 +-
 .../keyvalue/impl/BlockManagerImpl.java         | 229 +++++++++++++++++
 .../container/keyvalue/impl/KeyManagerImpl.java | 227 -----------------
 .../container/keyvalue/impl/package-info.java   |   5 +-
 .../keyvalue/interfaces/BlockManager.java       |  84 ++++++
 .../keyvalue/interfaces/KeyManager.java         |  84 ------
 .../keyvalue/interfaces/package-info.java       |  21 ++
 .../background/BlockDeletingService.java        |  10 +-
 .../keyvalue/TestBlockManagerImpl.java          | 211 +++++++++++++++
 .../keyvalue/TestChunkManagerImpl.java          |   2 +-
 .../container/keyvalue/TestKeyManagerImpl.java  | 191 --------------
 .../keyvalue/TestKeyValueBlockIterator.java     |  30 +--
 .../keyvalue/TestKeyValueContainer.java         |  26 +-
 .../container/keyvalue/TestKeyValueHandler.java |  38 +--
 .../ozone/client/io/ChunkGroupInputStream.java  |   6 +-
 .../TestStorageContainerManagerHelper.java      |   8 +-
 .../ozone/client/rest/TestOzoneRestClient.java  |   8 +-
 .../ozone/client/rpc/TestOzoneRpcClient.java    |   8 +-
 .../ozone/container/ContainerTestHelper.java    |  84 +++---
 .../container/TestContainerReplication.java     |  24 +-
 .../common/TestBlockDeletingService.java        |  12 +-
 .../container/common/helpers/TestBlockData.java | 127 +++++++++
 .../container/common/helpers/TestKeyData.java   | 119 ---------
 .../common/impl/TestCloseContainerHandler.java  |  51 ++--
 .../common/impl/TestContainerPersistence.java   | 154 +++++------
 .../commandhandler/TestBlockDeletion.java       |   9 +-
 .../container/ozoneimpl/TestOzoneContainer.java | 100 ++++----
 .../server/TestContainerStateMachine.java       |   2 +-
 .../hadoop/ozone/om/TestOzoneManager.java       |   4 +-
 .../ozone/scm/TestContainerSmallFile.java       |   4 +-
 .../TestGetCommittedBlockLengthAndPutKey.java   |  12 +-
 .../hadoop/ozone/web/client/TestKeys.java       |  44 ++--
 .../hadoop/ozone/om/BucketManagerImpl.java      |   2 +-
 .../ozone/om/ScmBlockLocationTestIngClient.java |   2 +-
 .../genesis/BenchMarkDatanodeDispatcher.java    |  42 +--
 50 files changed, 1680 insertions(+), 1599 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/096a7160/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/ChunkOutputStream.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/ChunkOutputStream.java b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/ChunkOutputStream.java
index 8d311d0..10b3bb5 100644
--- a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/ChunkOutputStream.java
+++ b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/ChunkOutputStream.java
@@ -23,7 +23,7 @@ import org.apache.commons.codec.digest.DigestUtils;
 import org.apache.hadoop.hdds.scm.XceiverClientManager;
 import org.apache.hadoop.hdds.scm.XceiverClientSpi;
 import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.ChunkInfo;
-import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.KeyData;
+import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.BlockData;
 import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.KeyValue;
 import org.apache.hadoop.hdds.client.BlockID;
 
@@ -32,7 +32,8 @@ import java.io.OutputStream;
 import java.nio.ByteBuffer;
 import java.util.UUID;
 
-import static org.apache.hadoop.hdds.scm.storage.ContainerProtocolCalls.putKey;
+import static org.apache.hadoop.hdds.scm.storage.ContainerProtocolCalls
+    .putBlock;
 import static org.apache.hadoop.hdds.scm.storage.ContainerProtocolCalls
     .writeChunk;
 
@@ -57,7 +58,7 @@ public class ChunkOutputStream extends OutputStream {
   private final BlockID blockID;
   private final String key;
   private final String traceID;
-  private final KeyData.Builder containerKeyData;
+  private final BlockData.Builder containerBlockData;
   private XceiverClientManager xceiverClientManager;
   private XceiverClientSpi xceiverClient;
   private ByteBuffer buffer;
@@ -84,7 +85,7 @@ public class ChunkOutputStream extends OutputStream {
     this.chunkSize = chunkSize;
     KeyValue keyValue = KeyValue.newBuilder()
         .setKey("TYPE").setValue("KEY").build();
-    this.containerKeyData = KeyData.newBuilder()
+    this.containerBlockData = BlockData.newBuilder()
         .setBlockID(blockID.getDatanodeBlockIDProtobuf())
         .addMetadata(keyValue);
     this.xceiverClientManager = xceiverClientManager;
@@ -154,7 +155,7 @@ public class ChunkOutputStream extends OutputStream {
         writeChunkToContainer();
       }
       try {
-        putKey(xceiverClient, containerKeyData.build(), traceID);
+        putBlock(xceiverClient, containerBlockData.build(), traceID);
       } catch (IOException e) {
         throw new IOException(
             "Unexpected Storage Container Exception: " + e.toString(), e);
@@ -230,6 +231,6 @@ public class ChunkOutputStream extends OutputStream {
       throw new IOException(
           "Unexpected Storage Container Exception: " + e.toString(), e);
     }
-    containerKeyData.addChunks(chunk);
+    containerBlockData.addChunks(chunk);
   }
 }

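For orientation, the stream this patch renames has a simple shape: bytes accumulate in a buffer, every full chunkSize slice is written out as one chunk, and close() flushes the remainder before committing the accumulated chunk list with putBlock(). A self-contained sketch under those assumptions (hypothetical class, no Ozone types):

import java.io.ByteArrayOutputStream;
import java.util.ArrayList;
import java.util.List;

// Sketch only: chunks are kept in memory here, whereas the real
// stream sends each one to the datanode with writeChunk().
class BufferedBlockWriterSketch {
  private final int chunkSize;
  private ByteArrayOutputStream buffer = new ByteArrayOutputStream();
  private final List<byte[]> chunks = new ArrayList<>();

  BufferedBlockWriterSketch(int chunkSize) {
    this.chunkSize = chunkSize;
  }

  void write(byte[] data) {
    buffer.write(data, 0, data.length);
    while (buffer.size() >= chunkSize) {
      flushChunk(chunkSize);       // emit every full chunk
    }
  }

  void close() {
    if (buffer.size() > 0) {
      flushChunk(buffer.size());   // last, possibly partial, chunk
    }
    // real code: putBlock(xceiverClient, containerBlockData.build(),
    // traceID) commits the chunk list as the block's metadata.
  }

  private void flushChunk(int n) {
    byte[] all = buffer.toByteArray();
    byte[] chunk = new byte[n];
    System.arraycopy(all, 0, chunk, 0, n);
    chunks.add(chunk);
    buffer = new ByteArrayOutputStream();
    buffer.write(all, n, all.length - n);  // keep the tail buffered
  }
}
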
http://git-wip-us.apache.org/repos/asf/hadoop/blob/096a7160/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/HddsUtils.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/HddsUtils.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/HddsUtils.java
index 33bf90c..db9d374 100644
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/HddsUtils.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/HddsUtils.java
@@ -326,8 +326,8 @@ public final class HddsUtils {
     switch (proto.getCmdType()) {
     case ReadContainer:
     case ReadChunk:
-    case ListKey:
-    case GetKey:
+    case ListBlock:
+    case GetBlock:
     case GetSmallFile:
     case ListContainer:
     case ListChunk:
@@ -340,8 +340,8 @@ public final class HddsUtils {
     case CreateContainer:
     case DeleteChunk:
     case DeleteContainer:
-    case DeleteKey:
-    case PutKey:
+    case DeleteBlock:
+    case PutBlock:
     case PutSmallFile:
     default:
       return false;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/096a7160/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/storage/ContainerProtocolCalls.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/storage/ContainerProtocolCalls.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/storage/ContainerProtocolCalls.java
index 1d6a89d..6b7a328 100644
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/storage/ContainerProtocolCalls.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/storage/ContainerProtocolCalls.java
@@ -35,16 +35,16 @@ import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos
 import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos
     .DatanodeBlockID;
 import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos
-    .GetKeyRequestProto;
+    .GetBlockRequestProto;
 import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos
-    .GetKeyResponseProto;
+    .GetBlockResponseProto;
 import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos
     .GetSmallFileRequestProto;
 import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos
     .GetSmallFileResponseProto;
-import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.KeyData;
+import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.BlockData;
 import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos
-    .PutKeyRequestProto;
+    .PutBlockRequestProto;
 import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos
     .PutSmallFileRequestProto;
 import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos
@@ -76,33 +76,33 @@ public final class ContainerProtocolCalls  {
   }
 
   /**
-   * Calls the container protocol to get a container key.
+   * Calls the container protocol to get a container block.
    *
    * @param xceiverClient client to perform call
    * @param datanodeBlockID blockID to identify container
    * @param traceID container protocol call args
-   * @return container protocol get key response
+   * @return container protocol get block response
    * @throws IOException if there is an I/O error while performing the call
    */
-  public static GetKeyResponseProto getKey(XceiverClientSpi xceiverClient,
+  public static GetBlockResponseProto getBlock(XceiverClientSpi xceiverClient,
       DatanodeBlockID datanodeBlockID, String traceID) throws IOException {
-    GetKeyRequestProto.Builder readKeyRequest = GetKeyRequestProto
+    GetBlockRequestProto.Builder readBlockRequest = GetBlockRequestProto
         .newBuilder()
         .setBlockID(datanodeBlockID);
     String id = xceiverClient.getPipeline().getLeader().getUuidString();
 
     ContainerCommandRequestProto request = ContainerCommandRequestProto
         .newBuilder()
-        .setCmdType(Type.GetKey)
+        .setCmdType(Type.GetBlock)
         .setContainerID(datanodeBlockID.getContainerID())
         .setTraceID(traceID)
         .setDatanodeUuid(id)
-        .setGetKey(readKeyRequest)
+        .setGetBlock(readBlockRequest)
         .build();
     ContainerCommandResponseProto response = xceiverClient.sendCommand(request);
     validateContainerResponse(response);
 
-    return response.getGetKey();
+    return response.getGetBlock();
   }
 
   /**
@@ -136,26 +136,26 @@ public final class ContainerProtocolCalls  {
   }
 
   /**
-   * Calls the container protocol to put a container key.
+   * Calls the container protocol to put a container block.
    *
    * @param xceiverClient client to perform call
-   * @param containerKeyData key data to identify container
+   * @param containerBlockData block data to identify container
    * @param traceID container protocol call args
    * @throws IOException if there is an I/O error while performing the call
    */
-  public static void putKey(XceiverClientSpi xceiverClient,
-      KeyData containerKeyData, String traceID) throws IOException {
-    PutKeyRequestProto.Builder createKeyRequest = PutKeyRequestProto
+  public static void putBlock(XceiverClientSpi xceiverClient,
+      BlockData containerBlockData, String traceID) throws IOException {
+    PutBlockRequestProto.Builder createBlockRequest = PutBlockRequestProto
         .newBuilder()
-        .setKeyData(containerKeyData);
+        .setBlockData(containerBlockData);
     String id = xceiverClient.getPipeline().getLeader().getUuidString();
     ContainerCommandRequestProto request = ContainerCommandRequestProto
         .newBuilder()
-        .setCmdType(Type.PutKey)
-        .setContainerID(containerKeyData.getBlockID().getContainerID())
+        .setCmdType(Type.PutBlock)
+        .setContainerID(containerBlockData.getBlockID().getContainerID())
         .setTraceID(traceID)
         .setDatanodeUuid(id)
-        .setPutKey(createKeyRequest)
+        .setPutBlock(createBlockRequest)
         .build();
     ContainerCommandResponseProto response = xceiverClient.sendCommand(request);
     validateContainerResponse(response);
@@ -224,9 +224,9 @@ public final class ContainerProtocolCalls  {
 
   /**
    * Allows writing a small file using single RPC. This takes the container
-   * name, key name and data to write sends all that data to the container using
-   * a single RPC. This API is designed to be used for files which are smaller
-   * than 1 MB.
+   * name, block name and data to write sends all that data to the container
+   * using a single RPC. This API is designed to be used for files which are
+   * smaller than 1 MB.
    *
    * @param client - client that communicates with the container.
    * @param blockID - ID of the block
@@ -238,12 +238,12 @@ public final class ContainerProtocolCalls  {
       BlockID blockID, byte[] data, String traceID)
       throws IOException {
 
-    KeyData containerKeyData =
-        KeyData.newBuilder().setBlockID(blockID.getDatanodeBlockIDProtobuf())
+    BlockData containerBlockData =
+        BlockData.newBuilder().setBlockID(blockID.getDatanodeBlockIDProtobuf())
             .build();
-    PutKeyRequestProto.Builder createKeyRequest =
-        PutKeyRequestProto.newBuilder()
-            .setKeyData(containerKeyData);
+    PutBlockRequestProto.Builder createBlockRequest =
+        PutBlockRequestProto.newBuilder()
+            .setBlockData(containerBlockData);
 
     KeyValue keyValue =
         KeyValue.newBuilder().setKey("OverWriteRequested").setValue("true")
@@ -255,7 +255,7 @@ public final class ContainerProtocolCalls  {
 
     PutSmallFileRequestProto putSmallFileRequest =
         PutSmallFileRequestProto.newBuilder().setChunkInfo(chunk)
-            .setKey(createKeyRequest).setData(ByteString.copyFrom(data))
+            .setBlock(createBlockRequest).setData(ByteString.copyFrom(data))
             .build();
 
     String id = client.getPipeline().getLeader().getUuidString();
@@ -387,12 +387,12 @@ public final class ContainerProtocolCalls  {
    */
   public static GetSmallFileResponseProto readSmallFile(XceiverClientSpi client,
       BlockID blockID, String traceID) throws IOException {
-    GetKeyRequestProto.Builder getKey = GetKeyRequestProto
+    GetBlockRequestProto.Builder getBlock = GetBlockRequestProto
         .newBuilder()
         .setBlockID(blockID.getDatanodeBlockIDProtobuf());
     ContainerProtos.GetSmallFileRequestProto getSmallFileRequest =
         GetSmallFileRequestProto
-            .newBuilder().setKey(getKey)
+            .newBuilder().setBlock(getBlock)
             .build();
     String id = client.getPipeline().getLeader().getUuidString();
 

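A hedged usage sketch of the renamed read path, assuming the classes above are on the classpath and an XceiverClientSpi that is already connected; getBlockData() and getSize() follow from the BlockData proto fields shown later in this patch:

import java.io.IOException;

import org.apache.hadoop.hdds.client.BlockID;
import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos
    .GetBlockResponseProto;
import org.apache.hadoop.hdds.scm.XceiverClientSpi;
import org.apache.hadoop.hdds.scm.storage.ContainerProtocolCalls;

class GetBlockExample {
  // Reads a block's metadata and returns its total length; the call
  // shape mirrors the old getKey() call sites, only the names change.
  static long blockLength(XceiverClientSpi client, BlockID blockID,
      String traceID) throws IOException {
    GetBlockResponseProto response = ContainerProtocolCalls.getBlock(
        client, blockID.getDatanodeBlockIDProtobuf(), traceID);
    return response.getBlockData().getSize();
  }
}
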
http://git-wip-us.apache.org/repos/asf/hadoop/blob/096a7160/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/container/common/helpers/BlockData.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/container/common/helpers/BlockData.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/container/common/helpers/BlockData.java
new file mode 100644
index 0000000..0c1d427
--- /dev/null
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/container/common/helpers/BlockData.java
@@ -0,0 +1,255 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ *  with the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ *  Unless required by applicable law or agreed to in writing, software
+ *  distributed under the License is distributed on an "AS IS" BASIS,
+ *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ *  See the License for the specific language governing permissions and
+ *  limitations under the License.
+ */
+package org.apache.hadoop.ozone.container.common.helpers;
+
+import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos;
+import org.apache.hadoop.hdds.client.BlockID;
+import com.google.common.base.Preconditions;
+
+import java.io.IOException;
+import java.util.Collections;
+import java.util.List;
+import java.util.Map;
+import java.util.TreeMap;
+import java.util.ArrayList;
+
+/**
+ * Helper class to convert Protobuf to Java classes.
+ */
+public class BlockData {
+  private final BlockID blockID;
+  private final Map<String, String> metadata;
+
+  /**
+   * Represent a list of chunks.
+   * In order to reduce memory usage, chunkList is declared as an
+   * {@link Object}.
+   * When #elements == 0, chunkList is null.
+   * When #elements == 1, chunkList refers to the only element.
+   * When #elements > 1, chunkList refers to the list.
+   *
+   * Please note: when we are working with blocks, we don't care what
+   * they point to, so we neither read nor validate the chunk info. That
+   * is the responsibility of a higher layer like Ozone; we just read
+   * and write data from the network.
+   */
+  private Object chunkList;
+
+  /**
+   * total size of the block.
+   */
+  private long size;
+
+  /**
+   * Constructs a BlockData Object.
+   *
+   * @param blockID
+   */
+  public BlockData(BlockID blockID) {
+    this.blockID = blockID;
+    this.metadata = new TreeMap<>();
+    this.size = 0;
+  }
+
+  /**
+   * Returns a blockData object from the protobuf data.
+   *
+   * @param data - Protobuf data.
+   * @return - BlockData
+   * @throws IOException
+   */
+  public static BlockData getFromProtoBuf(ContainerProtos.BlockData data) throws
+      IOException {
+    BlockData blockData = new BlockData(
+        BlockID.getFromProtobuf(data.getBlockID()));
+    for (int x = 0; x < data.getMetadataCount(); x++) {
+      blockData.addMetadata(data.getMetadata(x).getKey(),
+          data.getMetadata(x).getValue());
+    }
+    blockData.setChunks(data.getChunksList());
+    if (data.hasSize()) {
+      Preconditions.checkArgument(data.getSize() == blockData.getSize());
+    }
+    return blockData;
+  }
+
+  /**
+   * Returns a Protobuf message from BlockData.
+   * @return Proto Buf Message.
+   */
+  public ContainerProtos.BlockData getProtoBufMessage() {
+    ContainerProtos.BlockData.Builder builder =
+        ContainerProtos.BlockData.newBuilder();
+    builder.setBlockID(this.blockID.getDatanodeBlockIDProtobuf());
+    for (Map.Entry<String, String> entry : metadata.entrySet()) {
+      ContainerProtos.KeyValue.Builder keyValBuilder =
+          ContainerProtos.KeyValue.newBuilder();
+      builder.addMetadata(keyValBuilder.setKey(entry.getKey())
+          .setValue(entry.getValue()).build());
+    }
+    builder.addAllChunks(getChunks());
+    builder.setSize(size);
+    return builder.build();
+  }
+
+  /**
+   * Adds metadata.
+   *
+   * @param key   - Key
+   * @param value - Value
+   * @throws IOException
+   */
+  public synchronized void addMetadata(String key, String value) throws
+      IOException {
+    if (this.metadata.containsKey(key)) {
+      throw new IOException("This key already exists. Key " + key);
+    }
+    metadata.put(key, value);
+  }
+
+  public synchronized Map<String, String> getMetadata() {
+    return Collections.unmodifiableMap(this.metadata);
+  }
+
+  /**
+   * Returns value of a key.
+   */
+  public synchronized String getValue(String key) {
+    return metadata.get(key);
+  }
+
+  /**
+   * Deletes a metadata entry from the map.
+   *
+   * @param key - Key
+   */
+  public synchronized void deleteKey(String key) {
+    metadata.remove(key);
+  }
+
+  @SuppressWarnings("unchecked")
+  private List<ContainerProtos.ChunkInfo> castChunkList() {
+    return (List<ContainerProtos.ChunkInfo>)chunkList;
+  }
+
+  /**
+   * Returns chunks list.
+   *
+   * @return list of chunkinfo.
+   */
+  public List<ContainerProtos.ChunkInfo> getChunks() {
+    return chunkList == null? Collections.emptyList()
+        : chunkList instanceof ContainerProtos.ChunkInfo?
+            Collections.singletonList((ContainerProtos.ChunkInfo)chunkList)
+        : Collections.unmodifiableList(castChunkList());
+  }
+
+  /**
+   * Adds chunkInfo to the list.
+   */
+  public void addChunk(ContainerProtos.ChunkInfo chunkInfo) {
+    if (chunkList == null) {
+      chunkList = chunkInfo;
+    } else {
+      final List<ContainerProtos.ChunkInfo> list;
+      if (chunkList instanceof ContainerProtos.ChunkInfo) {
+        list = new ArrayList<>(2);
+        list.add((ContainerProtos.ChunkInfo)chunkList);
+        chunkList = list;
+      } else {
+        list = castChunkList();
+      }
+      list.add(chunkInfo);
+    }
+    size += chunkInfo.getLen();
+  }
+
+  /**
+   * removes the chunk.
+   */
+  public boolean removeChunk(ContainerProtos.ChunkInfo chunkInfo) {
+    final boolean removed;
+    if (chunkList instanceof List) {
+      final List<ContainerProtos.ChunkInfo> list = castChunkList();
+      removed = list.remove(chunkInfo);
+      if (list.size() == 1) {
+        chunkList = list.get(0);
+      }
+    } else if (chunkInfo.equals(chunkList)) {
+      chunkList = null;
+      removed = true;
+    } else {
+      removed = false;
+    }
+
+    if (removed) {
+      size -= chunkInfo.getLen();
+    }
+    return removed;
+  }
+
+  /**
+   * Returns container ID.
+   *
+   * @return long.
+   */
+  public long getContainerID() {
+    return blockID.getContainerID();
+  }
+
+  /**
+   * Returns LocalID.
+   * @return long.
+   */
+  public long getLocalID() {
+    return blockID.getLocalID();
+  }
+
+  /**
+   * Return Block ID.
+   * @return BlockID.
+   */
+  public BlockID getBlockID() {
+    return blockID;
+  }
+
+  /**
+   * Sets Chunk list.
+   *
+   * @param chunks - List of chunks.
+   */
+  public void setChunks(List<ContainerProtos.ChunkInfo> chunks) {
+    if (chunks == null) {
+      chunkList = null;
+      size = 0L;
+    } else {
+      final int n = chunks.size();
+      chunkList = n == 0? null: n == 1? chunks.get(0): chunks;
+      size = chunks.parallelStream().mapToLong(
+          ContainerProtos.ChunkInfo::getLen).sum();
+    }
+  }
+
+  /**
+   * Get the total size of chunks allocated for the block.
+   * @return total size of the block.
+   */
+  public long getSize() {
+    return size;
+  }
+}

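The 0/1/N representation documented in the chunkList javadoc above is a general trick: most blocks have a single chunk, so storing the bare element instead of an ArrayList saves an object header and a backing array per block. A standalone sketch (hypothetical class; assumes the element type is never itself a List):

import java.util.ArrayList;
import java.util.Collections;
import java.util.List;

// The single Object field is interpreted by element count:
// null (empty), a bare T (one element), or a List<T> (many).
class CompactList<T> {
  private Object items;

  @SuppressWarnings("unchecked")
  List<T> get() {
    if (items == null) {
      return Collections.emptyList();
    }
    if (items instanceof List) {
      return Collections.unmodifiableList((List<T>) items);
    }
    return Collections.singletonList((T) items);
  }

  @SuppressWarnings("unchecked")
  void add(T item) {
    if (items == null) {
      items = item;                       // first element: no list yet
    } else if (items instanceof List) {
      ((List<T>) items).add(item);
    } else {
      List<T> list = new ArrayList<>(2);  // promote to a real list
      list.add((T) items);
      list.add(item);
      items = list;
    }
  }
}
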
http://git-wip-us.apache.org/repos/asf/hadoop/blob/096a7160/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/container/common/helpers/KeyData.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/container/common/helpers/KeyData.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/container/common/helpers/KeyData.java
deleted file mode 100644
index ee27021..0000000
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/container/common/helpers/KeyData.java
+++ /dev/null
@@ -1,253 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- *  with the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- *  Unless required by applicable law or agreed to in writing, software
- *  distributed under the License is distributed on an "AS IS" BASIS,
- *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- *  See the License for the specific language governing permissions and
- *  limitations under the License.
- */
-package org.apache.hadoop.ozone.container.common.helpers;
-
-import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos;
-import org.apache.hadoop.hdds.client.BlockID;
-import com.google.common.base.Preconditions;
-
-import java.io.IOException;
-import java.util.Collections;
-import java.util.List;
-import java.util.Map;
-import java.util.TreeMap;
-import java.util.ArrayList;
-
-/**
- * Helper class to convert Protobuf to Java classes.
- */
-public class KeyData {
-  private final BlockID blockID;
-  private final Map<String, String> metadata;
-
-  /**
-   * Represent a list of chunks.
-   * In order to reduce memory usage, chunkList is declared as an
-   * {@link Object}.
-   * When #elements == 0, chunkList is null.
-   * When #elements == 1, chunkList refers to the only element.
-   * When #elements > 1, chunkList refers to the list.
-   *
-   * Please note : when we are working with keys, we don't care what they point
-   * to. So we We don't read chunkinfo nor validate them. It is responsibility
-   * of higher layer like ozone. We just read and write data from network.
-   */
-  private Object chunkList;
-
-  /**
-   * total size of the key.
-   */
-  private long size;
-
-  /**
-   * Constructs a KeyData Object.
-   *
-   * @param blockID
-   */
-  public KeyData(BlockID blockID) {
-    this.blockID = blockID;
-    this.metadata = new TreeMap<>();
-    this.size = 0;
-  }
-
-  /**
-   * Returns a keyData object from the protobuf data.
-   *
-   * @param data - Protobuf data.
-   * @return - KeyData
-   * @throws IOException
-   */
-  public static KeyData getFromProtoBuf(ContainerProtos.KeyData data) throws
-      IOException {
-    KeyData keyData = new KeyData(BlockID.getFromProtobuf(data.getBlockID()));
-    for (int x = 0; x < data.getMetadataCount(); x++) {
-      keyData.addMetadata(data.getMetadata(x).getKey(),
-          data.getMetadata(x).getValue());
-    }
-    keyData.setChunks(data.getChunksList());
-    if (data.hasSize()) {
-      Preconditions.checkArgument(data.getSize() == keyData.getSize());
-    }
-    return keyData;
-  }
-
-  /**
-   * Returns a Protobuf message from KeyData.
-   * @return Proto Buf Message.
-   */
-  public ContainerProtos.KeyData getProtoBufMessage() {
-    ContainerProtos.KeyData.Builder builder =
-        ContainerProtos.KeyData.newBuilder();
-    builder.setBlockID(this.blockID.getDatanodeBlockIDProtobuf());
-    for (Map.Entry<String, String> entry : metadata.entrySet()) {
-      ContainerProtos.KeyValue.Builder keyValBuilder =
-          ContainerProtos.KeyValue.newBuilder();
-      builder.addMetadata(keyValBuilder.setKey(entry.getKey())
-          .setValue(entry.getValue()).build());
-    }
-    builder.addAllChunks(getChunks());
-    builder.setSize(size);
-    return builder.build();
-  }
-
-  /**
-   * Adds metadata.
-   *
-   * @param key   - Key
-   * @param value - Value
-   * @throws IOException
-   */
-  public synchronized void addMetadata(String key, String value) throws
-      IOException {
-    if (this.metadata.containsKey(key)) {
-      throw new IOException("This key already exists. Key " + key);
-    }
-    metadata.put(key, value);
-  }
-
-  public synchronized Map<String, String> getMetadata() {
-    return Collections.unmodifiableMap(this.metadata);
-  }
-
-  /**
-   * Returns value of a key.
-   */
-  public synchronized String getValue(String key) {
-    return metadata.get(key);
-  }
-
-  /**
-   * Deletes a metadata entry from the map.
-   *
-   * @param key - Key
-   */
-  public synchronized void deleteKey(String key) {
-    metadata.remove(key);
-  }
-
-  @SuppressWarnings("unchecked")
-  private List<ContainerProtos.ChunkInfo> castChunkList() {
-    return (List<ContainerProtos.ChunkInfo>)chunkList;
-  }
-
-  /**
-   * Returns chunks list.
-   *
-   * @return list of chunkinfo.
-   */
-  public List<ContainerProtos.ChunkInfo> getChunks() {
-    return chunkList == null? Collections.emptyList()
-        : chunkList instanceof ContainerProtos.ChunkInfo?
-            Collections.singletonList((ContainerProtos.ChunkInfo)chunkList)
-        : Collections.unmodifiableList(castChunkList());
-  }
-
-  /**
-   * Adds chinkInfo to the list.
-   */
-  public void addChunk(ContainerProtos.ChunkInfo chunkInfo) {
-    if (chunkList == null) {
-      chunkList = chunkInfo;
-    } else {
-      final List<ContainerProtos.ChunkInfo> list;
-      if (chunkList instanceof ContainerProtos.ChunkInfo) {
-        list = new ArrayList<>(2);
-        list.add((ContainerProtos.ChunkInfo)chunkList);
-        chunkList = list;
-      } else {
-        list = castChunkList();
-      }
-      list.add(chunkInfo);
-    }
-    size += chunkInfo.getLen();
-  }
-
-  /**
-   * removes the chunk.
-   */
-  public boolean removeChunk(ContainerProtos.ChunkInfo chunkInfo) {
-    final boolean removed;
-    if (chunkList instanceof List) {
-      final List<ContainerProtos.ChunkInfo> list = castChunkList();
-      removed = list.remove(chunkInfo);
-      if (list.size() == 1) {
-        chunkList = list.get(0);
-      }
-    } else if (chunkInfo.equals(chunkList)) {
-      chunkList = null;
-      removed = true;
-    } else {
-      removed = false;
-    }
-
-    if (removed) {
-      size -= chunkInfo.getLen();
-    }
-    return removed;
-  }
-
-  /**
-   * Returns container ID.
-   *
-   * @return long.
-   */
-  public long getContainerID() {
-    return blockID.getContainerID();
-  }
-
-  /**
-   * Returns LocalID.
-   * @return long.
-   */
-  public long getLocalID() {
-    return blockID.getLocalID();
-  }
-
-  /**
-   * Return Block ID.
-   * @return BlockID.
-   */
-  public BlockID getBlockID() {
-    return blockID;
-  }
-
-  /**
-   * Sets Chunk list.
-   *
-   * @param chunks - List of chunks.
-   */
-  public void setChunks(List<ContainerProtos.ChunkInfo> chunks) {
-    if (chunks == null) {
-      chunkList = null;
-      size = 0L;
-    } else {
-      final int n = chunks.size();
-      chunkList = n == 0? null: n == 1? chunks.get(0): chunks;
-      size = chunks.parallelStream().mapToLong(
-          ContainerProtos.ChunkInfo::getLen).sum();
-    }
-  }
-
-  /**
-   * Get the total size of chunks allocated for the key.
-   * @return total size of the key.
-   */
-  public long getSize() {
-    return size;
-  }
-}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/096a7160/hadoop-hdds/common/src/main/proto/DatanodeContainerProtocol.proto
----------------------------------------------------------------------
diff --git a/hadoop-hdds/common/src/main/proto/DatanodeContainerProtocol.proto b/hadoop-hdds/common/src/main/proto/DatanodeContainerProtocol.proto
index ba0d2d4..7be8a62 100644
--- a/hadoop-hdds/common/src/main/proto/DatanodeContainerProtocol.proto
+++ b/hadoop-hdds/common/src/main/proto/DatanodeContainerProtocol.proto
@@ -49,13 +49,13 @@ package hadoop.hdds.datanode;
  *  5. ListContainer - Returns the list of containers on this
  *     datanode. This will be used by tests and tools.
  *
- *  6. PutKey - Given a valid container, creates a key.
+ *  6. PutBlock - Given a valid container, creates a block.
  *
- *  7. GetKey - Allows user to read the metadata of a Key.
+ *  7. GetBlock - Allows user to read the metadata of a block.
  *
- *  8. DeleteKey - Deletes a given key.
+ *  8. DeleteBlock - Deletes a given block.
  *
- *  9. ListKey - Returns a list of keys that are present inside
+ *  9. ListBlock - Returns a list of blocks that are present inside
  *      a given container.
  *
  *  10. ReadChunk - Allows us to read a chunk.
@@ -64,13 +64,13 @@ package hadoop.hdds.datanode;
  *
  *  12. WriteChunk - Allows us to write a chunk
  *
- *  13. ListChunk - Given a Container/Key returns the list of Chunks.
+ *  13. ListChunk - Given a Container/Block returns the list of Chunks.
  *
  *  14. CompactChunk - Re-writes a chunk based on Offsets.
  *
- *  15. PutSmallFile - A single RPC that combines both putKey and WriteChunk.
+ *  15. PutSmallFile - A single RPC that combines both putBlock and WriteChunk.
  *
- *  16. GetSmallFile - A single RPC that combines both getKey and ReadChunk.
+ *  16. GetSmallFile - A single RPC that combines both getBlock and ReadChunk.
  *
  *  17. CloseContainer - Closes an open container and makes it immutable.
  *
@@ -84,10 +84,10 @@ enum Type {
   DeleteContainer = 4;
   ListContainer = 5;
 
-  PutKey = 6;
-  GetKey = 7;
-  DeleteKey = 8;
-  ListKey = 9;
+  PutBlock = 6;
+  GetBlock = 7;
+  DeleteBlock = 8;
+  ListBlock = 9;
 
   ReadChunk = 10;
   DeleteChunk = 11;
@@ -95,7 +95,7 @@ enum Type {
   ListChunk = 13;
   CompactChunk = 14;
 
-  /** Combines Key and Chunk Operation into Single RPC. */
+  /** Combines Block and Chunk Operation into Single RPC. */
   PutSmallFile = 15;
   GetSmallFile = 16;
   CloseContainer = 17;
@@ -115,7 +115,7 @@ enum Result {
   CONTAINER_NOT_FOUND = 9;
   IO_EXCEPTION = 10;
   UNABLE_TO_READ_METADATA_DB = 11;
-  NO_SUCH_KEY = 12;
+  NO_SUCH_BLOCK = 12;
   OVERWRITE_FLAG_REQUIRED = 13;
   UNABLE_TO_FIND_DATA_DIR = 14;
   INVALID_WRITE_SIZE = 15;
@@ -185,10 +185,10 @@ message ContainerCommandRequestProto {
   optional   ListContainerRequestProto listContainer = 9;
   optional   CloseContainerRequestProto closeContainer = 10;
 
-  optional   PutKeyRequestProto putKey = 11;
-  optional   GetKeyRequestProto getKey = 12;
-  optional   DeleteKeyRequestProto deleteKey = 13;
-  optional   ListKeyRequestProto listKey = 14;
+  optional   PutBlockRequestProto putBlock = 11;
+  optional   GetBlockRequestProto getBlock = 12;
+  optional   DeleteBlockRequestProto deleteBlock = 13;
+  optional   ListBlockRequestProto listBlock = 14;
 
   optional   ReadChunkRequestProto readChunk = 15;
   optional   WriteChunkRequestProto writeChunk = 16;
@@ -215,10 +215,10 @@ message ContainerCommandResponseProto {
   optional   ListContainerResponseProto listContainer = 9;
   optional   CloseContainerResponseProto closeContainer = 10;
 
-  optional   PutKeyResponseProto putKey = 11;
-  optional   GetKeyResponseProto getKey = 12;
-  optional   DeleteKeyResponseProto deleteKey = 13;
-  optional   ListKeyResponseProto listKey = 14;
+  optional   PutBlockResponseProto putBlock = 11;
+  optional   GetBlockResponseProto getBlock = 12;
+  optional   DeleteBlockResponseProto deleteBlock = 13;
+  optional   ListBlockResponseProto listBlock = 14;
 
   optional   WriteChunkResponseProto writeChunk = 15;
   optional   ReadChunkResponseProto readChunk = 16;
@@ -294,7 +294,7 @@ message CloseContainerResponseProto {
   optional int64 containerID = 2;
 }
 
-message KeyData {
+message BlockData {
   required DatanodeBlockID blockID = 1;
   optional int64 flags = 2; // for future use.
   repeated KeyValue metadata = 3;
@@ -302,25 +302,25 @@ message KeyData {
   optional int64 size = 5;
 }
 
-// Key Messages.
-message  PutKeyRequestProto {
-  required KeyData keyData = 1;
+// Block Messages.
+message  PutBlockRequestProto {
+  required BlockData blockData = 1;
 }
 
-message  PutKeyResponseProto {
+message  PutBlockResponseProto {
   required GetCommittedBlockLengthResponseProto committedBlockLength = 1;
 }
 
-message  GetKeyRequestProto  {
+message  GetBlockRequestProto  {
   required DatanodeBlockID blockID = 1;
 }
 
-message  GetKeyResponseProto  {
-  required KeyData keyData = 1;
+message  GetBlockResponseProto  {
+  required BlockData blockData = 1;
 }
 
 
-message  DeleteKeyRequestProto {
+message  DeleteBlockRequestProto {
   required DatanodeBlockID blockID = 1;
 }
 
@@ -333,17 +333,17 @@ message  GetCommittedBlockLengthResponseProto {
   required int64 blockLength = 2;
 }
 
-message   DeleteKeyResponseProto {
+message   DeleteBlockResponseProto {
 }
 
-message  ListKeyRequestProto {
+message  ListBlockRequestProto {
   optional int64 startLocalID = 2;
   required uint32 count = 3;
 
 }
 
-message  ListKeyResponseProto {
-  repeated KeyData keyData = 1;
+message  ListBlockResponseProto {
+  repeated BlockData blockData = 1;
 }
 
 // Chunk Operations
@@ -401,11 +401,11 @@ message  ListChunkResponseProto {
   repeated ChunkInfo chunkData = 1;
 }
 
-/** For small file access combines write chunk and putKey into a single
+/** For small file access combines write chunk and putBlock into a single
 RPC */
 
 message PutSmallFileRequestProto {
-  required PutKeyRequestProto key = 1;
+  required PutBlockRequestProto block = 1;
   required ChunkInfo chunkInfo = 2;
   required bytes data = 3;
 }
@@ -416,7 +416,7 @@ message PutSmallFileResponseProto {
 }
 
 message GetSmallFileRequestProto {
-  required GetKeyRequestProto key = 1;
+  required GetBlockRequestProto block = 1;
 }
 
 message GetSmallFileResponseProto {

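A minimal sketch of driving the renamed protos from Java, assuming the standard protobuf-generated builders; DatanodeBlockID's fields and the command proto's other required fields are not shown in this hunk, so those names and values are illustrative only:

// Illustrative only: DatanodeBlockID field names and the command proto's
// remaining required fields are assumptions not visible in this hunk.
ContainerProtos.DatanodeBlockID blockID = ContainerProtos.DatanodeBlockID
    .newBuilder().setContainerID(1L).setLocalID(100L).build();
ContainerProtos.BlockData blockData = ContainerProtos.BlockData
    .newBuilder().setBlockID(blockID).build();
ContainerProtos.ContainerCommandRequestProto request =
    ContainerProtos.ContainerCommandRequestProto.newBuilder()
        .setCmdType(ContainerProtos.Type.PutBlock)   // was PutKey before this change
        .setPutBlock(ContainerProtos.PutBlockRequestProto.newBuilder()
            .setBlockData(blockData))
        .build();   // other required fields omitted for brevity
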
http://git-wip-us.apache.org/repos/asf/hadoop/blob/096a7160/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/OpenContainerBlockMap.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/OpenContainerBlockMap.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/OpenContainerBlockMap.java
index 1ef3d0d..b736eb5 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/OpenContainerBlockMap.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/OpenContainerBlockMap.java
@@ -22,7 +22,7 @@ import com.google.common.annotations.VisibleForTesting;
 import com.google.common.base.Preconditions;
 import org.apache.hadoop.hdds.client.BlockID;
 import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.ChunkInfo;
-import org.apache.hadoop.ozone.container.common.helpers.KeyData;
+import org.apache.hadoop.ozone.container.common.helpers.BlockData;
 
 import java.util.ArrayList;
 import java.util.Collections;
@@ -33,9 +33,9 @@ import java.util.concurrent.ConcurrentMap;
 import java.util.function.Function;
 
 /**
- * Map: containerId -> (localId -> {@link KeyData}).
+ * Map: containerId {@literal ->} (localId {@literal ->} {@link BlockData}).
  * The outer container map does not entail locking for a better performance.
- * The inner {@link KeyDataMap} is synchronized.
+ * The inner {@link BlockDataMap} is synchronized.
  *
  * This class maintains the list of open keys per container; when a closeContainer
  * command comes, it should autocommit all open keys of an open container before
@@ -43,16 +43,16 @@ import java.util.function.Function;
  */
 public class OpenContainerBlockMap {
   /**
-   * Map: localId -> KeyData.
+   * Map: localId {@literal ->} BlockData.
    *
    * In order to support {@link #getAll()}, the update operations are
    * synchronized.
    */
-  static class KeyDataMap {
-    private final ConcurrentMap<Long, KeyData> blocks =
+  static class BlockDataMap {
+    private final ConcurrentMap<Long, BlockData> blocks =
         new ConcurrentHashMap<>();
 
-    KeyData get(long localId) {
+    BlockData get(long localId) {
       return blocks.get(localId);
     }
 
@@ -61,12 +61,12 @@ public class OpenContainerBlockMap {
       return blocks.size();
     }
 
-    synchronized KeyData computeIfAbsent(
-        long localId, Function<Long, KeyData> f) {
+    synchronized BlockData computeIfAbsent(
+        long localId, Function<Long, BlockData> f) {
       return blocks.computeIfAbsent(localId, f);
     }
 
-    synchronized List<KeyData> getAll() {
+    synchronized List<BlockData> getAll() {
       return new ArrayList<>(blocks.values());
     }
   }
@@ -79,7 +79,7 @@ public class OpenContainerBlockMap {
    *
    * For now, we will track all open blocks of a container in the blockMap.
    */
-  private final ConcurrentMap<Long, KeyDataMap> containers =
+  private final ConcurrentMap<Long, BlockDataMap> containers =
       new ConcurrentHashMap<>();
 
   /**
@@ -94,9 +94,9 @@ public class OpenContainerBlockMap {
 
   public void addChunk(BlockID blockID, ChunkInfo info) {
     Preconditions.checkNotNull(info);
-    containers.computeIfAbsent(blockID.getContainerID(), id -> new KeyDataMap())
-        .computeIfAbsent(blockID.getLocalID(), id -> new KeyData(blockID))
-        .addChunk(info);
+    containers.computeIfAbsent(blockID.getContainerID(),
+        id -> new BlockDataMap()).computeIfAbsent(blockID.getLocalID(),
+          id -> new BlockData(blockID)).addChunk(info);
   }
 
   /**
@@ -113,21 +113,21 @@ public class OpenContainerBlockMap {
   }
 
   /**
-   * Returns the list of open to the openContainerBlockMap.
+   * Returns the list of open blocks in the openContainerBlockMap.
    * @param containerId container id
-   * @return List of open Keys(blocks)
+   * @return List of open blocks
    */
-  public List<KeyData> getOpenKeys(long containerId) {
+  public List<BlockData> getOpenBlocks(long containerId) {
     return Optional.ofNullable(containers.get(containerId))
-        .map(KeyDataMap::getAll)
+        .map(BlockDataMap::getAll)
         .orElseGet(Collections::emptyList);
   }
 
   /**
    * removes the block from the block map.
-   * @param blockID
+   * @param blockID - block ID
    */
-  public void removeFromKeyMap(BlockID blockID) {
+  public void removeFromBlockMap(BlockID blockID) {
     Preconditions.checkNotNull(blockID);
     containers.computeIfPresent(blockID.getContainerID(), (containerId, blocks)
         -> blocks.removeAndGetSize(blockID.getLocalID()) == 0? null: blocks);
@@ -136,16 +136,16 @@ public class OpenContainerBlockMap {
   /**
    * Returns true if the block exists in the map, false otherwise.
    *
-   * @param blockID
+   * @param blockID  - Block ID.
    * @return True, if it exists, false otherwise
    */
   public boolean checkIfBlockExists(BlockID blockID) {
-    KeyDataMap keyDataMap = containers.get(blockID.getContainerID());
+    BlockDataMap keyDataMap = containers.get(blockID.getContainerID());
     return keyDataMap != null && keyDataMap.get(blockID.getLocalID()) != null;
   }
 
   @VisibleForTesting
-  KeyDataMap getKeyDataMap(long containerId) {
+  BlockDataMap getBlockDataMap(long containerId) {
     return containers.get(containerId);
   }
 }

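A hedged usage sketch for the renamed map; the method names all match the hunk above, while the BlockID(containerId, localId) constructor and the chunkInfo value are assumptions made for illustration:

OpenContainerBlockMap openBlocks = new OpenContainerBlockMap();
BlockID blockID = new BlockID(1L, 100L);                 // assumed ctor: (containerId, localId)
openBlocks.addChunk(blockID, chunkInfo);                 // lazily creates the per-container BlockDataMap
List<BlockData> pending = openBlocks.getOpenBlocks(1L);  // snapshot of uncommitted blocks
if (openBlocks.checkIfBlockExists(blockID)) {
  openBlocks.removeFromBlockMap(blockID);                // drops the container entry once it is empty
}
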
http://git-wip-us.apache.org/repos/asf/hadoop/blob/096a7160/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/DeleteBlocksCommandHandler.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/DeleteBlocksCommandHandler.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/DeleteBlocksCommandHandler.java
index b0d4cbc..430b0ef 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/DeleteBlocksCommandHandler.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/DeleteBlocksCommandHandler.java
@@ -36,7 +36,7 @@ import org.apache.hadoop.ozone.container.common.helpers
     .DeletedContainerBlocksSummary;
 import org.apache.hadoop.ozone.container.common.interfaces.Container;
 import org.apache.hadoop.ozone.container.keyvalue.KeyValueContainerData;
-import org.apache.hadoop.ozone.container.keyvalue.helpers.KeyUtils;
+import org.apache.hadoop.ozone.container.keyvalue.helpers.BlockUtils;
 import org.apache.hadoop.ozone.container.common.impl.ContainerSet;
 import org.apache.hadoop.ozone.container.common.statemachine
     .EndpointStateMachine;
@@ -199,7 +199,7 @@ public class DeleteBlocksCommandHandler implements CommandHandler {
     }
 
     int newDeletionBlocks = 0;
-    MetadataStore containerDB = KeyUtils.getDB(containerData, conf);
+    MetadataStore containerDB = BlockUtils.getDB(containerData, conf);
     for (Long blk : delTX.getLocalIDList()) {
       BatchOperation batch = new BatchOperation();
       byte[] blkBytes = Longs.toByteArray(blk);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/096a7160/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/ContainerStateMachine.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/ContainerStateMachine.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/ContainerStateMachine.java
index b84db66..a7bef86 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/ContainerStateMachine.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/ContainerStateMachine.java
@@ -103,10 +103,10 @@ import java.util.stream.Collectors;
  * implementation. For example, synchronization between writeChunk and
  * createContainer in {@link ContainerStateMachine}.
  *
- * PutKey is synchronized with WriteChunk operations, PutKey for a block is
- * executed only after all the WriteChunk preceding the PutKey have finished.
+ * PutBlock is synchronized with WriteChunk operations, PutBlock for a block is
+ * executed only after all the WriteChunk preceding the PutBlock have finished.
  *
- * CloseContainer is synchronized with WriteChunk and PutKey operations,
+ * CloseContainer is synchronized with WriteChunk and PutBlock operations,
  * CloseContainer for a container is processed after all the preceding write
  * operations for the container have finished.
  * */
@@ -443,7 +443,7 @@ public class ContainerStateMachine extends BaseStateMachine {
 
   /**
    * This class maintains maps and provide utilities to enforce synchronization
-   * among createContainer, writeChunk, putKey and closeContainer.
+   * among createContainer, writeChunk, putBlock and closeContainer.
    */
   private class StateMachineHelper {
 
@@ -453,7 +453,7 @@ public class ContainerStateMachine extends BaseStateMachine {
     private final ConcurrentHashMap<Long, CommitChunkFutureMap>
         block2ChunkMap;
 
-    // Map for putKey futures
+    // Map for putBlock futures
     private final ConcurrentHashMap<Long, CompletableFuture<Message>>
         blockCommitMap;
 
@@ -505,11 +505,11 @@ public class ContainerStateMachine extends BaseStateMachine {
     // The following section handles applyTransaction transactions
     // on a container
 
-    private CompletableFuture<Message> handlePutKey(
+    private CompletableFuture<Message> handlePutBlock(
         ContainerCommandRequestProto requestProto) {
       List<CompletableFuture<Message>> futureList = new ArrayList<>();
       long localId =
-          requestProto.getPutKey().getKeyData().getBlockID().getLocalID();
+          requestProto.getPutBlock().getBlockData().getBlockID().getLocalID();
       // Need not wait for create container future here as it has already
       // finished.
       if (block2ChunkMap.get(localId) != null) {
@@ -518,18 +518,18 @@ public class ContainerStateMachine extends BaseStateMachine {
       CompletableFuture<Message> effectiveFuture =
           runCommandAfterFutures(futureList, requestProto);
 
-      CompletableFuture<Message> putKeyFuture =
+      CompletableFuture<Message> putBlockFuture =
           effectiveFuture.thenApply(message -> {
             blockCommitMap.remove(localId);
             return message;
           });
-      blockCommitMap.put(localId, putKeyFuture);
-      return putKeyFuture;
+      blockCommitMap.put(localId, putBlockFuture);
+      return putBlockFuture;
     }
 
     // Close Container should be executed only if all pending WriteType
     // container cmds get executed. Transactions which can return a future
-    // are WriteChunk and PutKey.
+    // are WriteChunk and PutBlock.
     private CompletableFuture<Message> handleCloseContainer(
         ContainerCommandRequestProto requestProto) {
       List<CompletableFuture<Message>> futureList = new ArrayList<>();
@@ -539,7 +539,7 @@ public class ContainerStateMachine extends BaseStateMachine {
       block2ChunkMap.values().forEach(b -> futureList.addAll(b.getAll()));
       futureList.addAll(blockCommitMap.values());
 
-      // There are pending write Chunk/PutKey type requests
+      // There are pending write Chunk/PutBlock type requests
       // Queue this closeContainer request behind all these requests
       CompletableFuture<Message> closeContainerFuture =
           runCommandAfterFutures(futureList, requestProto);
@@ -615,8 +615,8 @@ public class ContainerStateMachine extends BaseStateMachine {
         return handleChunkCommit(requestProto, index);
       case CloseContainer:
         return handleCloseContainer(requestProto);
-      case PutKey:
-        return handlePutKey(requestProto);
+      case PutBlock:
+        return handlePutBlock(requestProto);
       case CreateContainer:
         return handleCreateContainer(requestProto);
       default:

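The class javadoc above states the ordering contract (PutBlock after the preceding WriteChunks, CloseContainer after everything); runCommandAfterFutures in the hunk enforces it. A minimal sketch of the same idea with plain JDK futures, where executePutBlock is an illustrative stand-in for dispatching the request:

// Queue putBlock behind the pending WriteChunk futures for the same block.
CompletableFuture<Message> putBlockFuture =
    CompletableFuture.allOf(pendingWriteChunks.toArray(new CompletableFuture[0]))
        .thenApply(ignored -> executePutBlock(requestProto));  // illustrative name
blockCommitMap.put(localId, putBlockFuture);
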
http://git-wip-us.apache.org/repos/asf/hadoop/blob/096a7160/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueBlockIterator.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueBlockIterator.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueBlockIterator.java
index f800223..535af29 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueBlockIterator.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueBlockIterator.java
@@ -21,12 +21,12 @@ package org.apache.hadoop.ozone.container.keyvalue;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.hdds.conf.OzoneConfiguration;
 import org.apache.hadoop.ozone.OzoneConsts;
+import org.apache.hadoop.ozone.container.common.helpers.BlockData;
 import org.apache.hadoop.ozone.container.common.helpers.ContainerUtils;
-import org.apache.hadoop.ozone.container.common.helpers.KeyData;
 import org.apache.hadoop.ozone.container.common.impl.ContainerData;
 import org.apache.hadoop.ozone.container.common.impl.ContainerDataYaml;
 import org.apache.hadoop.ozone.container.common.interfaces.BlockIterator;
-import org.apache.hadoop.ozone.container.keyvalue.helpers.KeyUtils;
+import org.apache.hadoop.ozone.container.keyvalue.helpers.BlockUtils;
 import org.apache.hadoop.ozone.container.keyvalue.helpers.KeyValueContainerLocationUtil;
 import org.apache.hadoop.utils.MetaStoreIterator;
 import org.apache.hadoop.utils.MetadataKeyFilters;
@@ -48,7 +48,7 @@ import java.util.NoSuchElementException;
  * {@link MetadataKeyFilters#getNormalKeyFilter()}
  */
 @InterfaceAudience.Public
-public class KeyValueBlockIterator implements BlockIterator<KeyData> {
+public class KeyValueBlockIterator implements BlockIterator<BlockData> {
 
   private static final Logger LOG = LoggerFactory.getLogger(
       KeyValueBlockIterator.class);
@@ -57,7 +57,7 @@ public class KeyValueBlockIterator implements BlockIterator<KeyData> {
   private static KeyPrefixFilter defaultBlockFilter = MetadataKeyFilters
       .getNormalKeyFilter();
   private KeyPrefixFilter blockFilter;
-  private KeyData nextBlock;
+  private BlockData nextBlock;
   private long containerId;
 
   /**
@@ -91,7 +91,7 @@ public class KeyValueBlockIterator implements BlockIterator<KeyData> {
         containerData;
     keyValueContainerData.setDbFile(KeyValueContainerLocationUtil
         .getContainerDBFile(metdataPath, containerId));
-    MetadataStore metadataStore = KeyUtils.getDB(keyValueContainerData, new
+    MetadataStore metadataStore = BlockUtils.getDB(keyValueContainerData, new
         OzoneConfiguration());
     blockIterator = metadataStore.iterator();
     blockFilter = filter;
@@ -103,9 +103,9 @@ public class KeyValueBlockIterator implements BlockIterator<KeyData> {
    * @throws IOException
    */
   @Override
-  public KeyData nextBlock() throws IOException, NoSuchElementException {
+  public BlockData nextBlock() throws IOException, NoSuchElementException {
     if (nextBlock != null) {
-      KeyData currentBlock = nextBlock;
+      BlockData currentBlock = nextBlock;
       nextBlock = null;
       return currentBlock;
     }
@@ -124,7 +124,7 @@ public class KeyValueBlockIterator implements BlockIterator<KeyData> {
     if (blockIterator.hasNext()) {
       KeyValue block = blockIterator.next();
       if (blockFilter.filterKey(null, block.getKey(), null)) {
-        nextBlock = KeyUtils.getKeyData(block.getValue());
+        nextBlock = BlockUtils.getBlockData(block.getValue());
         LOG.trace("Block matching with filter found: blockID is : {} for " +
             "containerID {}", nextBlock.getLocalID(), containerId);
         return true;

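A hedged iteration sketch over the renamed iterator; the constructor arguments are assumptions (this hunk only shows the hasNext/nextBlock internals), and exception handling is omitted:

KeyValueBlockIterator blockIter =
    new KeyValueBlockIterator(containerId, containerBaseDir);  // assumed ctor
while (blockIter.hasNext()) {
  BlockData block = blockIter.nextBlock();
  // block.getLocalID() identifies the block within the container
}
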
http://git-wip-us.apache.org/repos/asf/hadoop/blob/096a7160/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueContainer.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueContainer.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueContainer.java
index 0870c76..09d4054 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueContainer.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueContainer.java
@@ -49,7 +49,7 @@ import org.apache.hadoop.ozone.container.common.interfaces.ContainerPacker;
 import org.apache.hadoop.ozone.container.common.interfaces.VolumeChoosingPolicy;
 import org.apache.hadoop.ozone.container.common.volume.HddsVolume;
 import org.apache.hadoop.ozone.container.common.volume.VolumeSet;
-import org.apache.hadoop.ozone.container.keyvalue.helpers.KeyUtils;
+import org.apache.hadoop.ozone.container.keyvalue.helpers.BlockUtils;
 import org.apache.hadoop.ozone.container.keyvalue.helpers
     .KeyValueContainerLocationUtil;
 import org.apache.hadoop.ozone.container.keyvalue.helpers.KeyValueContainerUtil;
@@ -293,7 +293,7 @@ public class KeyValueContainer implements Container<KeyValueContainerData> {
     // It is ok if this operation takes a bit of time.
     // Close container is not expected to be instantaneous.
     try {
-      MetadataStore db = KeyUtils.getDB(containerData, config);
+      MetadataStore db = BlockUtils.getDB(containerData, config);
       db.compactDB();
     } catch (StorageContainerException ex) {
       throw ex;

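For reference, the close path above compacts the container's metadata store through the renamed helper; a minimal sketch of the two calls shown in the hunk:

MetadataStore db = BlockUtils.getDB(containerData, config);  // cached handle
db.compactDB();  // reclaim space before the container goes immutable
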
http://git-wip-us.apache.org/repos/asf/hadoop/blob/096a7160/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueHandler.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueHandler.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueHandler.java
index 5acecb4..5be6e28 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueHandler.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueHandler.java
@@ -48,10 +48,10 @@ import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.Type;
 import org.apache.hadoop.hdds.scm.ScmConfigKeys;
 import org.apache.hadoop.hdds.scm.container.common.helpers
     .StorageContainerException;
+import org.apache.hadoop.ozone.container.common.helpers.BlockData;
 import org.apache.hadoop.ozone.container.common.helpers.ChunkInfo;
 import org.apache.hadoop.ozone.container.common.helpers.ContainerMetrics;
 import org.apache.hadoop.ozone.container.common.helpers.ContainerUtils;
-import org.apache.hadoop.ozone.container.common.helpers.KeyData;
 import org.apache.hadoop.ozone.container.common.impl.ContainerSet;
 import org.apache.hadoop.ozone.container.common.impl.OpenContainerBlockMap;
 import org.apache.hadoop.ozone.container.common.interfaces.Container;
@@ -62,13 +62,13 @@ import org.apache.hadoop.ozone.container.common.volume
     .RoundRobinVolumeChoosingPolicy;
 import org.apache.hadoop.ozone.container.common.volume.VolumeSet;
 import org.apache.hadoop.ozone.container.keyvalue.helpers.ChunkUtils;
-import org.apache.hadoop.ozone.container.keyvalue.helpers.KeyUtils;
+import org.apache.hadoop.ozone.container.keyvalue.helpers.BlockUtils;
 import org.apache.hadoop.ozone.container.keyvalue.helpers.KeyValueContainerUtil;
 import org.apache.hadoop.ozone.container.keyvalue.helpers.SmallFileUtils;
 import org.apache.hadoop.ozone.container.keyvalue.impl.ChunkManagerImpl;
-import org.apache.hadoop.ozone.container.keyvalue.impl.KeyManagerImpl;
+import org.apache.hadoop.ozone.container.keyvalue.impl.BlockManagerImpl;
 import org.apache.hadoop.ozone.container.keyvalue.interfaces.ChunkManager;
-import org.apache.hadoop.ozone.container.keyvalue.interfaces.KeyManager;
+import org.apache.hadoop.ozone.container.keyvalue.interfaces.BlockManager;
 import org.apache.hadoop.ozone.container.keyvalue.statemachine.background
     .BlockDeletingService;
 import org.apache.hadoop.util.AutoCloseableLock;
@@ -117,7 +117,7 @@ public class KeyValueHandler extends Handler {
       KeyValueHandler.class);
 
   private final ContainerType containerType;
-  private final KeyManager keyManager;
+  private final BlockManager blockManager;
   private final ChunkManager chunkManager;
   private final BlockDeletingService blockDeletingService;
   private final VolumeChoosingPolicy volumeChoosingPolicy;
@@ -129,7 +129,7 @@ public class KeyValueHandler extends Handler {
       VolumeSet volSet, ContainerMetrics metrics) {
     super(config, contSet, volSet, metrics);
     containerType = ContainerType.KeyValueContainer;
-    keyManager = new KeyManagerImpl(config);
+    blockManager = new BlockManagerImpl(config);
     chunkManager = new ChunkManagerImpl();
     long svcInterval = config
         .getTimeDuration(OZONE_BLOCK_DELETING_SERVICE_INTERVAL,
@@ -187,13 +187,13 @@ public class KeyValueHandler extends Handler {
       return handleUnsupportedOp(request);
     case CloseContainer:
       return handleCloseContainer(request, kvContainer);
-    case PutKey:
-      return handlePutKey(request, kvContainer);
-    case GetKey:
-      return handleGetKey(request, kvContainer);
-    case DeleteKey:
-      return handleDeleteKey(request, kvContainer);
-    case ListKey:
+    case PutBlock:
+      return handlePutBlock(request, kvContainer);
+    case GetBlock:
+      return handleGetBlock(request, kvContainer);
+    case DeleteBlock:
+      return handleDeleteBlock(request, kvContainer);
+    case ListBlock:
       return handleUnsupportedOp(request);
     case ReadChunk:
       return handleReadChunk(request, kvContainer);
@@ -222,8 +222,8 @@ public class KeyValueHandler extends Handler {
   }
 
   @VisibleForTesting
-  public KeyManager getKeyManager() {
-    return this.keyManager;
+  public BlockManager getBlockManager() {
+    return this.blockManager;
   }
 
   /**
@@ -413,7 +413,7 @@ public class KeyValueHandler extends Handler {
       // remove the container from open block map once, all the blocks
       // have been committed and the container is closed
       kvData.setState(ContainerProtos.ContainerLifeCycleState.CLOSING);
-      commitPendingKeys(kvContainer);
+      commitPendingBlocks(kvContainer);
       kvContainer.close();
      // make sure the container open keys from BlockMap get removed
       openContainerBlockMap.removeContainer(kvData.getContainerID());
@@ -429,13 +429,13 @@ public class KeyValueHandler extends Handler {
   }
 
   /**
-   * Handle Put Key operation. Calls KeyManager to process the request.
+   * Handle Put Block operation. Calls BlockManager to process the request.
    */
-  ContainerCommandResponseProto handlePutKey(
+  ContainerCommandResponseProto handlePutBlock(
       ContainerCommandRequestProto request, KeyValueContainer kvContainer) {
 
     long blockLength;
-    if (!request.hasPutKey()) {
+    if (!request.hasPutBlock()) {
       LOG.debug("Malformed Put Key request. trace ID: {}",
           request.getTraceID());
       return ContainerUtils.malformedRequest(request);
@@ -444,11 +444,11 @@ public class KeyValueHandler extends Handler {
     try {
       checkContainerOpen(kvContainer);
 
-      KeyData keyData = KeyData.getFromProtoBuf(
-          request.getPutKey().getKeyData());
-      long numBytes = keyData.getProtoBufMessage().toByteArray().length;
-      blockLength = commitKey(keyData, kvContainer);
-      metrics.incContainerBytesStats(Type.PutKey, numBytes);
+      BlockData blockData = BlockData.getFromProtoBuf(
+          request.getPutBlock().getBlockData());
+      long numBytes = blockData.getProtoBufMessage().toByteArray().length;
+      blockLength = commitKey(blockData, kvContainer);
+      metrics.incContainerBytesStats(Type.PutBlock, numBytes);
     } catch (StorageContainerException ex) {
       return ContainerUtils.logAndReturnError(LOG, ex, request);
     } catch (IOException ex) {
@@ -457,46 +457,46 @@ public class KeyValueHandler extends Handler {
           request);
     }
 
-    return KeyUtils.putKeyResponseSuccess(request, blockLength);
+    return BlockUtils.putBlockResponseSuccess(request, blockLength);
   }
 
-  private void commitPendingKeys(KeyValueContainer kvContainer)
+  private void commitPendingBlocks(KeyValueContainer kvContainer)
       throws IOException {
     long containerId = kvContainer.getContainerData().getContainerID();
-    List<KeyData> pendingKeys =
-        this.openContainerBlockMap.getOpenKeys(containerId);
-    for(KeyData keyData : pendingKeys) {
-      commitKey(keyData, kvContainer);
+    List<BlockData> pendingBlocks =
+        this.openContainerBlockMap.getOpenBlocks(containerId);
+    for(BlockData blockData : pendingBlocks) {
+      commitKey(blockData, kvContainer);
     }
   }
 
-  private long commitKey(KeyData keyData, KeyValueContainer kvContainer)
+  private long commitKey(BlockData blockData, KeyValueContainer kvContainer)
       throws IOException {
-    Preconditions.checkNotNull(keyData);
-    long length = keyManager.putKey(kvContainer, keyData);
+    Preconditions.checkNotNull(blockData);
+    long length = blockManager.putBlock(kvContainer, blockData);
     //update the open key Map in containerManager
-    this.openContainerBlockMap.removeFromKeyMap(keyData.getBlockID());
+    this.openContainerBlockMap.removeFromBlockMap(blockData.getBlockID());
     return length;
   }
   /**
-   * Handle Get Key operation. Calls KeyManager to process the request.
+   * Handle Get Block operation. Calls BlockManager to process the request.
    */
-  ContainerCommandResponseProto handleGetKey(
+  ContainerCommandResponseProto handleGetBlock(
       ContainerCommandRequestProto request, KeyValueContainer kvContainer) {
 
-    if (!request.hasGetKey()) {
+    if (!request.hasGetBlock()) {
       LOG.debug("Malformed Get Key request. trace ID: {}",
           request.getTraceID());
       return ContainerUtils.malformedRequest(request);
     }
 
-    KeyData responseData;
+    BlockData responseData;
     try {
       BlockID blockID = BlockID.getFromProtobuf(
-          request.getGetKey().getBlockID());
-      responseData = keyManager.getKey(kvContainer, blockID);
+          request.getGetBlock().getBlockID());
+      responseData = blockManager.getBlock(kvContainer, blockID);
       long numBytes = responseData.getProtoBufMessage().toByteArray().length;
-      metrics.incContainerBytesStats(Type.GetKey, numBytes);
+      metrics.incContainerBytesStats(Type.GetBlock, numBytes);
 
     } catch (StorageContainerException ex) {
       return ContainerUtils.logAndReturnError(LOG, ex, request);
@@ -506,12 +506,12 @@ public class KeyValueHandler extends Handler {
           request);
     }
 
-    return KeyUtils.getKeyDataResponse(request, responseData);
+    return BlockUtils.getBlockDataResponse(request, responseData);
   }
 
   /**
    * Handles GetCommittedBlockLength operation.
-   * Calls KeyManager to process the request.
+   * Calls BlockManager to process the request.
    */
   ContainerCommandResponseProto handleGetCommittedBlockLength(
       ContainerCommandRequestProto request, KeyValueContainer kvContainer) {
@@ -530,7 +530,7 @@ public class KeyValueHandler extends Handler {
         String msg = "Block " + blockID + " is not committed yet.";
         throw new StorageContainerException(msg, BLOCK_NOT_COMMITTED);
       }
-      blockLength = keyManager.getCommittedBlockLength(kvContainer, blockID);
+      blockLength = blockManager.getCommittedBlockLength(kvContainer, blockID);
     } catch (StorageContainerException ex) {
       return ContainerUtils.logAndReturnError(LOG, ex, request);
     } catch (IOException ex) {
@@ -539,16 +539,16 @@ public class KeyValueHandler extends Handler {
               IO_EXCEPTION), request);
     }
 
-    return KeyUtils.getBlockLengthResponse(request, blockLength);
+    return BlockUtils.getBlockLengthResponse(request, blockLength);
   }
 
   /**
-   * Handle Delete Key operation. Calls KeyManager to process the request.
+   * Handle Delete Block operation. Calls BlockManager to process the request.
    */
-  ContainerCommandResponseProto handleDeleteKey(
+  ContainerCommandResponseProto handleDeleteBlock(
       ContainerCommandRequestProto request, KeyValueContainer kvContainer) {
 
-    if (!request.hasDeleteKey()) {
+    if (!request.hasDeleteBlock()) {
       LOG.debug("Malformed Delete Key request. trace ID: {}",
           request.getTraceID());
       return ContainerUtils.malformedRequest(request);
@@ -558,9 +558,9 @@ public class KeyValueHandler extends Handler {
       checkContainerOpen(kvContainer);
 
       BlockID blockID = BlockID.getFromProtobuf(
-          request.getDeleteKey().getBlockID());
+          request.getDeleteBlock().getBlockID());
 
-      keyManager.deleteKey(kvContainer, blockID);
+      blockManager.deleteBlock(kvContainer, blockID);
     } catch (StorageContainerException ex) {
       return ContainerUtils.logAndReturnError(LOG, ex, request);
     } catch (IOException ex) {
@@ -569,7 +569,7 @@ public class KeyValueHandler extends Handler {
           request);
     }
 
-    return KeyUtils.getKeyResponseSuccess(request);
+    return BlockUtils.getBlockResponseSuccess(request);
   }
 
   /**
@@ -698,7 +698,7 @@ public class KeyValueHandler extends Handler {
 
   /**
    * Handle Put Small File operation. Writes the chunk and associated key
-   * using a single RPC. Calls KeyManager and ChunkManager to process the
+   * using a single RPC. Calls BlockManager and ChunkManager to process the
    * request.
    */
   ContainerCommandResponseProto handlePutSmallFile(
@@ -715,11 +715,11 @@ public class KeyValueHandler extends Handler {
     try {
       checkContainerOpen(kvContainer);
 
-      BlockID blockID = BlockID.getFromProtobuf(putSmallFileReq.getKey()
-          .getKeyData().getBlockID());
-      KeyData keyData = KeyData.getFromProtoBuf(
-          putSmallFileReq.getKey().getKeyData());
-      Preconditions.checkNotNull(keyData);
+      BlockID blockID = BlockID.getFromProtobuf(putSmallFileReq.getBlock()
+          .getBlockData().getBlockID());
+      BlockData blockData = BlockData.getFromProtoBuf(
+          putSmallFileReq.getBlock().getBlockData());
+      Preconditions.checkNotNull(blockData);
 
       ChunkInfo chunkInfo = ChunkInfo.getFromProtoBuf(
           putSmallFileReq.getChunkInfo());
@@ -732,8 +732,8 @@ public class KeyValueHandler extends Handler {
 
       List<ContainerProtos.ChunkInfo> chunks = new LinkedList<>();
       chunks.add(chunkInfo.getProtoBufMessage());
-      keyData.setChunks(chunks);
-      keyManager.putKey(kvContainer, keyData);
+      blockData.setChunks(chunks);
+      blockManager.putBlock(kvContainer, blockData);
       metrics.incContainerBytesStats(Type.PutSmallFile, data.length);
 
     } catch (StorageContainerException ex) {
@@ -749,7 +749,7 @@ public class KeyValueHandler extends Handler {
 
   /**
    * Handle Get Small File operation. Gets a data stream using a key. This
-   * helps in reducing the RPC overhead for small files. Calls KeyManager and
+   * helps in reducing the RPC overhead for small files. Calls BlockManager and
    * ChunkManager to process the request.
    */
   ContainerCommandResponseProto handleGetSmallFile(
@@ -764,9 +764,9 @@ public class KeyValueHandler extends Handler {
     GetSmallFileRequestProto getSmallFileReq = request.getGetSmallFile();
 
     try {
-      BlockID blockID = BlockID.getFromProtobuf(getSmallFileReq.getKey()
+      BlockID blockID = BlockID.getFromProtobuf(getSmallFileReq.getBlock()
           .getBlockID());
-      KeyData responseData = keyManager.getKey(kvContainer, blockID);
+      BlockData responseData = blockManager.getBlock(kvContainer, blockID);
 
       ContainerProtos.ChunkInfo chunkInfo = null;
       ByteString dataBuf = ByteString.EMPTY;

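A hedged end-to-end sketch of the renamed PutBlock path through this handler; every call below appears in the hunks above, and only the surrounding variables are illustrative:

BlockData blockData = BlockData.getFromProtoBuf(
    request.getPutBlock().getBlockData());
long committedLength = blockManager.putBlock(kvContainer, blockData);
openContainerBlockMap.removeFromBlockMap(blockData.getBlockID());
ContainerCommandResponseProto response =
    BlockUtils.putBlockResponseSuccess(request, committedLength);
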
http://git-wip-us.apache.org/repos/asf/hadoop/blob/096a7160/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/helpers/BlockUtils.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/helpers/BlockUtils.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/helpers/BlockUtils.java
new file mode 100644
index 0000000..f5cc847
--- /dev/null
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/helpers/BlockUtils.java
@@ -0,0 +1,199 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ *  with the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ *  Unless required by applicable law or agreed to in writing, software
+ *  distributed under the License is distributed on an "AS IS" BASIS,
+ *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ *  See the License for the specific language governing permissions and
+ *  limitations under the License.
+ */
+
+package org.apache.hadoop.ozone.container.keyvalue.helpers;
+
+import com.google.common.base.Preconditions;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos;
+import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos
+    .ContainerCommandRequestProto;
+import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos
+    .ContainerCommandResponseProto;
+import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos
+    .GetBlockResponseProto;
+import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.
+    GetCommittedBlockLengthResponseProto;
+import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.
+    PutBlockResponseProto;
+import org.apache.hadoop.hdds.scm.container.common.helpers
+    .StorageContainerException;
+import org.apache.hadoop.ozone.container.common.helpers.BlockData;
+import org.apache.hadoop.ozone.container.common.helpers.ContainerUtils;
+import org.apache.hadoop.ozone.container.keyvalue.KeyValueContainerData;
+import org.apache.hadoop.ozone.container.common.utils.ContainerCache;
+import org.apache.hadoop.utils.MetadataStore;
+
+import java.io.IOException;
+
+import static org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos
+    .Result.NO_SUCH_BLOCK;
+import static org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos
+    .Result.UNABLE_TO_READ_METADATA_DB;
+
+/**
+ * Utility functions to help with block operations.
+ */
+public final class BlockUtils {
+
+  /** Never constructed. **/
+  private BlockUtils() {
+
+  }
+  /**
+   * Get a DB handler for a given container.
+   * If the handler doesn't exist in cache yet, first create one and
+   * add into cache. This function is called with containerManager
+   * ReadLock held.
+   *
+   * @param containerData containerData.
+   * @param conf configuration.
+   * @return MetadataStore handle.
+   * @throws StorageContainerException
+   */
+  public static MetadataStore getDB(KeyValueContainerData containerData,
+                                    Configuration conf) throws
+      StorageContainerException {
+    Preconditions.checkNotNull(containerData);
+    ContainerCache cache = ContainerCache.getInstance(conf);
+    Preconditions.checkNotNull(cache);
+    Preconditions.checkNotNull(containerData.getDbFile());
+    try {
+      return cache.getDB(containerData.getContainerID(), containerData
+          .getContainerDBType(), containerData.getDbFile().getAbsolutePath());
+    } catch (IOException ex) {
+      String message = String.format("Error opening DB. Container:%s " +
+          "ContainerPath:%s", containerData.getContainerID(), containerData
+          .getDbFile().getPath());
+      throw new StorageContainerException(message, UNABLE_TO_READ_METADATA_DB);
+    }
+  }
+  /**
+   * Remove a DB handler from cache.
+   *
+   * @param container - Container data.
+   * @param conf - Configuration.
+   */
+  public static void removeDB(KeyValueContainerData container, Configuration
+      conf) {
+    Preconditions.checkNotNull(container);
+    ContainerCache cache = ContainerCache.getInstance(conf);
+    Preconditions.checkNotNull(cache);
+    cache.removeDB(container.getContainerID());
+  }
+
+  /**
+   * Shutdown all DB Handles.
+   *
+   * @param cache - Cache for DB Handles.
+   */
+  @SuppressWarnings("unchecked")
+  public static void shutdownCache(ContainerCache cache)  {
+    cache.shutdownCache();
+  }
+
+  /**
+   * Parses the {@link BlockData} from a bytes array.
+   *
+   * @param bytes Block data in bytes.
+   * @return Block data.
+   * @throws IOException if the bytes array is malformed or invalid.
+   */
+  public static BlockData getBlockData(byte[] bytes) throws IOException {
+    try {
+      ContainerProtos.BlockData blockData = ContainerProtos.BlockData.parseFrom(
+          bytes);
+      BlockData data = BlockData.getFromProtoBuf(blockData);
+      return data;
+    } catch (IOException e) {
+      throw new StorageContainerException("Failed to parse block data from " +
+          "the bytes array.", NO_SUCH_BLOCK);
+    }
+  }
+
+  /**
+   * Returns putBlock response success.
+   * @param msg - Request.
+   * @return Response.
+   */
+  public static ContainerCommandResponseProto putBlockResponseSuccess(
+      ContainerCommandRequestProto msg, long blockLength) {
+    GetCommittedBlockLengthResponseProto.Builder
+        committedBlockLengthResponseBuilder =
+        getCommittedBlockLengthResponseBuilder(blockLength,
+            msg.getPutBlock().getBlockData().getBlockID());
+    PutBlockResponseProto.Builder putKeyResponse =
+        PutBlockResponseProto.newBuilder();
+    putKeyResponse
+        .setCommittedBlockLength(committedBlockLengthResponseBuilder);
+    ContainerProtos.ContainerCommandResponseProto.Builder builder =
+        ContainerUtils.getSuccessResponseBuilder(msg);
+    builder.setPutBlock(putKeyResponse);
+    return builder.build();
+  }
+  /**
+   * Returns successful blockResponse.
+   * @param msg - Request.
+   * @return Response.
+   */
+  public static ContainerCommandResponseProto getBlockResponseSuccess(
+      ContainerCommandRequestProto msg) {
+    return ContainerUtils.getSuccessResponse(msg);
+  }
+
+
+  public static ContainerCommandResponseProto getBlockDataResponse(
+      ContainerCommandRequestProto msg, BlockData data) {
+    GetBlockResponseProto.Builder getBlock = ContainerProtos
+        .GetBlockResponseProto
+        .newBuilder();
+    getBlock.setBlockData(data.getProtoBufMessage());
+    ContainerProtos.ContainerCommandResponseProto.Builder builder =
+        ContainerUtils.getSuccessResponseBuilder(msg);
+    builder.setGetBlock(getBlock);
+    return  builder.build();
+  }
+
+  /**
+   * Returns successful getCommittedBlockLength Response.
+   * @param msg - Request.
+   * @return Response.
+   */
+  public static ContainerCommandResponseProto getBlockLengthResponse(
+          ContainerCommandRequestProto msg, long blockLength) {
+    GetCommittedBlockLengthResponseProto.Builder
+        committedBlockLengthResponseBuilder =
+        getCommittedBlockLengthResponseBuilder(blockLength,
+            msg.getGetCommittedBlockLength().getBlockID());
+    ContainerProtos.ContainerCommandResponseProto.Builder builder =
+        ContainerUtils.getSuccessResponseBuilder(msg);
+    builder.setGetCommittedBlockLength(committedBlockLengthResponseBuilder);
+    return builder.build();
+  }
+
+  private static GetCommittedBlockLengthResponseProto.Builder
+          getCommittedBlockLengthResponseBuilder(long blockLength,
+      ContainerProtos.DatanodeBlockID blockID) {
+    ContainerProtos.GetCommittedBlockLengthResponseProto.Builder
+        getCommittedBlockLengthResponseBuilder = ContainerProtos.
+        GetCommittedBlockLengthResponseProto.newBuilder();
+    getCommittedBlockLengthResponseBuilder.setBlockLength(blockLength);
+    getCommittedBlockLengthResponseBuilder.setBlockID(blockID);
+    return getCommittedBlockLengthResponseBuilder;
+  }
+}
\ No newline at end of file

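A minimal sketch of reading block metadata through the new helper, assuming blocks are keyed by their local ID as in DeleteBlocksCommandHandler above; the MetadataStore#get call is an assumption:

MetadataStore db = BlockUtils.getDB(containerData, conf);   // cached per container
byte[] raw = db.get(Longs.toByteArray(localId));            // assumed key layout
if (raw != null) {
  BlockData block = BlockUtils.getBlockData(raw);           // NO_SUCH_BLOCK on parse failure
}
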
http://git-wip-us.apache.org/repos/asf/hadoop/blob/096a7160/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/helpers/KeyUtils.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/helpers/KeyUtils.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/helpers/KeyUtils.java
deleted file mode 100644
index a83d298..0000000
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/helpers/KeyUtils.java
+++ /dev/null
@@ -1,199 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- *  with the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- *  Unless required by applicable law or agreed to in writing, software
- *  distributed under the License is distributed on an "AS IS" BASIS,
- *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- *  See the License for the specific language governing permissions and
- *  limitations under the License.
- */
-
-package org.apache.hadoop.ozone.container.keyvalue.helpers;
-
-import com.google.common.base.Preconditions;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos;
-import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos
-    .ContainerCommandRequestProto;
-import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos
-    .ContainerCommandResponseProto;
-import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos
-    .GetKeyResponseProto;
-import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.
-    GetCommittedBlockLengthResponseProto;
-import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.
-    PutKeyResponseProto;
-import org.apache.hadoop.hdds.scm.container.common.helpers
-    .StorageContainerException;
-import org.apache.hadoop.ozone.container.common.helpers.ContainerUtils;
-import org.apache.hadoop.ozone.container.common.helpers.KeyData;
-import org.apache.hadoop.ozone.container.keyvalue.KeyValueContainerData;
-import org.apache.hadoop.ozone.container.common.utils.ContainerCache;
-import org.apache.hadoop.utils.MetadataStore;
-
-import java.io.IOException;
-
-import static org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos
-    .Result.NO_SUCH_KEY;
-import static org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos
-    .Result.UNABLE_TO_READ_METADATA_DB;
-
-/**
- * Utils functions to help key functions.
- */
-public final class KeyUtils {
-
-  /** Never constructed. **/
-  private KeyUtils() {
-
-  }
-  /**
-   * Get a DB handler for a given container.
-   * If the handler doesn't exist in cache yet, first create one and
-   * add into cache. This function is called with containerManager
-   * ReadLock held.
-   *
-   * @param containerData containerData.
-   * @param conf configuration.
-   * @return MetadataStore handle.
-   * @throws StorageContainerException
-   */
-  public static MetadataStore getDB(KeyValueContainerData containerData,
-                                    Configuration conf) throws
-      StorageContainerException {
-    Preconditions.checkNotNull(containerData);
-    ContainerCache cache = ContainerCache.getInstance(conf);
-    Preconditions.checkNotNull(cache);
-    Preconditions.checkNotNull(containerData.getDbFile());
-    try {
-      return cache.getDB(containerData.getContainerID(), containerData
-          .getContainerDBType(), containerData.getDbFile().getAbsolutePath());
-    } catch (IOException ex) {
-      String message = String.format("Error opening DB. Container:%s " +
-          "ContainerPath:%s", containerData.getContainerID(), containerData
-          .getDbFile().getPath());
-      throw new StorageContainerException(message, UNABLE_TO_READ_METADATA_DB);
-    }
-  }
-  /**
-   * Remove a DB handler from cache.
-   *
-   * @param container - Container data.
-   * @param conf - Configuration.
-   */
-  public static void removeDB(KeyValueContainerData container, Configuration
-      conf) {
-    Preconditions.checkNotNull(container);
-    ContainerCache cache = ContainerCache.getInstance(conf);
-    Preconditions.checkNotNull(cache);
-    cache.removeDB(container.getContainerID());
-  }
-
-  /**
-   * Shutdown all DB Handles.
-   *
-   * @param cache - Cache for DB Handles.
-   */
-  @SuppressWarnings("unchecked")
-  public static void shutdownCache(ContainerCache cache)  {
-    cache.shutdownCache();
-  }
-
-  /**
-   * Parses the {@link KeyData} from a bytes array.
-   *
-   * @param bytes key data in bytes.
-   * @return key data.
-   * @throws IOException if the bytes array is malformed or invalid.
-   */
-  public static KeyData getKeyData(byte[] bytes) throws IOException {
-    try {
-      ContainerProtos.KeyData keyData = ContainerProtos.KeyData.parseFrom(
-          bytes);
-      KeyData data = KeyData.getFromProtoBuf(keyData);
-      return data;
-    } catch (IOException e) {
-      throw new StorageContainerException("Failed to parse key data from the" +
-          " bytes array.", NO_SUCH_KEY);
-    }
-  }
-
-  /**
-   * Returns putKey response success.
-   * @param msg - Request.
-   * @return Response.
-   */
-  public static ContainerCommandResponseProto putKeyResponseSuccess(
-      ContainerCommandRequestProto msg, long blockLength) {
-    GetCommittedBlockLengthResponseProto.Builder
-        committedBlockLengthResponseBuilder =
-        getCommittedBlockLengthResponseBuilder(blockLength,
-            msg.getPutKey().getKeyData().getBlockID());
-    PutKeyResponseProto.Builder putKeyResponse =
-        PutKeyResponseProto.newBuilder();
-    putKeyResponse
-        .setCommittedBlockLength(committedBlockLengthResponseBuilder);
-    ContainerProtos.ContainerCommandResponseProto.Builder builder =
-        ContainerUtils.getSuccessResponseBuilder(msg);
-    builder.setPutKey(putKeyResponse);
-    return builder.build();
-  }
-  /**
-   * Returns successful keyResponse.
-   * @param msg - Request.
-   * @return Response.
-   */
-  public static ContainerCommandResponseProto getKeyResponseSuccess(
-      ContainerCommandRequestProto msg) {
-    return ContainerUtils.getSuccessResponse(msg);
-  }
-
-
-  public static ContainerCommandResponseProto getKeyDataResponse(
-      ContainerCommandRequestProto msg, KeyData data) {
-    GetKeyResponseProto.Builder getKey = ContainerProtos
-        .GetKeyResponseProto
-        .newBuilder();
-    getKey.setKeyData(data.getProtoBufMessage());
-    ContainerProtos.ContainerCommandResponseProto.Builder builder =
-        ContainerUtils.getSuccessResponseBuilder(msg);
-    builder.setGetKey(getKey);
-    return  builder.build();
-  }
-
-  /**
-   * Returns successful getCommittedBlockLength Response.
-   * @param msg - Request.
-   * @return Response.
-   */
-  public static ContainerCommandResponseProto getBlockLengthResponse(
-          ContainerCommandRequestProto msg, long blockLength) {
-    GetCommittedBlockLengthResponseProto.Builder
-        committedBlockLengthResponseBuilder =
-        getCommittedBlockLengthResponseBuilder(blockLength,
-            msg.getGetCommittedBlockLength().getBlockID());
-    ContainerProtos.ContainerCommandResponseProto.Builder builder =
-        ContainerUtils.getSuccessResponseBuilder(msg);
-    builder.setGetCommittedBlockLength(committedBlockLengthResponseBuilder);
-    return builder.build();
-  }
-
-  private static GetCommittedBlockLengthResponseProto.Builder
-  getCommittedBlockLengthResponseBuilder(
-      long blockLength, ContainerProtos.DatanodeBlockID blockID) {
-    ContainerProtos.GetCommittedBlockLengthResponseProto.Builder
-        getCommittedBlockLengthResponseBuilder = ContainerProtos.
-        GetCommittedBlockLengthResponseProto.newBuilder();
-    getCommittedBlockLengthResponseBuilder.setBlockLength(blockLength);
-    getCommittedBlockLengthResponseBuilder.setBlockID(blockID);
-    return getCommittedBlockLengthResponseBuilder;
-  }
-}
\ No newline at end of file




[50/50] [abbrv] hadoop git commit: HDDS-7. Enable kerberos auth for Ozone client in hadoop rpc. Contributed by Ajay Kumar.

Posted by xy...@apache.org.
HDDS-7. Enable kerberos auth for Ozone client in hadoop rpc. Contributed by Ajay Kumar.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/dd5b958f
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/dd5b958f
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/dd5b958f

Branch: refs/heads/HDDS-4
Commit: dd5b958fc06294595c1fb70ac9e3e8da2e3c412d
Parents: a9aeff8
Author: Xiaoyu Yao <xy...@apache.org>
Authored: Fri May 18 13:09:17 2018 -0700
Committer: Xiaoyu Yao <xy...@apache.org>
Committed: Thu Sep 20 15:13:07 2018 -0700

----------------------------------------------------------------------
 .../src/test/compose/compose-secure/.env        | 17 ++++
 .../compose/compose-secure/docker-compose.yaml  | 66 ++++++++++++++
 .../test/compose/compose-secure/docker-config   | 66 ++++++++++++++
 .../acceptance/ozone-secure.robot               | 95 ++++++++++++++++++++
 .../hadoop/ozone/client/rest/RestClient.java    |  4 +-
 .../hadoop/ozone/client/rpc/RpcClient.java      |  6 +-
 6 files changed, 248 insertions(+), 6 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/dd5b958f/hadoop-ozone/acceptance-test/src/test/compose/compose-secure/.env
----------------------------------------------------------------------
diff --git a/hadoop-ozone/acceptance-test/src/test/compose/compose-secure/.env b/hadoop-ozone/acceptance-test/src/test/compose/compose-secure/.env
new file mode 100644
index 0000000..3254735
--- /dev/null
+++ b/hadoop-ozone/acceptance-test/src/test/compose/compose-secure/.env
@@ -0,0 +1,17 @@
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+OZONEDIR=../../../hadoop-dist/target/ozone
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hadoop/blob/dd5b958f/hadoop-ozone/acceptance-test/src/test/compose/compose-secure/docker-compose.yaml
----------------------------------------------------------------------
diff --git a/hadoop-ozone/acceptance-test/src/test/compose/compose-secure/docker-compose.yaml b/hadoop-ozone/acceptance-test/src/test/compose/compose-secure/docker-compose.yaml
new file mode 100644
index 0000000..2661163
--- /dev/null
+++ b/hadoop-ozone/acceptance-test/src/test/compose/compose-secure/docker-compose.yaml
@@ -0,0 +1,66 @@
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+version: "3"
+services:
+   ozone.kdc:
+      image: ahadoop/kdc:v1
+   namenode:
+      image: ahadoop/ozone:v1
+      hostname: namenode
+      volumes:
+         - ${OZONEDIR}:/opt/hadoop
+      ports:
+         - 9000:9000
+      environment:
+          ENSURE_NAMENODE_DIR: /data/namenode
+      env_file:
+         - ./docker-config
+      command: ["/opt/hadoop/bin/hdfs","namenode"]
+   datanode:
+      image: ahadoop/ozone:v1
+      hostname: datanode
+      volumes:
+        - ${OZONEDIR}:/opt/hadoop
+      ports:
+        - 9874
+      env_file:
+        - ./docker-config
+      command: ["/opt/hadoop/bin/ozone","datanode"]
+   ksm:
+      image: ahadoop/ozone:v1
+      hostname: ksm
+      volumes:
+         - ${OZONEDIR}:/opt/hadoop
+      ports:
+         - 9874:9874
+      environment:
+         ENSURE_KSM_INITIALIZED: /data/metadata/ksm/current/VERSION
+      env_file:
+          - ./docker-config
+      command: ["/opt/hadoop/bin/ozone","ksm"]
+   scm:
+      image: ahadoop/ozone:v1
+      hostname: scm
+      volumes:
+         - ${OZONEDIR}:/opt/hadoop
+      ports:
+         - 9876:9876
+      env_file:
+          - ./docker-config
+      environment:
+          ENSURE_SCM_INITIALIZED: /data/metadata/scm/current/VERSION
+      command: ["/opt/hadoop/bin/ozone","scm"]

http://git-wip-us.apache.org/repos/asf/hadoop/blob/dd5b958f/hadoop-ozone/acceptance-test/src/test/compose/compose-secure/docker-config
----------------------------------------------------------------------
diff --git a/hadoop-ozone/acceptance-test/src/test/compose/compose-secure/docker-config b/hadoop-ozone/acceptance-test/src/test/compose/compose-secure/docker-config
new file mode 100644
index 0000000..678c75a
--- /dev/null
+++ b/hadoop-ozone/acceptance-test/src/test/compose/compose-secure/docker-config
@@ -0,0 +1,66 @@
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+OZONE-SITE.XML_ozone.ksm.address=ksm
+OZONE-SITE.XML_ozone.scm.names=scm
+OZONE-SITE.XML_ozone.enabled=True
+OZONE-SITE.XML_hdds.scm.datanode.id=/data/datanode.id
+OZONE-SITE.XML_hdds.scm.block.client.address=scm
+OZONE-SITE.XML_ozone.metadata.dirs=/data/metadata
+OZONE-SITE.XML_ozone.handler.type=distributed
+OZONE-SITE.XML_hdds.scm.client.address=scm
+OZONE-SITE.XML_hdds.datanode.plugins=org.apache.hadoop.ozone.web.OzoneHddsDatanodeService
+OZONE-SITE.XML_hdds.scm.kerberos.principal=scm/scm@EXAMPLE.COM
+OZONE-SITE.XML_hdds.scm.kerberos.keytab.file=/etc/security/keytabs/scm.keytab
+OZONE-SITE.XML_ozone.ksm.kerberos.principal=ksm/ksm@EXAMPLE.COM
+OZONE-SITE.XML_ozone.ksm.kerberos.keytab.file=/etc/security/keytabs/ksm.keytab
+OZONE-SITE.XML_ozone.security.enabled=true
+OZONE-SITE.XML_hdds.scm.web.authentication.kerberos.principal=HTTP/scm@EXAMPLE.COM
+OZONE-SITE.XML_hdds.scm.web.authentication.kerberos.keytab=/etc/security/keytabs/HTTP.keytab
+OZONE-SITE.XML_ozone.ksm.web.authentication.kerberos.principal=HTTP/ksm@EXAMPLE.COM
+OZONE-SITE.XML_ozone.ksm.web.authentication.kerberos.keytab=/etc/security/keytabs/HTTP.keytab
+OZONE-SITE.XML_ozone.scm.block.client.address=scm
+OZONE-SITE.XML_ozone.scm.client.address=scm
+HDFS-SITE.XML_dfs.namenode.name.dir=/data/namenode
+HDFS-SITE.XML_dfs.datanode.plugins=org.apache.hadoop.ozone.HddsDatanodeService
+HDFS-SITE.XML_dfs.block.access.token.enable=true
+HDFS-SITE.XML_dfs.namenode.kerberos.principal=nn/namenode@EXAMPLE.COM
+HDFS-SITE.XML_dfs.namenode.keytab.file=/etc/security/keytabs/nn.keytab
+HDFS-SITE.XML_dfs.datanode.kerberos.principal=dn/datanode@EXAMPLE.COM
+HDFS-SITE.XML_dfs.datanode.keytab.file=/etc/security/keytabs/dn.keytab
+HDFS-SITE.XML_dfs.namenode.kerberos.internal.spnego.principal=HTTP/namenode@EXAMPLE.COM
+HDFS-SITE.XML_dfs.web.authentication.kerberos.principal=HTTP/_HOST@EXAMPLE.COM
+HDFS-SITE.XML_dfs.web.authentication.kerberos.keytab=/etc/security/keytabs/HTTP.keytab
+HDFS-SITE.XML_dfs.datanode.address=0.0.0.0:1019
+HDFS-SITE.XML_dfs.datanode.http.address=0.0.0.0:1012
+HDFS-SITE.XML_dfs.namenode.rpc-address=namenode:9000
+CORE-SITE.XML_dfs.data.transfer.protection=authentication
+CORE-SITE.XML_hadoop.security.authentication=kerberos
+CORE-SITE.XML_hadoop.security.auth_to_local=RULE:[2:$1@$0](.*)s/.*/root/
+LOG4J.PROPERTIES_log4j.rootLogger=INFO, stdout
+LOG4J.PROPERTIES_log4j.appender.stdout=org.apache.log4j.ConsoleAppender
+LOG4J.PROPERTIES_log4j.appender.stdout.layout=org.apache.log4j.PatternLayout
+LOG4J.PROPERTIES_log4j.appender.stdout.layout.ConversionPattern=%d{yyyy-MM-dd HH:mm:ss} %-5p %c{1}:%L - %m%n
+
+OZONE_DATANODE_SECURE_USER=root
+CONF_DIR=/etc/security/keytabs
+KERBEROS_KEYTABS=dn nn ksm scm HTTP testuser
+KERBEROS_KEYSTORES=hadoop
+KERBEROS_SERVER=ozone.kdc
+JAVA_HOME=/usr/lib/jvm/java-8-openjdk-amd64/
+JSVC_HOME=/usr/bin
+SLEEP_SECONDS=10
+KERBEROS_ENABLED=true
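
The OZONE-SITE.XML_*, HDFS-SITE.XML_*, CORE-SITE.XML_* and LOG4J.PROPERTIES_* entries above are not read directly by the daemons; the ahadoop/ozone image rewrites each prefixed variable into a property of the matching configuration file at container startup. The exact script lives inside the image, but the mapping is roughly equivalent to the following sketch (class and method names are hypothetical; only Hadoop's Configuration API is real):

    // Hypothetical sketch of the env-to-config mapping: every variable
    // "PREFIX_key=value" becomes property key=value in the file the
    // prefix names (e.g. OZONE-SITE.XML_ -> ozone-site.xml).
    import java.util.Map;
    import org.apache.hadoop.conf.Configuration;

    public class EnvToConfSketch {
      static Configuration fromEnv(String prefix, Map<String, String> env) {
        Configuration conf = new Configuration(false);
        for (Map.Entry<String, String> e : env.entrySet()) {
          if (e.getKey().startsWith(prefix)) {
            conf.set(e.getKey().substring(prefix.length()), e.getValue());
          }
        }
        return conf;
      }

      public static void main(String[] args) {
        Configuration conf = fromEnv("OZONE-SITE.XML_", System.getenv());
        // Prints "True" when the docker-config above is in effect.
        System.out.println("ozone.enabled = " + conf.get("ozone.enabled"));
      }
    }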

http://git-wip-us.apache.org/repos/asf/hadoop/blob/dd5b958f/hadoop-ozone/acceptance-test/src/test/robotframework/acceptance/ozone-secure.robot
----------------------------------------------------------------------
diff --git a/hadoop-ozone/acceptance-test/src/test/robotframework/acceptance/ozone-secure.robot b/hadoop-ozone/acceptance-test/src/test/robotframework/acceptance/ozone-secure.robot
new file mode 100644
index 0000000..4a78980
--- /dev/null
+++ b/hadoop-ozone/acceptance-test/src/test/robotframework/acceptance/ozone-secure.robot
@@ -0,0 +1,95 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements.  See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+*** Settings ***
+Documentation       Smoke test to start cluster with docker-compose environments.
+Library             OperatingSystem
+Suite Setup         Startup Ozone Cluster
+Suite Teardown      Teardown Ozone Cluster
+
+*** Variables ***
+${COMMON_RESTHEADER}   -H "x-ozone-user: bilbo" -H "x-ozone-version: v1" -H  "Date: Mon, 26 Jun 2017 04:23:30 GMT" -H "Authorization:OZONE root"
+${version}
+
+*** Test Cases ***
+
+Daemons are running
+    Is daemon running           ksm
+    Is daemon running           scm
+    Is daemon running           datanode
+    Is daemon running           ozone.kdc
+
+Check if datanode is connected to the scm
+    Wait Until Keyword Succeeds     3min    5sec    Have healthy datanodes   1
+
+Test rest interface
+    ${result} =     Execute on      0   datanode        curl -i -X POST ${COMMON_RESTHEADER} "http://localhost:9880/volume1"
+                    Should contain      ${result}       201 Created
+    ${result} =     Execute on      0   datanode        curl -i -X POST ${COMMON_RESTHEADER} "http://localhost:9880/volume1/bucket1"
+                    Should contain      ${result}       201 Created
+    ${result} =     Execute on      0   datanode        curl -i -X DELETE ${COMMON_RESTHEADER} "http://localhost:9880/volume1/bucket1"
+                    Should contain      ${result}       200 OK
+    ${result} =     Execute on      0   datanode        curl -i -X DELETE ${COMMON_RESTHEADER} "http://localhost:9880/volume1"
+                    Should contain      ${result}       200 OK
+
+Test ozone cli
+    ${result} =     Execute on      1   datanode        ozone oz -createVolume o3://ksm/hive -user bilbo -quota 100TB -root
+                    Should contain      ${result}       Client cannot authenticate via
+                    # Authenticate testuser
+                    Execute on      0   datanode        kinit -k testuser/datanode@EXAMPLE.COM -t /etc/security/keytabs/testuser.keytab
+                    Execute on      0   datanode        ozone oz -createVolume o3://ksm/hive -user bilbo -quota 100TB -root
+    ${result} =     Execute on      0   datanode        ozone oz -listVolume o3://ksm/ -user bilbo | grep -Ev 'Removed|WARN|DEBUG|ERROR|INFO|TRACE' | jq -r '.[] | select(.volumeName=="hive")'
+                    Should contain      ${result}       createdOn
+                    Execute on      0   datanode        ozone oz -updateVolume o3://ksm/hive -user bill -quota 10TB
+    ${result} =     Execute on      0   datanode        ozone oz -infoVolume o3://ksm/hive | grep -Ev 'Removed|WARN|DEBUG|ERROR|INFO|TRACE' | jq -r '. | select(.volumeName=="hive") | .owner | .name'
+                    Should Be Equal     ${result}       bill
+
+*** Keywords ***
+
+Startup Ozone Cluster
+    ${rc}       ${output} =                 Run docker compose        0     down
+    ${rc}       ${output} =                 Run docker compose        0     up -d
+    Should Be Equal As Integers     ${rc}                       0
+    Wait Until Keyword Succeeds             3min    10sec    Is Daemon started   ksm     KSM is listening
+
+Teardown Ozone Cluster
+    Run docker compose      0           down
+
+Is daemon running
+    [arguments]             ${name}
+    ${result} =             Run                     docker ps
+    Should contain          ${result}               _${name}_1
+
+Is Daemon started
+    [arguments]     ${name}             ${expression}
+    ${rc}           ${result} =         Run docker compose      0         logs
+    Should contain  ${result}           ${expression}
+
+Have healthy datanodes
+    [arguments]         ${requirednodes}
+    ${result} =         Execute on     0     scm                 curl -s 'http://localhost:9876/jmx?qry=Hadoop:service=SCMNodeManager,name=SCMNodeManagerInfo' | jq -r '.beans[0].NodeCount[] | select(.key=="HEALTHY") | .value'
+    Should Be Equal     ${result}           ${requirednodes}
+
+Execute on
+    [arguments]     ${expected_rc}      ${componentname}      ${command}
+    ${rc}           ${return} =         Run docker compose    ${expected_rc}     exec ${componentname} ${command}
+    [return]        ${return}
+
+Run docker compose
+    [arguments]                     ${expected_rc}              ${command}
+                                    Set Environment Variable    OZONEDIR                               ${basedir}/hadoop-dist/target/ozone
+    ${rc}                           ${output} =                 Run And Return Rc And Output           docker-compose -f ${basedir}/hadoop-ozone/acceptance-test/src/test/compose/compose-secure/docker-compose.yaml ${command}
+    Should Be Equal As Integers     ${rc}                       ${expected_rc}
+    [return]                        ${rc}                       ${output}
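
The "Test ozone cli" case above first asserts that an un-kinit'ed caller is rejected ("Client cannot authenticate via"), then obtains a ticket from the testuser keytab and repeats the command. The same login step, done in-process through Hadoop's UGI API instead of kinit, looks roughly like this sketch (the principal and keytab path are the test fixture's, and are assumptions about the local environment):

    // Hedged sketch: keytab login equivalent to the kinit step in the
    // robot test, via Hadoop's UserGroupInformation API.
    import java.io.IOException;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.security.UserGroupInformation;

    public class KeytabLoginSketch {
      public static void main(String[] args) throws IOException {
        Configuration conf = new Configuration();
        conf.set("hadoop.security.authentication", "kerberos");
        UserGroupInformation.setConfiguration(conf);
        UserGroupInformation.loginUserFromKeytab(
            "testuser/datanode@EXAMPLE.COM",
            "/etc/security/keytabs/testuser.keytab");
        // RPC clients created after this point authenticate as testuser.
        System.out.println("Logged in as "
            + UserGroupInformation.getLoginUser().getUserName());
      }
    }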

http://git-wip-us.apache.org/repos/asf/hadoop/blob/dd5b958f/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/rest/RestClient.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/rest/RestClient.java b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/rest/RestClient.java
index fdd049a..f5eb9fd 100644
--- a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/rest/RestClient.java
+++ b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/rest/RestClient.java
@@ -111,7 +111,7 @@ public class RestClient implements ClientProtocol {
     try {
       Preconditions.checkNotNull(conf);
       this.conf = conf;
-
+      this.ugi = UserGroupInformation.getCurrentUser();
       long socketTimeout = conf.getTimeDuration(
           OzoneConfigKeys.OZONE_CLIENT_SOCKET_TIMEOUT,
           OzoneConfigKeys.OZONE_CLIENT_SOCKET_TIMEOUT_DEFAULT,
@@ -147,7 +147,7 @@ public class RestClient implements ClientProtocol {
                   .setConnectTimeout(Math.toIntExact(connectionTimeout))
                   .build())
           .build();
-      this.ugi = UserGroupInformation.getCurrentUser();
+
       this.userRights = conf.getEnum(OMConfigKeys.OZONE_OM_USER_RIGHTS,
           OMConfigKeys.OZONE_OM_USER_RIGHTS_DEFAULT);
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/dd5b958f/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/rpc/RpcClient.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/rpc/RpcClient.java b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/rpc/RpcClient.java
index 330eba8..24bc043 100644
--- a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/rpc/RpcClient.java
+++ b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/rpc/RpcClient.java
@@ -117,8 +117,7 @@ public class RpcClient implements ClientProtocol {
     this.ozoneManagerClient =
         new OzoneManagerProtocolClientSideTranslatorPB(
             RPC.getProxy(OzoneManagerProtocolPB.class, omVersion,
-                omAddress, UserGroupInformation.getCurrentUser(), conf,
-                NetUtils.getDefaultSocketFactory(conf),
+                omAddress, ugi, conf, NetUtils.getDefaultSocketFactory(conf),
                 Client.getRpcTimeout(conf)));
 
     long scmVersion =
@@ -129,8 +128,7 @@ public class RpcClient implements ClientProtocol {
     this.storageContainerLocationClient =
         new StorageContainerLocationProtocolClientSideTranslatorPB(
             RPC.getProxy(StorageContainerLocationProtocolPB.class, scmVersion,
-                scmAddress, UserGroupInformation.getCurrentUser(), conf,
-                NetUtils.getDefaultSocketFactory(conf),
+                scmAddress, ugi, conf, NetUtils.getDefaultSocketFactory(conf),
                 Client.getRpcTimeout(conf)));
 
     this.xceiverClientManager = new XceiverClientManager(conf);
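
Both hunks here, like the RestClient change above, replace a repeated UserGroupInformation.getCurrentUser() call with the ugi field the constructor resolves once, so the OM and SCM proxies are guaranteed to share one login identity once Kerberos is enabled. A minimal sketch of the pattern (class name hypothetical):

    // Resolve the caller's UGI once and reuse it for every proxy,
    // instead of re-resolving per RPC.getProxy(...) call.
    import java.io.IOException;
    import org.apache.hadoop.security.UserGroupInformation;

    public class UgiOnce {
      private final UserGroupInformation ugi;

      public UgiOnce() throws IOException {
        this.ugi = UserGroupInformation.getCurrentUser();
      }

      UserGroupInformation callerUgi() {
        // Hand this same instance to each proxy constructor.
        return ugi;
      }

      public static void main(String[] args) throws IOException {
        System.out.println(new UgiOnce().callerUgi().getUserName());
      }
    }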




[35/50] [abbrv] hadoop git commit: HADOOP-15748. S3 listing inconsistency can raise NPE in globber. Contributed by Steve Loughran.

Posted by xy...@apache.org.
HADOOP-15748. S3 listing inconsistency can raise NPE in globber.
Contributed by Steve Loughran.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/646874c3
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/646874c3
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/646874c3

Branch: refs/heads/HDDS-4
Commit: 646874c326139457b79cf8cfa547b3c91a78c7b4
Parents: 7ad27e9
Author: Steve Loughran <st...@apache.org>
Authored: Thu Sep 20 13:04:52 2018 +0100
Committer: Steve Loughran <st...@apache.org>
Committed: Thu Sep 20 13:04:52 2018 +0100

----------------------------------------------------------------------
 .../src/main/java/org/apache/hadoop/fs/Globber.java    | 13 ++++++++++++-
 1 file changed, 12 insertions(+), 1 deletion(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/646874c3/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/Globber.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/Globber.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/Globber.java
index ca3db1d..b241a94 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/Globber.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/Globber.java
@@ -245,7 +245,18 @@ class Globber {
               // incorrectly conclude that /a/b was a file and should not match
               // /a/*/*.  So we use getFileStatus of the path we just listed to
               // disambiguate.
-              if (!getFileStatus(candidate.getPath()).isDirectory()) {
+              Path path = candidate.getPath();
+              FileStatus status = getFileStatus(path);
+              if (status == null) {
+                // null means the file was not found
+                LOG.warn("File/directory {} not found:"
+                    + " it may have been deleted."
+                    + " If this is an object store, this can be a sign of"
+                    + " eventual consistency problems.",
+                    path);
+                continue;
+              }
+              if (!status.isDirectory()) {
                 continue;
               }
             }
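
The Globber's private getFileStatus() returns null when the path has vanished between the listing and the stat, which on an eventually consistent store such as S3 previously surfaced as an NPE on isDirectory(). Expressed against the public FileSystem API, where getFileStatus() throws FileNotFoundException rather than returning null, the same defensive idea is roughly this sketch (helper name hypothetical):

    // Treat "listed a moment ago but gone now" as a skippable condition
    // rather than an error: the hallmark of eventual consistency.
    import java.io.FileNotFoundException;
    import java.io.IOException;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileStatus;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    public class ConsistentStatSketch {
      static boolean isStillDirectory(FileSystem fs, Path path)
          throws IOException {
        FileStatus status;
        try {
          status = fs.getFileStatus(path);
        } catch (FileNotFoundException e) {
          return false;  // entry disappeared after the listing
        }
        return status.isDirectory();
      }

      public static void main(String[] args) throws IOException {
        FileSystem fs = FileSystem.getLocal(new Configuration());
        System.out.println(isStillDirectory(fs, new Path("/tmp")));
      }
    }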

