You are viewing a plain text version of this content. The canonical link for it is here.
Posted to common-commits@hadoop.apache.org by vi...@apache.org on 2018/07/09 18:25:53 UTC

[01/50] [abbrv] hadoop git commit: YARN-8485. Priviledged container app launch is failing intermittently. Contributed by Eric Yang

Repository: hadoop
Updated Branches:
  refs/heads/HDFS-12090 9d7a9031a -> b210ee3ec


YARN-8485. Priviledged container app launch is failing intermittently. Contributed by Eric Yang


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/53e267fa
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/53e267fa
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/53e267fa

Branch: refs/heads/HDFS-12090
Commit: 53e267fa7232add3c21174382d91b2607aa6becf
Parents: ab2f834
Author: Shane Kumpf <sk...@apache.org>
Authored: Mon Jul 2 16:18:32 2018 -0600
Committer: Shane Kumpf <sk...@apache.org>
Committed: Mon Jul 2 16:18:32 2018 -0600

----------------------------------------------------------------------
 .../src/main/native/container-executor/impl/utils/docker-util.c    | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/53e267fa/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/utils/docker-util.c
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/utils/docker-util.c b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/utils/docker-util.c
index ffc349a..d364227 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/utils/docker-util.c
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/utils/docker-util.c
@@ -1235,7 +1235,7 @@ static int check_privileges(const char *user) {
   if (ret != 1) {
     int child_pid = fork();
     if (child_pid == 0) {
-      execl("/bin/sudo", "sudo", "-U", user, "-n", "-l", "docker", NULL);
+      execl("/usr/bin/sudo", "sudo", "-U", user, "-n", "-l", "docker", NULL);
       exit(INITIALIZE_USER_FAILED);
     } else {
       while ((waitid = waitpid(child_pid, &statval, 0)) != child_pid) {


---------------------------------------------------------------------
To unsubscribe, e-mail: common-commits-unsubscribe@hadoop.apache.org
For additional commands, e-mail: common-commits-help@hadoop.apache.org


[30/50] [abbrv] hadoop git commit: HDDS-167. Rename KeySpaceManager to OzoneManager. Contributed by Arpit Agarwal.

Posted by vi...@apache.org.
http://git-wip-us.apache.org/repos/asf/hadoop/blob/061b1685/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OMMetrics.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OMMetrics.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OMMetrics.java
new file mode 100644
index 0000000..2d04452
--- /dev/null
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OMMetrics.java
@@ -0,0 +1,459 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.ozone.om;
+
+import com.google.common.annotations.VisibleForTesting;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.metrics2.MetricsSystem;
+import org.apache.hadoop.metrics2.annotation.Metric;
+import org.apache.hadoop.metrics2.annotation.Metrics;
+import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem;
+import org.apache.hadoop.metrics2.lib.MutableCounterLong;
+
+/**
+ * This class is for maintaining Ozone Manager statistics.
+ */
+@InterfaceAudience.Private
+@Metrics(about="Ozone Manager Metrics", context="dfs")
+public class OMMetrics {
+  private static final String SOURCE_NAME =
+      OMMetrics.class.getSimpleName();
+
+  // OM request type op metrics
+  private @Metric MutableCounterLong numVolumeOps;
+  private @Metric MutableCounterLong numBucketOps;
+  private @Metric MutableCounterLong numKeyOps;
+
+  // OM op metrics
+  private @Metric MutableCounterLong numVolumeCreates;
+  private @Metric MutableCounterLong numVolumeUpdates;
+  private @Metric MutableCounterLong numVolumeInfos;
+  private @Metric MutableCounterLong numVolumeCheckAccesses;
+  private @Metric MutableCounterLong numBucketCreates;
+  private @Metric MutableCounterLong numVolumeDeletes;
+  private @Metric MutableCounterLong numBucketInfos;
+  private @Metric MutableCounterLong numBucketUpdates;
+  private @Metric MutableCounterLong numBucketDeletes;
+  private @Metric MutableCounterLong numKeyAllocate;
+  private @Metric MutableCounterLong numKeyLookup;
+  private @Metric MutableCounterLong numKeyRenames;
+  private @Metric MutableCounterLong numKeyDeletes;
+  private @Metric MutableCounterLong numBucketLists;
+  private @Metric MutableCounterLong numKeyLists;
+  private @Metric MutableCounterLong numVolumeLists;
+  private @Metric MutableCounterLong numKeyCommits;
+  private @Metric MutableCounterLong numAllocateBlockCalls;
+  private @Metric MutableCounterLong numGetServiceLists;
+
+  // Failure Metrics
+  private @Metric MutableCounterLong numVolumeCreateFails;
+  private @Metric MutableCounterLong numVolumeUpdateFails;
+  private @Metric MutableCounterLong numVolumeInfoFails;
+  private @Metric MutableCounterLong numVolumeDeleteFails;
+  private @Metric MutableCounterLong numBucketCreateFails;
+  private @Metric MutableCounterLong numVolumeCheckAccessFails;
+  private @Metric MutableCounterLong numBucketInfoFails;
+  private @Metric MutableCounterLong numBucketUpdateFails;
+  private @Metric MutableCounterLong numBucketDeleteFails;
+  private @Metric MutableCounterLong numKeyAllocateFails;
+  private @Metric MutableCounterLong numKeyLookupFails;
+  private @Metric MutableCounterLong numKeyRenameFails;
+  private @Metric MutableCounterLong numKeyDeleteFails;
+  private @Metric MutableCounterLong numBucketListFails;
+  private @Metric MutableCounterLong numKeyListFails;
+  private @Metric MutableCounterLong numVolumeListFails;
+  private @Metric MutableCounterLong numKeyCommitFails;
+  private @Metric MutableCounterLong numBlockAllocateCallFails;
+  private @Metric MutableCounterLong numGetServiceListFails;
+
+  public OMMetrics() {
+  }
+
+  public static OMMetrics create() {
+    MetricsSystem ms = DefaultMetricsSystem.instance();
+    return ms.register(SOURCE_NAME,
+        "Oozne Manager Metrics",
+        new OMMetrics());
+  }
+
+  public void incNumVolumeCreates() {
+    numVolumeOps.incr();
+    numVolumeCreates.incr();
+  }
+
+  public void incNumVolumeUpdates() {
+    numVolumeOps.incr();
+    numVolumeUpdates.incr();
+  }
+
+  public void incNumVolumeInfos() {
+    numVolumeOps.incr();
+    numVolumeInfos.incr();
+  }
+
+  public void incNumVolumeDeletes() {
+    numVolumeOps.incr();
+    numVolumeDeletes.incr();
+  }
+
+  public void incNumVolumeCheckAccesses() {
+    numVolumeOps.incr();
+    numVolumeCheckAccesses.incr();
+  }
+
+  public void incNumBucketCreates() {
+    numBucketOps.incr();
+    numBucketCreates.incr();
+  }
+
+  public void incNumBucketInfos() {
+    numBucketOps.incr();
+    numBucketInfos.incr();
+  }
+
+  public void incNumBucketUpdates() {
+    numBucketOps.incr();
+    numBucketUpdates.incr();
+  }
+
+  public void incNumBucketDeletes() {
+    numBucketOps.incr();
+    numBucketDeletes.incr();
+  }
+
+  public void incNumBucketLists() {
+    numBucketOps.incr();
+    numBucketLists.incr();
+  }
+
+  public void incNumKeyLists() {
+    numKeyOps.incr();
+    numKeyLists.incr();
+  }
+
+  public void incNumVolumeLists() {
+    numVolumeOps.incr();
+    numVolumeLists.incr();
+  }
+
+  public void incNumGetServiceLists() {
+    numGetServiceLists.incr();
+  }
+
+  public void incNumVolumeCreateFails() {
+    numVolumeCreateFails.incr();
+  }
+
+  public void incNumVolumeUpdateFails() {
+    numVolumeUpdateFails.incr();
+  }
+
+  public void incNumVolumeInfoFails() {
+    numVolumeInfoFails.incr();
+  }
+
+  public void incNumVolumeDeleteFails() {
+    numVolumeDeleteFails.incr();
+  }
+
+  public void incNumVolumeCheckAccessFails() {
+    numVolumeCheckAccessFails.incr();
+  }
+
+  public void incNumBucketCreateFails() {
+    numBucketCreateFails.incr();
+  }
+
+  public void incNumBucketInfoFails() {
+    numBucketInfoFails.incr();
+  }
+
+  public void incNumBucketUpdateFails() {
+    numBucketUpdateFails.incr();
+  }
+
+  public void incNumBucketDeleteFails() {
+    numBucketDeleteFails.incr();
+  }
+
+  public void incNumKeyAllocates() {
+    numKeyOps.incr();
+    numKeyAllocate.incr();
+  }
+
+  public void incNumKeyAllocateFails() {
+    numKeyAllocateFails.incr();
+  }
+
+  public void incNumKeyLookups() {
+    numKeyOps.incr();
+    numKeyLookup.incr();
+  }
+
+  public void incNumKeyLookupFails() {
+    numKeyLookupFails.incr();
+  }
+
+  public void incNumKeyRenames() {
+    numKeyOps.incr();
+    numKeyRenames.incr();
+  }
+
+  public void incNumKeyRenameFails() {
+    numKeyOps.incr();
+    numKeyRenameFails.incr();
+  }
+
+  public void incNumKeyDeleteFails() {
+    numKeyDeleteFails.incr();
+  }
+
+  public void incNumKeyDeletes() {
+    numKeyOps.incr();
+    numKeyDeletes.incr();
+  }
+
+  public void incNumKeyCommits() {
+    numKeyOps.incr();
+    numKeyCommits.incr();
+  }
+
+  public void incNumKeyCommitFails() {
+    numKeyCommitFails.incr();
+  }
+
+  public void incNumBlockAllocateCalls() {
+    numAllocateBlockCalls.incr();
+  }
+
+  public void incNumBlockAllocateCallFails() {
+    numBlockAllocateCallFails.incr();
+  }
+
+  public void incNumBucketListFails() {
+    numBucketListFails.incr();
+  }
+
+  public void incNumKeyListFails() {
+    numKeyListFails.incr();
+  }
+
+  public void incNumVolumeListFails() {
+    numVolumeListFails.incr();
+  }
+
+  public void incNumGetServiceListFails() {
+    numGetServiceListFails.incr();
+  }
+
+  @VisibleForTesting
+  public long getNumVolumeCreates() {
+    return numVolumeCreates.value();
+  }
+
+  @VisibleForTesting
+  public long getNumVolumeUpdates() {
+    return numVolumeUpdates.value();
+  }
+
+  @VisibleForTesting
+  public long getNumVolumeInfos() {
+    return numVolumeInfos.value();
+  }
+
+  @VisibleForTesting
+  public long getNumVolumeDeletes() {
+    return numVolumeDeletes.value();
+  }
+
+  @VisibleForTesting
+  public long getNumVolumeCheckAccesses() {
+    return numVolumeCheckAccesses.value();
+  }
+
+  @VisibleForTesting
+  public long getNumBucketCreates() {
+    return numBucketCreates.value();
+  }
+
+  @VisibleForTesting
+  public long getNumBucketInfos() {
+    return numBucketInfos.value();
+  }
+
+  @VisibleForTesting
+  public long getNumBucketUpdates() {
+    return numBucketUpdates.value();
+  }
+
+  @VisibleForTesting
+  public long getNumBucketDeletes() {
+    return numBucketDeletes.value();
+  }
+
+  @VisibleForTesting
+  public long getNumBucketLists() {
+    return numBucketLists.value();
+  }
+
+  @VisibleForTesting
+  public long getNumVolumeLists() {
+    return numVolumeLists.value();
+  }
+
+  @VisibleForTesting
+  public long getNumKeyLists() {
+    return numKeyLists.value();
+  }
+
+  @VisibleForTesting
+  public long getNumGetServiceLists() {
+    return numGetServiceLists.value();
+  }
+
+  @VisibleForTesting
+  public long getNumVolumeCreateFails() {
+    return numVolumeCreateFails.value();
+  }
+
+  @VisibleForTesting
+  public long getNumVolumeUpdateFails() {
+    return numVolumeUpdateFails.value();
+  }
+
+  @VisibleForTesting
+  public long getNumVolumeInfoFails() {
+    return numVolumeInfoFails.value();
+  }
+
+  @VisibleForTesting
+  public long getNumVolumeDeleteFails() {
+    return numVolumeDeleteFails.value();
+  }
+
+  @VisibleForTesting
+  public long getNumVolumeCheckAccessFails() {
+    return numVolumeCheckAccessFails.value();
+  }
+
+  @VisibleForTesting
+  public long getNumBucketCreateFails() {
+    return numBucketCreateFails.value();
+  }
+
+  @VisibleForTesting
+  public long getNumBucketInfoFails() {
+    return numBucketInfoFails.value();
+  }
+
+  @VisibleForTesting
+  public long getNumBucketUpdateFails() {
+    return numBucketUpdateFails.value();
+  }
+
+  @VisibleForTesting
+  public long getNumBucketDeleteFails() {
+    return numBucketDeleteFails.value();
+  }
+
+  @VisibleForTesting
+  public long getNumKeyAllocates() {
+    return numKeyAllocate.value();
+  }
+
+  @VisibleForTesting
+  public long getNumKeyAllocateFails() {
+    return numKeyAllocateFails.value();
+  }
+
+  @VisibleForTesting
+  public long getNumKeyLookups() {
+    return numKeyLookup.value();
+  }
+
+  @VisibleForTesting
+  public long getNumKeyLookupFails() {
+    return numKeyLookupFails.value();
+  }
+
+  @VisibleForTesting
+  public long getNumKeyRenames() {
+    return numKeyRenames.value();
+  }
+
+  @VisibleForTesting
+  public long getNumKeyRenameFails() {
+    return numKeyRenameFails.value();
+  }
+
+  @VisibleForTesting
+  public long getNumKeyDeletes() {
+    return numKeyDeletes.value();
+  }
+
+  @VisibleForTesting
+  public long getNumKeyDeletesFails() {
+    return numKeyDeleteFails.value();
+  }
+
+  @VisibleForTesting
+  public long getNumBucketListFails() {
+    return numBucketListFails.value();
+  }
+
+  @VisibleForTesting
+  public long getNumKeyListFails() {
+    return numKeyListFails.value();
+  }
+
+  @VisibleForTesting
+  public long getNumVolumeListFails() {
+    return numVolumeListFails.value();
+  }
+
+  @VisibleForTesting
+  public long getNumKeyCommits() {
+    return numKeyCommits.value();
+  }
+
+  @VisibleForTesting
+  public long getNumKeyCommitFails() {
+    return numKeyCommitFails.value();
+  }
+
+  @VisibleForTesting
+  public long getNumBlockAllocates() {
+    return numAllocateBlockCalls.value();
+  }
+
+  @VisibleForTesting
+  public long getNumBlockAllocateFails() {
+    return numBlockAllocateCallFails.value();
+  }
+
+  @VisibleForTesting
+  public long getNumGetServiceListFails() {
+    return numGetServiceListFails.value();
+  }
+
+  public void unRegister() {
+    MetricsSystem ms = DefaultMetricsSystem.instance();
+    ms.unregisterSource(SOURCE_NAME);
+  }
+}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/061b1685/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OMStorage.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OMStorage.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OMStorage.java
new file mode 100644
index 0000000..3820aed
--- /dev/null
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OMStorage.java
@@ -0,0 +1,90 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.ozone.om;
+
+import java.io.IOException;
+import java.util.Properties;
+import java.util.UUID;
+
+import org.apache.hadoop.hdds.conf.OzoneConfiguration;
+import org.apache.hadoop.ozone.common.Storage;
+import org.apache.hadoop.hdds.protocol.proto.HddsProtos.NodeType;
+
+import static org.apache.hadoop.ozone.OzoneConsts.SCM_ID;
+import static org.apache.hadoop.hdds.server.ServerUtils.getOzoneMetaDirPath;
+
+/**
+ * OMStorage is responsible for management of the StorageDirectories used by
+ * the Ozone Manager.
+ */
+public class OMStorage extends Storage {
+
+  public static final String STORAGE_DIR = "om";
+  public static final String OM_ID = "omUuid";
+
+  /**
+   * Construct OMStorage.
+   * @throws IOException if any directories are inaccessible.
+   */
+  public OMStorage(OzoneConfiguration conf) throws IOException {
+    super(NodeType.OM, getOzoneMetaDirPath(conf), STORAGE_DIR);
+  }
+
+  public void setScmId(String scmId) throws IOException {
+    if (getState() == StorageState.INITIALIZED) {
+      throw new IOException("OM is already initialized.");
+    } else {
+      getStorageInfo().setProperty(SCM_ID, scmId);
+    }
+  }
+
+  public void setOmId(String omId) throws IOException {
+    if (getState() == StorageState.INITIALIZED) {
+      throw new IOException("OM is already initialized.");
+    } else {
+      getStorageInfo().setProperty(OM_ID, omId);
+    }
+  }
+
+  /**
+   * Retrieves the SCM ID from the version file.
+   * @return SCM_ID
+   */
+  public String getScmId() {
+    return getStorageInfo().getProperty(SCM_ID);
+  }
+
+  /**
+   * Retrieves the OM ID from the version file.
+   * @return OM_ID
+   */
+  public String getOmId() {
+    return getStorageInfo().getProperty(OM_ID);
+  }
+
+  @Override
+  protected Properties getNodeProperties() {
+    String omId = getOmId();
+    if (omId == null) {
+      omId = UUID.randomUUID().toString();
+    }
+    Properties omProperties = new Properties();
+    omProperties.setProperty(OM_ID, omId);
+    return omProperties;
+  }
+}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hadoop/blob/061b1685/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmMetadataManagerImpl.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmMetadataManagerImpl.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmMetadataManagerImpl.java
new file mode 100644
index 0000000..21d2411
--- /dev/null
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmMetadataManagerImpl.java
@@ -0,0 +1,526 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with this
+ * work for additional information regarding copyright ownership.  The ASF
+ * licenses this file to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations under
+ * the License.
+ */
+package org.apache.hadoop.ozone.om;
+
+import com.google.common.annotations.VisibleForTesting;
+import com.google.common.base.Strings;
+import com.google.common.collect.Lists;
+import org.apache.commons.lang3.tuple.ImmutablePair;
+import org.apache.hadoop.hdfs.DFSUtil;
+import org.apache.hadoop.hdds.client.BlockID;
+import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;
+import org.apache.hadoop.ozone.om.helpers.OmBucketInfo;
+import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfoGroup;
+import org.apache.hadoop.ozone.om.helpers.OmVolumeArgs;
+import org.apache.hadoop.ozone.common.BlockGroup;
+import org.apache.hadoop.hdds.conf.OzoneConfiguration;
+import org.apache.hadoop.ozone.OzoneConsts;
+import org.apache.hadoop.ozone.om.exceptions.OMException;
+import org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes;
+import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.BucketInfo;
+import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.KeyInfo;
+import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.VolumeInfo;
+import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.VolumeList;
+
+import org.apache.hadoop.util.Time;
+import org.apache.hadoop.utils.BatchOperation;
+import org.apache.hadoop.utils.MetadataKeyFilters;
+import org.apache.hadoop.utils.MetadataKeyFilters.KeyPrefixFilter;
+import org.apache.hadoop.utils.MetadataKeyFilters.MetadataKeyFilter;
+import org.apache.hadoop.utils.MetadataStore;
+import org.apache.hadoop.utils.MetadataStoreBuilder;
+
+import java.io.File;
+import java.io.IOException;
+import java.util.Collections;
+import java.util.List;
+import java.util.ArrayList;
+import java.util.Map;
+import java.util.concurrent.locks.Lock;
+import java.util.concurrent.locks.ReadWriteLock;
+import java.util.concurrent.locks.ReentrantReadWriteLock;
+import java.util.stream.Collectors;
+
+import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_OPEN_KEY_EXPIRE_THRESHOLD_SECONDS;
+import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_OPEN_KEY_EXPIRE_THRESHOLD_SECONDS_DEFAULT;
+import static org.apache.hadoop.ozone.OzoneConsts.DELETING_KEY_PREFIX;
+import static org.apache.hadoop.ozone.OzoneConsts.OM_DB_NAME;
+import static org.apache.hadoop.ozone.OzoneConsts.OPEN_KEY_ID_DELIMINATOR;
+import static org.apache.hadoop.ozone.OzoneConsts.OPEN_KEY_PREFIX;
+import static org.apache.hadoop.ozone.om.OMConfigKeys
+    .OZONE_OM_DB_CACHE_SIZE_DEFAULT;
+import static org.apache.hadoop.ozone.om.OMConfigKeys
+    .OZONE_OM_DB_CACHE_SIZE_MB;
+import static org.apache.hadoop.hdds.server.ServerUtils.getOzoneMetaDirPath;
+
+/**
+ * Ozone metadata manager interface.
+ */
+public class OmMetadataManagerImpl implements OMMetadataManager {
+
+  private final MetadataStore store;
+  private final ReadWriteLock lock;
+  private final long openKeyExpireThresholdMS;
+
+  public OmMetadataManagerImpl(OzoneConfiguration conf) throws IOException {
+    File metaDir = getOzoneMetaDirPath(conf);
+    final int cacheSize = conf.getInt(OZONE_OM_DB_CACHE_SIZE_MB,
+        OZONE_OM_DB_CACHE_SIZE_DEFAULT);
+    File omDBFile = new File(metaDir.getPath(), OM_DB_NAME);
+    this.store = MetadataStoreBuilder.newBuilder()
+        .setConf(conf)
+        .setDbFile(omDBFile)
+        .setCacheSize(cacheSize * OzoneConsts.MB)
+        .build();
+    this.lock = new ReentrantReadWriteLock();
+    this.openKeyExpireThresholdMS = 1000 * conf.getInt(
+        OZONE_OPEN_KEY_EXPIRE_THRESHOLD_SECONDS,
+        OZONE_OPEN_KEY_EXPIRE_THRESHOLD_SECONDS_DEFAULT);
+  }
+
+  /**
+   * Start metadata manager.
+   */
+  @Override
+  public void start() {
+
+  }
+
+  /**
+   * Stop metadata manager.
+   */
+  @Override
+  public void stop() throws IOException {
+    if (store != null) {
+      store.close();
+    }
+  }
+
+  /**
+   * Get metadata store.
+   * @return store - metadata store.
+   */
+  @VisibleForTesting
+  @Override
+  public MetadataStore getStore() {
+    return store;
+  }
+
+  /**
+   * Given a volume return the corresponding DB key.
+   * @param volume - Volume name
+   */
+  public byte[] getVolumeKey(String volume) {
+    String dbVolumeName = OzoneConsts.OM_VOLUME_PREFIX + volume;
+    return DFSUtil.string2Bytes(dbVolumeName);
+  }
+
+  /**
+   * Given a user return the corresponding DB key.
+   * @param user - User name
+   */
+  public byte[] getUserKey(String user) {
+    String dbUserName = OzoneConsts.OM_USER_PREFIX + user;
+    return DFSUtil.string2Bytes(dbUserName);
+  }
+
+  /**
+   * Given a volume and bucket, return the corresponding DB key.
+   * @param volume - User name
+   * @param bucket - Bucket name
+   */
+  public byte[] getBucketKey(String volume, String bucket) {
+    String bucketKeyString = OzoneConsts.OM_VOLUME_PREFIX + volume
+        + OzoneConsts.OM_BUCKET_PREFIX + bucket;
+    return DFSUtil.string2Bytes(bucketKeyString);
+  }
+
+  /**
+   * @param volume
+   * @param bucket
+   * @return
+   */
+  private String getBucketWithDBPrefix(String volume, String bucket) {
+    StringBuffer sb = new StringBuffer();
+    sb.append(OzoneConsts.OM_VOLUME_PREFIX)
+        .append(volume)
+        .append(OzoneConsts.OM_BUCKET_PREFIX);
+    if (!Strings.isNullOrEmpty(bucket)) {
+      sb.append(bucket);
+    }
+    return sb.toString();
+  }
+
+  @Override
+  public String getKeyWithDBPrefix(String volume, String bucket, String key) {
+    String keyVB = OzoneConsts.OM_KEY_PREFIX + volume
+        + OzoneConsts.OM_KEY_PREFIX + bucket
+        + OzoneConsts.OM_KEY_PREFIX;
+    return Strings.isNullOrEmpty(key) ? keyVB : keyVB + key;
+  }
+
+  @Override
+  public byte[] getDBKeyBytes(String volume, String bucket, String key) {
+    return DFSUtil.string2Bytes(getKeyWithDBPrefix(volume, bucket, key));
+  }
+
+  @Override
+  public byte[] getDeletedKeyName(byte[] keyName) {
+    return DFSUtil.string2Bytes(
+        DELETING_KEY_PREFIX + DFSUtil.bytes2String(keyName));
+  }
+
+  @Override
+  public byte[] getOpenKeyNameBytes(String keyName, int id) {
+    return DFSUtil.string2Bytes(OPEN_KEY_PREFIX + id +
+        OPEN_KEY_ID_DELIMINATOR + keyName);
+  }
+
+  /**
+   * Returns the read lock used on Metadata DB.
+   * @return readLock
+   */
+  @Override
+  public Lock readLock() {
+    return lock.readLock();
+  }
+
+  /**
+   * Returns the write lock used on Metadata DB.
+   * @return writeLock
+   */
+  @Override
+  public Lock writeLock() {
+    return lock.writeLock();
+  }
+
+  /**
+   * Returns the value associated with this key.
+   * @param key - key
+   * @return value
+   */
+  @Override
+  public byte[] get(byte[] key) throws IOException {
+    return store.get(key);
+  }
+
+  /**
+   * Puts a Key into Metadata DB.
+   * @param key   - key
+   * @param value - value
+   */
+  @Override
+  public void put(byte[] key, byte[] value) throws IOException {
+    store.put(key, value);
+  }
+
+  /**
+   * Deletes a Key from Metadata DB.
+   * @param key   - key
+   */
+  public void delete(byte[] key) throws IOException {
+    store.delete(key);
+  }
+
+  @Override
+  public void writeBatch(BatchOperation batch) throws IOException {
+    this.store.writeBatch(batch);
+  }
+
+  /**
+   * Given a volume, check if it is empty, i.e there are no buckets inside it.
+   * @param volume - Volume name
+   * @return true if the volume is empty
+   */
+  public boolean isVolumeEmpty(String volume) throws IOException {
+    String dbVolumeRootName = OzoneConsts.OM_VOLUME_PREFIX + volume
+        + OzoneConsts.OM_BUCKET_PREFIX;
+    byte[] dbVolumeRootKey = DFSUtil.string2Bytes(dbVolumeRootName);
+    ImmutablePair<byte[], byte[]> volumeRoot =
+        store.peekAround(0, dbVolumeRootKey);
+    if (volumeRoot != null) {
+      return !DFSUtil.bytes2String(volumeRoot.getKey())
+          .startsWith(dbVolumeRootName);
+    }
+    return true;
+  }
+
+  /**
+   * Given a volume/bucket, check if it is empty,
+   * i.e there are no keys inside it.
+   * @param volume - Volume name
+   * @param bucket - Bucket name
+   * @return true if the bucket is empty
+   */
+  public boolean isBucketEmpty(String volume, String bucket)
+      throws IOException {
+    String keyRootName = getKeyWithDBPrefix(volume, bucket, null);
+    byte[] keyRoot = DFSUtil.string2Bytes(keyRootName);
+    ImmutablePair<byte[], byte[]> firstKey = store.peekAround(0, keyRoot);
+    if (firstKey != null) {
+      return !DFSUtil.bytes2String(firstKey.getKey())
+          .startsWith(keyRootName);
+    }
+    return true;
+  }
+
+  /**
+   * {@inheritDoc}
+   */
+  @Override
+  public List<OmBucketInfo> listBuckets(final String volumeName,
+                                        final String startBucket, final String bucketPrefix,
+                                        final int maxNumOfBuckets) throws IOException {
+    List<OmBucketInfo> result = new ArrayList<>();
+    if (Strings.isNullOrEmpty(volumeName)) {
+      throw new OMException("Volume name is required.",
+          ResultCodes.FAILED_VOLUME_NOT_FOUND);
+    }
+
+    byte[] volumeNameBytes = getVolumeKey(volumeName);
+    if (store.get(volumeNameBytes) == null) {
+      throw new OMException("Volume " + volumeName + " not found.",
+          ResultCodes.FAILED_VOLUME_NOT_FOUND);
+    }
+
+
+    // A bucket starts with /#volume/#bucket_prefix
+    MetadataKeyFilter filter = (preKey, currentKey, nextKey) -> {
+      if (currentKey != null) {
+        String bucketNamePrefix =
+                getBucketWithDBPrefix(volumeName, bucketPrefix);
+        String bucket = DFSUtil.bytes2String(currentKey);
+        return bucket.startsWith(bucketNamePrefix);
+      }
+      return false;
+    };
+
+    List<Map.Entry<byte[], byte[]>> rangeResult;
+    if (!Strings.isNullOrEmpty(startBucket)) {
+      // Since we are excluding start key from the result,
+      // the maxNumOfBuckets is incremented.
+      rangeResult = store.getSequentialRangeKVs(
+          getBucketKey(volumeName, startBucket),
+          maxNumOfBuckets + 1, filter);
+      if (!rangeResult.isEmpty()) {
+        //Remove start key from result.
+        rangeResult.remove(0);
+      }
+    } else {
+      rangeResult = store.getSequentialRangeKVs(null, maxNumOfBuckets, filter);
+    }
+
+    for (Map.Entry<byte[], byte[]> entry : rangeResult) {
+      OmBucketInfo info = OmBucketInfo.getFromProtobuf(
+          BucketInfo.parseFrom(entry.getValue()));
+      result.add(info);
+    }
+    return result;
+  }
+
+  @Override
+  public List<OmKeyInfo> listKeys(String volumeName, String bucketName,
+                                  String startKey, String keyPrefix, int maxKeys) throws IOException {
+    List<OmKeyInfo> result = new ArrayList<>();
+    if (Strings.isNullOrEmpty(volumeName)) {
+      throw new OMException("Volume name is required.",
+          ResultCodes.FAILED_VOLUME_NOT_FOUND);
+    }
+
+    if (Strings.isNullOrEmpty(bucketName)) {
+      throw new OMException("Bucket name is required.",
+          ResultCodes.FAILED_BUCKET_NOT_FOUND);
+    }
+
+    byte[] bucketNameBytes = getBucketKey(volumeName, bucketName);
+    if (store.get(bucketNameBytes) == null) {
+      throw new OMException("Bucket " + bucketName + " not found.",
+          ResultCodes.FAILED_BUCKET_NOT_FOUND);
+    }
+
+    MetadataKeyFilter filter = new KeyPrefixFilter()
+        .addFilter(getKeyWithDBPrefix(volumeName, bucketName, keyPrefix));
+
+    List<Map.Entry<byte[], byte[]>> rangeResult;
+    if (!Strings.isNullOrEmpty(startKey)) {
+      //Since we are excluding start key from the result,
+      // the maxNumOfBuckets is incremented.
+      rangeResult = store.getSequentialRangeKVs(
+          getDBKeyBytes(volumeName, bucketName, startKey),
+          maxKeys + 1, filter);
+      if (!rangeResult.isEmpty()) {
+        //Remove start key from result.
+        rangeResult.remove(0);
+      }
+    } else {
+      rangeResult = store.getSequentialRangeKVs(null, maxKeys, filter);
+    }
+
+    for (Map.Entry<byte[], byte[]> entry : rangeResult) {
+      OmKeyInfo info = OmKeyInfo.getFromProtobuf(
+          KeyInfo.parseFrom(entry.getValue()));
+      result.add(info);
+    }
+    return result;
+  }
+
+  @Override
+  public List<OmVolumeArgs> listVolumes(String userName,
+                                        String prefix, String startKey, int maxKeys) throws IOException {
+    List<OmVolumeArgs> result = Lists.newArrayList();
+    VolumeList volumes;
+    if (Strings.isNullOrEmpty(userName)) {
+      volumes = getAllVolumes();
+    } else {
+      volumes = getVolumesByUser(userName);
+    }
+
+    if (volumes == null || volumes.getVolumeNamesCount() == 0) {
+      return result;
+    }
+
+    boolean startKeyFound = Strings.isNullOrEmpty(startKey);
+    for (String volumeName : volumes.getVolumeNamesList()) {
+      if (!Strings.isNullOrEmpty(prefix)) {
+        if (!volumeName.startsWith(prefix)) {
+          continue;
+        }
+      }
+
+      if (!startKeyFound && volumeName.equals(startKey)) {
+        startKeyFound = true;
+        continue;
+      }
+      if (startKeyFound && result.size() < maxKeys) {
+        byte[] volumeInfo = store.get(this.getVolumeKey(volumeName));
+        if (volumeInfo == null) {
+          // Could not get volume info by given volume name,
+          // since the volume name is loaded from db,
+          // this probably means om db is corrupted or some entries are
+          // accidentally removed.
+          throw new OMException("Volume info not found for " + volumeName,
+              ResultCodes.FAILED_VOLUME_NOT_FOUND);
+        }
+        VolumeInfo info = VolumeInfo.parseFrom(volumeInfo);
+        OmVolumeArgs volumeArgs = OmVolumeArgs.getFromProtobuf(info);
+        result.add(volumeArgs);
+      }
+    }
+
+    return result;
+  }
+
+  private VolumeList getVolumesByUser(String userName)
+      throws OMException {
+    return getVolumesByUser(getUserKey(userName));
+  }
+
+  private VolumeList getVolumesByUser(byte[] userNameKey)
+      throws OMException {
+    VolumeList volumes = null;
+    try {
+      byte[] volumesInBytes = store.get(userNameKey);
+      if (volumesInBytes == null) {
+        // No volume found for this user, return an empty list
+        return VolumeList.newBuilder().build();
+      }
+      volumes = VolumeList.parseFrom(volumesInBytes);
+    } catch (IOException e) {
+      throw new OMException("Unable to get volumes info by the given user, "
+          + "metadata might be corrupted", e,
+          ResultCodes.FAILED_METADATA_ERROR);
+    }
+    return volumes;
+  }
+
+  private VolumeList getAllVolumes() throws IOException {
+    // Scan all users in database
+    KeyPrefixFilter filter =
+        new KeyPrefixFilter().addFilter(OzoneConsts.OM_USER_PREFIX);
+    // We are not expecting a huge number of users per cluster,
+    // it should be fine to scan all users in db and return us a
+    // list of volume names in string per user.
+    List<Map.Entry<byte[], byte[]>> rangeKVs = store
+        .getSequentialRangeKVs(null, Integer.MAX_VALUE, filter);
+
+    VolumeList.Builder builder = VolumeList.newBuilder();
+    for (Map.Entry<byte[], byte[]> entry : rangeKVs) {
+      VolumeList volumes = this.getVolumesByUser(entry.getKey());
+      builder.addAllVolumeNames(volumes.getVolumeNamesList());
+    }
+
+    return builder.build();
+  }
+
+  @Override
+  public List<BlockGroup> getPendingDeletionKeys(final int count)
+      throws IOException {
+    List<BlockGroup> keyBlocksList = Lists.newArrayList();
+    List<Map.Entry<byte[], byte[]>> rangeResult =
+        store.getRangeKVs(null, count,
+            MetadataKeyFilters.getDeletingKeyFilter());
+    for (Map.Entry<byte[], byte[]> entry : rangeResult) {
+      OmKeyInfo info =
+          OmKeyInfo.getFromProtobuf(KeyInfo.parseFrom(entry.getValue()));
+      // Get block keys as a list.
+      OmKeyLocationInfoGroup latest = info.getLatestVersionLocations();
+      if (latest == null) {
+        return Collections.emptyList();
+      }
+      List<BlockID> item = latest.getLocationList().stream()
+          .map(b->new BlockID(b.getContainerID(), b.getLocalID()))
+          .collect(Collectors.toList());
+      BlockGroup keyBlocks = BlockGroup.newBuilder()
+          .setKeyName(DFSUtil.bytes2String(entry.getKey()))
+          .addAllBlockIDs(item)
+          .build();
+      keyBlocksList.add(keyBlocks);
+    }
+    return keyBlocksList;
+  }
+
+  @Override
+  public List<BlockGroup> getExpiredOpenKeys() throws IOException {
+    List<BlockGroup> keyBlocksList = Lists.newArrayList();
+    long now = Time.now();
+    final MetadataKeyFilter openKeyFilter =
+        new KeyPrefixFilter().addFilter(OPEN_KEY_PREFIX);
+    List<Map.Entry<byte[], byte[]>> rangeResult =
+        store.getSequentialRangeKVs(null, Integer.MAX_VALUE,
+            openKeyFilter);
+    for (Map.Entry<byte[], byte[]> entry : rangeResult) {
+      OmKeyInfo info =
+          OmKeyInfo.getFromProtobuf(KeyInfo.parseFrom(entry.getValue()));
+      long lastModify = info.getModificationTime();
+      if (now - lastModify < this.openKeyExpireThresholdMS) {
+        // consider as may still be active, not hanging.
+        continue;
+      }
+      // Get block keys as a list.
+      List<BlockID> item = info.getLatestVersionLocations()
+          .getBlocksLatestVersionOnly().stream()
+          .map(b->new BlockID(b.getContainerID(), b.getLocalID()))
+          .collect(Collectors.toList());
+      BlockGroup keyBlocks = BlockGroup.newBuilder()
+          .setKeyName(DFSUtil.bytes2String(entry.getKey()))
+          .addAllBlockIDs(item)
+          .build();
+      keyBlocksList.add(keyBlocks);
+    }
+    return keyBlocksList;
+  }
+}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/061b1685/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OpenKeyCleanupService.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OpenKeyCleanupService.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OpenKeyCleanupService.java
new file mode 100644
index 0000000..8d94f5a
--- /dev/null
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OpenKeyCleanupService.java
@@ -0,0 +1,117 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.ozone.om;
+
+import org.apache.hadoop.ozone.common.BlockGroup;
+import org.apache.hadoop.ozone.common.DeleteBlockGroupResult;
+import org.apache.hadoop.hdds.scm.protocol.ScmBlockLocationProtocol;
+import org.apache.hadoop.util.StringUtils;
+import org.apache.hadoop.utils.BackgroundService;
+import org.apache.hadoop.utils.BackgroundTask;
+import org.apache.hadoop.utils.BackgroundTaskQueue;
+import org.apache.hadoop.utils.BackgroundTaskResult;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.io.IOException;
+import java.util.List;
+import java.util.concurrent.TimeUnit;
+
+/**
+ * This is the background service to delete hanging open keys.
+ * Scan the metadata of om periodically to get
+ * the keys with prefix "#open#" and ask scm to
+ * delete metadata accordingly, if scm returns
+ * success for keys, then clean up those keys.
+ */
+public class OpenKeyCleanupService extends BackgroundService {
+
+  private static final Logger LOG =
+      LoggerFactory.getLogger(OpenKeyCleanupService.class);
+
+  private final static int OPEN_KEY_DELETING_CORE_POOL_SIZE = 2;
+
+  private final KeyManager keyManager;
+  private final ScmBlockLocationProtocol scmClient;
+
+  public OpenKeyCleanupService(ScmBlockLocationProtocol scmClient,
+      KeyManager keyManager, int serviceInterval,
+      long serviceTimeout) {
+    super("OpenKeyCleanupService", serviceInterval, TimeUnit.SECONDS,
+        OPEN_KEY_DELETING_CORE_POOL_SIZE, serviceTimeout);
+    this.keyManager = keyManager;
+    this.scmClient = scmClient;
+  }
+
+  @Override
+  public BackgroundTaskQueue getTasks() {
+    BackgroundTaskQueue queue = new BackgroundTaskQueue();
+    queue.add(new OpenKeyDeletingTask());
+    return queue;
+  }
+
+  private class OpenKeyDeletingTask
+      implements BackgroundTask<BackgroundTaskResult> {
+
+    @Override
+    public int getPriority() {
+      return 0;
+    }
+
+    @Override
+    public BackgroundTaskResult call() throws Exception {
+      try {
+        List<BlockGroup> keyBlocksList = keyManager.getExpiredOpenKeys();
+        if (keyBlocksList.size() > 0) {
+          int toDeleteSize = keyBlocksList.size();
+          LOG.debug("Found {} to-delete open keys in OM", toDeleteSize);
+          List<DeleteBlockGroupResult> results =
+              scmClient.deleteKeyBlocks(keyBlocksList);
+          int deletedSize = 0;
+          for (DeleteBlockGroupResult result : results) {
+            if (result.isSuccess()) {
+              try {
+                keyManager.deleteExpiredOpenKey(result.getObjectKey());
+                LOG.debug("Key {} deleted from OM DB", result.getObjectKey());
+                deletedSize += 1;
+              } catch (IOException e) {
+                LOG.warn("Failed to delete hanging-open key {}",
+                    result.getObjectKey(), e);
+              }
+            } else {
+              LOG.warn("Deleting open Key {} failed because some of the blocks"
+                      + " were failed to delete, failed blocks: {}",
+                  result.getObjectKey(),
+                  StringUtils.join(",", result.getFailedBlocks()));
+            }
+          }
+          LOG.info("Found {} expired open key entries, successfully " +
+              "cleaned up {} entries", toDeleteSize, deletedSize);
+          return results::size;
+        } else {
+          LOG.debug("No hanging open key found in OM");
+        }
+      } catch (IOException e) {
+        LOG.error("Unable to get hanging open keys, retry in"
+            + " next interval", e);
+      }
+      return BackgroundTaskResult.EmptyTaskResult.newResult();
+    }
+  }
+}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/061b1685/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OzoneManager.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OzoneManager.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OzoneManager.java
new file mode 100644
index 0000000..71fa921
--- /dev/null
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OzoneManager.java
@@ -0,0 +1,911 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with this
+ * work for additional information regarding copyright ownership.  The ASF
+ * licenses this file to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations under
+ * the License.
+ */
+
+package org.apache.hadoop.ozone.om;
+
+import com.google.common.annotations.VisibleForTesting;
+import com.google.common.base.Preconditions;
+import com.google.protobuf.BlockingService;
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.hdds.protocol.DatanodeDetails;
+import org.apache.hadoop.hdfs.DFSUtil;
+import org.apache.hadoop.ipc.Client;
+import org.apache.hadoop.ipc.ProtobufRpcEngine;
+import org.apache.hadoop.ipc.RPC;
+import org.apache.hadoop.hdds.server.ServiceRuntimeInfoImpl;
+import org.apache.hadoop.ozone.common.Storage.StorageState;
+import org.apache.hadoop.ozone.om.exceptions.OMException;
+import org.apache.hadoop.ozone.om.helpers.OmBucketArgs;
+import org.apache.hadoop.ozone.om.helpers.OmBucketInfo;
+import org.apache.hadoop.ozone.om.helpers.OmKeyArgs;
+import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;
+import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfo;
+import org.apache.hadoop.ozone.om.helpers.OmVolumeArgs;
+import org.apache.hadoop.ozone.om.helpers.OpenKeySession;
+import org.apache.hadoop.ozone.om.helpers.ServiceInfo;
+import org.apache.hadoop.ozone.om.protocol.OzoneManagerProtocol;
+import org.apache.hadoop.ozone.om.protocolPB.OzoneManagerProtocolPB;
+import org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes;
+import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem;
+import org.apache.hadoop.metrics2.util.MBeans;
+import org.apache.hadoop.net.NetUtils;
+import org.apache.hadoop.hdds.conf.OzoneConfiguration;
+import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos
+    .ServicePort;
+import org.apache.hadoop.ozone.protocol.proto
+    .OzoneManagerProtocolProtos.OzoneAclInfo;
+import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
+import org.apache.hadoop.ozone.protocolPB.OzoneManagerProtocolServerSideTranslatorPB;
+import org.apache.hadoop.hdds.scm.ScmInfo;
+import org.apache.hadoop.hdds.scm.protocol.ScmBlockLocationProtocol;
+import org.apache.hadoop.hdds.scm.protocol.StorageContainerLocationProtocol;
+import org.apache.hadoop.hdds.scm.protocolPB
+    .ScmBlockLocationProtocolClientSideTranslatorPB;
+import org.apache.hadoop.hdds.scm.protocolPB.ScmBlockLocationProtocolPB;
+import org.apache.hadoop.hdds.scm.protocolPB
+    .StorageContainerLocationProtocolClientSideTranslatorPB;
+import org.apache.hadoop.hdds.scm.protocolPB.StorageContainerLocationProtocolPB;
+import org.apache.hadoop.security.UserGroupInformation;
+import org.apache.hadoop.util.GenericOptionsParser;
+import org.apache.hadoop.util.StringUtils;
+
+import static org.apache.hadoop.hdds.HddsUtils.getScmAddressForBlockClients;
+import static org.apache.hadoop.hdds.HddsUtils.getScmAddressForClients;
+import static org.apache.hadoop.hdds.HddsUtils.isHddsEnabled;
+import static org.apache.hadoop.ozone.OmUtils.getOmAddress;
+import static org.apache.hadoop.hdds.server.ServerUtils
+    .updateRPCListenAddress;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import javax.management.ObjectName;
+import java.io.IOException;
+import java.io.PrintStream;
+import java.net.InetSocketAddress;
+import java.util.ArrayList;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+
+import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_ENABLED;
+import static org.apache.hadoop.ozone.om.OMConfigKeys
+    .OZONE_OM_ADDRESS_KEY;
+import static org.apache.hadoop.ozone.om.OMConfigKeys
+    .OZONE_OM_HANDLER_COUNT_DEFAULT;
+import static org.apache.hadoop.ozone.om.OMConfigKeys
+    .OZONE_OM_HANDLER_COUNT_KEY;
+import static org.apache.hadoop.ozone.protocol.proto
+    .OzoneManagerProtocolProtos.OzoneManagerService
+    .newReflectiveBlockingService;
+import static org.apache.hadoop.hdds.protocol.proto.HddsProtos
+    .NodeState.HEALTHY;
+import static org.apache.hadoop.util.ExitUtil.terminate;
+
+/**
+ * Ozone Manager is the metadata manager of ozone.
+ */
+@InterfaceAudience.LimitedPrivate({"HDFS", "CBLOCK", "OZONE", "HBASE"})
+public final class OzoneManager extends ServiceRuntimeInfoImpl
+    implements OzoneManagerProtocol, OMMXBean {
+  private static final Logger LOG =
+      LoggerFactory.getLogger(OzoneManager.class);
+
+  private static final String USAGE =
+      "Usage: \n ozone om [genericOptions] " + "[ "
+          + StartupOption.CREATEOBJECTSTORE.getName() + " ]\n " + "ozone om [ "
+          + StartupOption.HELP.getName() + " ]\n";
+
+  /** Startup options. */
+  public enum StartupOption {
+    CREATEOBJECTSTORE("-createObjectStore"),
+    HELP("-help"),
+    REGULAR("-regular");
+
+    private final String name;
+
+    StartupOption(String arg) {
+      this.name = arg;
+    }
+
+    public String getName() {
+      return name;
+    }
+
+    public static StartupOption parse(String value) {
+      for (StartupOption option : StartupOption.values()) {
+        if (option.name.equalsIgnoreCase(value)) {
+          return option;
+        }
+      }
+      return null;
+    }
+  }
+
+  private final OzoneConfiguration configuration;
+  private final RPC.Server omRpcServer;
+  private final InetSocketAddress omRpcAddress;
+  private final OMMetadataManager metadataManager;
+  private final VolumeManager volumeManager;
+  private final BucketManager bucketManager;
+  private final KeyManager keyManager;
+  private final OMMetrics metrics;
+  private final OzoneManagerHttpServer httpServer;
+  private final OMStorage omStorage;
+  private final ScmBlockLocationProtocol scmBlockClient;
+  private final StorageContainerLocationProtocol scmContainerClient;
+  private ObjectName omInfoBeanName;
+
+  private OzoneManager(OzoneConfiguration conf) throws IOException {
+    Preconditions.checkNotNull(conf);
+    configuration = conf;
+    omStorage = new OMStorage(conf);
+    scmBlockClient = getScmBlockClient(configuration);
+    scmContainerClient = getScmContainerClient(configuration);
+    if (omStorage.getState() != StorageState.INITIALIZED) {
+      throw new OMException("OM not initialized.",
+          ResultCodes.OM_NOT_INITIALIZED);
+    }
+
+    // verifies that the SCM info in the OM Version file is correct.
+    ScmInfo scmInfo = scmBlockClient.getScmInfo();
+    if (!(scmInfo.getClusterId().equals(omStorage.getClusterID()) && scmInfo
+        .getScmId().equals(omStorage.getScmId()))) {
+      throw new OMException("SCM version info mismatch.",
+          ResultCodes.SCM_VERSION_MISMATCH_ERROR);
+    }
+    final int handlerCount = conf.getInt(OZONE_OM_HANDLER_COUNT_KEY,
+        OZONE_OM_HANDLER_COUNT_DEFAULT);
+
+    RPC.setProtocolEngine(configuration, OzoneManagerProtocolPB.class,
+        ProtobufRpcEngine.class);
+
+    BlockingService omService = newReflectiveBlockingService(
+        new OzoneManagerProtocolServerSideTranslatorPB(this));
+    final InetSocketAddress omNodeRpcAddr =
+        getOmAddress(configuration);
+    omRpcServer = startRpcServer(configuration, omNodeRpcAddr,
+        OzoneManagerProtocolPB.class, omService,
+        handlerCount);
+    omRpcAddress = updateRPCListenAddress(configuration,
+        OZONE_OM_ADDRESS_KEY, omNodeRpcAddr, omRpcServer);
+    metadataManager = new OmMetadataManagerImpl(configuration);
+    volumeManager = new VolumeManagerImpl(metadataManager, configuration);
+    bucketManager = new BucketManagerImpl(metadataManager);
+    metrics = OMMetrics.create();
+    keyManager =
+        new KeyManagerImpl(scmBlockClient, metadataManager, configuration,
+            omStorage.getOmId());
+    httpServer = new OzoneManagerHttpServer(configuration, this);
+  }
+
+  /**
+   * Create a scm block client, used by putKey() and getKey().
+   *
+   * @return {@link ScmBlockLocationProtocol}
+   * @throws IOException
+   */
+  private static ScmBlockLocationProtocol getScmBlockClient(
+      OzoneConfiguration conf) throws IOException {
+    RPC.setProtocolEngine(conf, ScmBlockLocationProtocolPB.class,
+        ProtobufRpcEngine.class);
+    long scmVersion =
+        RPC.getProtocolVersion(ScmBlockLocationProtocolPB.class);
+    InetSocketAddress scmBlockAddress =
+        getScmAddressForBlockClients(conf);
+    ScmBlockLocationProtocolClientSideTranslatorPB scmBlockLocationClient =
+        new ScmBlockLocationProtocolClientSideTranslatorPB(
+            RPC.getProxy(ScmBlockLocationProtocolPB.class, scmVersion,
+                scmBlockAddress, UserGroupInformation.getCurrentUser(), conf,
+                NetUtils.getDefaultSocketFactory(conf),
+                Client.getRpcTimeout(conf)));
+    return scmBlockLocationClient;
+  }
+
+  /**
+   * Returns a scm container client.
+   *
+   * @return {@link StorageContainerLocationProtocol}
+   * @throws IOException
+   */
+  private static StorageContainerLocationProtocol getScmContainerClient(
+      OzoneConfiguration conf) throws IOException {
+    RPC.setProtocolEngine(conf, StorageContainerLocationProtocolPB.class,
+        ProtobufRpcEngine.class);
+    long scmVersion =
+        RPC.getProtocolVersion(StorageContainerLocationProtocolPB.class);
+    InetSocketAddress scmAddr = getScmAddressForClients(
+        conf);
+    StorageContainerLocationProtocolClientSideTranslatorPB scmContainerClient =
+        new StorageContainerLocationProtocolClientSideTranslatorPB(
+            RPC.getProxy(StorageContainerLocationProtocolPB.class, scmVersion,
+                scmAddr, UserGroupInformation.getCurrentUser(), conf,
+                NetUtils.getDefaultSocketFactory(conf),
+                Client.getRpcTimeout(conf)));
+    return scmContainerClient;
+  }
+
+  @VisibleForTesting
+  public KeyManager getKeyManager() {
+    return keyManager;
+  }
+
+  @VisibleForTesting
+  public ScmInfo getScmInfo() throws IOException {
+    return scmBlockClient.getScmInfo();
+  }
+
+  @VisibleForTesting
+  public OMStorage getOmStorage() {
+    return omStorage;
+  }
+  /**
+   * Starts an RPC server, if configured.
+   *
+   * @param conf configuration
+   * @param addr configured address of RPC server
+   * @param protocol RPC protocol provided by RPC server
+   * @param instance RPC protocol implementation instance
+   * @param handlerCount RPC server handler count
+   *
+   * @return RPC server
+   * @throws IOException if there is an I/O error while creating RPC server
+   */
+  private static RPC.Server startRpcServer(OzoneConfiguration conf,
+      InetSocketAddress addr, Class<?> protocol, BlockingService instance,
+      int handlerCount) throws IOException {
+    RPC.Server rpcServer = new RPC.Builder(conf)
+        .setProtocol(protocol)
+        .setInstance(instance)
+        .setBindAddress(addr.getHostString())
+        .setPort(addr.getPort())
+        .setNumHandlers(handlerCount)
+        .setVerbose(false)
+        .setSecretManager(null)
+        .build();
+
+    DFSUtil.addPBProtocol(conf, protocol, instance, rpcServer);
+    return rpcServer;
+  }
+
+  /**
+   * Get metadata manager.
+   * @return metadata manager.
+   */
+  public OMMetadataManager getMetadataManager() {
+    return metadataManager;
+  }
+
+  public OMMetrics getMetrics() {
+    return metrics;
+  }
+
+  /**
+   * Main entry point for starting OzoneManager.
+   *
+   * @param argv arguments
+   * @throws IOException if startup fails due to I/O error
+   */
+  public static void main(String[] argv) throws IOException {
+    if (DFSUtil.parseHelpArgument(argv, USAGE, System.out, true)) {
+      System.exit(0);
+    }
+    try {
+      OzoneConfiguration conf = new OzoneConfiguration();
+      GenericOptionsParser hParser = new GenericOptionsParser(conf, argv);
+      if (!hParser.isParseSuccessful()) {
+        System.err.println("USAGE: " + USAGE + " \n");
+        hParser.printGenericCommandUsage(System.err);
+        System.exit(1);
+      }
+      StringUtils.startupShutdownMessage(OzoneManager.class, argv, LOG);
+      OzoneManager om = createOm(hParser.getRemainingArgs(), conf);
+      if (om != null) {
+        om.start();
+        om.join();
+      }
+    } catch (Throwable t) {
+      LOG.error("Failed to start the OzoneManager.", t);
+      terminate(1, t);
+    }
+  }
+
+  private static void printUsage(PrintStream out) {
+    out.println(USAGE + "\n");
+  }
+
+  /**
+   * Constructs OM instance based on command line arguments.
+   * @param argv Command line arguments
+   * @param conf OzoneConfiguration
+   * @return OM instance
+   * @throws IOException in case OM instance creation fails.
+   */
+
+  public static OzoneManager createOm(String[] argv,
+                                      OzoneConfiguration conf) throws IOException {
+    if (!isHddsEnabled(conf)) {
+      System.err.println("OM cannot be started in secure mode or when " +
+          OZONE_ENABLED + " is set to false");
+      System.exit(1);
+    }
+    StartupOption startOpt = parseArguments(argv);
+    if (startOpt == null) {
+      printUsage(System.err);
+      terminate(1);
+      return null;
+    }
+    switch (startOpt) {
+    case CREATEOBJECTSTORE:
+      terminate(omInit(conf) ? 0 : 1);
+      return null;
+    case HELP:
+      printUsage(System.err);
+      terminate(0);
+      return null;
+    default:
+      return new OzoneManager(conf);
+    }
+  }
+
+  /**
+   * Initializes the OM instance.
+   * @param conf OzoneConfiguration
+   * @return true if OM initialization succeeds, false otherwise
+   * @throws IOException in case ozone metadata directory path is not accessible
+   */
+
+  private static boolean omInit(OzoneConfiguration conf) throws IOException {
+    OMStorage omStorage = new OMStorage(conf);
+    StorageState state = omStorage.getState();
+    if (state != StorageState.INITIALIZED) {
+      try {
+        ScmBlockLocationProtocol scmBlockClient = getScmBlockClient(conf);
+        ScmInfo scmInfo = scmBlockClient.getScmInfo();
+        String clusterId = scmInfo.getClusterId();
+        String scmId = scmInfo.getScmId();
+        if (clusterId == null || clusterId.isEmpty()) {
+          throw new IOException("Invalid Cluster ID");
+        }
+        if (scmId == null || scmId.isEmpty()) {
+          throw new IOException("Invalid SCM ID");
+        }
+        omStorage.setClusterId(clusterId);
+        omStorage.setScmId(scmId);
+        omStorage.initialize();
+        System.out.println(
+            "OM initialization succeeded.Current cluster id for sd="
+                + omStorage.getStorageDir() + ";cid=" + omStorage
+                .getClusterID());
+        return true;
+      } catch (IOException ioe) {
+        LOG.error("Could not initialize OM version file", ioe);
+        return false;
+      }
+    } else {
+      System.out.println(
+          "OM already initialized.Reusing existing cluster id for sd="
+              + omStorage.getStorageDir() + ";cid=" + omStorage
+              .getClusterID());
+      return true;
+    }
+  }
+
+  /**
+   * Parses the command line options for OM initialization.
+   * @param args command line arguments
+   * @return StartupOption if options are valid, null otherwise
+   */
+  private static StartupOption parseArguments(String[] args) {
+    if (args == null || args.length == 0) {
+      return StartupOption.REGULAR;
+    } else if (args.length == 1) {
+      return StartupOption.parse(args[0]);
+    }
+    return null;
+  }
+
+  /**
+   * Builds a message for logging startup information about an RPC server.
+   *
+   * @param description RPC server description
+   * @param addr RPC server listening address
+   * @return server startup message
+   */
+  private static String buildRpcServerStartMessage(String description,
+      InetSocketAddress addr) {
+    return addr != null ? String.format("%s is listening at %s",
+        description, addr.toString()) :
+        String.format("%s not started", description);
+  }
+
+  /**
+   * Start service.
+   */
+  public void start() throws IOException {
+    LOG.info(buildRpcServerStartMessage("OzoneManager RPC server",
+        omRpcAddress));
+    DefaultMetricsSystem.initialize("OzoneManager");
+    metadataManager.start();
+    keyManager.start();
+    omRpcServer.start();
+    httpServer.start();
+    registerMXBean();
+    setStartTime();
+  }
+
+  /**
+   * Stop service.
+   */
+  public void stop() {
+    try {
+      metadataManager.stop();
+      omRpcServer.stop();
+      keyManager.stop();
+      httpServer.stop();
+      metrics.unRegister();
+      unregisterMXBean();
+    } catch (Exception e) {
+      LOG.error("OzoneManager stop failed.", e);
+    }
+  }
+
+  /**
+   * Wait until service has completed shutdown.
+   */
+  public void join() {
+    try {
+      omRpcServer.join();
+    } catch (InterruptedException e) {
+      Thread.currentThread().interrupt();
+      LOG.info("Interrupted during OzoneManager join.", e);
+    }
+  }
+
+  /**
+   * Creates a volume.
+   *
+   * @param args - Arguments to create Volume.
+   * @throws IOException
+   */
+  @Override
+  public void createVolume(OmVolumeArgs args) throws IOException {
+    try {
+      metrics.incNumVolumeCreates();
+      volumeManager.createVolume(args);
+    } catch (Exception ex) {
+      metrics.incNumVolumeCreateFails();
+      throw ex;
+    }
+  }
+
+  /**
+   * Changes the owner of a volume.
+   *
+   * @param volume - Name of the volume.
+   * @param owner - Name of the owner.
+   * @throws IOException
+   */
+  @Override
+  public void setOwner(String volume, String owner) throws IOException {
+    try {
+      metrics.incNumVolumeUpdates();
+      volumeManager.setOwner(volume, owner);
+    } catch (Exception ex) {
+      metrics.incNumVolumeUpdateFails();
+      throw ex;
+    }
+  }
+
+  /**
+   * Changes the Quota on a volume.
+   *
+   * @param volume - Name of the volume.
+   * @param quota - Quota in bytes.
+   * @throws IOException
+   */
+  @Override
+  public void setQuota(String volume, long quota) throws IOException {
+    try {
+      metrics.incNumVolumeUpdates();
+      volumeManager.setQuota(volume, quota);
+    } catch (Exception ex) {
+      metrics.incNumVolumeUpdateFails();
+      throw ex;
+    }
+  }
+
+  /**
+   * Checks if the specified user can access this volume.
+   *
+   * @param volume - volume
+   * @param userAcl - user acls which needs to be checked for access
+   * @return true if the user has required access for the volume,
+   *         false otherwise
+   * @throws IOException
+   */
+  @Override
+  public boolean checkVolumeAccess(String volume, OzoneAclInfo userAcl)
+      throws IOException {
+    try {
+      metrics.incNumVolumeCheckAccesses();
+      return volumeManager.checkVolumeAccess(volume, userAcl);
+    } catch (Exception ex) {
+      metrics.incNumVolumeCheckAccessFails();
+      throw ex;
+    }
+  }
+
+  /**
+   * Gets the volume information.
+   *
+   * @param volume - Volume name.
+   * @return VolumeArgs or exception is thrown.
+   * @throws IOException
+   */
+  @Override
+  public OmVolumeArgs getVolumeInfo(String volume) throws IOException {
+    try {
+      metrics.incNumVolumeInfos();
+      return volumeManager.getVolumeInfo(volume);
+    } catch (Exception ex) {
+      metrics.incNumVolumeInfoFails();
+      throw ex;
+    }
+  }
+
+  /**
+   * Deletes an existing empty volume.
+   *
+   * @param volume - Name of the volume.
+   * @throws IOException
+   */
+  @Override
+  public void deleteVolume(String volume) throws IOException {
+    try {
+      metrics.incNumVolumeDeletes();
+      volumeManager.deleteVolume(volume);
+    } catch (Exception ex) {
+      metrics.incNumVolumeDeleteFails();
+      throw ex;
+    }
+  }
+
+  /**
+   * Lists volume owned by a specific user.
+   *
+   * @param userName - user name
+   * @param prefix - Filter prefix -- Return only entries that match this.
+   * @param prevKey - Previous key -- List starts from the next from the
+   * prevkey
+   * @param maxKeys - Max number of keys to return.
+   * @return List of Volumes.
+   * @throws IOException
+   */
+  @Override
+  public List<OmVolumeArgs> listVolumeByUser(String userName, String prefix,
+                                             String prevKey, int maxKeys) throws IOException {
+    try {
+      metrics.incNumVolumeLists();
+      return volumeManager.listVolumes(userName, prefix, prevKey, maxKeys);
+    } catch (Exception ex) {
+      metrics.incNumVolumeListFails();
+      throw ex;
+    }
+  }
+
+  /**
+   * Lists volume all volumes in the cluster.
+   *
+   * @param prefix - Filter prefix -- Return only entries that match this.
+   * @param prevKey - Previous key -- List starts from the next from the
+   * prevkey
+   * @param maxKeys - Max number of keys to return.
+   * @return List of Volumes.
+   * @throws IOException
+   */
+  @Override
+  public List<OmVolumeArgs> listAllVolumes(String prefix, String prevKey, int
+      maxKeys) throws IOException {
+    try {
+      metrics.incNumVolumeLists();
+      return volumeManager.listVolumes(null, prefix, prevKey, maxKeys);
+    } catch (Exception ex) {
+      metrics.incNumVolumeListFails();
+      throw ex;
+    }
+  }
+
+  /**
+   * Creates a bucket.
+   *
+   * @param bucketInfo - BucketInfo to create bucket.
+   * @throws IOException
+   */
+  @Override
+  public void createBucket(OmBucketInfo bucketInfo) throws IOException {
+    try {
+      metrics.incNumBucketCreates();
+      bucketManager.createBucket(bucketInfo);
+    } catch (Exception ex) {
+      metrics.incNumBucketCreateFails();
+      throw ex;
+    }
+  }
+
+  /**
+   * {@inheritDoc}
+   */
+  @Override
+  public List<OmBucketInfo> listBuckets(String volumeName,
+                                        String startKey, String prefix, int maxNumOfBuckets)
+      throws IOException {
+    try {
+      metrics.incNumBucketLists();
+      return bucketManager.listBuckets(volumeName,
+          startKey, prefix, maxNumOfBuckets);
+    } catch (IOException ex) {
+      metrics.incNumBucketListFails();
+      throw ex;
+    }
+  }
+
+  /**
+   * Gets the bucket information.
+   *
+   * @param volume - Volume name.
+   * @param bucket - Bucket name.
+   * @return OmBucketInfo or exception is thrown.
+   * @throws IOException
+   */
+  @Override
+  public OmBucketInfo getBucketInfo(String volume, String bucket)
+      throws IOException {
+    try {
+      metrics.incNumBucketInfos();
+      return bucketManager.getBucketInfo(volume, bucket);
+    } catch (Exception ex) {
+      metrics.incNumBucketInfoFails();
+      throw ex;
+    }
+  }
+
+  /**
+   * Allocate a key.
+   *
+   * @param args - attributes of the key.
+   * @return OmKeyInfo - the info about the allocated key.
+   * @throws IOException
+   */
+  @Override
+  public OpenKeySession openKey(OmKeyArgs args) throws IOException {
+    try {
+      metrics.incNumKeyAllocates();
+      return keyManager.openKey(args);
+    } catch (Exception ex) {
+      metrics.incNumKeyAllocateFails();
+      throw ex;
+    }
+  }
+
+  @Override
+  public void commitKey(OmKeyArgs args, int clientID)
+      throws IOException {
+    try {
+      metrics.incNumKeyCommits();
+      keyManager.commitKey(args, clientID);
+    } catch (Exception ex) {
+      metrics.incNumKeyCommitFails();
+      throw ex;
+    }
+  }
+
+  @Override
+  public OmKeyLocationInfo allocateBlock(OmKeyArgs args, int clientID)
+      throws IOException {
+    try {
+      metrics.incNumBlockAllocateCalls();
+      return keyManager.allocateBlock(args, clientID);
+    } catch (Exception ex) {
+      metrics.incNumBlockAllocateCallFails();
+      throw ex;
+    }
+  }
+
+  /**
+   * Lookup a key.
+   *
+   * @param args - attributes of the key.
+   * @return OmKeyInfo - the info about the requested key.
+   * @throws IOException
+   */
+  @Override
+  public OmKeyInfo lookupKey(OmKeyArgs args) throws IOException {
+    try {
+      metrics.incNumKeyLookups();
+      return keyManager.lookupKey(args);
+    } catch (Exception ex) {
+      metrics.incNumKeyLookupFails();
+      throw ex;
+    }
+  }
+
+  @Override
+  public void renameKey(OmKeyArgs args, String toKeyName) throws IOException {
+    try {
+      metrics.incNumKeyRenames();
+      keyManager.renameKey(args, toKeyName);
+    } catch (IOException e) {
+      metrics.incNumKeyRenameFails();
+      throw e;
+    }
+  }
+
+  /**
+   * Deletes an existing key.
+   *
+   * @param args - attributes of the key.
+   * @throws IOException
+   */
+  @Override
+  public void deleteKey(OmKeyArgs args) throws IOException {
+    try {
+      metrics.incNumKeyDeletes();
+      keyManager.deleteKey(args);
+    } catch (Exception ex) {
+      metrics.incNumKeyDeleteFails();
+      throw ex;
+    }
+  }
+
+  @Override
+  public List<OmKeyInfo> listKeys(String volumeName, String bucketName,
+                                  String startKey, String keyPrefix, int maxKeys) throws IOException {
+    try {
+      metrics.incNumKeyLists();
+      return keyManager.listKeys(volumeName, bucketName,
+          startKey, keyPrefix, maxKeys);
+    } catch (IOException ex) {
+      metrics.incNumKeyListFails();
+      throw ex;
+    }
+  }
+
+  /**
+   * Sets bucket property from args.
+   * @param args - BucketArgs.
+   * @throws IOException
+   */
+  @Override
+  public void setBucketProperty(OmBucketArgs args)
+      throws IOException {
+    try {
+      metrics.incNumBucketUpdates();
+      bucketManager.setBucketProperty(args);
+    } catch (Exception ex) {
+      metrics.incNumBucketUpdateFails();
+      throw ex;
+    }
+  }
+
+
+  /**
+   * Deletes an existing empty bucket from volume.
+   * @param volume - Name of the volume.
+   * @param bucket - Name of the bucket.
+   * @throws IOException
+   */
+  public void deleteBucket(String volume, String bucket) throws IOException {
+    try {
+      metrics.incNumBucketDeletes();
+      bucketManager.deleteBucket(volume, bucket);
+    } catch (Exception ex) {
+      metrics.incNumBucketDeleteFails();
+      throw ex;
+    }
+  }
+
+  private void registerMXBean() {
+    Map<String, String> jmxProperties = new HashMap<String, String>();
+    jmxProperties.put("component", "ServerRuntime");
+    this.omInfoBeanName =
+        MBeans.register("OzoneManager",
+            "OzoneManagerInfo",
+            jmxProperties,
+            this);
+  }
+
+  private void unregisterMXBean() {
+    if (this.omInfoBeanName != null) {
+      MBeans.unregister(this.omInfoBeanName);
+      this.omInfoBeanName = null;
+    }
+  }
+
+  @Override
+  public String getRpcPort() {
+    return "" + omRpcAddress.getPort();
+  }
+
+  @VisibleForTesting
+  public OzoneManagerHttpServer getHttpServer() {
+    return httpServer;
+  }
+
+  @Override
+  public List<ServiceInfo> getServiceList() throws IOException {
+    // When we implement multi-home this call has to be handled properly.
+    List<ServiceInfo> services = new ArrayList<>();
+    ServiceInfo.Builder omServiceInfoBuilder = ServiceInfo.newBuilder()
+        .setNodeType(HddsProtos.NodeType.OM)
+        .setHostname(omRpcAddress.getHostName())
+        .addServicePort(ServicePort.newBuilder()
+                .setType(ServicePort.Type.RPC)
+                .setValue(omRpcAddress.getPort())
+            .build());
+    if (httpServer.getHttpAddress() != null) {
+      omServiceInfoBuilder.addServicePort(ServicePort.newBuilder()
+          .setType(ServicePort.Type.HTTP)
+          .setValue(httpServer.getHttpAddress().getPort())
+          .build());
+    }
+    if (httpServer.getHttpsAddress() != null) {
+      omServiceInfoBuilder.addServicePort(ServicePort.newBuilder()
+          .setType(ServicePort.Type.HTTPS)
+          .setValue(httpServer.getHttpsAddress().getPort())
+          .build());
+    }
+    services.add(omServiceInfoBuilder.build());
+
+    // For client we have to return SCM with container protocol port,
+    // not block protocol.
+    InetSocketAddress scmAddr = getScmAddressForClients(
+        configuration);
+    ServiceInfo.Builder scmServiceInfoBuilder = ServiceInfo.newBuilder()
+        .setNodeType(HddsProtos.NodeType.SCM)
+        .setHostname(scmAddr.getHostName())
+        .addServicePort(ServicePort.newBuilder()
+            .setType(ServicePort.Type.RPC)
+            .setValue(scmAddr.getPort()).build());
+    services.add(scmServiceInfoBuilder.build());
+
+    List<HddsProtos.Node> nodes = scmContainerClient.queryNode(HEALTHY,
+        HddsProtos.QueryScope.CLUSTER, "");
+
+    for (HddsProtos.Node node : nodes) {
+      HddsProtos.DatanodeDetailsProto datanode = node.getNodeID();
+
+      ServiceInfo.Builder dnServiceInfoBuilder = ServiceInfo.newBuilder()
+          .setNodeType(HddsProtos.NodeType.DATANODE)
+          .setHostname(datanode.getHostName());
+
+      dnServiceInfoBuilder.addServicePort(ServicePort.newBuilder()
+          .setType(ServicePort.Type.HTTP)
+          .setValue(DatanodeDetails.getFromProtoBuf(datanode)
+              .getPort(DatanodeDetails.Port.Name.REST).getValue())
+          .build());
+
+      services.add(dnServiceInfoBuilder.build());
+    }
+
+    metrics.incNumGetServiceLists();
+    // For now there is no exception that can can happen in this call,
+    // so failure metrics is not handled. In future if there is any need to
+    // handle exception in this method, we need to incorporate
+    // metrics.incNumGetServiceListFails()
+    return services;
+  }
+}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/061b1685/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OzoneManagerHttpServer.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OzoneManagerHttpServer.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OzoneManagerHttpServer.java
new file mode 100644
index 0000000..bd6ab69
--- /dev/null
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OzoneManagerHttpServer.java
@@ -0,0 +1,78 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with this
+ * work for additional information regarding copyright ownership.  The ASF
+ * licenses this file to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations under
+ * the License.
+ */
+
+package org.apache.hadoop.ozone.om;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.ozone.OzoneConfigKeys;
+import org.apache.hadoop.ozone.OzoneConsts;
+import org.apache.hadoop.hdds.server.BaseHttpServer;
+
+import java.io.IOException;
+
+/**
+ * HttpServer wrapper for the OzoneManager.
+ */
+public class OzoneManagerHttpServer extends BaseHttpServer {
+
+  public OzoneManagerHttpServer(Configuration conf, OzoneManager om)
+      throws IOException {
+    super(conf, "ozoneManager");
+    addServlet("serviceList", "/serviceList", ServiceListJSONServlet.class);
+    getWebAppContext().setAttribute(OzoneConsts.OM_CONTEXT_ATTRIBUTE, om);
+  }
+
+  @Override protected String getHttpAddressKey() {
+    return OMConfigKeys.OZONE_OM_HTTP_ADDRESS_KEY;
+  }
+
+  @Override protected String getHttpBindHostKey() {
+    return OMConfigKeys.OZONE_OM_HTTP_BIND_HOST_KEY;
+  }
+
+  @Override protected String getHttpsAddressKey() {
+    return OMConfigKeys.OZONE_OM_HTTPS_ADDRESS_KEY;
+  }
+
+  @Override protected String getHttpsBindHostKey() {
+    return OMConfigKeys.OZONE_OM_HTTPS_BIND_HOST_KEY;
+  }
+
+  @Override protected String getBindHostDefault() {
+    return OMConfigKeys.OZONE_OM_HTTP_BIND_HOST_DEFAULT;
+  }
+
+  @Override protected int getHttpBindPortDefault() {
+    return OMConfigKeys.OZONE_OM_HTTP_BIND_PORT_DEFAULT;
+  }
+
+  @Override protected int getHttpsBindPortDefault() {
+    return OMConfigKeys.OZONE_OM_HTTPS_BIND_PORT_DEFAULT;
+  }
+
+  @Override protected String getKeytabFile() {
+    return OMConfigKeys.OZONE_OM_KEYTAB_FILE;
+  }
+
+  @Override protected String getSpnegoPrincipal() {
+    return OzoneConfigKeys.OZONE_SCM_WEB_AUTHENTICATION_KERBEROS_PRINCIPAL;
+  }
+
+  @Override protected String getEnabledKey() {
+    return OMConfigKeys.OZONE_OM_HTTP_ENABLED_KEY;
+  }
+}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/061b1685/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ServiceListJSONServlet.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ServiceListJSONServlet.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ServiceListJSONServlet.java
new file mode 100644
index 0000000..47713e2
--- /dev/null
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ServiceListJSONServlet.java
@@ -0,0 +1,103 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.ozone.om;
+
+import com.fasterxml.jackson.databind.ObjectMapper;
+import com.fasterxml.jackson.databind.SerializationFeature;
+
+import org.apache.hadoop.ozone.OzoneConsts;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import javax.servlet.ServletException;
+import javax.servlet.http.HttpServlet;
+import javax.servlet.http.HttpServletRequest;
+import javax.servlet.http.HttpServletResponse;
+import java.io.IOException;
+import java.io.PrintWriter;
+
+
+/**
+ * Provides REST access to Ozone Service List.
+ * <p>
+ * This servlet generally will be placed under the /serviceList URL of
+ * OzoneManager HttpServer.
+ *
+ * The return format is of JSON and in the form
+ * <p>
+ *  <code><pre>
+ *  {
+ *    "services" : [
+ *      {
+ *        "NodeType":"OM",
+ *        "Hostname" "$hostname",
+ *        "ports" : {
+ *          "$PortType" : "$port",
+ *          ...
+ *        }
+ *      }
+ *    ]
+ *  }
+ *  </pre></code>
+ *  <p>
+ *
+ */
+public class ServiceListJSONServlet  extends HttpServlet  {
+
+  private static final Logger LOG =
+      LoggerFactory.getLogger(ServiceListJSONServlet.class);
+  private static final long serialVersionUID = 1L;
+
+  private transient OzoneManager om;
+
+  public void init() throws ServletException {
+    this.om = (OzoneManager) getServletContext()
+        .getAttribute(OzoneConsts.OM_CONTEXT_ATTRIBUTE);
+  }
+
+  /**
+   * Process a GET request for the specified resource.
+   *
+   * @param request
+   *          The servlet request we are processing
+   * @param response
+   *          The servlet response we are creating
+   */
+  @Override
+  public void doGet(HttpServletRequest request, HttpServletResponse response) {
+    try {
+      ObjectMapper objectMapper = new ObjectMapper();
+      objectMapper.enable(SerializationFeature.INDENT_OUTPUT);
+      response.setContentType("application/json; charset=utf8");
+      PrintWriter writer = response.getWriter();
+      try {
+        writer.write(objectMapper.writeValueAsString(om.getServiceList()));
+      } finally {
+        if (writer != null) {
+          writer.close();
+        }
+      }
+    } catch (IOException e) {
+      LOG.error(
+          "Caught an exception while processing ServiceList request", e);
+      response.setStatus(HttpServletResponse.SC_INTERNAL_SERVER_ERROR);
+    }
+  }
+
+}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/061b1685/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/VolumeManager.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/VolumeManager.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/VolumeManager.java
new file mode 100644
index 0000000..8475dd9
--- /dev/null
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/VolumeManager.java
@@ -0,0 +1,100 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with this
+ * work for additional information regarding copyright ownership.  The ASF
+ * licenses this file to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations under
+ * the License.
+ */
+package org.apache.hadoop.ozone.om;
+
+import org.apache.hadoop.ozone.om.helpers.OmVolumeArgs;
+import org.apache.hadoop.ozone.protocol.proto
+    .OzoneManagerProtocolProtos.OzoneAclInfo;
+
+import java.io.IOException;
+import java.util.List;
+
+/**
+ * OM volume manager interface.
+ */
+public interface VolumeManager {
+
+  /**
+   * Create a new volume.
+   * @param args - Volume args to create a volume
+   */
+  void createVolume(OmVolumeArgs args) throws IOException;
+
+  /**
+   * Changes the owner of a volume.
+   *
+   * @param volume - Name of the volume.
+   * @param owner - Name of the owner.
+   * @throws IOException
+   */
+  void setOwner(String volume, String owner) throws IOException;
+
+  /**
+   * Changes the Quota on a volume.
+   *
+   * @param volume - Name of the volume.
+   * @param quota - Quota in bytes.
+   * @throws IOException
+   */
+  void setQuota(String volume, long quota) throws IOException;
+
+  /**
+   * Gets the volume information.
+   * @param volume - Volume name.
+   * @return VolumeArgs or exception is thrown.
+   * @throws IOException
+   */
+  OmVolumeArgs getVolumeInfo(String volume) throws IOException;
+
+  /**
+   * Deletes an existing empty volume.
+   *
+   * @param volume - Name of the volume.
+   * @throws IOException
+   */
+  void deleteVolume(String volume) throws IOException;
+
+  /**
+   * Checks if the specified user with a role can access this volume.
+   *
+   * @param volume - volume
+   * @param userAcl - user acl which needs to be checked for access
+   * @return true if the user has access for the volume, false otherwise
+   * @throws IOException
+   */
+  boolean checkVolumeAccess(String volume, OzoneAclInfo userAcl)
+      throws IOException;
+
+  /**
+   * Returns a list of volumes owned by a given user; if user is null,
+   * returns all volumes.
+   *
+   * @param userName
+   *   volume owner
+   * @param prefix
+   *   the volume prefix used to filter the listing result.
+   * @param startKey
+   *   the start volume name determines where to start listing from,
+   *   this key is excluded from the result.
+   * @param maxKeys
+   *   the maximum number of volumes to return.
+   * @return a list of {@link OmVolumeArgs}
+   * @throws IOException
+   */
+  List<OmVolumeArgs> listVolumes(String userName, String prefix,
+                                 String startKey, int maxKeys) throws IOException;
+}


---------------------------------------------------------------------
To unsubscribe, e-mail: common-commits-unsubscribe@hadoop.apache.org
For additional commands, e-mail: common-commits-help@hadoop.apache.org


[32/50] [abbrv] hadoop git commit: HDDS-167. Rename KeySpaceManager to OzoneManager. Contributed by Arpit Agarwal.

Posted by vi...@apache.org.
http://git-wip-us.apache.org/repos/asf/hadoop/blob/061b1685/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/ksm/KeyManager.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/ksm/KeyManager.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/ksm/KeyManager.java
deleted file mode 100644
index 5ec1db8..0000000
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/ksm/KeyManager.java
+++ /dev/null
@@ -1,175 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with this
- * work for additional information regarding copyright ownership.  The ASF
- * licenses this file to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations under
- * the License.
- */
-package org.apache.hadoop.ozone.ksm;
-
-import org.apache.hadoop.ozone.common.BlockGroup;
-import org.apache.hadoop.ozone.ksm.helpers.KsmKeyArgs;
-import org.apache.hadoop.ozone.ksm.helpers.KsmKeyInfo;
-import org.apache.hadoop.ozone.ksm.helpers.KsmKeyLocationInfo;
-import org.apache.hadoop.ozone.ksm.helpers.OpenKeySession;
-
-import java.io.IOException;
-import java.util.List;
-
-/**
- * Handles key level commands.
- */
-public interface KeyManager {
-
-  /**
-   * Start key manager.
-   */
-  void start();
-
-  /**
-   * Stop key manager.
-   */
-  void stop() throws IOException;
-
-  /**
-   * After calling commit, the key will be made visible. There can be multiple
-   * open key writes in parallel (identified by client id). The most recently
-   * committed one will be the one visible.
-   *
-   * @param args the key to commit.
-   * @param clientID the client that is committing.
-   * @throws IOException
-   */
-  void commitKey(KsmKeyArgs args, int clientID) throws IOException;
-
-  /**
-   * A client calls this on an open key, to request to allocate a new block,
-   * and appended to the tail of current block list of the open client.
-   *
-   * @param args the key to append
-   * @param clientID the client requesting block.
-   * @return the reference to the new block.
-   * @throws IOException
-   */
-  KsmKeyLocationInfo allocateBlock(KsmKeyArgs args, int clientID)
-      throws IOException;
-  /**
-   * Given the args of a key to put, write an open key entry to meta data.
-   *
-   * In case that the container creation or key write failed on
-   * DistributedStorageHandler, this key's metadata will still stay in KSM.
-   * TODO garbage collect the open keys that never get closed
-   *
-   * @param args the args of the key provided by client.
-   * @return a OpenKeySession instance client uses to talk to container.
-   * @throws Exception
-   */
-  OpenKeySession openKey(KsmKeyArgs args) throws IOException;
-
-  /**
-   * Look up an existing key. Return the info of the key to client side, which
-   * DistributedStorageHandler will use to access the data on datanode.
-   *
-   * @param args the args of the key provided by client.
-   * @return a KsmKeyInfo instance client uses to talk to container.
-   * @throws IOException
-   */
-  KsmKeyInfo lookupKey(KsmKeyArgs args) throws IOException;
-
-  /**
-   * Renames an existing key within a bucket.
-   *
-   * @param args the args of the key provided by client.
-   * @param toKeyName New name to be used for the key
-   * @throws IOException if specified key doesn't exist or
-   * some other I/O errors while renaming the key.
-   */
-  void renameKey(KsmKeyArgs args, String toKeyName) throws IOException;
-
-  /**
-   * Deletes an object by an object key. The key will be immediately removed
-   * from KSM namespace and become invisible to clients. The object data
-   * will be removed in async manner that might retain for some time.
-   *
-   * @param args the args of the key provided by client.
-   * @throws IOException if specified key doesn't exist or
-   * some other I/O errors while deleting an object.
-   */
-  void deleteKey(KsmKeyArgs args) throws IOException;
-
-  /**
-   * Returns a list of keys represented by {@link KsmKeyInfo}
-   * in the given bucket.
-   *
-   * @param volumeName
-   *   the name of the volume.
-   * @param bucketName
-   *   the name of the bucket.
-   * @param startKey
-   *   the start key name, only the keys whose name is
-   *   after this value will be included in the result.
-   *   This key is excluded from the result.
-   * @param keyPrefix
-   *   key name prefix, only the keys whose name has
-   *   this prefix will be included in the result.
-   * @param maxKeys
-   *   the maximum number of keys to return. It ensures
-   *   the size of the result will not exceed this limit.
-   * @return a list of keys.
-   * @throws IOException
-   */
-  List<KsmKeyInfo> listKeys(String volumeName,
-      String bucketName, String startKey, String keyPrefix, int maxKeys)
-      throws IOException;
-
-  /**
-   * Returns a list of pending deletion key info that ups to the given count.
-   * Each entry is a {@link BlockGroup}, which contains the info about the
-   * key name and all its associated block IDs. A pending deletion key is
-   * stored with #deleting# prefix in KSM DB.
-   *
-   * @param count max number of keys to return.
-   * @return a list of {@link BlockGroup} representing keys and blocks.
-   * @throws IOException
-   */
-  List<BlockGroup> getPendingDeletionKeys(int count) throws IOException;
-
-  /**
-   * Deletes a pending deletion key by its name. This is often called when
-   * key can be safely deleted from this layer. Once called, all footprints
-   * of the key will be purged from KSM DB.
-   *
-   * @param objectKeyName object key name with #deleting# prefix.
-   * @throws IOException if specified key doesn't exist or other I/O errors.
-   */
-  void deletePendingDeletionKey(String objectKeyName) throws IOException;
-
-  /**
-   * Returns a list of all still open key info. Which contains the info about
-   * the key name and all its associated block IDs. A pending open key has
-   * prefix #open# in KSM DB.
-   *
-   * @return a list of {@link BlockGroup} representing keys and blocks.
-   * @throws IOException
-   */
-  List<BlockGroup> getExpiredOpenKeys() throws IOException;
-
-  /**
-   * Deletes a expired open key by its name. Called when a hanging key has been
-   * lingering for too long. Once called, the open key entries gets removed
-   * from KSM mdata data.
-   *
-   * @param objectKeyName object key name with #open# prefix.
-   * @throws IOException if specified key doesn't exist or other I/O errors.
-   */
-  void deleteExpiredOpenKey(String objectKeyName) throws IOException;
-}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/061b1685/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/ksm/KeyManagerImpl.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/ksm/KeyManagerImpl.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/ksm/KeyManagerImpl.java
deleted file mode 100644
index 0d4cfda..0000000
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/ksm/KeyManagerImpl.java
+++ /dev/null
@@ -1,566 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with this
- * work for additional information regarding copyright ownership.  The ASF
- * licenses this file to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations under
- * the License.
- */
-package org.apache.hadoop.ozone.ksm;
-
-import com.google.common.annotations.VisibleForTesting;
-import com.google.common.base.Preconditions;
-import org.apache.hadoop.ozone.OzoneConsts;
-import org.apache.hadoop.hdfs.DFSUtil;
-import org.apache.hadoop.ozone.ksm.helpers.KsmKeyArgs;
-import org.apache.hadoop.ozone.common.BlockGroup;
-import org.apache.hadoop.ozone.ksm.helpers.KsmKeyInfo;
-import org.apache.hadoop.ozone.ksm.helpers.KsmKeyLocationInfo;
-import org.apache.hadoop.hdds.conf.OzoneConfiguration;
-import org.apache.hadoop.ozone.ksm.exceptions.KSMException;
-import org.apache.hadoop.ozone.ksm.exceptions.KSMException.ResultCodes;
-import org.apache.hadoop.ozone.ksm.helpers.KsmKeyLocationInfoGroup;
-import org.apache.hadoop.ozone.ksm.helpers.OpenKeySession;
-import org.apache.hadoop.ozone.protocol.proto
-    .KeySpaceManagerProtocolProtos.KeyInfo;
-import org.apache.hadoop.hdds.scm.container.common.helpers.AllocatedBlock;
-import org.apache.hadoop.hdds.scm.protocol.ScmBlockLocationProtocol;
-import org.apache.hadoop.util.Time;
-import org.apache.hadoop.utils.BackgroundService;
-import org.apache.hadoop.utils.BatchOperation;
-import org.iq80.leveldb.DBException;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import java.io.IOException;
-import java.util.ArrayList;
-import java.util.Collections;
-import java.util.List;
-import java.util.Random;
-import java.util.concurrent.TimeUnit;
-
-import static org.apache.hadoop.ozone
-    .OzoneConfigKeys.DFS_CONTAINER_RATIS_ENABLED_DEFAULT;
-import static org.apache.hadoop.ozone
-    .OzoneConfigKeys.DFS_CONTAINER_RATIS_ENABLED_KEY;
-import static org.apache.hadoop.ozone
-    .OzoneConfigKeys.OZONE_BLOCK_DELETING_SERVICE_INTERVAL;
-import static org.apache.hadoop.ozone
-    .OzoneConfigKeys.OZONE_BLOCK_DELETING_SERVICE_INTERVAL_DEFAULT;
-import static org.apache.hadoop.ozone
-    .OzoneConfigKeys.OZONE_BLOCK_DELETING_SERVICE_TIMEOUT;
-import static org.apache.hadoop.ozone
-    .OzoneConfigKeys.OZONE_BLOCK_DELETING_SERVICE_TIMEOUT_DEFAULT;
-import static org.apache.hadoop.ozone
-    .OzoneConfigKeys.OZONE_KEY_PREALLOCATION_MAXSIZE;
-import static org.apache.hadoop.ozone
-    .OzoneConfigKeys.OZONE_KEY_PREALLOCATION_MAXSIZE_DEFAULT;
-import static org.apache.hadoop.ozone.OzoneConfigKeys
-    .OZONE_OPEN_KEY_CLEANUP_SERVICE_INTERVAL_SECONDS;
-import static org.apache.hadoop.ozone.OzoneConfigKeys
-    .OZONE_OPEN_KEY_CLEANUP_SERVICE_INTERVAL_SECONDS_DEFAULT;
-import static org.apache.hadoop.ozone
-    .OzoneConfigKeys.OZONE_SCM_BLOCK_SIZE_DEFAULT;
-import static org.apache.hadoop.ozone
-    .OzoneConfigKeys.OZONE_SCM_BLOCK_SIZE_IN_MB;
-import org.apache.hadoop.hdds.protocol
-    .proto.HddsProtos.ReplicationType;
-import org.apache.hadoop.hdds.protocol
-    .proto.HddsProtos.ReplicationFactor;
-
-
-/**
- * Implementation of keyManager.
- */
-public class KeyManagerImpl implements KeyManager {
-  private static final Logger LOG =
-      LoggerFactory.getLogger(KeyManagerImpl.class);
-
-  /**
-   * A SCM block client, used to talk to SCM to allocate block during putKey.
-   */
-  private final ScmBlockLocationProtocol scmBlockClient;
-  private final KSMMetadataManager metadataManager;
-  private final long scmBlockSize;
-  private final boolean useRatis;
-  private final BackgroundService keyDeletingService;
-  private final BackgroundService openKeyCleanupService;
-
-  private final long preallocateMax;
-  private final Random random;
-  private final String ksmId;
-
-  public KeyManagerImpl(ScmBlockLocationProtocol scmBlockClient,
-      KSMMetadataManager metadataManager, OzoneConfiguration conf,
-      String ksmId) {
-    this.scmBlockClient = scmBlockClient;
-    this.metadataManager = metadataManager;
-    this.scmBlockSize = conf.getLong(OZONE_SCM_BLOCK_SIZE_IN_MB,
-        OZONE_SCM_BLOCK_SIZE_DEFAULT) * OzoneConsts.MB;
-    this.useRatis = conf.getBoolean(DFS_CONTAINER_RATIS_ENABLED_KEY,
-        DFS_CONTAINER_RATIS_ENABLED_DEFAULT);
-    long  blockDeleteInterval = conf.getTimeDuration(
-        OZONE_BLOCK_DELETING_SERVICE_INTERVAL,
-        OZONE_BLOCK_DELETING_SERVICE_INTERVAL_DEFAULT,
-        TimeUnit.MILLISECONDS);
-    long serviceTimeout = conf.getTimeDuration(
-        OZONE_BLOCK_DELETING_SERVICE_TIMEOUT,
-        OZONE_BLOCK_DELETING_SERVICE_TIMEOUT_DEFAULT,
-        TimeUnit.MILLISECONDS);
-    this.preallocateMax = conf.getLong(
-        OZONE_KEY_PREALLOCATION_MAXSIZE,
-        OZONE_KEY_PREALLOCATION_MAXSIZE_DEFAULT);
-    keyDeletingService = new KeyDeletingService(
-        scmBlockClient, this, blockDeleteInterval, serviceTimeout, conf);
-    int openkeyCheckInterval = conf.getInt(
-        OZONE_OPEN_KEY_CLEANUP_SERVICE_INTERVAL_SECONDS,
-        OZONE_OPEN_KEY_CLEANUP_SERVICE_INTERVAL_SECONDS_DEFAULT);
-    openKeyCleanupService = new OpenKeyCleanupService(
-        scmBlockClient, this, openkeyCheckInterval, serviceTimeout);
-    random = new Random();
-    this.ksmId = ksmId;
-  }
-
-  @VisibleForTesting
-  public BackgroundService getOpenKeyCleanupService() {
-    return openKeyCleanupService;
-  }
-
-  @Override
-  public void start() {
-    keyDeletingService.start();
-    openKeyCleanupService.start();
-  }
-
-  @Override
-  public void stop() throws IOException {
-    keyDeletingService.shutdown();
-    openKeyCleanupService.shutdown();
-  }
-
-  private void validateBucket(String volumeName, String bucketName)
-      throws IOException {
-    byte[] volumeKey = metadataManager.getVolumeKey(volumeName);
-    byte[] bucketKey = metadataManager.getBucketKey(volumeName, bucketName);
-
-    //Check if the volume exists
-    if(metadataManager.get(volumeKey) == null) {
-      LOG.error("volume not found: {}", volumeName);
-      throw new KSMException("Volume not found",
-          KSMException.ResultCodes.FAILED_VOLUME_NOT_FOUND);
-    }
-    //Check if bucket already exists
-    if(metadataManager.get(bucketKey) == null) {
-      LOG.error("bucket not found: {}/{} ", volumeName, bucketName);
-      throw new KSMException("Bucket not found",
-          KSMException.ResultCodes.FAILED_BUCKET_NOT_FOUND);
-    }
-  }
-
-  @Override
-  public KsmKeyLocationInfo allocateBlock(KsmKeyArgs args, int clientID)
-      throws IOException {
-    Preconditions.checkNotNull(args);
-    metadataManager.writeLock().lock();
-    String volumeName = args.getVolumeName();
-    String bucketName = args.getBucketName();
-    String keyName = args.getKeyName();
-
-    try {
-      validateBucket(volumeName, bucketName);
-      String objectKey = metadataManager.getKeyWithDBPrefix(
-          volumeName, bucketName, keyName);
-      byte[] openKey = metadataManager.getOpenKeyNameBytes(objectKey, clientID);
-      byte[] keyData = metadataManager.get(openKey);
-      if (keyData == null) {
-        LOG.error("Allocate block for a key not in open status in meta store " +
-            objectKey + " with ID " + clientID);
-        throw new KSMException("Open Key not found",
-            KSMException.ResultCodes.FAILED_KEY_NOT_FOUND);
-      }
-      KsmKeyInfo keyInfo =
-          KsmKeyInfo.getFromProtobuf(KeyInfo.parseFrom(keyData));
-      AllocatedBlock allocatedBlock =
-          scmBlockClient.allocateBlock(scmBlockSize, keyInfo.getType(),
-              keyInfo.getFactor(), ksmId);
-      KsmKeyLocationInfo info = new KsmKeyLocationInfo.Builder()
-          .setBlockID(allocatedBlock.getBlockID())
-          .setShouldCreateContainer(allocatedBlock.getCreateContainer())
-          .setLength(scmBlockSize)
-          .setOffset(0)
-          .build();
-      // current version not committed, so new blocks coming now are added to
-      // the same version
-      keyInfo.appendNewBlocks(Collections.singletonList(info));
-      keyInfo.updateModifcationTime();
-      metadataManager.put(openKey, keyInfo.getProtobuf().toByteArray());
-      return info;
-    } finally {
-      metadataManager.writeLock().unlock();
-    }
-  }
-
-  @Override
-  public OpenKeySession openKey(KsmKeyArgs args) throws IOException {
-    Preconditions.checkNotNull(args);
-    metadataManager.writeLock().lock();
-    String volumeName = args.getVolumeName();
-    String bucketName = args.getBucketName();
-    String keyName = args.getKeyName();
-    ReplicationFactor factor = args.getFactor();
-    ReplicationType type = args.getType();
-
-    // If user does not specify a replication strategy or
-    // replication factor, KSM will use defaults.
-    if(factor == null) {
-      factor = useRatis ? ReplicationFactor.THREE: ReplicationFactor.ONE;
-    }
-
-    if(type == null) {
-      type = useRatis ? ReplicationType.RATIS : ReplicationType.STAND_ALONE;
-    }
-
-    try {
-      validateBucket(volumeName, bucketName);
-      long requestedSize = Math.min(preallocateMax, args.getDataSize());
-      List<KsmKeyLocationInfo> locations = new ArrayList<>();
-      String objectKey = metadataManager.getKeyWithDBPrefix(
-          volumeName, bucketName, keyName);
-      // requested size is not required but more like a optimization:
-      // SCM looks at the requested, if it 0, no block will be allocated at
-      // the point, if client needs more blocks, client can always call
-      // allocateBlock. But if requested size is not 0, KSM will preallocate
-      // some blocks and piggyback to client, to save RPC calls.
-      while (requestedSize > 0) {
-        long allocateSize = Math.min(scmBlockSize, requestedSize);
-        AllocatedBlock allocatedBlock =
-            scmBlockClient.allocateBlock(allocateSize, type, factor, ksmId);
-        KsmKeyLocationInfo subKeyInfo = new KsmKeyLocationInfo.Builder()
-            .setBlockID(allocatedBlock.getBlockID())
-            .setShouldCreateContainer(allocatedBlock.getCreateContainer())
-            .setLength(allocateSize)
-            .setOffset(0)
-            .build();
-        locations.add(subKeyInfo);
-        requestedSize -= allocateSize;
-      }
-      // NOTE size of a key is not a hard limit on anything, it is a value that
-      // client should expect, in terms of current size of key. If client sets a
-      // value, then this value is used, otherwise, we allocate a single block
-      // which is the current size, if read by the client.
-      long size = args.getDataSize() >= 0 ? args.getDataSize() : scmBlockSize;
-      byte[] keyKey = metadataManager.getDBKeyBytes(
-          volumeName, bucketName, keyName);
-      byte[] value = metadataManager.get(keyKey);
-      KsmKeyInfo keyInfo;
-      long openVersion;
-      if (value != null) {
-        // the key already exist, the new blocks will be added as new version
-        keyInfo = KsmKeyInfo.getFromProtobuf(KeyInfo.parseFrom(value));
-        // when locations.size = 0, the new version will have identical blocks
-        // as its previous version
-        openVersion = keyInfo.addNewVersion(locations);
-        keyInfo.setDataSize(size + keyInfo.getDataSize());
-      } else {
-        // the key does not exist, create a new object, the new blocks are the
-        // version 0
-        long currentTime = Time.now();
-        keyInfo = new KsmKeyInfo.Builder()
-            .setVolumeName(args.getVolumeName())
-            .setBucketName(args.getBucketName())
-            .setKeyName(args.getKeyName())
-            .setKsmKeyLocationInfos(Collections.singletonList(
-                new KsmKeyLocationInfoGroup(0, locations)))
-            .setCreationTime(currentTime)
-            .setModificationTime(currentTime)
-            .setDataSize(size)
-            .setReplicationType(type)
-            .setReplicationFactor(factor)
-            .build();
-        openVersion = 0;
-      }
-      // Generate a random ID which is not already in meta db.
-      int id = -1;
-      // in general this should finish in a couple times at most. putting some
-      // arbitrary large number here to avoid dead loop.
-      for (int j = 0; j < 10000; j++) {
-        id = random.nextInt();
-        byte[] openKey = metadataManager.getOpenKeyNameBytes(objectKey, id);
-        if (metadataManager.get(openKey) == null) {
-          metadataManager.put(openKey, keyInfo.getProtobuf().toByteArray());
-          break;
-        }
-      }
-      if (id == -1) {
-        throw new IOException("Failed to find a usable id for " + objectKey);
-      }
-      LOG.debug("Key {} allocated in volume {} bucket {}",
-          keyName, volumeName, bucketName);
-      return new OpenKeySession(id, keyInfo, openVersion);
-    } catch (KSMException e) {
-      throw e;
-    } catch (IOException ex) {
-      if (!(ex instanceof KSMException)) {
-        LOG.error("Key open failed for volume:{} bucket:{} key:{}",
-            volumeName, bucketName, keyName, ex);
-      }
-      throw new KSMException(ex.getMessage(),
-          KSMException.ResultCodes.FAILED_KEY_ALLOCATION);
-    } finally {
-      metadataManager.writeLock().unlock();
-    }
-  }
-
-  @Override
-  public void commitKey(KsmKeyArgs args, int clientID) throws IOException {
-    Preconditions.checkNotNull(args);
-    metadataManager.writeLock().lock();
-    String volumeName = args.getVolumeName();
-    String bucketName = args.getBucketName();
-    String keyName = args.getKeyName();
-    try {
-      validateBucket(volumeName, bucketName);
-      String objectKey = metadataManager.getKeyWithDBPrefix(
-          volumeName, bucketName, keyName);
-      byte[] objectKeyBytes = metadataManager.getDBKeyBytes(volumeName,
-          bucketName, keyName);
-      byte[] openKey = metadataManager.getOpenKeyNameBytes(objectKey, clientID);
-      byte[] openKeyData = metadataManager.get(openKey);
-      if (openKeyData == null) {
-        throw new KSMException("Commit a key without corresponding entry " +
-            DFSUtil.bytes2String(openKey), ResultCodes.FAILED_KEY_NOT_FOUND);
-      }
-      KsmKeyInfo keyInfo =
-          KsmKeyInfo.getFromProtobuf(KeyInfo.parseFrom(openKeyData));
-      keyInfo.setDataSize(args.getDataSize());
-      keyInfo.setModificationTime(Time.now());
-      BatchOperation batch = new BatchOperation();
-      batch.delete(openKey);
-      batch.put(objectKeyBytes, keyInfo.getProtobuf().toByteArray());
-      metadataManager.writeBatch(batch);
-    } catch (KSMException e) {
-      throw e;
-    } catch (IOException ex) {
-      LOG.error("Key commit failed for volume:{} bucket:{} key:{}",
-          volumeName, bucketName, keyName, ex);
-      throw new KSMException(ex.getMessage(),
-          KSMException.ResultCodes.FAILED_KEY_ALLOCATION);
-    } finally {
-      metadataManager.writeLock().unlock();
-    }
-  }
-
-  @Override
-  public KsmKeyInfo lookupKey(KsmKeyArgs args) throws IOException {
-    Preconditions.checkNotNull(args);
-    metadataManager.writeLock().lock();
-    String volumeName = args.getVolumeName();
-    String bucketName = args.getBucketName();
-    String keyName = args.getKeyName();
-    try {
-      byte[] keyKey = metadataManager.getDBKeyBytes(
-          volumeName, bucketName, keyName);
-      byte[] value = metadataManager.get(keyKey);
-      if (value == null) {
-        LOG.debug("volume:{} bucket:{} Key:{} not found",
-            volumeName, bucketName, keyName);
-        throw new KSMException("Key not found",
-            KSMException.ResultCodes.FAILED_KEY_NOT_FOUND);
-      }
-      return KsmKeyInfo.getFromProtobuf(KeyInfo.parseFrom(value));
-    } catch (DBException ex) {
-      LOG.error("Get key failed for volume:{} bucket:{} key:{}",
-          volumeName, bucketName, keyName, ex);
-      throw new KSMException(ex.getMessage(),
-          KSMException.ResultCodes.FAILED_KEY_NOT_FOUND);
-    } finally {
-      metadataManager.writeLock().unlock();
-    }
-  }
-
-  @Override
-  public void renameKey(KsmKeyArgs args, String toKeyName) throws IOException {
-    Preconditions.checkNotNull(args);
-    Preconditions.checkNotNull(toKeyName);
-    String volumeName = args.getVolumeName();
-    String bucketName = args.getBucketName();
-    String fromKeyName = args.getKeyName();
-    if (toKeyName.length() == 0 || fromKeyName.length() == 0) {
-      LOG.error("Rename key failed for volume:{} bucket:{} fromKey:{} toKey:{}.",
-          volumeName, bucketName, fromKeyName, toKeyName);
-      throw new KSMException("Key name is empty",
-          ResultCodes.FAILED_INVALID_KEY_NAME);
-    }
-
-    metadataManager.writeLock().lock();
-    try {
-      // fromKeyName should exist
-      byte[] fromKey = metadataManager.getDBKeyBytes(
-          volumeName, bucketName, fromKeyName);
-      byte[] fromKeyValue = metadataManager.get(fromKey);
-      if (fromKeyValue == null) {
-        // TODO: Add support for renaming open key
-        LOG.error(
-            "Rename key failed for volume:{} bucket:{} fromKey:{} toKey:{}. "
-                + "Key: {} not found.", volumeName, bucketName, fromKeyName,
-            toKeyName, fromKeyName);
-        throw new KSMException("Key not found",
-            KSMException.ResultCodes.FAILED_KEY_NOT_FOUND);
-      }
-
-      // toKeyName should not exist
-      byte[] toKey =
-          metadataManager.getDBKeyBytes(volumeName, bucketName, toKeyName);
-      byte[] toKeyValue = metadataManager.get(toKey);
-      if (toKeyValue != null) {
-        LOG.error(
-            "Rename key failed for volume:{} bucket:{} fromKey:{} toKey:{}. "
-                + "Key: {} already exists.", volumeName, bucketName,
-            fromKeyName, toKeyName, toKeyName);
-        throw new KSMException("Key not found",
-            KSMException.ResultCodes.FAILED_KEY_ALREADY_EXISTS);
-      }
-
-      if (fromKeyName.equals(toKeyName)) {
-        return;
-      }
-
-      KsmKeyInfo newKeyInfo =
-          KsmKeyInfo.getFromProtobuf(KeyInfo.parseFrom(fromKeyValue));
-      newKeyInfo.setKeyName(toKeyName);
-      newKeyInfo.updateModifcationTime();
-      BatchOperation batch = new BatchOperation();
-      batch.delete(fromKey);
-      batch.put(toKey, newKeyInfo.getProtobuf().toByteArray());
-      metadataManager.writeBatch(batch);
-    } catch (DBException ex) {
-      LOG.error("Rename key failed for volume:{} bucket:{} fromKey:{} toKey:{}.",
-          volumeName, bucketName, fromKeyName, toKeyName, ex);
-      throw new KSMException(ex.getMessage(),
-          ResultCodes.FAILED_KEY_RENAME);
-    } finally {
-      metadataManager.writeLock().unlock();
-    }
-  }
-
-  @Override
-  public void deleteKey(KsmKeyArgs args) throws IOException {
-    Preconditions.checkNotNull(args);
-    metadataManager.writeLock().lock();
-    String volumeName = args.getVolumeName();
-    String bucketName = args.getBucketName();
-    String keyName = args.getKeyName();
-    try {
-      byte[] objectKey = metadataManager.getDBKeyBytes(
-          volumeName, bucketName, keyName);
-      byte[] objectValue = metadataManager.get(objectKey);
-      if (objectValue == null) {
-        throw new KSMException("Key not found",
-            KSMException.ResultCodes.FAILED_KEY_NOT_FOUND);
-      }
-      byte[] deletingKey = metadataManager.getDeletedKeyName(objectKey);
-      BatchOperation batch = new BatchOperation();
-      batch.put(deletingKey, objectValue);
-      batch.delete(objectKey);
-      metadataManager.writeBatch(batch);
-    } catch (DBException ex) {
-      LOG.error(String.format("Delete key failed for volume:%s "
-          + "bucket:%s key:%s", volumeName, bucketName, keyName), ex);
-      throw new KSMException(ex.getMessage(), ex,
-          ResultCodes.FAILED_KEY_DELETION);
-    } finally {
-      metadataManager.writeLock().unlock();
-    }
-  }
-
-  @Override
-  public List<KsmKeyInfo> listKeys(String volumeName, String bucketName,
-      String startKey, String keyPrefix, int maxKeys) throws IOException {
-    Preconditions.checkNotNull(volumeName);
-    Preconditions.checkNotNull(bucketName);
-
-    metadataManager.readLock().lock();
-    try {
-      return metadataManager.listKeys(volumeName, bucketName,
-          startKey, keyPrefix, maxKeys);
-    } finally {
-      metadataManager.readLock().unlock();
-    }
-  }
-
-  @Override
-  public List<BlockGroup> getPendingDeletionKeys(final int count)
-      throws IOException {
-    metadataManager.readLock().lock();
-    try {
-      return metadataManager.getPendingDeletionKeys(count);
-    } finally {
-      metadataManager.readLock().unlock();
-    }
-  }
-
-  @Override
-  public void deletePendingDeletionKey(String objectKeyName)
-      throws IOException{
-    Preconditions.checkNotNull(objectKeyName);
-    if (!objectKeyName.startsWith(OzoneConsts.DELETING_KEY_PREFIX)) {
-      throw new IllegalArgumentException("Invalid key name,"
-          + " the name should be the key name with deleting prefix");
-    }
-
-    // Simply removes the entry from KSM DB.
-    metadataManager.writeLock().lock();
-    try {
-      byte[] pendingDelKey = DFSUtil.string2Bytes(objectKeyName);
-      byte[] delKeyValue = metadataManager.get(pendingDelKey);
-      if (delKeyValue == null) {
-        throw new IOException("Failed to delete key " + objectKeyName
-            + " because it is not found in DB");
-      }
-      metadataManager.delete(pendingDelKey);
-    } finally {
-      metadataManager.writeLock().unlock();
-    }
-  }
-
-  @Override
-  public List<BlockGroup> getExpiredOpenKeys() throws IOException {
-    metadataManager.readLock().lock();
-    try {
-      return metadataManager.getExpiredOpenKeys();
-    } finally {
-      metadataManager.readLock().unlock();
-    }
-  }
-
-  @Override
-  public void deleteExpiredOpenKey(String objectKeyName) throws IOException {
-    Preconditions.checkNotNull(objectKeyName);
-    if (!objectKeyName.startsWith(OzoneConsts.OPEN_KEY_PREFIX)) {
-      throw new IllegalArgumentException("Invalid key name,"
-          + " the name should be the key name with open key prefix");
-    }
-
-    // Simply removes the entry from KSM DB.
-    metadataManager.writeLock().lock();
-    try {
-      byte[] openKey = DFSUtil.string2Bytes(objectKeyName);
-      byte[] delKeyValue = metadataManager.get(openKey);
-      if (delKeyValue == null) {
-        throw new IOException("Failed to delete key " + objectKeyName
-            + " because it is not found in DB");
-      }
-      metadataManager.delete(openKey);
-    } finally {
-      metadataManager.writeLock().unlock();
-    }
-  }
-}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/061b1685/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/ksm/KeySpaceManager.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/ksm/KeySpaceManager.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/ksm/KeySpaceManager.java
deleted file mode 100644
index 5fa313b..0000000
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/ksm/KeySpaceManager.java
+++ /dev/null
@@ -1,912 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with this
- * work for additional information regarding copyright ownership.  The ASF
- * licenses this file to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations under
- * the License.
- */
-
-package org.apache.hadoop.ozone.ksm;
-
-import com.google.common.annotations.VisibleForTesting;
-import com.google.common.base.Preconditions;
-import com.google.protobuf.BlockingService;
-import org.apache.hadoop.classification.InterfaceAudience;
-import org.apache.hadoop.hdds.protocol.DatanodeDetails;
-import org.apache.hadoop.hdfs.DFSUtil;
-import org.apache.hadoop.ipc.Client;
-import org.apache.hadoop.ipc.ProtobufRpcEngine;
-import org.apache.hadoop.ipc.RPC;
-import org.apache.hadoop.hdds.server.ServiceRuntimeInfoImpl;
-import org.apache.hadoop.ozone.common.Storage.StorageState;
-import org.apache.hadoop.ozone.ksm.exceptions.KSMException;
-import org.apache.hadoop.ozone.ksm.helpers.KsmBucketArgs;
-import org.apache.hadoop.ozone.ksm.helpers.KsmBucketInfo;
-import org.apache.hadoop.ozone.ksm.helpers.KsmKeyArgs;
-import org.apache.hadoop.ozone.ksm.helpers.KsmKeyInfo;
-import org.apache.hadoop.ozone.ksm.helpers.KsmKeyLocationInfo;
-import org.apache.hadoop.ozone.ksm.helpers.KsmVolumeArgs;
-import org.apache.hadoop.ozone.ksm.helpers.OpenKeySession;
-import org.apache.hadoop.ozone.ksm.helpers.ServiceInfo;
-import org.apache.hadoop.ozone.ksm.protocol.KeySpaceManagerProtocol;
-import org.apache.hadoop.ozone.ksm.protocolPB.KeySpaceManagerProtocolPB;
-import org.apache.hadoop.ozone.ksm.exceptions.KSMException.ResultCodes;
-import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem;
-import org.apache.hadoop.metrics2.util.MBeans;
-import org.apache.hadoop.net.NetUtils;
-import org.apache.hadoop.hdds.conf.OzoneConfiguration;
-import org.apache.hadoop.ozone.protocol.proto.KeySpaceManagerProtocolProtos
-    .ServicePort;
-import org.apache.hadoop.ozone.protocol.proto
-    .KeySpaceManagerProtocolProtos.OzoneAclInfo;
-import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
-import org.apache.hadoop.ozone.protocolPB
-    .KeySpaceManagerProtocolServerSideTranslatorPB;
-import org.apache.hadoop.hdds.scm.ScmInfo;
-import org.apache.hadoop.hdds.scm.protocol.ScmBlockLocationProtocol;
-import org.apache.hadoop.hdds.scm.protocol.StorageContainerLocationProtocol;
-import org.apache.hadoop.hdds.scm.protocolPB
-    .ScmBlockLocationProtocolClientSideTranslatorPB;
-import org.apache.hadoop.hdds.scm.protocolPB.ScmBlockLocationProtocolPB;
-import org.apache.hadoop.hdds.scm.protocolPB
-    .StorageContainerLocationProtocolClientSideTranslatorPB;
-import org.apache.hadoop.hdds.scm.protocolPB.StorageContainerLocationProtocolPB;
-import org.apache.hadoop.security.UserGroupInformation;
-import org.apache.hadoop.util.GenericOptionsParser;
-import org.apache.hadoop.util.StringUtils;
-
-import static org.apache.hadoop.hdds.HddsUtils.getScmAddressForBlockClients;
-import static org.apache.hadoop.hdds.HddsUtils.getScmAddressForClients;
-import static org.apache.hadoop.hdds.HddsUtils.isHddsEnabled;
-import static org.apache.hadoop.ozone.KsmUtils.getKsmAddress;
-import static org.apache.hadoop.hdds.server.ServerUtils
-    .updateRPCListenAddress;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import javax.management.ObjectName;
-import java.io.IOException;
-import java.io.PrintStream;
-import java.net.InetSocketAddress;
-import java.util.ArrayList;
-import java.util.HashMap;
-import java.util.List;
-import java.util.Map;
-
-import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_ENABLED;
-import static org.apache.hadoop.ozone.ksm.KSMConfigKeys
-    .OZONE_KSM_ADDRESS_KEY;
-import static org.apache.hadoop.ozone.ksm.KSMConfigKeys
-    .OZONE_KSM_HANDLER_COUNT_DEFAULT;
-import static org.apache.hadoop.ozone.ksm.KSMConfigKeys
-    .OZONE_KSM_HANDLER_COUNT_KEY;
-import static org.apache.hadoop.ozone.protocol.proto
-    .KeySpaceManagerProtocolProtos.KeySpaceManagerService
-    .newReflectiveBlockingService;
-import static org.apache.hadoop.hdds.protocol.proto.HddsProtos
-    .NodeState.HEALTHY;
-import static org.apache.hadoop.util.ExitUtil.terminate;
-
-/**
- * Ozone Keyspace manager is the metadata manager of ozone.
- */
-@InterfaceAudience.LimitedPrivate({"HDFS", "CBLOCK", "OZONE", "HBASE"})
-public final class KeySpaceManager extends ServiceRuntimeInfoImpl
-    implements KeySpaceManagerProtocol, KSMMXBean {
-  private static final Logger LOG =
-      LoggerFactory.getLogger(KeySpaceManager.class);
-
-  private static final String USAGE =
-      "Usage: \n ozone ksm [genericOptions] " + "[ "
-          + StartupOption.CREATEOBJECTSTORE.getName() + " ]\n " + "ozone ksm [ "
-          + StartupOption.HELP.getName() + " ]\n";
-
-  /** Startup options. */
-  public enum StartupOption {
-    CREATEOBJECTSTORE("-createObjectStore"),
-    HELP("-help"),
-    REGULAR("-regular");
-
-    private final String name;
-
-    StartupOption(String arg) {
-      this.name = arg;
-    }
-
-    public String getName() {
-      return name;
-    }
-
-    public static StartupOption parse(String value) {
-      for (StartupOption option : StartupOption.values()) {
-        if (option.name.equalsIgnoreCase(value)) {
-          return option;
-        }
-      }
-      return null;
-    }
-  }
-
-  private final OzoneConfiguration configuration;
-  private final RPC.Server ksmRpcServer;
-  private final InetSocketAddress ksmRpcAddress;
-  private final KSMMetadataManager metadataManager;
-  private final VolumeManager volumeManager;
-  private final BucketManager bucketManager;
-  private final KeyManager keyManager;
-  private final KSMMetrics metrics;
-  private final KeySpaceManagerHttpServer httpServer;
-  private final KSMStorage ksmStorage;
-  private final ScmBlockLocationProtocol scmBlockClient;
-  private final StorageContainerLocationProtocol scmContainerClient;
-  private ObjectName ksmInfoBeanName;
-
-  private KeySpaceManager(OzoneConfiguration conf) throws IOException {
-    Preconditions.checkNotNull(conf);
-    configuration = conf;
-    ksmStorage = new KSMStorage(conf);
-    scmBlockClient = getScmBlockClient(configuration);
-    scmContainerClient = getScmContainerClient(configuration);
-    if (ksmStorage.getState() != StorageState.INITIALIZED) {
-      throw new KSMException("KSM not initialized.",
-          ResultCodes.KSM_NOT_INITIALIZED);
-    }
-
-    // verifies that the SCM info in the KSM Version file is correct.
-    ScmInfo scmInfo = scmBlockClient.getScmInfo();
-    if (!(scmInfo.getClusterId().equals(ksmStorage.getClusterID()) && scmInfo
-        .getScmId().equals(ksmStorage.getScmId()))) {
-      throw new KSMException("SCM version info mismatch.",
-          ResultCodes.SCM_VERSION_MISMATCH_ERROR);
-    }
-    final int handlerCount = conf.getInt(OZONE_KSM_HANDLER_COUNT_KEY,
-        OZONE_KSM_HANDLER_COUNT_DEFAULT);
-
-    RPC.setProtocolEngine(configuration, KeySpaceManagerProtocolPB.class,
-        ProtobufRpcEngine.class);
-
-    BlockingService ksmService = newReflectiveBlockingService(
-        new KeySpaceManagerProtocolServerSideTranslatorPB(this));
-    final InetSocketAddress ksmNodeRpcAddr =
-        getKsmAddress(configuration);
-    ksmRpcServer = startRpcServer(configuration, ksmNodeRpcAddr,
-        KeySpaceManagerProtocolPB.class, ksmService,
-        handlerCount);
-    ksmRpcAddress = updateRPCListenAddress(configuration,
-        OZONE_KSM_ADDRESS_KEY, ksmNodeRpcAddr, ksmRpcServer);
-    metadataManager = new KSMMetadataManagerImpl(configuration);
-    volumeManager = new VolumeManagerImpl(metadataManager, configuration);
-    bucketManager = new BucketManagerImpl(metadataManager);
-    metrics = KSMMetrics.create();
-    keyManager =
-        new KeyManagerImpl(scmBlockClient, metadataManager, configuration,
-            ksmStorage.getKsmId());
-    httpServer = new KeySpaceManagerHttpServer(configuration, this);
-  }
-
-  /**
-   * Create a scm block client, used by putKey() and getKey().
-   *
-   * @return {@link ScmBlockLocationProtocol}
-   * @throws IOException
-   */
-  private static ScmBlockLocationProtocol getScmBlockClient(
-      OzoneConfiguration conf) throws IOException {
-    RPC.setProtocolEngine(conf, ScmBlockLocationProtocolPB.class,
-        ProtobufRpcEngine.class);
-    long scmVersion =
-        RPC.getProtocolVersion(ScmBlockLocationProtocolPB.class);
-    InetSocketAddress scmBlockAddress =
-        getScmAddressForBlockClients(conf);
-    ScmBlockLocationProtocolClientSideTranslatorPB scmBlockLocationClient =
-        new ScmBlockLocationProtocolClientSideTranslatorPB(
-            RPC.getProxy(ScmBlockLocationProtocolPB.class, scmVersion,
-                scmBlockAddress, UserGroupInformation.getCurrentUser(), conf,
-                NetUtils.getDefaultSocketFactory(conf),
-                Client.getRpcTimeout(conf)));
-    return scmBlockLocationClient;
-  }
-
-  /**
-   * Returns a scm container client.
-   *
-   * @return {@link StorageContainerLocationProtocol}
-   * @throws IOException
-   */
-  private static StorageContainerLocationProtocol getScmContainerClient(
-      OzoneConfiguration conf) throws IOException {
-    RPC.setProtocolEngine(conf, StorageContainerLocationProtocolPB.class,
-        ProtobufRpcEngine.class);
-    long scmVersion =
-        RPC.getProtocolVersion(StorageContainerLocationProtocolPB.class);
-    InetSocketAddress scmAddr = getScmAddressForClients(
-        conf);
-    StorageContainerLocationProtocolClientSideTranslatorPB scmContainerClient =
-        new StorageContainerLocationProtocolClientSideTranslatorPB(
-            RPC.getProxy(StorageContainerLocationProtocolPB.class, scmVersion,
-                scmAddr, UserGroupInformation.getCurrentUser(), conf,
-                NetUtils.getDefaultSocketFactory(conf),
-                Client.getRpcTimeout(conf)));
-    return scmContainerClient;
-  }
-
-  @VisibleForTesting
-  public KeyManager getKeyManager() {
-    return keyManager;
-  }
-
-  @VisibleForTesting
-  public ScmInfo getScmInfo() throws IOException {
-    return scmBlockClient.getScmInfo();
-  }
-
-  @VisibleForTesting
-  public KSMStorage getKsmStorage() {
-    return ksmStorage;
-  }
-  /**
-   * Starts an RPC server, if configured.
-   *
-   * @param conf configuration
-   * @param addr configured address of RPC server
-   * @param protocol RPC protocol provided by RPC server
-   * @param instance RPC protocol implementation instance
-   * @param handlerCount RPC server handler count
-   *
-   * @return RPC server
-   * @throws IOException if there is an I/O error while creating RPC server
-   */
-  private static RPC.Server startRpcServer(OzoneConfiguration conf,
-      InetSocketAddress addr, Class<?> protocol, BlockingService instance,
-      int handlerCount) throws IOException {
-    RPC.Server rpcServer = new RPC.Builder(conf)
-        .setProtocol(protocol)
-        .setInstance(instance)
-        .setBindAddress(addr.getHostString())
-        .setPort(addr.getPort())
-        .setNumHandlers(handlerCount)
-        .setVerbose(false)
-        .setSecretManager(null)
-        .build();
-
-    DFSUtil.addPBProtocol(conf, protocol, instance, rpcServer);
-    return rpcServer;
-  }
-
-  /**
-   * Get metadata manager.
-   * @return metadata manager.
-   */
-  public KSMMetadataManager getMetadataManager() {
-    return metadataManager;
-  }
-
-  public KSMMetrics getMetrics() {
-    return metrics;
-  }
-
-  /**
-   * Main entry point for starting KeySpaceManager.
-   *
-   * @param argv arguments
-   * @throws IOException if startup fails due to I/O error
-   */
-  public static void main(String[] argv) throws IOException {
-    if (DFSUtil.parseHelpArgument(argv, USAGE, System.out, true)) {
-      System.exit(0);
-    }
-    try {
-      OzoneConfiguration conf = new OzoneConfiguration();
-      GenericOptionsParser hParser = new GenericOptionsParser(conf, argv);
-      if (!hParser.isParseSuccessful()) {
-        System.err.println("USAGE: " + USAGE + " \n");
-        hParser.printGenericCommandUsage(System.err);
-        System.exit(1);
-      }
-      StringUtils.startupShutdownMessage(KeySpaceManager.class, argv, LOG);
-      KeySpaceManager ksm = createKSM(hParser.getRemainingArgs(), conf);
-      if (ksm != null) {
-        ksm.start();
-        ksm.join();
-      }
-    } catch (Throwable t) {
-      LOG.error("Failed to start the KeyspaceManager.", t);
-      terminate(1, t);
-    }
-  }
-
-  private static void printUsage(PrintStream out) {
-    out.println(USAGE + "\n");
-  }
-
-  /**
-   * Constructs KSM instance based on command line arguments.
-   * @param argv Command line arguments
-   * @param conf OzoneConfiguration
-   * @return KSM instance
-   * @throws IOException in case KSM instance creation fails.
-   */
-
-  public static KeySpaceManager createKSM(String[] argv,
-      OzoneConfiguration conf) throws IOException {
-    if (!isHddsEnabled(conf)) {
-      System.err.println("KSM cannot be started in secure mode or when " +
-          OZONE_ENABLED + " is set to false");
-      System.exit(1);
-    }
-    StartupOption startOpt = parseArguments(argv);
-    if (startOpt == null) {
-      printUsage(System.err);
-      terminate(1);
-      return null;
-    }
-    switch (startOpt) {
-    case CREATEOBJECTSTORE:
-      terminate(ksmInit(conf) ? 0 : 1);
-      return null;
-    case HELP:
-      printUsage(System.err);
-      terminate(0);
-      return null;
-    default:
-      return new KeySpaceManager(conf);
-    }
-  }
-
-  /**
-   * Initializes the KSM instance.
-   * @param conf OzoneConfiguration
-   * @return true if KSM initialization succeeds , false otherwise
-   * @throws IOException in case ozone metadata directory path is not accessible
-   */
-
-  private static boolean ksmInit(OzoneConfiguration conf) throws IOException {
-    KSMStorage ksmStorage = new KSMStorage(conf);
-    StorageState state = ksmStorage.getState();
-    if (state != StorageState.INITIALIZED) {
-      try {
-        ScmBlockLocationProtocol scmBlockClient = getScmBlockClient(conf);
-        ScmInfo scmInfo = scmBlockClient.getScmInfo();
-        String clusterId = scmInfo.getClusterId();
-        String scmId = scmInfo.getScmId();
-        if (clusterId == null || clusterId.isEmpty()) {
-          throw new IOException("Invalid Cluster ID");
-        }
-        if (scmId == null || scmId.isEmpty()) {
-          throw new IOException("Invalid SCM ID");
-        }
-        ksmStorage.setClusterId(clusterId);
-        ksmStorage.setScmId(scmId);
-        ksmStorage.initialize();
-        System.out.println(
-            "KSM initialization succeeded.Current cluster id for sd="
-                + ksmStorage.getStorageDir() + ";cid=" + ksmStorage
-                .getClusterID());
-        return true;
-      } catch (IOException ioe) {
-        LOG.error("Could not initialize KSM version file", ioe);
-        return false;
-      }
-    } else {
-      System.out.println(
-          "KSM already initialized.Reusing existing cluster id for sd="
-              + ksmStorage.getStorageDir() + ";cid=" + ksmStorage
-              .getClusterID());
-      return true;
-    }
-  }
-
-  /**
-   * Parses the command line options for KSM initialization.
-   * @param args command line arguments
-   * @return StartupOption if options are valid, null otherwise
-   */
-  private static StartupOption parseArguments(String[] args) {
-    if (args == null || args.length == 0) {
-      return StartupOption.REGULAR;
-    } else if (args.length == 1) {
-      return StartupOption.parse(args[0]);
-    }
-    return null;
-  }
-
-  /**
-   * Builds a message for logging startup information about an RPC server.
-   *
-   * @param description RPC server description
-   * @param addr RPC server listening address
-   * @return server startup message
-   */
-  private static String buildRpcServerStartMessage(String description,
-      InetSocketAddress addr) {
-    return addr != null ? String.format("%s is listening at %s",
-        description, addr.toString()) :
-        String.format("%s not started", description);
-  }
-
-  /**
-   * Start service.
-   */
-  public void start() throws IOException {
-    LOG.info(buildRpcServerStartMessage("KeyspaceManager RPC server",
-        ksmRpcAddress));
-    DefaultMetricsSystem.initialize("KeySpaceManager");
-    metadataManager.start();
-    keyManager.start();
-    ksmRpcServer.start();
-    httpServer.start();
-    registerMXBean();
-    setStartTime();
-  }
-
-  /**
-   * Stop service.
-   */
-  public void stop() {
-    try {
-      metadataManager.stop();
-      ksmRpcServer.stop();
-      keyManager.stop();
-      httpServer.stop();
-      metrics.unRegister();
-      unregisterMXBean();
-    } catch (Exception e) {
-      LOG.error("Key Space Manager stop failed.", e);
-    }
-  }
-
-  /**
-   * Wait until service has completed shutdown.
-   */
-  public void join() {
-    try {
-      ksmRpcServer.join();
-    } catch (InterruptedException e) {
-      Thread.currentThread().interrupt();
-      LOG.info("Interrupted during KeyspaceManager join.", e);
-    }
-  }
-
-  /**
-   * Creates a volume.
-   *
-   * @param args - Arguments to create Volume.
-   * @throws IOException
-   */
-  @Override
-  public void createVolume(KsmVolumeArgs args) throws IOException {
-    try {
-      metrics.incNumVolumeCreates();
-      volumeManager.createVolume(args);
-    } catch (Exception ex) {
-      metrics.incNumVolumeCreateFails();
-      throw ex;
-    }
-  }
-
-  /**
-   * Changes the owner of a volume.
-   *
-   * @param volume - Name of the volume.
-   * @param owner - Name of the owner.
-   * @throws IOException
-   */
-  @Override
-  public void setOwner(String volume, String owner) throws IOException {
-    try {
-      metrics.incNumVolumeUpdates();
-      volumeManager.setOwner(volume, owner);
-    } catch (Exception ex) {
-      metrics.incNumVolumeUpdateFails();
-      throw ex;
-    }
-  }
-
-  /**
-   * Changes the Quota on a volume.
-   *
-   * @param volume - Name of the volume.
-   * @param quota - Quota in bytes.
-   * @throws IOException
-   */
-  @Override
-  public void setQuota(String volume, long quota) throws IOException {
-    try {
-      metrics.incNumVolumeUpdates();
-      volumeManager.setQuota(volume, quota);
-    } catch (Exception ex) {
-      metrics.incNumVolumeUpdateFails();
-      throw ex;
-    }
-  }
-
-  /**
-   * Checks if the specified user can access this volume.
-   *
-   * @param volume - volume
-   * @param userAcl - user acls which needs to be checked for access
-   * @return true if the user has required access for the volume,
-   *         false otherwise
-   * @throws IOException
-   */
-  @Override
-  public boolean checkVolumeAccess(String volume, OzoneAclInfo userAcl)
-      throws IOException {
-    try {
-      metrics.incNumVolumeCheckAccesses();
-      return volumeManager.checkVolumeAccess(volume, userAcl);
-    } catch (Exception ex) {
-      metrics.incNumVolumeCheckAccessFails();
-      throw ex;
-    }
-  }
-
-  /**
-   * Gets the volume information.
-   *
-   * @param volume - Volume name.
-   * @return VolumeArgs or exception is thrown.
-   * @throws IOException
-   */
-  @Override
-  public KsmVolumeArgs getVolumeInfo(String volume) throws IOException {
-    try {
-      metrics.incNumVolumeInfos();
-      return volumeManager.getVolumeInfo(volume);
-    } catch (Exception ex) {
-      metrics.incNumVolumeInfoFails();
-      throw ex;
-    }
-  }
-
-  /**
-   * Deletes an existing empty volume.
-   *
-   * @param volume - Name of the volume.
-   * @throws IOException
-   */
-  @Override
-  public void deleteVolume(String volume) throws IOException {
-    try {
-      metrics.incNumVolumeDeletes();
-      volumeManager.deleteVolume(volume);
-    } catch (Exception ex) {
-      metrics.incNumVolumeDeleteFails();
-      throw ex;
-    }
-  }
-
-  /**
-   * Lists volume owned by a specific user.
-   *
-   * @param userName - user name
-   * @param prefix - Filter prefix -- Return only entries that match this.
-   * @param prevKey - Previous key -- List starts from the next from the
-   * prevkey
-   * @param maxKeys - Max number of keys to return.
-   * @return List of Volumes.
-   * @throws IOException
-   */
-  @Override
-  public List<KsmVolumeArgs> listVolumeByUser(String userName, String prefix,
-      String prevKey, int maxKeys) throws IOException {
-    try {
-      metrics.incNumVolumeLists();
-      return volumeManager.listVolumes(userName, prefix, prevKey, maxKeys);
-    } catch (Exception ex) {
-      metrics.incNumVolumeListFails();
-      throw ex;
-    }
-  }
-
-  /**
-   * Lists volume all volumes in the cluster.
-   *
-   * @param prefix - Filter prefix -- Return only entries that match this.
-   * @param prevKey - Previous key -- List starts from the next from the
-   * prevkey
-   * @param maxKeys - Max number of keys to return.
-   * @return List of Volumes.
-   * @throws IOException
-   */
-  @Override
-  public List<KsmVolumeArgs> listAllVolumes(String prefix, String prevKey, int
-      maxKeys) throws IOException {
-    try {
-      metrics.incNumVolumeLists();
-      return volumeManager.listVolumes(null, prefix, prevKey, maxKeys);
-    } catch (Exception ex) {
-      metrics.incNumVolumeListFails();
-      throw ex;
-    }
-  }
-
-  /**
-   * Creates a bucket.
-   *
-   * @param bucketInfo - BucketInfo to create bucket.
-   * @throws IOException
-   */
-  @Override
-  public void createBucket(KsmBucketInfo bucketInfo) throws IOException {
-    try {
-      metrics.incNumBucketCreates();
-      bucketManager.createBucket(bucketInfo);
-    } catch (Exception ex) {
-      metrics.incNumBucketCreateFails();
-      throw ex;
-    }
-  }
-
-  /**
-   * {@inheritDoc}
-   */
-  @Override
-  public List<KsmBucketInfo> listBuckets(String volumeName,
-      String startKey, String prefix, int maxNumOfBuckets)
-      throws IOException {
-    try {
-      metrics.incNumBucketLists();
-      return bucketManager.listBuckets(volumeName,
-          startKey, prefix, maxNumOfBuckets);
-    } catch (IOException ex) {
-      metrics.incNumBucketListFails();
-      throw ex;
-    }
-  }
-
-  /**
-   * Gets the bucket information.
-   *
-   * @param volume - Volume name.
-   * @param bucket - Bucket name.
-   * @return KsmBucketInfo or exception is thrown.
-   * @throws IOException
-   */
-  @Override
-  public KsmBucketInfo getBucketInfo(String volume, String bucket)
-      throws IOException {
-    try {
-      metrics.incNumBucketInfos();
-      return bucketManager.getBucketInfo(volume, bucket);
-    } catch (Exception ex) {
-      metrics.incNumBucketInfoFails();
-      throw ex;
-    }
-  }
-
-  /**
-   * Allocate a key.
-   *
-   * @param args - attributes of the key.
-   * @return KsmKeyInfo - the info about the allocated key.
-   * @throws IOException
-   */
-  @Override
-  public OpenKeySession openKey(KsmKeyArgs args) throws IOException {
-    try {
-      metrics.incNumKeyAllocates();
-      return keyManager.openKey(args);
-    } catch (Exception ex) {
-      metrics.incNumKeyAllocateFails();
-      throw ex;
-    }
-  }
-
-  @Override
-  public void commitKey(KsmKeyArgs args, int clientID)
-      throws IOException {
-    try {
-      metrics.incNumKeyCommits();
-      keyManager.commitKey(args, clientID);
-    } catch (Exception ex) {
-      metrics.incNumKeyCommitFails();
-      throw ex;
-    }
-  }
-
-  @Override
-  public KsmKeyLocationInfo allocateBlock(KsmKeyArgs args, int clientID)
-      throws IOException {
-    try {
-      metrics.incNumBlockAllocateCalls();
-      return keyManager.allocateBlock(args, clientID);
-    } catch (Exception ex) {
-      metrics.incNumBlockAllocateCallFails();
-      throw ex;
-    }
-  }
-
-  /**
-   * Lookup a key.
-   *
-   * @param args - attributes of the key.
-   * @return KsmKeyInfo - the info about the requested key.
-   * @throws IOException
-   */
-  @Override
-  public KsmKeyInfo lookupKey(KsmKeyArgs args) throws IOException {
-    try {
-      metrics.incNumKeyLookups();
-      return keyManager.lookupKey(args);
-    } catch (Exception ex) {
-      metrics.incNumKeyLookupFails();
-      throw ex;
-    }
-  }
-
-  @Override
-  public void renameKey(KsmKeyArgs args, String toKeyName) throws IOException {
-    try {
-      metrics.incNumKeyRenames();
-      keyManager.renameKey(args, toKeyName);
-    } catch (IOException e) {
-      metrics.incNumKeyRenameFails();
-      throw e;
-    }
-  }
-
-  /**
-   * Deletes an existing key.
-   *
-   * @param args - attributes of the key.
-   * @throws IOException
-   */
-  @Override
-  public void deleteKey(KsmKeyArgs args) throws IOException {
-    try {
-      metrics.incNumKeyDeletes();
-      keyManager.deleteKey(args);
-    } catch (Exception ex) {
-      metrics.incNumKeyDeleteFails();
-      throw ex;
-    }
-  }
-
-  @Override
-  public List<KsmKeyInfo> listKeys(String volumeName, String bucketName,
-      String startKey, String keyPrefix, int maxKeys) throws IOException {
-    try {
-      metrics.incNumKeyLists();
-      return keyManager.listKeys(volumeName, bucketName,
-          startKey, keyPrefix, maxKeys);
-    } catch (IOException ex) {
-      metrics.incNumKeyListFails();
-      throw ex;
-    }
-  }
-
-  /**
-   * Sets bucket property from args.
-   * @param args - BucketArgs.
-   * @throws IOException
-   */
-  @Override
-  public void setBucketProperty(KsmBucketArgs args)
-      throws IOException {
-    try {
-      metrics.incNumBucketUpdates();
-      bucketManager.setBucketProperty(args);
-    } catch (Exception ex) {
-      metrics.incNumBucketUpdateFails();
-      throw ex;
-    }
-  }
-
-
-  /**
-   * Deletes an existing empty bucket from volume.
-   * @param volume - Name of the volume.
-   * @param bucket - Name of the bucket.
-   * @throws IOException
-   */
-  public void deleteBucket(String volume, String bucket) throws IOException {
-    try {
-      metrics.incNumBucketDeletes();
-      bucketManager.deleteBucket(volume, bucket);
-    } catch (Exception ex) {
-      metrics.incNumBucketDeleteFails();
-      throw ex;
-    }
-  }
-
-  private void registerMXBean() {
-    Map<String, String> jmxProperties = new HashMap<String, String>();
-    jmxProperties.put("component", "ServerRuntime");
-    this.ksmInfoBeanName =
-        MBeans.register("KeySpaceManager",
-            "KeySpaceManagerInfo",
-            jmxProperties,
-            this);
-  }
-
-  private void unregisterMXBean() {
-    if (this.ksmInfoBeanName != null) {
-      MBeans.unregister(this.ksmInfoBeanName);
-      this.ksmInfoBeanName = null;
-    }
-  }
-
-  @Override
-  public String getRpcPort() {
-    return "" + ksmRpcAddress.getPort();
-  }
-
-  @VisibleForTesting
-  public KeySpaceManagerHttpServer getHttpServer() {
-    return httpServer;
-  }
-
-  @Override
-  public List<ServiceInfo> getServiceList() throws IOException {
-    // When we implement multi-home this call has to be handled properly.
-    List<ServiceInfo> services = new ArrayList<>();
-    ServiceInfo.Builder ksmServiceInfoBuilder = ServiceInfo.newBuilder()
-        .setNodeType(HddsProtos.NodeType.KSM)
-        .setHostname(ksmRpcAddress.getHostName())
-        .addServicePort(ServicePort.newBuilder()
-                .setType(ServicePort.Type.RPC)
-                .setValue(ksmRpcAddress.getPort())
-            .build());
-    if (httpServer.getHttpAddress() != null) {
-      ksmServiceInfoBuilder.addServicePort(ServicePort.newBuilder()
-          .setType(ServicePort.Type.HTTP)
-          .setValue(httpServer.getHttpAddress().getPort())
-          .build());
-    }
-    if (httpServer.getHttpsAddress() != null) {
-      ksmServiceInfoBuilder.addServicePort(ServicePort.newBuilder()
-          .setType(ServicePort.Type.HTTPS)
-          .setValue(httpServer.getHttpsAddress().getPort())
-          .build());
-    }
-    services.add(ksmServiceInfoBuilder.build());
-
-    // For client we have to return SCM with container protocol port,
-    // not block protocol.
-    InetSocketAddress scmAddr = getScmAddressForClients(
-        configuration);
-    ServiceInfo.Builder scmServiceInfoBuilder = ServiceInfo.newBuilder()
-        .setNodeType(HddsProtos.NodeType.SCM)
-        .setHostname(scmAddr.getHostName())
-        .addServicePort(ServicePort.newBuilder()
-            .setType(ServicePort.Type.RPC)
-            .setValue(scmAddr.getPort()).build());
-    services.add(scmServiceInfoBuilder.build());
-
-    List<HddsProtos.Node> nodes = scmContainerClient.queryNode(HEALTHY,
-        HddsProtos.QueryScope.CLUSTER, "");
-
-    for (HddsProtos.Node node : nodes) {
-      HddsProtos.DatanodeDetailsProto datanode = node.getNodeID();
-
-      ServiceInfo.Builder dnServiceInfoBuilder = ServiceInfo.newBuilder()
-          .setNodeType(HddsProtos.NodeType.DATANODE)
-          .setHostname(datanode.getHostName());
-
-      dnServiceInfoBuilder.addServicePort(ServicePort.newBuilder()
-          .setType(ServicePort.Type.HTTP)
-          .setValue(DatanodeDetails.getFromProtoBuf(datanode)
-              .getPort(DatanodeDetails.Port.Name.REST).getValue())
-          .build());
-
-      services.add(dnServiceInfoBuilder.build());
-    }
-
-    metrics.incNumGetServiceLists();
-    // For now there is no exception that can can happen in this call,
-    // so failure metrics is not handled. In future if there is any need to
-    // handle exception in this method, we need to incorporate
-    // metrics.incNumGetServiceListFails()
-    return services;
-  }
-}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/061b1685/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/ksm/KeySpaceManagerHttpServer.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/ksm/KeySpaceManagerHttpServer.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/ksm/KeySpaceManagerHttpServer.java
deleted file mode 100644
index 478804b..0000000
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/ksm/KeySpaceManagerHttpServer.java
+++ /dev/null
@@ -1,78 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with this
- * work for additional information regarding copyright ownership.  The ASF
- * licenses this file to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations under
- * the License.
- */
-
-package org.apache.hadoop.ozone.ksm;
-
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.ozone.OzoneConfigKeys;
-import org.apache.hadoop.ozone.OzoneConsts;
-import org.apache.hadoop.hdds.server.BaseHttpServer;
-
-import java.io.IOException;
-
-/**
- * HttpServer wrapper for the KeySpaceManager.
- */
-public class KeySpaceManagerHttpServer extends BaseHttpServer {
-
-  public KeySpaceManagerHttpServer(Configuration conf, KeySpaceManager ksm)
-      throws IOException {
-    super(conf, "ksm");
-    addServlet("serviceList", "/serviceList", ServiceListJSONServlet.class);
-    getWebAppContext().setAttribute(OzoneConsts.KSM_CONTEXT_ATTRIBUTE, ksm);
-  }
-
-  @Override protected String getHttpAddressKey() {
-    return KSMConfigKeys.OZONE_KSM_HTTP_ADDRESS_KEY;
-  }
-
-  @Override protected String getHttpBindHostKey() {
-    return KSMConfigKeys.OZONE_KSM_HTTP_BIND_HOST_KEY;
-  }
-
-  @Override protected String getHttpsAddressKey() {
-    return KSMConfigKeys.OZONE_KSM_HTTPS_ADDRESS_KEY;
-  }
-
-  @Override protected String getHttpsBindHostKey() {
-    return KSMConfigKeys.OZONE_KSM_HTTPS_BIND_HOST_KEY;
-  }
-
-  @Override protected String getBindHostDefault() {
-    return KSMConfigKeys.OZONE_KSM_HTTP_BIND_HOST_DEFAULT;
-  }
-
-  @Override protected int getHttpBindPortDefault() {
-    return KSMConfigKeys.OZONE_KSM_HTTP_BIND_PORT_DEFAULT;
-  }
-
-  @Override protected int getHttpsBindPortDefault() {
-    return KSMConfigKeys.OZONE_KSM_HTTPS_BIND_PORT_DEFAULT;
-  }
-
-  @Override protected String getKeytabFile() {
-    return KSMConfigKeys.OZONE_KSM_KEYTAB_FILE;
-  }
-
-  @Override protected String getSpnegoPrincipal() {
-    return OzoneConfigKeys.OZONE_SCM_WEB_AUTHENTICATION_KERBEROS_PRINCIPAL;
-  }
-
-  @Override protected String getEnabledKey() {
-    return KSMConfigKeys.OZONE_KSM_HTTP_ENABLED_KEY;
-  }
-}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/061b1685/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/ksm/OpenKeyCleanupService.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/ksm/OpenKeyCleanupService.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/ksm/OpenKeyCleanupService.java
deleted file mode 100644
index 8e2540a..0000000
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/ksm/OpenKeyCleanupService.java
+++ /dev/null
@@ -1,117 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.ozone.ksm;
-
-import org.apache.hadoop.ozone.common.BlockGroup;
-import org.apache.hadoop.ozone.common.DeleteBlockGroupResult;
-import org.apache.hadoop.hdds.scm.protocol.ScmBlockLocationProtocol;
-import org.apache.hadoop.util.StringUtils;
-import org.apache.hadoop.utils.BackgroundService;
-import org.apache.hadoop.utils.BackgroundTask;
-import org.apache.hadoop.utils.BackgroundTaskQueue;
-import org.apache.hadoop.utils.BackgroundTaskResult;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import java.io.IOException;
-import java.util.List;
-import java.util.concurrent.TimeUnit;
-
-/**
- * This is the background service to delete hanging open keys.
- * Scan the metadata of ksm periodically to get
- * the keys with prefix "#open#" and ask scm to
- * delete metadata accordingly, if scm returns
- * success for keys, then clean up those keys.
- */
-public class OpenKeyCleanupService extends BackgroundService {
-
-  private static final Logger LOG =
-      LoggerFactory.getLogger(OpenKeyCleanupService.class);
-
-  private final static int OPEN_KEY_DELETING_CORE_POOL_SIZE = 2;
-
-  private final KeyManager keyManager;
-  private final ScmBlockLocationProtocol scmClient;
-
-  public OpenKeyCleanupService(ScmBlockLocationProtocol scmClient,
-      KeyManager keyManager, int serviceInterval,
-      long serviceTimeout) {
-    super("OpenKeyCleanupService", serviceInterval, TimeUnit.SECONDS,
-        OPEN_KEY_DELETING_CORE_POOL_SIZE, serviceTimeout);
-    this.keyManager = keyManager;
-    this.scmClient = scmClient;
-  }
-
-  @Override
-  public BackgroundTaskQueue getTasks() {
-    BackgroundTaskQueue queue = new BackgroundTaskQueue();
-    queue.add(new OpenKeyDeletingTask());
-    return queue;
-  }
-
-  private class OpenKeyDeletingTask
-      implements BackgroundTask<BackgroundTaskResult> {
-
-    @Override
-    public int getPriority() {
-      return 0;
-    }
-
-    @Override
-    public BackgroundTaskResult call() throws Exception {
-      try {
-        List<BlockGroup> keyBlocksList = keyManager.getExpiredOpenKeys();
-        if (keyBlocksList.size() > 0) {
-          int toDeleteSize = keyBlocksList.size();
-          LOG.debug("Found {} to-delete open keys in KSM", toDeleteSize);
-          List<DeleteBlockGroupResult> results =
-              scmClient.deleteKeyBlocks(keyBlocksList);
-          int deletedSize = 0;
-          for (DeleteBlockGroupResult result : results) {
-            if (result.isSuccess()) {
-              try {
-                keyManager.deleteExpiredOpenKey(result.getObjectKey());
-                LOG.debug("Key {} deleted from KSM DB", result.getObjectKey());
-                deletedSize += 1;
-              } catch (IOException e) {
-                LOG.warn("Failed to delete hanging-open key {}",
-                    result.getObjectKey(), e);
-              }
-            } else {
-              LOG.warn("Deleting open Key {} failed because some of the blocks"
-                      + " were failed to delete, failed blocks: {}",
-                  result.getObjectKey(),
-                  StringUtils.join(",", result.getFailedBlocks()));
-            }
-          }
-          LOG.info("Found {} expired open key entries, successfully " +
-              "cleaned up {} entries", toDeleteSize, deletedSize);
-          return results::size;
-        } else {
-          LOG.debug("No hanging open key fond in KSM");
-        }
-      } catch (IOException e) {
-        LOG.error("Unable to get hanging open keys, retry in"
-            + " next interval", e);
-      }
-      return BackgroundTaskResult.EmptyTaskResult.newResult();
-    }
-  }
-}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/061b1685/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/ksm/ServiceListJSONServlet.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/ksm/ServiceListJSONServlet.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/ksm/ServiceListJSONServlet.java
deleted file mode 100644
index 34a80ce..0000000
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/ksm/ServiceListJSONServlet.java
+++ /dev/null
@@ -1,103 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.ozone.ksm;
-
-import com.fasterxml.jackson.databind.ObjectMapper;
-import com.fasterxml.jackson.databind.SerializationFeature;
-
-import org.apache.hadoop.ozone.OzoneConsts;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import javax.servlet.ServletException;
-import javax.servlet.http.HttpServlet;
-import javax.servlet.http.HttpServletRequest;
-import javax.servlet.http.HttpServletResponse;
-import java.io.IOException;
-import java.io.PrintWriter;
-
-
-/**
- * Provides REST access to Ozone Service List.
- * <p>
- * This servlet generally will be placed under the /serviceList URL of
- * KeySpaceManager HttpServer.
- *
- * The return format is of JSON and in the form
- * <p>
- *  <code><pre>
- *  {
- *    "services" : [
- *      {
- *        "NodeType":"KSM",
- *        "Hostname" "$hostname",
- *        "ports" : {
- *          "$PortType" : "$port",
- *          ...
- *        }
- *      }
- *    ]
- *  }
- *  </pre></code>
- *  <p>
- *
- */
-public class ServiceListJSONServlet  extends HttpServlet  {
-
-  private static final Logger LOG =
-      LoggerFactory.getLogger(ServiceListJSONServlet.class);
-  private static final long serialVersionUID = 1L;
-
-  private KeySpaceManager ksm;
-
-  public void init() throws ServletException {
-    this.ksm = (KeySpaceManager) getServletContext()
-        .getAttribute(OzoneConsts.KSM_CONTEXT_ATTRIBUTE);
-  }
-
-  /**
-   * Process a GET request for the specified resource.
-   *
-   * @param request
-   *          The servlet request we are processing
-   * @param response
-   *          The servlet response we are creating
-   */
-  @Override
-  public void doGet(HttpServletRequest request, HttpServletResponse response) {
-    try {
-      ObjectMapper objectMapper = new ObjectMapper();
-      objectMapper.enable(SerializationFeature.INDENT_OUTPUT);
-      response.setContentType("application/json; charset=utf8");
-      PrintWriter writer = response.getWriter();
-      try {
-        writer.write(objectMapper.writeValueAsString(ksm.getServiceList()));
-      } finally {
-        if (writer != null) {
-          writer.close();
-        }
-      }
-    } catch (IOException e) {
-      LOG.error(
-          "Caught an exception while processing ServiceList request", e);
-      response.setStatus(HttpServletResponse.SC_INTERNAL_SERVER_ERROR);
-    }
-  }
-
-}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/061b1685/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/ksm/VolumeManager.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/ksm/VolumeManager.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/ksm/VolumeManager.java
deleted file mode 100644
index 6ac78d6..0000000
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/ksm/VolumeManager.java
+++ /dev/null
@@ -1,100 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with this
- * work for additional information regarding copyright ownership.  The ASF
- * licenses this file to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations under
- * the License.
- */
-package org.apache.hadoop.ozone.ksm;
-
-import org.apache.hadoop.ozone.ksm.helpers.KsmVolumeArgs;
-import org.apache.hadoop.ozone.protocol.proto
-    .KeySpaceManagerProtocolProtos.OzoneAclInfo;
-
-import java.io.IOException;
-import java.util.List;
-
-/**
- * KSM volume manager interface.
- */
-public interface VolumeManager {
-
-  /**
-   * Create a new volume.
-   * @param args - Volume args to create a volume
-   */
-  void createVolume(KsmVolumeArgs args) throws IOException;
-
-  /**
-   * Changes the owner of a volume.
-   *
-   * @param volume - Name of the volume.
-   * @param owner - Name of the owner.
-   * @throws IOException
-   */
-  void setOwner(String volume, String owner) throws IOException;
-
-  /**
-   * Changes the Quota on a volume.
-   *
-   * @param volume - Name of the volume.
-   * @param quota - Quota in bytes.
-   * @throws IOException
-   */
-  void setQuota(String volume, long quota) throws IOException;
-
-  /**
-   * Gets the volume information.
-   * @param volume - Volume name.
-   * @return VolumeArgs or exception is thrown.
-   * @throws IOException
-   */
-  KsmVolumeArgs getVolumeInfo(String volume) throws IOException;
-
-  /**
-   * Deletes an existing empty volume.
-   *
-   * @param volume - Name of the volume.
-   * @throws IOException
-   */
-  void deleteVolume(String volume) throws IOException;
-
-  /**
-   * Checks if the specified user with a role can access this volume.
-   *
-   * @param volume - volume
-   * @param userAcl - user acl which needs to be checked for access
-   * @return true if the user has access for the volume, false otherwise
-   * @throws IOException
-   */
-  boolean checkVolumeAccess(String volume, OzoneAclInfo userAcl)
-      throws IOException;
-
-  /**
-   * Returns a list of volumes owned by a given user; if user is null,
-   * returns all volumes.
-   *
-   * @param userName
-   *   volume owner
-   * @param prefix
-   *   the volume prefix used to filter the listing result.
-   * @param startKey
-   *   the start volume name determines where to start listing from,
-   *   this key is excluded from the result.
-   * @param maxKeys
-   *   the maximum number of volumes to return.
-   * @return a list of {@link KsmVolumeArgs}
-   * @throws IOException
-   */
-  List<KsmVolumeArgs> listVolumes(String userName, String prefix,
-      String startKey, int maxKeys) throws IOException;
-}


---------------------------------------------------------------------
To unsubscribe, e-mail: common-commits-unsubscribe@hadoop.apache.org
For additional commands, e-mail: common-commits-help@hadoop.apache.org


[21/50] [abbrv] hadoop git commit: Revert "Merge branch 'trunk' of https://git-wip-us.apache.org/repos/asf/hadoop into trunk"

Posted by vi...@apache.org.
http://git-wip-us.apache.org/repos/asf/hadoop/blob/39ad9890/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/fairscheduler/FairSchedulerJsonVerifications.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/fairscheduler/FairSchedulerJsonVerifications.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/fairscheduler/FairSchedulerJsonVerifications.java
deleted file mode 100644
index 924411a..0000000
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/fairscheduler/FairSchedulerJsonVerifications.java
+++ /dev/null
@@ -1,139 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *     http://www.apache.org/licenses/LICENSE-2.0
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.yarn.server.resourcemanager.webapp.fairscheduler;
-
-import com.google.common.collect.Sets;
-import org.apache.hadoop.yarn.api.protocolrecords.ResourceTypes;
-import org.apache.hadoop.yarn.api.records.ResourceInformation;
-import org.codehaus.jettison.json.JSONArray;
-import org.codehaus.jettison.json.JSONException;
-import org.codehaus.jettison.json.JSONObject;
-
-import java.util.List;
-import java.util.Set;
-
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertNotNull;
-import static org.junit.Assert.assertTrue;
-
-/**
- * This test helper class is primarily used by
- * {@link TestRMWebServicesFairSchedulerCustomResourceTypes}.
- */
-public class FairSchedulerJsonVerifications {
-
-  private static final Set<String> RESOURCE_FIELDS =
-      Sets.newHashSet("minResources", "amUsedResources", "amMaxResources",
-          "fairResources", "clusterResources", "reservedResources",
-              "maxResources", "usedResources", "steadyFairResources",
-              "demandResources");
-  private final Set<String> customResourceTypes;
-
-  FairSchedulerJsonVerifications(List<String> customResourceTypes) {
-    this.customResourceTypes = Sets.newHashSet(customResourceTypes);
-  }
-
-  public void verify(JSONObject jsonObject) {
-    try {
-      verifyResourcesContainDefaultResourceTypes(jsonObject, RESOURCE_FIELDS);
-      verifyResourcesContainCustomResourceTypes(jsonObject, RESOURCE_FIELDS);
-    } catch (JSONException e) {
-      throw new RuntimeException(e);
-    }
-  }
-
-  private void verifyResourcesContainDefaultResourceTypes(JSONObject queue,
-      Set<String> resourceCategories) throws JSONException {
-    for (String resourceCategory : resourceCategories) {
-      boolean hasResourceCategory = queue.has(resourceCategory);
-      assertTrue("Queue " + queue + " does not have resource category key: "
-          + resourceCategory, hasResourceCategory);
-      verifyResourceContainsDefaultResourceTypes(
-          queue.getJSONObject(resourceCategory));
-    }
-  }
-
-  private void verifyResourceContainsDefaultResourceTypes(
-      JSONObject jsonObject) {
-    Object memory = jsonObject.opt("memory");
-    Object vCores = jsonObject.opt("vCores");
-
-    assertNotNull("Key 'memory' not found in: " + jsonObject, memory);
-    assertNotNull("Key 'vCores' not found in: " + jsonObject, vCores);
-  }
-
-  private void verifyResourcesContainCustomResourceTypes(JSONObject queue,
-      Set<String> resourceCategories) throws JSONException {
-    for (String resourceCategory : resourceCategories) {
-      assertTrue("Queue " + queue + " does not have resource category key: "
-          + resourceCategory, queue.has(resourceCategory));
-      verifyResourceContainsAllCustomResourceTypes(
-          queue.getJSONObject(resourceCategory));
-    }
-  }
-
-  private void verifyResourceContainsAllCustomResourceTypes(
-      JSONObject resourceCategory) throws JSONException {
-    assertTrue("resourceCategory does not have resourceInformations: "
-        + resourceCategory, resourceCategory.has("resourceInformations"));
-
-    JSONObject resourceInformations =
-        resourceCategory.getJSONObject("resourceInformations");
-    assertTrue(
-        "resourceInformations does not have resourceInformation object: "
-            + resourceInformations,
-        resourceInformations.has("resourceInformation"));
-    JSONArray customResources =
-        resourceInformations.getJSONArray("resourceInformation");
-
-    // customResources will include vcores / memory as well
-    assertEquals(
-        "Different number of custom resource types found than expected",
-        customResourceTypes.size(), customResources.length() - 2);
-
-    for (int i = 0; i < customResources.length(); i++) {
-      JSONObject customResource = customResources.getJSONObject(i);
-      assertTrue("Resource type does not have name field: " + customResource,
-          customResource.has("name"));
-      assertTrue("Resource type does not have name resourceType field: "
-          + customResource, customResource.has("resourceType"));
-      assertTrue(
-          "Resource type does not have name units field: " + customResource,
-          customResource.has("units"));
-      assertTrue(
-          "Resource type does not have name value field: " + customResource,
-          customResource.has("value"));
-
-      String name = customResource.getString("name");
-      String unit = customResource.getString("units");
-      String resourceType = customResource.getString("resourceType");
-      Long value = customResource.getLong("value");
-
-      if (ResourceInformation.MEMORY_URI.equals(name)
-          || ResourceInformation.VCORES_URI.equals(name)) {
-        continue;
-      }
-
-      assertTrue("Custom resource type " + name + " not found",
-          customResourceTypes.contains(name));
-      assertEquals("k", unit);
-      assertEquals(ResourceTypes.COUNTABLE,
-          ResourceTypes.valueOf(resourceType));
-      assertNotNull("Custom resource value " + value + " is null!", value);
-    }
-  }
-}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/39ad9890/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/fairscheduler/FairSchedulerXmlVerifications.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/fairscheduler/FairSchedulerXmlVerifications.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/fairscheduler/FairSchedulerXmlVerifications.java
deleted file mode 100644
index 63ae7b7..0000000
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/fairscheduler/FairSchedulerXmlVerifications.java
+++ /dev/null
@@ -1,153 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *     http://www.apache.org/licenses/LICENSE-2.0
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.yarn.server.resourcemanager.webapp.fairscheduler;
-
-
-import com.google.common.collect.Sets;
-import org.apache.hadoop.yarn.api.protocolrecords.ResourceTypes;
-import org.apache.hadoop.yarn.api.records.ResourceInformation;
-import org.w3c.dom.Document;
-import org.w3c.dom.Element;
-import org.w3c.dom.Node;
-import org.w3c.dom.NodeList;
-
-import java.util.List;
-import java.util.Set;
-
-import static org.apache.hadoop.yarn.server.resourcemanager.webapp.helper.XmlCustomResourceTypeTestCase.toXml;
-import static org.apache.hadoop.yarn.webapp.WebServicesTestUtils.getXmlLong;
-import static org.apache.hadoop.yarn.webapp.WebServicesTestUtils.getXmlString;
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertNotNull;
-import static org.junit.Assert.assertTrue;
-
-/**
- * This test helper class is primarily used by
- * {@link TestRMWebServicesFairSchedulerCustomResourceTypes}.
- */
-public class FairSchedulerXmlVerifications {
-
-  private static final Set<String> RESOURCE_FIELDS = Sets.newHashSet(
-      "minResources", "amUsedResources", "amMaxResources", "fairResources",
-      "clusterResources", "reservedResources", "maxResources", "usedResources",
-      "steadyFairResources", "demandResources");
-  private final Set<String> customResourceTypes;
-
-  FairSchedulerXmlVerifications(List<String> customResourceTypes) {
-    this.customResourceTypes = Sets.newHashSet(customResourceTypes);
-  }
-
-  public void verify(Element element) {
-    verifyResourcesContainDefaultResourceTypes(element, RESOURCE_FIELDS);
-    verifyResourcesContainCustomResourceTypes(element, RESOURCE_FIELDS);
-  }
-
-  private void verifyResourcesContainDefaultResourceTypes(Element queue,
-      Set<String> resourceCategories) {
-    for (String resourceCategory : resourceCategories) {
-      boolean hasResourceCategory = hasChild(queue, resourceCategory);
-      assertTrue("Queue " + queue + " does not have resource category key: "
-          + resourceCategory, hasResourceCategory);
-      verifyResourceContainsDefaultResourceTypes(
-              (Element) queue.getElementsByTagName(resourceCategory).item(0));
-    }
-  }
-
-  private void verifyResourceContainsDefaultResourceTypes(
-      Element element) {
-    Object memory = opt(element, "memory");
-    Object vCores = opt(element, "vCores");
-
-    assertNotNull("Key 'memory' not found in: " + element, memory);
-    assertNotNull("Key 'vCores' not found in: " + element, vCores);
-  }
-
-  private void verifyResourcesContainCustomResourceTypes(Element queue,
-      Set<String> resourceCategories) {
-    for (String resourceCategory : resourceCategories) {
-      assertTrue("Queue " + queue + " does not have key for resourceCategory: "
-          + resourceCategory, hasChild(queue, resourceCategory));
-      verifyResourceContainsCustomResourceTypes(
-              (Element) queue.getElementsByTagName(resourceCategory).item(0));
-    }
-  }
-
-  private void verifyResourceContainsCustomResourceTypes(
-      Element resourceCategory) {
-    assertEquals(
-        toXml(resourceCategory)
-            + " should have only one resourceInformations child!",
-        1, resourceCategory.getElementsByTagName("resourceInformations")
-            .getLength());
-    Element resourceInformations = (Element) resourceCategory
-        .getElementsByTagName("resourceInformations").item(0);
-
-    NodeList customResources =
-        resourceInformations.getElementsByTagName("resourceInformation");
-
-    // customResources will include vcores / memory as well
-    assertEquals(
-        "Different number of custom resource types found than expected",
-        customResourceTypes.size(), customResources.getLength() - 2);
-
-    for (int i = 0; i < customResources.getLength(); i++) {
-      Element customResource = (Element) customResources.item(i);
-      String name = getXmlString(customResource, "name");
-      String unit = getXmlString(customResource, "units");
-      String resourceType = getXmlString(customResource, "resourceType");
-      Long value = getXmlLong(customResource, "value");
-
-      if (ResourceInformation.MEMORY_URI.equals(name)
-          || ResourceInformation.VCORES_URI.equals(name)) {
-        continue;
-      }
-
-      assertTrue("Custom resource type " + name + " not found",
-          customResourceTypes.contains(name));
-      assertEquals("k", unit);
-      assertEquals(ResourceTypes.COUNTABLE,
-          ResourceTypes.valueOf(resourceType));
-      assertNotNull("Resource value should not be null for resource type "
-          + resourceType + ", listing xml contents: " + toXml(customResource),
-          value);
-    }
-  }
-
-  private Object opt(Node node, String child) {
-    NodeList nodes = getElementsByTagNameInternal(node, child);
-    if (nodes.getLength() > 0) {
-      return nodes.item(0);
-    }
-
-    return null;
-  }
-
-  private boolean hasChild(Node node, String child) {
-    return getElementsByTagNameInternal(node, child).getLength() > 0;
-  }
-
-  private NodeList getElementsByTagNameInternal(Node node, String child) {
-    if (node instanceof Element) {
-      return ((Element) node).getElementsByTagName(child);
-    } else if (node instanceof Document) {
-      return ((Document) node).getElementsByTagName(child);
-    } else {
-      throw new IllegalStateException("Unknown type of wrappedObject: " + node
-          + ", type: " + node.getClass());
-    }
-  }
-}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/39ad9890/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/fairscheduler/TestRMWebServicesFairSchedulerCustomResourceTypes.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/fairscheduler/TestRMWebServicesFairSchedulerCustomResourceTypes.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/fairscheduler/TestRMWebServicesFairSchedulerCustomResourceTypes.java
deleted file mode 100644
index de4d5a1..0000000
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/fairscheduler/TestRMWebServicesFairSchedulerCustomResourceTypes.java
+++ /dev/null
@@ -1,271 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *     http://www.apache.org/licenses/LICENSE-2.0
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.yarn.server.resourcemanager.webapp.fairscheduler;
-
-import com.google.inject.Guice;
-import com.google.inject.servlet.ServletModule;
-import com.sun.jersey.api.client.ClientResponse;
-import com.sun.jersey.api.client.WebResource;
-import com.sun.jersey.guice.spi.container.servlet.GuiceContainer;
-import com.sun.jersey.test.framework.WebAppDescriptor;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.yarn.api.records.Resource;
-import org.apache.hadoop.yarn.conf.YarnConfiguration;
-import org.apache.hadoop.yarn.server.resourcemanager.MockRM;
-import org.apache.hadoop.yarn.server.resourcemanager.ResourceManager;
-import org.apache.hadoop.yarn.server.resourcemanager.scheduler.ResourceScheduler;
-import org.apache.hadoop.yarn.server.resourcemanager.scheduler.fair.FSLeafQueue;
-import org.apache.hadoop.yarn.server.resourcemanager.scheduler.fair.FairScheduler;
-import org.apache.hadoop.yarn.server.resourcemanager.scheduler.fair.QueueManager;
-import org.apache.hadoop.yarn.server.resourcemanager.webapp.JAXBContextResolver;
-import org.apache.hadoop.yarn.server.resourcemanager.webapp.RMWebServices;
-import org.apache.hadoop.yarn.server.resourcemanager.webapp.helper.*;
-import org.apache.hadoop.yarn.util.resource.ResourceUtils;
-import org.apache.hadoop.yarn.webapp.GenericExceptionHandler;
-import org.apache.hadoop.yarn.webapp.GuiceServletConfig;
-import org.apache.hadoop.yarn.webapp.JerseyTestBase;
-import org.codehaus.jettison.json.JSONArray;
-import org.codehaus.jettison.json.JSONException;
-import org.codehaus.jettison.json.JSONObject;
-import org.junit.After;
-import org.junit.Before;
-import org.junit.Test;
-import org.w3c.dom.Element;
-import javax.ws.rs.core.MediaType;
-import java.lang.reflect.Method;
-import java.util.List;
-import java.util.Map;
-import java.util.function.Function;
-import java.util.stream.Collectors;
-
-import static org.junit.Assert.assertEquals;
-
-/**
- * This class is to test response representations of queue resources,
- * explicitly setting custom resource types. with the help of
- * {@link CustomResourceTypesConfigurationProvider}
- */
-public class TestRMWebServicesFairSchedulerCustomResourceTypes
-    extends JerseyTestBase {
-  private static MockRM rm;
-  private static YarnConfiguration conf;
-
-  private static class WebServletModule extends ServletModule {
-    @Override
-    protected void configureServlets() {
-      bind(JAXBContextResolver.class);
-      bind(RMWebServices.class);
-      bind(GenericExceptionHandler.class);
-      conf = new YarnConfiguration();
-      conf.setClass(YarnConfiguration.RM_SCHEDULER, FairScheduler.class,
-          ResourceScheduler.class);
-      initResourceTypes(conf);
-      rm = new MockRM(conf);
-      bind(ResourceManager.class).toInstance(rm);
-      serve("/*").with(GuiceContainer.class);
-    }
-
-    private void initResourceTypes(YarnConfiguration conf) {
-      conf.set(YarnConfiguration.RM_CONFIGURATION_PROVIDER_CLASS,
-          CustomResourceTypesConfigurationProvider.class.getName());
-      ResourceUtils.resetResourceTypes(conf);
-    }
-  }
-
-  @Before
-  @Override
-  public void setUp() throws Exception {
-    super.setUp();
-    createInjectorForWebServletModule();
-  }
-
-  @After
-  public void tearDown() {
-    ResourceUtils.resetResourceTypes(new Configuration());
-  }
-
-  private void createInjectorForWebServletModule() {
-    GuiceServletConfig
-        .setInjector(Guice.createInjector(new WebServletModule()));
-  }
-
-  @After
-  public void teardown() {
-    CustomResourceTypesConfigurationProvider.reset();
-  }
-
-  public TestRMWebServicesFairSchedulerCustomResourceTypes() {
-    super(new WebAppDescriptor.Builder(
-        "org.apache.hadoop.yarn.server.resourcemanager.webapp")
-            .contextListenerClass(GuiceServletConfig.class)
-            .filterClass(com.google.inject.servlet.GuiceFilter.class)
-            .contextPath("jersey-guice-filter").servletPath("/").build());
-  }
-
-  @Test
-  public void testClusterSchedulerWithCustomResourceTypesJson() {
-    FairScheduler scheduler = (FairScheduler) rm.getResourceScheduler();
-    QueueManager queueManager = scheduler.getQueueManager();
-    // create LeafQueues
-    queueManager.getLeafQueue("root.q.subqueue1", true);
-    queueManager.getLeafQueue("root.q.subqueue2", true);
-
-    FSLeafQueue subqueue1 =
-        queueManager.getLeafQueue("root.q.subqueue1", false);
-    incrementUsedResourcesOnQueue(subqueue1, 33L);
-
-    WebResource path =
-        resource().path("ws").path("v1").path("cluster").path("scheduler");
-    ClientResponse response =
-        path.accept(MediaType.APPLICATION_JSON).get(ClientResponse.class);
-
-    verifyJsonResponse(path, response,
-            CustomResourceTypesConfigurationProvider.getCustomResourceTypes());
-  }
-
-  @Test
-  public void testClusterSchedulerWithCustomResourceTypesXml() {
-    FairScheduler scheduler = (FairScheduler) rm.getResourceScheduler();
-    QueueManager queueManager = scheduler.getQueueManager();
-    // create LeafQueues
-    queueManager.getLeafQueue("root.q.subqueue1", true);
-    queueManager.getLeafQueue("root.q.subqueue2", true);
-
-    FSLeafQueue subqueue1 =
-        queueManager.getLeafQueue("root.q.subqueue1", false);
-    incrementUsedResourcesOnQueue(subqueue1, 33L);
-
-    WebResource path =
-        resource().path("ws").path("v1").path("cluster").path("scheduler");
-    ClientResponse response =
-        path.accept(MediaType.APPLICATION_XML).get(ClientResponse.class);
-
-    verifyXmlResponse(path, response,
-        CustomResourceTypesConfigurationProvider.getCustomResourceTypes());
-  }
-
-  @Test
-  public void testClusterSchedulerWithElevenCustomResourceTypesXml() {
-    CustomResourceTypesConfigurationProvider.setNumberOfResourceTypes(11);
-    createInjectorForWebServletModule();
-
-    FairScheduler scheduler = (FairScheduler) rm.getResourceScheduler();
-    QueueManager queueManager = scheduler.getQueueManager();
-    // create LeafQueues
-    queueManager.getLeafQueue("root.q.subqueue1", true);
-    queueManager.getLeafQueue("root.q.subqueue2", true);
-
-    FSLeafQueue subqueue1 =
-        queueManager.getLeafQueue("root.q.subqueue1", false);
-    incrementUsedResourcesOnQueue(subqueue1, 33L);
-
-    WebResource path =
-        resource().path("ws").path("v1").path("cluster").path("scheduler");
-    ClientResponse response =
-        path.accept(MediaType.APPLICATION_XML).get(ClientResponse.class);
-
-    verifyXmlResponse(path, response,
-        CustomResourceTypesConfigurationProvider.getCustomResourceTypes());
-  }
-
-  @Test
-  public void testClusterSchedulerElevenWithCustomResourceTypesJson() {
-    CustomResourceTypesConfigurationProvider.setNumberOfResourceTypes(11);
-    createInjectorForWebServletModule();
-
-    FairScheduler scheduler = (FairScheduler) rm.getResourceScheduler();
-    QueueManager queueManager = scheduler.getQueueManager();
-    // create LeafQueues
-    queueManager.getLeafQueue("root.q.subqueue1", true);
-    queueManager.getLeafQueue("root.q.subqueue2", true);
-
-    FSLeafQueue subqueue1 =
-        queueManager.getLeafQueue("root.q.subqueue1", false);
-    incrementUsedResourcesOnQueue(subqueue1, 33L);
-
-    WebResource path =
-        resource().path("ws").path("v1").path("cluster").path("scheduler");
-    ClientResponse response =
-        path.accept(MediaType.APPLICATION_JSON).get(ClientResponse.class);
-
-    verifyJsonResponse(path, response,
-        CustomResourceTypesConfigurationProvider.getCustomResourceTypes());
-  }
-
-  private void verifyJsonResponse(WebResource path, ClientResponse response,
-      List<String> customResourceTypes) {
-    JsonCustomResourceTypeTestcase testCase =
-        new JsonCustomResourceTypeTestcase(path,
-            new BufferedClientResponse(response));
-    testCase.verify(json -> {
-      try {
-        JSONArray queues = json.getJSONObject("scheduler")
-            .getJSONObject("schedulerInfo").getJSONObject("rootQueue")
-            .getJSONObject("childQueues").getJSONArray("queue");
-
-        // childQueueInfo consists of subqueue1 and subqueue2 info
-        assertEquals(2, queues.length());
-        JSONObject firstChildQueue = queues.getJSONObject(0);
-        new FairSchedulerJsonVerifications(customResourceTypes)
-            .verify(firstChildQueue);
-      } catch (JSONException e) {
-        throw new RuntimeException(e);
-      }
-    });
-  }
-
-  private void verifyXmlResponse(WebResource path, ClientResponse response,
-          List<String> customResourceTypes) {
-    XmlCustomResourceTypeTestCase testCase = new XmlCustomResourceTypeTestCase(
-        path, new BufferedClientResponse(response));
-
-    testCase.verify(xml -> {
-      Element scheduler =
-          (Element) xml.getElementsByTagName("scheduler").item(0);
-      Element schedulerInfo =
-          (Element) scheduler.getElementsByTagName("schedulerInfo").item(0);
-      Element rootQueue =
-          (Element) schedulerInfo.getElementsByTagName("rootQueue").item(0);
-
-      Element childQueues =
-          (Element) rootQueue.getElementsByTagName("childQueues").item(0);
-      Element queue =
-          (Element) childQueues.getElementsByTagName("queue").item(0);
-      new FairSchedulerXmlVerifications(customResourceTypes).verify(queue);
-    });
-  }
-
-  private void incrementUsedResourcesOnQueue(final FSLeafQueue queue,
-      final long value) {
-    try {
-      Method incUsedResourceMethod = queue.getClass().getSuperclass()
-          .getDeclaredMethod("incUsedResource", Resource.class);
-      incUsedResourceMethod.setAccessible(true);
-
-      Map<String, Long> customResources =
-          CustomResourceTypesConfigurationProvider.getCustomResourceTypes()
-              .stream()
-              .collect(Collectors.toMap(Function.identity(), v -> value));
-
-      incUsedResourceMethod.invoke(queue,
-          Resource.newInstance(20, 30, customResources));
-    } catch (Exception e) {
-      throw new RuntimeException(e);
-    }
-  }
-
-}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/39ad9890/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/helper/AppInfoJsonVerifications.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/helper/AppInfoJsonVerifications.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/helper/AppInfoJsonVerifications.java
deleted file mode 100644
index 4ab1443..0000000
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/helper/AppInfoJsonVerifications.java
+++ /dev/null
@@ -1,123 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *     http://www.apache.org/licenses/LICENSE-2.0
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.yarn.server.resourcemanager.webapp.helper;
-
-import org.apache.hadoop.yarn.server.resourcemanager.ResourceManager;
-import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMApp;
-import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.AppInfo;
-import org.codehaus.jettison.json.JSONException;
-import org.codehaus.jettison.json.JSONObject;
-import static org.apache.hadoop.yarn.webapp.WebServicesTestUtils.checkStringEqual;
-import static org.apache.hadoop.yarn.webapp.WebServicesTestUtils.checkStringMatch;
-import static org.junit.Assert.*;
-
-/**
- * Contains all value verifications that are needed to verify {@link AppInfo}
- * JSON objects.
- */
-public final class AppInfoJsonVerifications {
-
-  private AppInfoJsonVerifications() {
-    //utility class
-  }
-
-  /**
-   * Tests whether {@link AppInfo} representation object contains the required
-   * values as per defined in the specified app parameter.
-   * @param  app  an RMApp instance that contains the required values
-   *              to test against.
-   */
-  public static void verify(JSONObject info, RMApp app) throws JSONException {
-    checkStringMatch("id", app.getApplicationId().toString(),
-        info.getString("id"));
-    checkStringMatch("user", app.getUser(), info.getString("user"));
-    checkStringMatch("name", app.getName(), info.getString("name"));
-    checkStringMatch("applicationType", app.getApplicationType(),
-        info.getString("applicationType"));
-    checkStringMatch("queue", app.getQueue(), info.getString("queue"));
-    assertEquals("priority doesn't match", 0, info.getInt("priority"));
-    checkStringMatch("state", app.getState().toString(),
-        info.getString("state"));
-    checkStringMatch("finalStatus", app.getFinalApplicationStatus().toString(),
-        info.getString("finalStatus"));
-    assertEquals("progress doesn't match", 0,
-        (float) info.getDouble("progress"), 0.0);
-    if ("UNASSIGNED".equals(info.getString("trackingUI"))) {
-      checkStringMatch("trackingUI", "UNASSIGNED",
-          info.getString("trackingUI"));
-    }
-    checkStringEqual("diagnostics", app.getDiagnostics().toString(),
-        info.getString("diagnostics"));
-    assertEquals("clusterId doesn't match",
-        ResourceManager.getClusterTimeStamp(), info.getLong("clusterId"));
-    assertEquals("startedTime doesn't match", app.getStartTime(),
-        info.getLong("startedTime"));
-    assertEquals("finishedTime doesn't match", app.getFinishTime(),
-        info.getLong("finishedTime"));
-    assertTrue("elapsed time not greater than 0",
-        info.getLong("elapsedTime") > 0);
-    checkStringMatch("amHostHttpAddress",
-        app.getCurrentAppAttempt().getMasterContainer().getNodeHttpAddress(),
-        info.getString("amHostHttpAddress"));
-    assertTrue("amContainerLogs doesn't match",
-        info.getString("amContainerLogs").startsWith("http://"));
-    assertTrue("amContainerLogs doesn't contain user info",
-        info.getString("amContainerLogs").endsWith("/" + app.getUser()));
-    assertEquals("allocatedMB doesn't match", 1024, info.getInt("allocatedMB"));
-    assertEquals("allocatedVCores doesn't match", 1,
-        info.getInt("allocatedVCores"));
-    assertEquals("queueUsagePerc doesn't match", 50.0f,
-        (float) info.getDouble("queueUsagePercentage"), 0.01f);
-    assertEquals("clusterUsagePerc doesn't match", 50.0f,
-        (float) info.getDouble("clusterUsagePercentage"), 0.01f);
-    assertEquals("numContainers doesn't match", 1,
-        info.getInt("runningContainers"));
-    assertNotNull("preemptedResourceSecondsMap should not be null",
-        info.getJSONObject("preemptedResourceSecondsMap"));
-    assertEquals("preemptedResourceMB doesn't match",
-        app.getRMAppMetrics().getResourcePreempted().getMemorySize(),
-        info.getInt("preemptedResourceMB"));
-    assertEquals("preemptedResourceVCores doesn't match",
-        app.getRMAppMetrics().getResourcePreempted().getVirtualCores(),
-        info.getInt("preemptedResourceVCores"));
-    assertEquals("numNonAMContainerPreempted doesn't match",
-        app.getRMAppMetrics().getNumNonAMContainersPreempted(),
-        info.getInt("numNonAMContainerPreempted"));
-    assertEquals("numAMContainerPreempted doesn't match",
-        app.getRMAppMetrics().getNumAMContainersPreempted(),
-        info.getInt("numAMContainerPreempted"));
-    assertEquals("Log aggregation Status doesn't match",
-        app.getLogAggregationStatusForAppReport().toString(),
-        info.getString("logAggregationStatus"));
-    assertEquals("unmanagedApplication doesn't match",
-        app.getApplicationSubmissionContext().getUnmanagedAM(),
-        info.getBoolean("unmanagedApplication"));
-
-    if (app.getApplicationSubmissionContext()
-        .getNodeLabelExpression() != null) {
-      assertEquals("appNodeLabelExpression doesn't match",
-          app.getApplicationSubmissionContext().getNodeLabelExpression(),
-          info.getString("appNodeLabelExpression"));
-    }
-    assertEquals("amNodeLabelExpression doesn't match",
-        app.getAMResourceRequests().get(0).getNodeLabelExpression(),
-        info.getString("amNodeLabelExpression"));
-    assertEquals("amRPCAddress",
-        AppInfo.getAmRPCAddressFromRMAppAttempt(app.getCurrentAppAttempt()),
-        info.getString("amRPCAddress"));
-  }
-}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/39ad9890/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/helper/AppInfoXmlVerifications.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/helper/AppInfoXmlVerifications.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/helper/AppInfoXmlVerifications.java
deleted file mode 100644
index 7c5b6db..0000000
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/helper/AppInfoXmlVerifications.java
+++ /dev/null
@@ -1,132 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *     http://www.apache.org/licenses/LICENSE-2.0
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.yarn.server.resourcemanager.webapp.helper;
-
-import org.apache.hadoop.yarn.server.resourcemanager.ResourceManager;
-import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMApp;
-import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.AppInfo;
-import org.apache.hadoop.yarn.webapp.WebServicesTestUtils;
-import org.w3c.dom.Element;
-import static org.apache.hadoop.yarn.webapp.WebServicesTestUtils.checkStringMatch;
-import static org.apache.hadoop.yarn.webapp.WebServicesTestUtils.getXmlBoolean;
-import static org.apache.hadoop.yarn.webapp.WebServicesTestUtils.getXmlFloat;
-import static org.apache.hadoop.yarn.webapp.WebServicesTestUtils.getXmlInt;
-import static org.apache.hadoop.yarn.webapp.WebServicesTestUtils.getXmlLong;
-import static org.apache.hadoop.yarn.webapp.WebServicesTestUtils.getXmlString;
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertNotNull;
-import static org.junit.Assert.assertTrue;
-
-/**
- * Contains all value verifications that are needed to verify {@link AppInfo}
- * XML documents.
- */
-public final class AppInfoXmlVerifications {
-
-  private AppInfoXmlVerifications() {
-    //utility class
-  }
-
-  /**
-   * Tests whether {@link AppInfo} representation object contains the required
-   * values as per defined in the specified app parameter.
-   * @param info
-   * @param  app  an RMApp instance that contains the required values
-   */
-  public static void verify(Element info, RMApp app) {
-    checkStringMatch("id", app.getApplicationId()
-            .toString(), getXmlString(info, "id"));
-    checkStringMatch("user", app.getUser(),
-            getXmlString(info, "user"));
-    checkStringMatch("name", app.getName(),
-            getXmlString(info, "name"));
-    checkStringMatch("applicationType",
-            app.getApplicationType(), getXmlString(info, "applicationType"));
-    checkStringMatch("queue", app.getQueue(),
-            getXmlString(info, "queue"));
-    assertEquals("priority doesn't match", 0, getXmlInt(info, "priority"));
-    checkStringMatch("state", app.getState().toString(),
-            getXmlString(info, "state"));
-    checkStringMatch("finalStatus", app
-            .getFinalApplicationStatus().toString(),
-            getXmlString(info, "finalStatus"));
-    assertEquals("progress doesn't match", 0, getXmlFloat(info, "progress"),
-        0.0);
-    if ("UNASSIGNED".equals(getXmlString(info, "trackingUI"))) {
-      checkStringMatch("trackingUI", "UNASSIGNED",
-              getXmlString(info, "trackingUI"));
-    }
-    WebServicesTestUtils.checkStringEqual("diagnostics",
-            app.getDiagnostics().toString(), getXmlString(info, "diagnostics"));
-    assertEquals("clusterId doesn't match",
-            ResourceManager.getClusterTimeStamp(),
-            getXmlLong(info, "clusterId"));
-    assertEquals("startedTime doesn't match", app.getStartTime(),
-            getXmlLong(info, "startedTime"));
-    assertEquals("finishedTime doesn't match", app.getFinishTime(),
-            getXmlLong(info, "finishedTime"));
-    assertTrue("elapsed time not greater than 0",
-            getXmlLong(info, "elapsedTime") > 0);
-    checkStringMatch("amHostHttpAddress", app
-                    .getCurrentAppAttempt().getMasterContainer()
-                    .getNodeHttpAddress(),
-            getXmlString(info, "amHostHttpAddress"));
-    assertTrue("amContainerLogs doesn't match",
-        getXmlString(info, "amContainerLogs").startsWith("http://"));
-    assertTrue("amContainerLogs doesn't contain user info",
-        getXmlString(info, "amContainerLogs").endsWith("/" + app.getUser()));
-    assertEquals("allocatedMB doesn't match", 1024,
-            getXmlInt(info, "allocatedMB"));
-    assertEquals("allocatedVCores doesn't match", 1,
-            getXmlInt(info, "allocatedVCores"));
-    assertEquals("queueUsagePerc doesn't match", 50.0f,
-            getXmlFloat(info, "queueUsagePercentage"), 0.01f);
-    assertEquals("clusterUsagePerc doesn't match", 50.0f,
-            getXmlFloat(info, "clusterUsagePercentage"), 0.01f);
-    assertEquals("numContainers doesn't match", 1,
-        getXmlInt(info, "runningContainers"));
-    assertNotNull("preemptedResourceSecondsMap should not be null",
-            info.getElementsByTagName("preemptedResourceSecondsMap"));
-    assertEquals("preemptedResourceMB doesn't match", app
-                    .getRMAppMetrics().getResourcePreempted().getMemorySize(),
-            getXmlInt(info, "preemptedResourceMB"));
-    assertEquals("preemptedResourceVCores doesn't match", app
-                    .getRMAppMetrics().getResourcePreempted().getVirtualCores(),
-            getXmlInt(info, "preemptedResourceVCores"));
-    assertEquals("numNonAMContainerPreempted doesn't match", app
-                    .getRMAppMetrics().getNumNonAMContainersPreempted(),
-            getXmlInt(info, "numNonAMContainerPreempted"));
-    assertEquals("numAMContainerPreempted doesn't match", app
-                    .getRMAppMetrics().getNumAMContainersPreempted(),
-            getXmlInt(info, "numAMContainerPreempted"));
-    assertEquals("Log aggregation Status doesn't match", app
-                    .getLogAggregationStatusForAppReport().toString(),
-            getXmlString(info, "logAggregationStatus"));
-    assertEquals("unmanagedApplication doesn't match", app
-                    .getApplicationSubmissionContext().getUnmanagedAM(),
-            getXmlBoolean(info, "unmanagedApplication"));
-    assertEquals("unmanagedApplication doesn't match",
-            app.getApplicationSubmissionContext().getNodeLabelExpression(),
-            getXmlString(info, "appNodeLabelExpression"));
-    assertEquals("unmanagedApplication doesn't match",
-            app.getAMResourceRequests().get(0).getNodeLabelExpression(),
-            getXmlString(info, "amNodeLabelExpression"));
-    assertEquals("amRPCAddress",
-            AppInfo.getAmRPCAddressFromRMAppAttempt(app.getCurrentAppAttempt()),
-            getXmlString(info, "amRPCAddress"));
-  }
-}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/39ad9890/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/helper/BufferedClientResponse.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/helper/BufferedClientResponse.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/helper/BufferedClientResponse.java
deleted file mode 100644
index a8990ca..0000000
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/helper/BufferedClientResponse.java
+++ /dev/null
@@ -1,57 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.yarn.server.resourcemanager.webapp.helper;
-
-
-import com.sun.jersey.api.client.ClientHandlerException;
-import com.sun.jersey.api.client.ClientResponse;
-import com.sun.jersey.api.client.UniformInterfaceException;
-
-import javax.ws.rs.core.MediaType;
-import java.io.IOException;
-
-/**
- * This class is merely a wrapper for {@link ClientResponse}. Given that the
- * entity input stream of {@link ClientResponse} can be read only once by
- * default and for some tests it is convenient to read the input stream many
- * times, this class hides the details of how to do that and prevents
- * unnecessary code duplication in tests.
- */
-public class BufferedClientResponse {
-  private ClientResponse response;
-
-  public BufferedClientResponse(ClientResponse response) {
-    response.bufferEntity();
-    this.response = response;
-  }
-
-  public <T> T getEntity(Class<T> clazz)
-          throws ClientHandlerException, UniformInterfaceException {
-    try {
-      response.getEntityInputStream().reset();
-    } catch (IOException e) {
-      throw new RuntimeException(e);
-    }
-    return response.getEntity(clazz);
-  }
-
-  public MediaType getType() {
-    return response.getType();
-  }
-}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/39ad9890/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/helper/JsonCustomResourceTypeTestcase.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/helper/JsonCustomResourceTypeTestcase.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/helper/JsonCustomResourceTypeTestcase.java
deleted file mode 100644
index 9d6a111..0000000
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/helper/JsonCustomResourceTypeTestcase.java
+++ /dev/null
@@ -1,77 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.yarn.server.resourcemanager.webapp.helper;
-
-import com.sun.jersey.api.client.WebResource;
-import org.apache.hadoop.http.JettyUtils;
-import org.codehaus.jettison.json.JSONObject;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import javax.ws.rs.core.MediaType;
-
-import java.util.function.Consumer;
-
-import static org.junit.Assert.*;
-
-/**
- * This class hides the implementation details of how to verify the structure of
- * JSON responses. Tests should only provide the path of the
- * {@link WebResource}, the response from the resource and
- * the verifier Consumer to
- * {@link JsonCustomResourceTypeTestcase#verify(Consumer)}. An instance of
- * {@link JSONObject} will be passed to that consumer to be able to
- * verify the response.
- */
-public class JsonCustomResourceTypeTestcase {
-  private static final Logger LOG =
-      LoggerFactory.getLogger(JsonCustomResourceTypeTestcase.class);
-
-  private final WebResource path;
-  private final BufferedClientResponse response;
-  private final JSONObject parsedResponse;
-
-  public JsonCustomResourceTypeTestcase(WebResource path,
-                                        BufferedClientResponse response) {
-    this.path = path;
-    this.response = response;
-    this.parsedResponse = response.getEntity(JSONObject.class);
-  }
-
-  public void verify(Consumer<JSONObject> verifier) {
-    assertEquals(MediaType.APPLICATION_JSON_TYPE + "; " + JettyUtils.UTF_8,
-        response.getType().toString());
-
-    logResponse();
-
-    String responseStr = response.getEntity(String.class);
-    if (responseStr == null || responseStr.isEmpty()) {
-      throw new IllegalStateException("Response is null or empty!");
-    }
-    verifier.accept(parsedResponse);
-  }
-
-  private void logResponse() {
-    String responseStr = response.getEntity(String.class);
-    LOG.info("Raw response from service URL {}: {}", path.toString(),
-        responseStr);
-    LOG.info("Parsed response from service URL {}: {}", path.toString(),
-        parsedResponse);
-  }
-}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hadoop/blob/39ad9890/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/helper/ResourceRequestsJsonVerifications.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/helper/ResourceRequestsJsonVerifications.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/helper/ResourceRequestsJsonVerifications.java
deleted file mode 100644
index 6e58a89..0000000
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/helper/ResourceRequestsJsonVerifications.java
+++ /dev/null
@@ -1,252 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *     http://www.apache.org/licenses/LICENSE-2.0
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.yarn.server.resourcemanager.webapp.helper;
-
-import com.google.common.collect.Lists;
-import com.google.common.collect.Maps;
-import org.apache.hadoop.yarn.api.protocolrecords.ResourceTypes;
-import org.apache.hadoop.yarn.api.records.ResourceInformation;
-import org.apache.hadoop.yarn.api.records.ResourceRequest;
-import org.codehaus.jettison.json.JSONArray;
-import org.codehaus.jettison.json.JSONException;
-import org.codehaus.jettison.json.JSONObject;
-
-import java.util.List;
-import java.util.Map;
-
-import static junit.framework.TestCase.assertTrue;
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertNotNull;
-
-/**
- * Performs value verifications on
- * {@link org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.ResourceRequestInfo}
- * objects against the values of {@link ResourceRequest}. With the help of the
- * {@link Builder}, users can also make verifications of the custom resource
- * types and its values.
- */
-public class ResourceRequestsJsonVerifications {
-  private final ResourceRequest resourceRequest;
-  private final JSONObject requestInfo;
-  private final Map<String, Long> customResourceTypes;
-  private final List<String> expectedCustomResourceTypes;
-
-  ResourceRequestsJsonVerifications(Builder builder) {
-    this.resourceRequest = builder.resourceRequest;
-    this.requestInfo = builder.requestInfo;
-    this.customResourceTypes = builder.customResourceTypes;
-    this.expectedCustomResourceTypes = builder.expectedCustomResourceTypes;
-  }
-
-  public static void verify(JSONObject requestInfo, ResourceRequest rr)
-      throws JSONException {
-    createDefaultBuilder(requestInfo, rr).build().verify();
-  }
-
-  public static void verifyWithCustomResourceTypes(JSONObject requestInfo,
-      ResourceRequest resourceRequest, List<String> expectedResourceTypes)
-      throws JSONException {
-
-    createDefaultBuilder(requestInfo, resourceRequest)
-        .withExpectedCustomResourceTypes(expectedResourceTypes)
-        .withCustomResourceTypes(
-            extractActualCustomResourceTypes(requestInfo, expectedResourceTypes))
-        .build().verify();
-  }
-
-  private static Builder createDefaultBuilder(JSONObject requestInfo,
-      ResourceRequest resourceRequest) {
-    return new ResourceRequestsJsonVerifications.Builder()
-            .withRequest(resourceRequest)
-            .withRequestInfoJson(requestInfo);
-  }
-
-  private static Map<String, Long> extractActualCustomResourceTypes(
-      JSONObject requestInfo, List<String> expectedResourceTypes)
-      throws JSONException {
-    JSONObject capability = requestInfo.getJSONObject("capability");
-    Map<String, Long> resourceAndValue =
-        extractCustomResorceTypeValues(capability, expectedResourceTypes);
-    Map.Entry<String, Long> resourceEntry =
-        resourceAndValue.entrySet().iterator().next();
-
-    assertTrue(
-        "Found resource type: " + resourceEntry.getKey()
-            + " is not in expected resource types: " + expectedResourceTypes,
-        expectedResourceTypes.contains(resourceEntry.getKey()));
-
-    return resourceAndValue;
-  }
-
-  private static Map<String, Long> extractCustomResorceTypeValues(
-      JSONObject capability, List<String> expectedResourceTypes)
-      throws JSONException {
-    assertTrue(
-        "resourceCategory does not have resourceInformations: " + capability,
-        capability.has("resourceInformations"));
-
-    JSONObject resourceInformations =
-        capability.getJSONObject("resourceInformations");
-    assertTrue(
-        "resourceInformations does not have resourceInformation object: "
-            + resourceInformations,
-        resourceInformations.has("resourceInformation"));
-    JSONArray customResources =
-        resourceInformations.getJSONArray("resourceInformation");
-
-    // customResources will include vcores / memory as well
-    assertEquals(
-        "Different number of custom resource types found than expected",
-        expectedResourceTypes.size(), customResources.length() - 2);
-
-    Map<String, Long> resourceValues = Maps.newHashMap();
-    for (int i = 0; i < customResources.length(); i++) {
-      JSONObject customResource = customResources.getJSONObject(i);
-      assertTrue("Resource type does not have name field: " + customResource,
-          customResource.has("name"));
-      assertTrue("Resource type does not have name resourceType field: "
-          + customResource, customResource.has("resourceType"));
-      assertTrue(
-          "Resource type does not have name units field: " + customResource,
-          customResource.has("units"));
-      assertTrue(
-          "Resource type does not have name value field: " + customResource,
-          customResource.has("value"));
-
-      String name = customResource.getString("name");
-      String unit = customResource.getString("units");
-      String resourceType = customResource.getString("resourceType");
-      Long value = customResource.getLong("value");
-
-      if (ResourceInformation.MEMORY_URI.equals(name)
-          || ResourceInformation.VCORES_URI.equals(name)) {
-        continue;
-      }
-
-      assertTrue("Custom resource type " + name + " not found",
-          expectedResourceTypes.contains(name));
-      assertEquals("k", unit);
-      assertEquals(ResourceTypes.COUNTABLE,
-          ResourceTypes.valueOf(resourceType));
-      assertNotNull("Custom resource value " + value + " is null!", value);
-      resourceValues.put(name, value);
-    }
-
-    return resourceValues;
-  }
-
-  private void verify() throws JSONException {
-    assertEquals("nodeLabelExpression doesn't match",
-        resourceRequest.getNodeLabelExpression(),
-            requestInfo.getString("nodeLabelExpression"));
-    assertEquals("numContainers doesn't match",
-            resourceRequest.getNumContainers(),
-            requestInfo.getInt("numContainers"));
-    assertEquals("relaxLocality doesn't match",
-            resourceRequest.getRelaxLocality(),
-            requestInfo.getBoolean("relaxLocality"));
-    assertEquals("priority does not match",
-            resourceRequest.getPriority().getPriority(),
-            requestInfo.getInt("priority"));
-    assertEquals("resourceName does not match",
-            resourceRequest.getResourceName(),
-            requestInfo.getString("resourceName"));
-    assertEquals("memory does not match",
-        resourceRequest.getCapability().getMemorySize(),
-            requestInfo.getJSONObject("capability").getLong("memory"));
-    assertEquals("vCores does not match",
-        resourceRequest.getCapability().getVirtualCores(),
-            requestInfo.getJSONObject("capability").getLong("vCores"));
-
-    verifyAtLeastOneCustomResourceIsSerialized();
-
-    JSONObject executionTypeRequest =
-            requestInfo.getJSONObject("executionTypeRequest");
-    assertEquals("executionType does not match",
-        resourceRequest.getExecutionTypeRequest().getExecutionType().name(),
-            executionTypeRequest.getString("executionType"));
-    assertEquals("enforceExecutionType does not match",
-            resourceRequest.getExecutionTypeRequest().getEnforceExecutionType(),
-            executionTypeRequest.getBoolean("enforceExecutionType"));
-  }
-
-  /**
-   * JSON serialization produces "invalid JSON" by default as maps are
-   * serialized like this:
-   * "customResources":{"entry":{"key":"customResource-1","value":"0"}}
-   * If the map has multiple keys then multiple entries will be serialized.
-   * Our json parser in tests cannot handle duplicates therefore only one
-   * custom resource will be in the parsed json. See:
-   * https://issues.apache.org/jira/browse/YARN-7505
-   */
-  private void verifyAtLeastOneCustomResourceIsSerialized() {
-    boolean resourceFound = false;
-    for (String expectedCustomResourceType : expectedCustomResourceTypes) {
-      if (customResourceTypes.containsKey(expectedCustomResourceType)) {
-        resourceFound = true;
-        Long resourceValue =
-            customResourceTypes.get(expectedCustomResourceType);
-        assertNotNull("Resource value should not be null!", resourceValue);
-      }
-    }
-    assertTrue("No custom resource type can be found in the response!",
-        resourceFound);
-  }
-
-  /**
-   * Builder class for {@link ResourceRequestsJsonVerifications}.
-   */
-  public static final class Builder {
-    private List<String> expectedCustomResourceTypes = Lists.newArrayList();
-    private Map<String, Long> customResourceTypes;
-    private ResourceRequest resourceRequest;
-    private JSONObject requestInfo;
-
-    Builder() {
-    }
-
-    public static Builder create() {
-      return new Builder();
-    }
-
-    Builder withExpectedCustomResourceTypes(
-            List<String> expectedCustomResourceTypes) {
-      this.expectedCustomResourceTypes = expectedCustomResourceTypes;
-      return this;
-    }
-
-    Builder withCustomResourceTypes(
-            Map<String, Long> customResourceTypes) {
-      this.customResourceTypes = customResourceTypes;
-      return this;
-    }
-
-    Builder withRequest(ResourceRequest resourceRequest) {
-      this.resourceRequest = resourceRequest;
-      return this;
-    }
-
-    Builder withRequestInfoJson(JSONObject requestInfo) {
-      this.requestInfo = requestInfo;
-      return this;
-    }
-
-    public ResourceRequestsJsonVerifications build() {
-      return new ResourceRequestsJsonVerifications(this);
-    }
-  }
-}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/39ad9890/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/helper/ResourceRequestsXmlVerifications.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/helper/ResourceRequestsXmlVerifications.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/helper/ResourceRequestsXmlVerifications.java
deleted file mode 100644
index af9b0f3..0000000
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/helper/ResourceRequestsXmlVerifications.java
+++ /dev/null
@@ -1,215 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *     http://www.apache.org/licenses/LICENSE-2.0
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.yarn.server.resourcemanager.webapp.helper;
-
-import com.google.common.collect.Lists;
-import com.google.common.collect.Maps;
-import com.google.common.collect.Sets;
-import org.apache.hadoop.yarn.api.protocolrecords.ResourceTypes;
-import org.apache.hadoop.yarn.api.records.ResourceInformation;
-import org.apache.hadoop.yarn.api.records.ResourceRequest;
-import org.w3c.dom.Element;
-import org.w3c.dom.NodeList;
-
-import java.util.List;
-import java.util.Map;
-import java.util.Set;
-
-import static junit.framework.TestCase.assertTrue;
-import static org.apache.hadoop.yarn.server.resourcemanager.webapp.helper.XmlCustomResourceTypeTestCase.toXml;
-import static org.apache.hadoop.yarn.webapp.WebServicesTestUtils.getXmlBoolean;
-import static org.apache.hadoop.yarn.webapp.WebServicesTestUtils.getXmlInt;
-import static org.apache.hadoop.yarn.webapp.WebServicesTestUtils.getXmlLong;
-import static org.apache.hadoop.yarn.webapp.WebServicesTestUtils.getXmlString;
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertNotNull;
-
-/**
- * Performs value verifications on
- * {@link org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.ResourceRequestInfo}
- * objects against the values of {@link ResourceRequest}. With the help of the
- * {@link Builder}, users can also make verifications of the custom resource
- * types and its values.
- */
-public class ResourceRequestsXmlVerifications {
-  private final ResourceRequest resourceRequest;
-  private final Element requestInfo;
-  private final Map<String, Long> customResourceTypes;
-  private final List<String> expectedCustomResourceTypes;
-
-  ResourceRequestsXmlVerifications(Builder builder) {
-    this.resourceRequest = builder.resourceRequest;
-    this.requestInfo = builder.requestInfo;
-    this.customResourceTypes = builder.customResourceTypes;
-    this.expectedCustomResourceTypes = builder.expectedCustomResourceTypes;
-  }
-
-  public static void verifyWithCustomResourceTypes(Element requestInfo,
-      ResourceRequest resourceRequest, List<String> expectedResourceTypes) {
-
-    createDefaultBuilder(requestInfo, resourceRequest)
-        .withExpectedCustomResourceTypes(expectedResourceTypes)
-        .withCustomResourceTypes(extractActualCustomResourceType(requestInfo,
-            expectedResourceTypes))
-        .build().verify();
-  }
-
-  private static Builder createDefaultBuilder(Element requestInfo,
-      ResourceRequest resourceRequest) {
-    return new ResourceRequestsXmlVerifications.Builder()
-        .withRequest(resourceRequest).withRequestInfo(requestInfo);
-  }
-
-  private static Map<String, Long> extractActualCustomResourceType(
-      Element requestInfo, List<String> expectedResourceTypes) {
-    Element capability =
-        (Element) requestInfo.getElementsByTagName("capability").item(0);
-
-    return extractCustomResorceTypes(capability,
-        Sets.newHashSet(expectedResourceTypes));
-  }
-
-  private static Map<String, Long> extractCustomResorceTypes(Element capability,
-      Set<String> expectedResourceTypes) {
-    assertEquals(
-        toXml(capability) + " should have only one resourceInformations child!",
-        1, capability.getElementsByTagName("resourceInformations").getLength());
-    Element resourceInformations = (Element) capability
-        .getElementsByTagName("resourceInformations").item(0);
-
-    NodeList customResources =
-        resourceInformations.getElementsByTagName("resourceInformation");
-
-    // customResources will include vcores / memory as well
-    assertEquals(
-        "Different number of custom resource types found than expected",
-        expectedResourceTypes.size(), customResources.getLength() - 2);
-
-    Map<String, Long> resourceTypesAndValues = Maps.newHashMap();
-    for (int i = 0; i < customResources.getLength(); i++) {
-      Element customResource = (Element) customResources.item(i);
-      String name = getXmlString(customResource, "name");
-      String unit = getXmlString(customResource, "units");
-      String resourceType = getXmlString(customResource, "resourceType");
-      Long value = getXmlLong(customResource, "value");
-
-      if (ResourceInformation.MEMORY_URI.equals(name)
-          || ResourceInformation.VCORES_URI.equals(name)) {
-        continue;
-      }
-
-      assertTrue("Custom resource type " + name + " not found",
-          expectedResourceTypes.contains(name));
-      assertEquals("k", unit);
-      assertEquals(ResourceTypes.COUNTABLE,
-          ResourceTypes.valueOf(resourceType));
-      assertNotNull("Resource value should not be null for resource type "
-          + resourceType + ", listing xml contents: " + toXml(customResource),
-          value);
-      resourceTypesAndValues.put(name, value);
-    }
-
-    return resourceTypesAndValues;
-  }
-
-  private void verify() {
-    assertEquals("nodeLabelExpression doesn't match",
-        resourceRequest.getNodeLabelExpression(),
-        getXmlString(requestInfo, "nodeLabelExpression"));
-    assertEquals("numContainers doesn't match",
-        resourceRequest.getNumContainers(),
-        getXmlInt(requestInfo, "numContainers"));
-    assertEquals("relaxLocality doesn't match",
-        resourceRequest.getRelaxLocality(),
-        getXmlBoolean(requestInfo, "relaxLocality"));
-    assertEquals("priority does not match",
-        resourceRequest.getPriority().getPriority(),
-        getXmlInt(requestInfo, "priority"));
-    assertEquals("resourceName does not match",
-        resourceRequest.getResourceName(),
-        getXmlString(requestInfo, "resourceName"));
-    Element capability = (Element) requestInfo
-            .getElementsByTagName("capability").item(0);
-    assertEquals("memory does not match",
-        resourceRequest.getCapability().getMemorySize(),
-        getXmlLong(capability, "memory"));
-    assertEquals("vCores does not match",
-        resourceRequest.getCapability().getVirtualCores(),
-        getXmlLong(capability, "vCores"));
-
-    for (String expectedCustomResourceType : expectedCustomResourceTypes) {
-      assertTrue(
-          "Custom resource type " + expectedCustomResourceType
-              + " cannot be found!",
-          customResourceTypes.containsKey(expectedCustomResourceType));
-
-      Long resourceValue = customResourceTypes.get(expectedCustomResourceType);
-      assertNotNull("Resource value should not be null!", resourceValue);
-    }
-
-    Element executionTypeRequest = (Element) requestInfo
-        .getElementsByTagName("executionTypeRequest").item(0);
-    assertEquals("executionType does not match",
-        resourceRequest.getExecutionTypeRequest().getExecutionType().name(),
-        getXmlString(executionTypeRequest, "executionType"));
-    assertEquals("enforceExecutionType does not match",
-        resourceRequest.getExecutionTypeRequest().getEnforceExecutionType(),
-        getXmlBoolean(executionTypeRequest, "enforceExecutionType"));
-  }
-
-  /**
-   * Builder class for {@link ResourceRequestsXmlVerifications}.
-   */
-  public static final class Builder {
-    private List<String> expectedCustomResourceTypes = Lists.newArrayList();
-    private Map<String, Long> customResourceTypes;
-    private ResourceRequest resourceRequest;
-    private Element requestInfo;
-
-    Builder() {
-    }
-
-    public static Builder create() {
-      return new Builder();
-    }
-
-    Builder withExpectedCustomResourceTypes(
-        List<String> expectedCustomResourceTypes) {
-      this.expectedCustomResourceTypes = expectedCustomResourceTypes;
-      return this;
-    }
-
-    Builder withCustomResourceTypes(Map<String, Long> customResourceTypes) {
-      this.customResourceTypes = customResourceTypes;
-      return this;
-    }
-
-    Builder withRequest(ResourceRequest resourceRequest) {
-      this.resourceRequest = resourceRequest;
-      return this;
-    }
-
-    Builder withRequestInfo(Element requestInfo) {
-      this.requestInfo = requestInfo;
-      return this;
-    }
-
-    public ResourceRequestsXmlVerifications build() {
-      return new ResourceRequestsXmlVerifications(this);
-    }
-  }
-}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/39ad9890/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/helper/XmlCustomResourceTypeTestCase.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/helper/XmlCustomResourceTypeTestCase.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/helper/XmlCustomResourceTypeTestCase.java
deleted file mode 100644
index 29260aa..0000000
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/helper/XmlCustomResourceTypeTestCase.java
+++ /dev/null
@@ -1,112 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.yarn.server.resourcemanager.webapp.helper;
-
-import com.sun.jersey.api.client.WebResource;
-import org.apache.hadoop.http.JettyUtils;
-import org.codehaus.jettison.json.JSONObject;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-import org.w3c.dom.Document;
-import org.w3c.dom.Node;
-import org.xml.sax.InputSource;
-
-import javax.ws.rs.core.MediaType;
-import javax.xml.parsers.DocumentBuilder;
-import javax.xml.parsers.DocumentBuilderFactory;
-import javax.xml.transform.*;
-import javax.xml.transform.dom.DOMSource;
-import javax.xml.transform.stream.StreamResult;
-import java.io.StringReader;
-import java.io.StringWriter;
-import java.util.function.Consumer;
-
-import static org.junit.Assert.assertEquals;
-
-/**
- * This class hides the implementation details of how to verify the structure of
- * XML responses. Tests should only provide the path of the
- * {@link WebResource}, the response from the resource and
- * the verifier Consumer to
- * {@link XmlCustomResourceTypeTestCase#verify(Consumer)}. An instance of
- * {@link JSONObject} will be passed to that consumer to be able to
- * verify the response.
- */
-public class XmlCustomResourceTypeTestCase {
-  private static final Logger LOG =
-      LoggerFactory.getLogger(XmlCustomResourceTypeTestCase.class);
-
-  private WebResource path;
-  private BufferedClientResponse response;
-  private Document parsedResponse;
-
-  public XmlCustomResourceTypeTestCase(WebResource path,
-                                       BufferedClientResponse response) {
-    this.path = path;
-    this.response = response;
-  }
-
-  public void verify(Consumer<Document> verifier) {
-    assertEquals(MediaType.APPLICATION_XML + "; " + JettyUtils.UTF_8,
-        response.getType().toString());
-
-    parsedResponse = parseXml(response);
-    logResponse(parsedResponse);
-    verifier.accept(parsedResponse);
-  }
-
-  private Document parseXml(BufferedClientResponse response) {
-    try {
-      String xml = response.getEntity(String.class);
-      DocumentBuilder db =
-          DocumentBuilderFactory.newInstance().newDocumentBuilder();
-      InputSource is = new InputSource();
-      is.setCharacterStream(new StringReader(xml));
-
-      return db.parse(is);
-    } catch (Exception e) {
-      throw new RuntimeException(e);
-    }
-  }
-
-  private void logResponse(Document doc) {
-    String responseStr = response.getEntity(String.class);
-    LOG.info("Raw response from service URL {}: {}", path.toString(),
-        responseStr);
-    LOG.info("Parsed response from service URL {}: {}", path.toString(),
-        toXml(doc));
-  }
-
-  public static String toXml(Node node) {
-    StringWriter writer;
-    try {
-      TransformerFactory tf = TransformerFactory.newInstance();
-      Transformer transformer = tf.newTransformer();
-      transformer.setOutputProperty(OutputKeys.INDENT, "yes");
-      transformer.setOutputProperty(
-          "{http://xml.apache.org/xslt}indent" + "-amount", "2");
-      writer = new StringWriter();
-      transformer.transform(new DOMSource(node), new StreamResult(writer));
-    } catch (TransformerException e) {
-      throw new RuntimeException(e);
-    }
-
-    return writer.getBuffer().toString();
-  }
-}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/39ad9890/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/FairScheduler.md
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/FairScheduler.md b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/FairScheduler.md
index b5bcbf5..269f5b4 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/FairScheduler.md
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/FairScheduler.md
@@ -86,11 +86,11 @@ The allocation file must be in XML format. The format contains five types of ele
 
 * **Queue elements**: which represent queues. Queue elements can take an optional attribute 'type', which when set to 'parent' makes it a parent queue. This is useful when we want to create a parent queue without configuring any leaf queues. Each queue element may contain the following properties:
 
-    * **minResources**: minimum resources the queue is entitled to, in the form of "X mb, Y vcores" or "vcores=X, memory-mb=Y". The latter form is required when specifying resources other than memory and CPU. For the single-resource fairness policy, the vcores value is ignored. If a queue's minimum share is not satisfied, it will be offered available resources before any other queue under the same parent. Under the single-resource fairness policy, a queue is considered unsatisfied if its memory usage is below its minimum memory share. Under dominant resource fairness, a queue is considered unsatisfied if its usage for its dominant resource with respect to the cluster capacity is below its minimum share for that resource. If multiple queues are unsatisfied in this situation, resources go to the queue with the smallest ratio between relevant resource usage and its minimum. Note that it is possible for a queue that is below its minimum to not immediately get up to its minimum when an a
 pplication is submitted to the queue, because already-running jobs may be using those resources.
+    * **minResources**: minimum resources the queue is entitled to, in the form "X mb, Y vcores". For the single-resource fairness policy, the vcores value is ignored. If a queue's minimum share is not satisfied, it will be offered available resources before any other queue under the same parent. Under the single-resource fairness policy, a queue is considered unsatisfied if its memory usage is below its minimum memory share. Under dominant resource fairness, a queue is considered unsatisfied if its usage for its dominant resource with respect to the cluster capacity is below its minimum share for that resource. If multiple queues are unsatisfied in this situation, resources go to the queue with the smallest ratio between relevant resource usage and minimum. Note that it is possible that a queue that is below its minimum may not immediately get up to its minimum when it submits an application, because already-running jobs may be using those resources.
 
-    * **maxResources**: maximum resources a queue will allocated, expressed in the form of "X%", "X% cpu, Y% memory", "X mb, Y vcores", or "vcores=X, memory-mb=Y". The last form is required when specifying resources other than memory and CPU. In the last form, X and Y can either be a percentage or an integer resource value without units. In the latter case the units will be inferred from the default units configured for that resource. A queue will not be assigned a container that would put its aggregate usage over this limit.
+    * **maxResources**: maximum resources a queue is allocated, expressed either in absolute values (X mb, Y vcores) or as a percentage of the cluster resources (X% memory, Y% cpu). A queue will not be assigned a container that would put its aggregate usage over this limit.
 
-    * **maxChildResources**: maximum resources an ad hoc child queue will allocated, expressed in the form of "X%", "X% cpu, Y% memory", "X mb, Y vcores", or "vcores=X, memory-mb=Y". The last form is required when specifying resources other than memory and CPU. In the last form, X and Y can either be a percentage or an integer resource value without units. In the latter case the units will be inferred from the default units configured for that resource. An ad hoc child queue will not be assigned a container that would put its aggregate usage over this limit.
+    * **maxChildResources**: maximum resources an ad hoc child queue is allocated, expressed either in absolute values (X mb, Y vcores) or as a percentage of the cluster resources (X% memory, Y% cpu). An ad hoc child queue will not be assigned a container that would put its aggregate usage over this limit.
 
     * **maxRunningApps**: limit the number of apps from the queue to run at once
 


---------------------------------------------------------------------
To unsubscribe, e-mail: common-commits-unsubscribe@hadoop.apache.org
For additional commands, e-mail: common-commits-help@hadoop.apache.org


[38/50] [abbrv] hadoop git commit: HDDS-167. Rename KeySpaceManager to OzoneManager. Contributed by Arpit Agarwal.

Posted by vi...@apache.org.
http://git-wip-us.apache.org/repos/asf/hadoop/blob/061b1685/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmKeyInfo.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmKeyInfo.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmKeyInfo.java
new file mode 100644
index 0000000..05c8d45
--- /dev/null
+++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmKeyInfo.java
@@ -0,0 +1,277 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.ozone.om.helpers;
+
+import com.google.common.base.Preconditions;
+import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
+import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.KeyInfo;
+import org.apache.hadoop.util.Time;
+
+import java.io.IOException;
+import java.util.List;
+import java.util.stream.Collectors;
+
+/**
+ * Args for key block. The block instance for the key requested in putKey.
+ * This is returned from OM to client, and client use class to talk to
+ * datanode. Also, this is the metadata written to om.db on server side.
+ */
+public final class OmKeyInfo {
+  private final String volumeName;
+  private final String bucketName;
+  // name of key client specified
+  private String keyName;
+  private long dataSize;
+  private List<OmKeyLocationInfoGroup> keyLocationVersions;
+  private final long creationTime;
+  private long modificationTime;
+  private HddsProtos.ReplicationType type;
+  private HddsProtos.ReplicationFactor factor;
+
+  private OmKeyInfo(String volumeName, String bucketName, String keyName,
+                    List<OmKeyLocationInfoGroup> versions, long dataSize,
+                    long creationTime, long modificationTime, HddsProtos.ReplicationType type,
+                    HddsProtos.ReplicationFactor factor) {
+    this.volumeName = volumeName;
+    this.bucketName = bucketName;
+    this.keyName = keyName;
+    this.dataSize = dataSize;
+    // it is important that the versions are ordered from old to new.
+    // Do this sanity check when versions got loaded on creating OmKeyInfo.
+    // TODO : this is not necessary, here only because versioning is still a
+    // work in-progress, remove this following check when versioning is
+    // complete and prove correctly functioning
+    long currentVersion = -1;
+    for (OmKeyLocationInfoGroup version : versions) {
+      Preconditions.checkArgument(
+            currentVersion + 1 == version.getVersion());
+      currentVersion = version.getVersion();
+    }
+    this.keyLocationVersions = versions;
+    this.creationTime = creationTime;
+    this.modificationTime = modificationTime;
+    this.factor = factor;
+    this.type = type;
+  }
+
+  public String getVolumeName() {
+    return volumeName;
+  }
+
+  public String getBucketName() {
+    return bucketName;
+  }
+
+  public HddsProtos.ReplicationType getType() {
+    return type;
+  }
+
+  public HddsProtos.ReplicationFactor getFactor() {
+    return factor;
+  }
+
+  public String getKeyName() {
+    return keyName;
+  }
+
+  public void setKeyName(String keyName) {
+    this.keyName = keyName;
+  }
+
+  public long getDataSize() {
+    return dataSize;
+  }
+
+  public void setDataSize(long size) {
+    this.dataSize = size;
+  }
+
+  public synchronized OmKeyLocationInfoGroup getLatestVersionLocations()
+      throws IOException {
+    return keyLocationVersions.size() == 0? null :
+        keyLocationVersions.get(keyLocationVersions.size() - 1);
+  }
+
+  public List<OmKeyLocationInfoGroup> getKeyLocationVersions() {
+    return keyLocationVersions;
+  }
+
+  public void updateModifcationTime() {
+    this.modificationTime = Time.monotonicNow();
+  }
+
+  /**
+   * Append a set of blocks to the latest version. Note that these blocks are
+   * part of the latest version, not a new version.
+   *
+   * @param newLocationList the list of new blocks to be added.
+   * @throws IOException
+   */
+  public synchronized void appendNewBlocks(
+      List<OmKeyLocationInfo> newLocationList) throws IOException {
+    if (keyLocationVersions.size() == 0) {
+      throw new IOException("Appending new block, but no version exist");
+    }
+    OmKeyLocationInfoGroup currentLatestVersion =
+        keyLocationVersions.get(keyLocationVersions.size() - 1);
+    currentLatestVersion.appendNewBlocks(newLocationList);
+    setModificationTime(Time.now());
+  }
+
+  /**
+   * Add a new set of blocks. The new blocks will be added as appending a new
+   * version to the all version list.
+   *
+   * @param newLocationList the list of new blocks to be added.
+   * @throws IOException
+   */
+  public synchronized long addNewVersion(
+      List<OmKeyLocationInfo> newLocationList) throws IOException {
+    long latestVersionNum;
+    if (keyLocationVersions.size() == 0) {
+      // no version exist, these blocks are the very first version.
+      keyLocationVersions.add(new OmKeyLocationInfoGroup(0, newLocationList));
+      latestVersionNum = 0;
+    } else {
+      // it is important that the new version are always at the tail of the list
+      OmKeyLocationInfoGroup currentLatestVersion =
+          keyLocationVersions.get(keyLocationVersions.size() - 1);
+      // the new version is created based on the current latest version
+      OmKeyLocationInfoGroup newVersion =
+          currentLatestVersion.generateNextVersion(newLocationList);
+      keyLocationVersions.add(newVersion);
+      latestVersionNum = newVersion.getVersion();
+    }
+    setModificationTime(Time.now());
+    return latestVersionNum;
+  }
+
+  public long getCreationTime() {
+    return creationTime;
+  }
+
+  public long getModificationTime() {
+    return modificationTime;
+  }
+
+  public void setModificationTime(long modificationTime) {
+    this.modificationTime = modificationTime;
+  }
+
+  /**
+   * Builder of OmKeyInfo.
+   */
+  public static class Builder {
+    private String volumeName;
+    private String bucketName;
+    private String keyName;
+    private long dataSize;
+    private List<OmKeyLocationInfoGroup> omKeyLocationInfoGroups;
+    private long creationTime;
+    private long modificationTime;
+    private HddsProtos.ReplicationType type;
+    private HddsProtos.ReplicationFactor factor;
+
+    public Builder setVolumeName(String volume) {
+      this.volumeName = volume;
+      return this;
+    }
+
+    public Builder setBucketName(String bucket) {
+      this.bucketName = bucket;
+      return this;
+    }
+
+    public Builder setKeyName(String key) {
+      this.keyName = key;
+      return this;
+    }
+
+    public Builder setOmKeyLocationInfos(
+        List<OmKeyLocationInfoGroup> omKeyLocationInfoList) {
+      this.omKeyLocationInfoGroups = omKeyLocationInfoList;
+      return this;
+    }
+
+    public Builder setDataSize(long size) {
+      this.dataSize = size;
+      return this;
+    }
+
+    public Builder setCreationTime(long crTime) {
+      this.creationTime = crTime;
+      return this;
+    }
+
+    public Builder setModificationTime(long mTime) {
+      this.modificationTime = mTime;
+      return this;
+    }
+
+    public Builder setReplicationFactor(HddsProtos.ReplicationFactor factor) {
+      this.factor = factor;
+      return this;
+    }
+
+    public Builder setReplicationType(HddsProtos.ReplicationType type) {
+      this.type = type;
+      return this;
+    }
+
+    public OmKeyInfo build() {
+      return new OmKeyInfo(
+          volumeName, bucketName, keyName, omKeyLocationInfoGroups,
+          dataSize, creationTime, modificationTime, type, factor);
+    }
+  }
+
+  public KeyInfo getProtobuf() {
+    long latestVersion = keyLocationVersions.size() == 0 ? -1 :
+        keyLocationVersions.get(keyLocationVersions.size() - 1).getVersion();
+    return KeyInfo.newBuilder()
+        .setVolumeName(volumeName)
+        .setBucketName(bucketName)
+        .setKeyName(keyName)
+        .setDataSize(dataSize)
+        .setFactor(factor)
+        .setType(type)
+        .addAllKeyLocationList(keyLocationVersions.stream()
+            .map(OmKeyLocationInfoGroup::getProtobuf)
+            .collect(Collectors.toList()))
+        .setLatestVersion(latestVersion)
+        .setCreationTime(creationTime)
+        .setModificationTime(modificationTime)
+        .build();
+  }
+
+  public static OmKeyInfo getFromProtobuf(KeyInfo keyInfo) {
+    return new OmKeyInfo(
+        keyInfo.getVolumeName(),
+        keyInfo.getBucketName(),
+        keyInfo.getKeyName(),
+        keyInfo.getKeyLocationListList().stream()
+            .map(OmKeyLocationInfoGroup::getFromProtobuf)
+            .collect(Collectors.toList()),
+        keyInfo.getDataSize(),
+        keyInfo.getCreationTime(),
+        keyInfo.getModificationTime(),
+        keyInfo.getType(),
+        keyInfo.getFactor());
+  }
+
+}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/061b1685/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmKeyLocationInfo.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmKeyLocationInfo.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmKeyLocationInfo.java
new file mode 100644
index 0000000..3f6666d
--- /dev/null
+++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmKeyLocationInfo.java
@@ -0,0 +1,129 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with this
+ * work for additional information regarding copyright ownership.  The ASF
+ * licenses this file to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations under
+ * the License.
+ */
+package org.apache.hadoop.ozone.om.helpers;
+
+import org.apache.hadoop.hdds.client.BlockID;
+import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.KeyLocation;
+
+/**
+ * One key can be too huge to fit in one container. In which case it gets split
+ * into a number of subkeys. This class represents one such subkey instance.
+ */
+public final class OmKeyLocationInfo {
+  private final BlockID blockID;
+  private final boolean shouldCreateContainer;
+  // the id of this subkey in all the subkeys.
+  private final long length;
+  private final long offset;
+  // the version number indicating when this block was added
+  private long createVersion;
+
+  private OmKeyLocationInfo(BlockID blockID, boolean shouldCreateContainer,
+                            long length, long offset) {
+    this.blockID = blockID;
+    this.shouldCreateContainer = shouldCreateContainer;
+    this.length = length;
+    this.offset = offset;
+  }
+
+  public void setCreateVersion(long version) {
+    createVersion = version;
+  }
+
+  public long getCreateVersion() {
+    return createVersion;
+  }
+
+  public BlockID getBlockID() {
+    return blockID;
+  }
+
+  public long getContainerID() {
+    return blockID.getContainerID();
+  }
+
+  public long getLocalID() {
+    return blockID.getLocalID();
+  }
+
+  public boolean getShouldCreateContainer() {
+    return shouldCreateContainer;
+  }
+
+  public long getLength() {
+    return length;
+  }
+
+  public long getOffset() {
+    return offset;
+  }
+
+  /**
+   * Builder of OmKeyLocationInfo.
+   */
+  public static class Builder {
+    private BlockID blockID;
+    private boolean shouldCreateContainer;
+    private long length;
+    private long offset;
+
+    public Builder setBlockID(BlockID blockId) {
+      this.blockID = blockId;
+      return this;
+    }
+
+    public Builder setShouldCreateContainer(boolean create) {
+      this.shouldCreateContainer = create;
+      return this;
+    }
+
+    public Builder setLength(long len) {
+      this.length = len;
+      return this;
+    }
+
+    public Builder setOffset(long off) {
+      this.offset = off;
+      return this;
+    }
+
+    public OmKeyLocationInfo build() {
+      return new OmKeyLocationInfo(blockID,
+          shouldCreateContainer, length, offset);
+    }
+  }
+
+  public KeyLocation getProtobuf() {
+    return KeyLocation.newBuilder()
+        .setBlockID(blockID.getProtobuf())
+        .setShouldCreateContainer(shouldCreateContainer)
+        .setLength(length)
+        .setOffset(offset)
+        .setCreateVersion(createVersion)
+        .build();
+  }
+
+  public static OmKeyLocationInfo getFromProtobuf(KeyLocation keyLocation) {
+    OmKeyLocationInfo info = new OmKeyLocationInfo(
+        BlockID.getFromProtobuf(keyLocation.getBlockID()),
+        keyLocation.getShouldCreateContainer(),
+        keyLocation.getLength(),
+        keyLocation.getOffset());
+    info.setCreateVersion(keyLocation.getCreateVersion());
+    return info;
+  }
+}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/061b1685/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmKeyLocationInfoGroup.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmKeyLocationInfoGroup.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmKeyLocationInfoGroup.java
new file mode 100644
index 0000000..8bdcee3
--- /dev/null
+++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmKeyLocationInfoGroup.java
@@ -0,0 +1,118 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with this
+ * work for additional information regarding copyright ownership.  The ASF
+ * licenses this file to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations under
+ * the License.
+ */
+package org.apache.hadoop.ozone.om.helpers;
+
+import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.KeyLocationList;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.List;
+import java.util.stream.Collectors;
+
+/**
+ * A list of key locations. This class represents one single version of the
+ * blocks of a key.
+ */
+public class OmKeyLocationInfoGroup {
+  private final long version;
+  private final List<OmKeyLocationInfo> locationList;
+
+  public OmKeyLocationInfoGroup(long version,
+                                List<OmKeyLocationInfo> locations) {
+    this.version = version;
+    this.locationList = locations;
+  }
+
+  /**
+   * Return only the blocks that are created in the most recent version.
+   *
+   * @return the list of blocks that are created in the latest version.
+   */
+  public List<OmKeyLocationInfo> getBlocksLatestVersionOnly() {
+    List<OmKeyLocationInfo> list = new ArrayList<>();
+    locationList.stream().filter(x -> x.getCreateVersion() == version)
+        .forEach(list::add);
+    return list;
+  }
+
+  public long getVersion() {
+    return version;
+  }
+
+  public List<OmKeyLocationInfo> getLocationList() {
+    return locationList;
+  }
+
+  public KeyLocationList getProtobuf() {
+    return KeyLocationList.newBuilder()
+        .setVersion(version)
+        .addAllKeyLocations(
+            locationList.stream().map(OmKeyLocationInfo::getProtobuf)
+                .collect(Collectors.toList()))
+        .build();
+  }
+
+  public static OmKeyLocationInfoGroup getFromProtobuf(
+      KeyLocationList keyLocationList) {
+    return new OmKeyLocationInfoGroup(
+        keyLocationList.getVersion(),
+        keyLocationList.getKeyLocationsList().stream()
+            .map(OmKeyLocationInfo::getFromProtobuf)
+            .collect(Collectors.toList()));
+  }
+
+  /**
+   * Given a new block location, generate a new version list based upon this
+   * one.
+   *
+   * @param newLocationList a list of new location to be added.
+   * @return
+   */
+  OmKeyLocationInfoGroup generateNextVersion(
+      List<OmKeyLocationInfo> newLocationList) throws IOException {
+    // TODO : revisit if we can do this method more efficiently
+    // one potential inefficiency here is that later version always include
+    // older ones. e.g. v1 has B1, then v2, v3...will all have B1 and only add
+    // more
+    List<OmKeyLocationInfo> newList = new ArrayList<>();
+    newList.addAll(locationList);
+    for (OmKeyLocationInfo newInfo : newLocationList) {
+      // all these new blocks will have addVersion of current version + 1
+      newInfo.setCreateVersion(version + 1);
+      newList.add(newInfo);
+    }
+    return new OmKeyLocationInfoGroup(version + 1, newList);
+  }
+
+  void appendNewBlocks(List<OmKeyLocationInfo> newLocationList)
+      throws IOException {
+    for (OmKeyLocationInfo info : newLocationList) {
+      info.setCreateVersion(version);
+      locationList.add(info);
+    }
+  }
+
+  @Override
+  public String toString() {
+    StringBuilder sb = new StringBuilder();
+    sb.append("version:").append(version).append(" ");
+    for (OmKeyLocationInfo kli : locationList) {
+      sb.append(kli.getLocalID()).append(" || ");
+    }
+    return sb.toString();
+  }
+}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/061b1685/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmOzoneAclMap.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmOzoneAclMap.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmOzoneAclMap.java
new file mode 100644
index 0000000..de75a05
--- /dev/null
+++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmOzoneAclMap.java
@@ -0,0 +1,110 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ *  with the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ *  Unless required by applicable law or agreed to in writing, software
+ *  distributed under the License is distributed on an "AS IS" BASIS,
+ *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ *  See the License for the specific language governing permissions and
+ *  limitations under the License.
+ */
+
+package org.apache.hadoop.ozone.om.helpers;
+
+import org.apache.hadoop.ozone.protocol.proto
+    .OzoneManagerProtocolProtos.OzoneAclInfo;
+import org.apache.hadoop.ozone.protocol.proto
+    .OzoneManagerProtocolProtos.OzoneAclInfo.OzoneAclRights;
+import org.apache.hadoop.ozone.protocol.proto
+    .OzoneManagerProtocolProtos.OzoneAclInfo.OzoneAclType;
+
+import java.util.List;
+import java.util.LinkedList;
+import java.util.Map;
+import java.util.ArrayList;
+import java.util.HashMap;
+
+/**
+ * This helper class keeps a map of all user and their permissions.
+ */
+public class OmOzoneAclMap {
+  // per Acl Type user:rights map
+  private ArrayList<Map<String, OzoneAclRights>> aclMaps;
+
+  OmOzoneAclMap() {
+    aclMaps = new ArrayList<>();
+    for (OzoneAclType aclType : OzoneAclType.values()) {
+      aclMaps.add(aclType.ordinal(), new HashMap<>());
+    }
+  }
+
+  private Map<String, OzoneAclRights> getMap(OzoneAclType type) {
+    return aclMaps.get(type.ordinal());
+  }
+
+  // For a given acl type and user, get the stored acl
+  private OzoneAclRights getAcl(OzoneAclType type, String user) {
+    return getMap(type).get(user);
+  }
+
+  // Add a new acl to the map
+  public void addAcl(OzoneAclInfo acl) {
+    getMap(acl.getType()).put(acl.getName(), acl.getRights());
+  }
+
+  // for a given acl, check if the user has access rights
+  public boolean hasAccess(OzoneAclInfo acl) {
+    OzoneAclRights storedRights = getAcl(acl.getType(), acl.getName());
+    if (storedRights != null) {
+      switch (acl.getRights()) {
+      case READ:
+        return (storedRights == OzoneAclRights.READ)
+            || (storedRights == OzoneAclRights.READ_WRITE);
+      case WRITE:
+        return (storedRights == OzoneAclRights.WRITE)
+            || (storedRights == OzoneAclRights.READ_WRITE);
+      case READ_WRITE:
+        return (storedRights == OzoneAclRights.READ_WRITE);
+      default:
+        return false;
+      }
+    } else {
+      return false;
+    }
+  }
+
+  // Convert this map to OzoneAclInfo Protobuf List
+  public List<OzoneAclInfo> ozoneAclGetProtobuf() {
+    List<OzoneAclInfo> aclList = new LinkedList<>();
+    for (OzoneAclType type: OzoneAclType.values()) {
+      for (Map.Entry<String, OzoneAclRights> entry :
+          aclMaps.get(type.ordinal()).entrySet()) {
+        OzoneAclInfo aclInfo = OzoneAclInfo.newBuilder()
+            .setName(entry.getKey())
+            .setType(type)
+            .setRights(entry.getValue())
+            .build();
+        aclList.add(aclInfo);
+      }
+    }
+
+    return aclList;
+  }
+
+  // Create map from list of OzoneAclInfos
+  public static OmOzoneAclMap ozoneAclGetFromProtobuf(
+      List<OzoneAclInfo> aclList) {
+    OmOzoneAclMap aclMap = new OmOzoneAclMap();
+    for (OzoneAclInfo acl : aclList) {
+      aclMap.addAcl(acl);
+    }
+    return aclMap;
+  }
+}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/061b1685/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmVolumeArgs.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmVolumeArgs.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmVolumeArgs.java
new file mode 100644
index 0000000..c8b59b6
--- /dev/null
+++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmVolumeArgs.java
@@ -0,0 +1,223 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.ozone.om.helpers;
+
+import com.google.common.base.Preconditions;
+import org.apache.hadoop.ozone.protocol.proto
+    .OzoneManagerProtocolProtos.OzoneAclInfo;
+import org.apache.hadoop.ozone.protocol.proto
+    .OzoneManagerProtocolProtos.VolumeInfo;
+import org.apache.hadoop.hdds.protocol.proto.HddsProtos.KeyValue;
+
+import java.io.IOException;
+import java.util.HashMap;
+import java.util.LinkedList;
+import java.util.List;
+import java.util.Map;
+import java.util.stream.Collectors;
+
+
+/**
+ * A class that encapsulates the OmVolumeArgs Args.
+ */
+public final class OmVolumeArgs {
+  private final String adminName;
+  private final String ownerName;
+  private final String volume;
+  private final long creationTime;
+  private final long quotaInBytes;
+  private final Map<String, String> keyValueMap;
+  private final OmOzoneAclMap aclMap;
+
+  /**
+   * Private constructor, constructed via builder.
+   * @param adminName  - Administrator's name.
+   * @param ownerName  - Volume owner's name
+   * @param volume - volume name
+   * @param quotaInBytes - Volume Quota in bytes.
+   * @param keyValueMap - keyValue map.
+   * @param aclMap - User to access rights map.
+   * @param creationTime - Volume creation time.
+   */
+  private OmVolumeArgs(String adminName, String ownerName, String volume,
+                       long quotaInBytes, Map<String, String> keyValueMap,
+                       OmOzoneAclMap aclMap, long creationTime) {
+    this.adminName = adminName;
+    this.ownerName = ownerName;
+    this.volume = volume;
+    this.quotaInBytes = quotaInBytes;
+    this.keyValueMap = keyValueMap;
+    this.aclMap = aclMap;
+    this.creationTime = creationTime;
+  }
+
+  /**
+   * Returns the Admin Name.
+   * @return String.
+   */
+  public String getAdminName() {
+    return adminName;
+  }
+
+  /**
+   * Returns the owner Name.
+   * @return String
+   */
+  public String getOwnerName() {
+    return ownerName;
+  }
+
+  /**
+   * Returns the volume Name.
+   * @return String
+   */
+  public String getVolume() {
+    return volume;
+  }
+
+  /**
+   * Returns creation time.
+   * @return long
+   */
+  public long getCreationTime() {
+    return creationTime;
+  }
+
+  /**
+   * Returns Quota in Bytes.
+   * @return long, Quota in bytes.
+   */
+  public long getQuotaInBytes() {
+    return quotaInBytes;
+  }
+
+  public Map<String, String> getKeyValueMap() {
+    return keyValueMap;
+  }
+
+  public OmOzoneAclMap getAclMap() {
+    return aclMap;
+  }
+  /**
+   * Returns new builder class that builds a OmVolumeArgs.
+   *
+   * @return Builder
+   */
+  public static Builder newBuilder() {
+    return new Builder();
+  }
+
+  /**
+   * Builder for OmVolumeArgs.
+   */
+  public static class Builder {
+    private String adminName;
+    private String ownerName;
+    private String volume;
+    private long creationTime;
+    private long quotaInBytes;
+    private Map<String, String> keyValueMap;
+    private OmOzoneAclMap aclMap;
+
+    /**
+     * Constructs a builder.
+     */
+    Builder() {
+      keyValueMap = new HashMap<>();
+      aclMap = new OmOzoneAclMap();
+    }
+
+    public Builder setAdminName(String admin) {
+      this.adminName = admin;
+      return this;
+    }
+
+    public Builder setOwnerName(String owner) {
+      this.ownerName = owner;
+      return this;
+    }
+
+    public Builder setVolume(String volumeName) {
+      this.volume = volumeName;
+      return this;
+    }
+
+    public Builder setCreationTime(long createdOn) {
+      this.creationTime = createdOn;
+      return this;
+    }
+
+    public Builder setQuotaInBytes(long quota) {
+      this.quotaInBytes = quota;
+      return this;
+    }
+
+    public Builder addMetadata(String key, String value) {
+      keyValueMap.put(key, value); // overwrite if present.
+      return this;
+    }
+
+    public Builder addOzoneAcls(OzoneAclInfo acl) throws IOException {
+      aclMap.addAcl(acl);
+      return this;
+    }
+
+    /**
+     * Constructs a CreateVolumeArgument.
+     * @return CreateVolumeArgs.
+     */
+    public OmVolumeArgs build() {
+      Preconditions.checkNotNull(adminName);
+      Preconditions.checkNotNull(ownerName);
+      Preconditions.checkNotNull(volume);
+      return new OmVolumeArgs(adminName, ownerName, volume, quotaInBytes,
+          keyValueMap, aclMap, creationTime);
+    }
+  }
+
+  public VolumeInfo getProtobuf() {
+    List<KeyValue> metadataList = new LinkedList<>();
+    for (Map.Entry<String, String> entry : keyValueMap.entrySet()) {
+      metadataList.add(KeyValue.newBuilder().setKey(entry.getKey()).
+          setValue(entry.getValue()).build());
+    }
+    List<OzoneAclInfo> aclList = aclMap.ozoneAclGetProtobuf();
+
+    return VolumeInfo.newBuilder()
+        .setAdminName(adminName)
+        .setOwnerName(ownerName)
+        .setVolume(volume)
+        .setQuotaInBytes(quotaInBytes)
+        .addAllMetadata(metadataList)
+        .addAllVolumeAcls(aclList)
+        .setCreationTime(creationTime)
+        .build();
+  }
+
+  public static OmVolumeArgs getFromProtobuf(VolumeInfo volInfo) {
+    Map<String, String> kvMap = volInfo.getMetadataList().stream()
+        .collect(Collectors.toMap(KeyValue::getKey,
+            KeyValue::getValue));
+    OmOzoneAclMap aclMap =
+        OmOzoneAclMap.ozoneAclGetFromProtobuf(volInfo.getVolumeAclsList());
+
+    return new OmVolumeArgs(volInfo.getAdminName(), volInfo.getOwnerName(),
+        volInfo.getVolume(), volInfo.getQuotaInBytes(), kvMap, aclMap,
+        volInfo.getCreationTime());
+  }
+}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/061b1685/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OpenKeySession.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OpenKeySession.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OpenKeySession.java
new file mode 100644
index 0000000..bc364e6
--- /dev/null
+++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OpenKeySession.java
@@ -0,0 +1,50 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ *  with the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ *  Unless required by applicable law or agreed to in writing, software
+ *  distributed under the License is distributed on an "AS IS" BASIS,
+ *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ *  See the License for the specific language governing permissions and
+ *  limitations under the License.
+ */
+package org.apache.hadoop.ozone.om.helpers;
+
+/**
+ * This class represents a open key "session". A session here means a key is
+ * opened by a specific client, the client sends the handler to server, such
+ * that servers can recognize this client, and thus know how to close the key.
+ */
+public class OpenKeySession {
+  private final int id;
+  private final OmKeyInfo keyInfo;
+  // the version of the key when it is being opened in this session.
+  // a block that has a create version equals to open version means it will
+  // be committed only when this open session is closed.
+  private long openVersion;
+
+  public OpenKeySession(int id, OmKeyInfo info, long version) {
+    this.id = id;
+    this.keyInfo = info;
+    this.openVersion = version;
+  }
+
+  public long getOpenVersion() {
+    return this.openVersion;
+  }
+
+  public OmKeyInfo getKeyInfo() {
+    return keyInfo;
+  }
+
+  public int getId() {
+    return id;
+  }
+}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/061b1685/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/ServiceInfo.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/ServiceInfo.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/ServiceInfo.java
new file mode 100644
index 0000000..9b03aef
--- /dev/null
+++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/ServiceInfo.java
@@ -0,0 +1,237 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.ozone.om.helpers;
+
+
+import com.fasterxml.jackson.annotation.JsonIgnore;
+import com.fasterxml.jackson.databind.ObjectMapper;
+import com.fasterxml.jackson.databind.ObjectReader;
+import com.fasterxml.jackson.databind.ObjectWriter;
+import com.google.common.base.Preconditions;
+import org.apache.hadoop.ozone.client.rest.response.BucketInfo;
+import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos;
+import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos
+    .ServicePort;
+import org.apache.hadoop.hdds.protocol.proto.HddsProtos.NodeType;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.stream.Collectors;
+
+/**
+ * ServiceInfo holds the config details of Ozone services.
+ */
+public final class ServiceInfo {
+
+  private static final ObjectReader READER =
+      new ObjectMapper().readerFor(ServiceInfo.class);
+  private static final ObjectWriter WRITER =
+      new ObjectMapper().writerWithDefaultPrettyPrinter();
+
+  /**
+   * Type of node/service.
+   */
+  private NodeType nodeType;
+  /**
+   * Hostname of the node in which the service is running.
+   */
+  private String hostname;
+
+  /**
+   * List of ports the service listens to.
+   */
+  private Map<ServicePort.Type, Integer> ports;
+
+  /**
+   * Default constructor for JSON deserialization.
+   */
+  public ServiceInfo() {}
+
+  /**
+   * Constructs the ServiceInfo for the {@code nodeType}.
+   * @param nodeType type of node/service
+   * @param hostname hostname of the service
+   * @param portList list of ports the service listens to
+   */
+  private ServiceInfo(
+      NodeType nodeType, String hostname, List<ServicePort> portList) {
+    Preconditions.checkNotNull(nodeType);
+    Preconditions.checkNotNull(hostname);
+    this.nodeType = nodeType;
+    this.hostname = hostname;
+    this.ports = new HashMap<>();
+    for (ServicePort port : portList) {
+      ports.put(port.getType(), port.getValue());
+    }
+  }
+
+  /**
+   * Returns the type of node/service.
+   * @return node type
+   */
+  public NodeType getNodeType() {
+    return nodeType;
+  }
+
+  /**
+   * Returns the hostname of the service.
+   * @return hostname
+   */
+  public String getHostname() {
+    return hostname;
+  }
+
+  /**
+   * Returns ServicePort.Type to port mappings.
+   * @return ports
+   */
+  public Map<ServicePort.Type, Integer> getPorts() {
+    return ports;
+  }
+
+  /**
+   * Returns the port for given type, null if the service doesn't support
+   * the type.
+   *
+   * @param type the type of port.
+   *             ex: RPC, HTTP, HTTPS, etc..
+   */
+  @JsonIgnore
+  public int getPort(ServicePort.Type type) {
+    return ports.get(type);
+  }
+
+  /**
+   * Converts {@link ServiceInfo} to OzoneManagerProtocolProtos.ServiceInfo.
+   *
+   * @return OzoneManagerProtocolProtos.ServiceInfo
+   */
+  @JsonIgnore
+  public OzoneManagerProtocolProtos.ServiceInfo getProtobuf() {
+    OzoneManagerProtocolProtos.ServiceInfo.Builder builder =
+        OzoneManagerProtocolProtos.ServiceInfo.newBuilder();
+    builder.setNodeType(nodeType)
+        .setHostname(hostname)
+        .addAllServicePorts(
+            ports.entrySet().stream()
+                .map(
+                    entry ->
+                        ServicePort.newBuilder()
+                            .setType(entry.getKey())
+                            .setValue(entry.getValue()).build())
+                .collect(Collectors.toList()));
+    return builder.build();
+  }
+
+  /**
+   * Converts OzoneManagerProtocolProtos.ServiceInfo to {@link ServiceInfo}.
+   *
+   * @return {@link ServiceInfo}
+   */
+  @JsonIgnore
+  public static ServiceInfo getFromProtobuf(
+      OzoneManagerProtocolProtos.ServiceInfo serviceInfo) {
+    return new ServiceInfo(serviceInfo.getNodeType(),
+        serviceInfo.getHostname(),
+        serviceInfo.getServicePortsList());
+  }
+
+  /**
+   * Returns a JSON string of this object.
+   *
+   * @return String - json string
+   * @throws IOException
+   */
+  public String toJsonString() throws IOException {
+    return WRITER.writeValueAsString(this);
+  }
+
+  /**
+   * Parse a JSON string into ServiceInfo Object.
+   *
+   * @param jsonString Json String
+   * @return BucketInfo
+   * @throws IOException
+   */
+  public static BucketInfo parse(String jsonString) throws IOException {
+    return READER.readValue(jsonString);
+  }
+
+  /**
+   * Creates a new builder to build {@link ServiceInfo}.
+   * @return {@link ServiceInfo.Builder}
+   */
+  public static Builder newBuilder() {
+    return new Builder();
+  }
+
+  /**
+   * Builder used to build/construct {@link ServiceInfo}.
+   */
+  public static class Builder {
+
+    private NodeType node;
+    private String host;
+    private List<ServicePort> portList = new ArrayList<>();
+
+
+    /**
+     * Sets the node/service type.
+     * @param nodeType type of node
+     * @return the builder
+     */
+    public Builder setNodeType(NodeType nodeType) {
+      node = nodeType;
+      return this;
+    }
+
+    /**
+     * Sets the hostname of the service.
+     * @param hostname service hostname
+     * @return the builder
+     */
+    public Builder setHostname(String hostname) {
+      host = hostname;
+      return this;
+    }
+
+    /**
+     * Adds the service port to the service port list.
+     * @param servicePort RPC port
+     * @return the builder
+     */
+    public Builder addServicePort(ServicePort servicePort) {
+      portList.add(servicePort);
+      return this;
+    }
+
+
+    /**
+     * Builds and returns {@link ServiceInfo} with the set values.
+     * @return {@link ServiceInfo}
+     */
+    public ServiceInfo build() {
+      return new ServiceInfo(node, host, portList);
+    }
+  }
+
+}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/061b1685/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/VolumeArgs.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/VolumeArgs.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/VolumeArgs.java
new file mode 100644
index 0000000..6fc7c8f
--- /dev/null
+++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/VolumeArgs.java
@@ -0,0 +1,140 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.ozone.om.helpers;
+
+import com.google.common.base.Preconditions;
+
+import java.util.HashMap;
+import java.util.Map;
+
+/**
+ * A class that encapsulates the createVolume Args.
+ */
+public final class VolumeArgs {
+  private final String adminName;
+  private final String ownerName;
+  private final String volume;
+  private final long quotaInBytes;
+  private final Map<String, String> extendedAttributes;
+
+  /**
+   * Private constructor, constructed via builder.
+   *
+   * @param adminName - Administrator name.
+   * @param ownerName - Volume owner's name
+   * @param volume - volume name
+   * @param quotaInBytes - Volume Quota in bytes.
+   * @param keyValueMap - keyValue map.
+   */
+  private VolumeArgs(String adminName, String ownerName, String volume,
+      long quotaInBytes, Map<String, String> keyValueMap) {
+    this.adminName = adminName;
+    this.ownerName = ownerName;
+    this.volume = volume;
+    this.quotaInBytes = quotaInBytes;
+    this.extendedAttributes = keyValueMap;
+  }
+
+  /**
+   * Returns the Admin Name.
+   *
+   * @return String.
+   */
+  public String getAdminName() {
+    return adminName;
+  }
+
+  /**
+   * Returns the owner Name.
+   *
+   * @return String
+   */
+  public String getOwnerName() {
+    return ownerName;
+  }
+
+  /**
+   * Returns the volume Name.
+   *
+   * @return String
+   */
+  public String getVolume() {
+    return volume;
+  }
+
+  /**
+   * Returns Quota in Bytes.
+   *
+   * @return long, Quota in bytes.
+   */
+  public long getQuotaInBytes() {
+    return quotaInBytes;
+  }
+
+  public Map<String, String> getExtendedAttributes() {
+    return extendedAttributes;
+  }
+
+  static class Builder {
+    private String adminName;
+    private String ownerName;
+    private String volume;
+    private long quotaInBytes;
+    private Map<String, String> extendedAttributes;
+
+    /**
+     * Constructs a builder.
+     */
+    Builder() {
+      extendedAttributes = new HashMap<>();
+    }
+
+    public void setAdminName(String adminName) {
+      this.adminName = adminName;
+    }
+
+    public void setOwnerName(String ownerName) {
+      this.ownerName = ownerName;
+    }
+
+    public void setVolume(String volume) {
+      this.volume = volume;
+    }
+
+    public void setQuotaInBytes(long quotaInBytes) {
+      this.quotaInBytes = quotaInBytes;
+    }
+
+    public void addMetadata(String key, String value) {
+      extendedAttributes.put(key, value); // overwrite if present.
+    }
+
+    /**
+     * Constructs a CreateVolumeArgument.
+     *
+     * @return CreateVolumeArgs.
+     */
+    public VolumeArgs build() {
+      Preconditions.checkNotNull(adminName);
+      Preconditions.checkNotNull(ownerName);
+      Preconditions.checkNotNull(volume);
+      return new VolumeArgs(adminName, ownerName, volume, quotaInBytes,
+          extendedAttributes);
+    }
+  }
+}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/061b1685/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/package-info.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/package-info.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/package-info.java
new file mode 100644
index 0000000..b1211d8
--- /dev/null
+++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/package-info.java
@@ -0,0 +1,18 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.ozone.om.helpers;
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hadoop/blob/061b1685/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/package-info.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/package-info.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/package-info.java
new file mode 100644
index 0000000..1744cff
--- /dev/null
+++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/package-info.java
@@ -0,0 +1,21 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.ozone.om;
+/**
+ This package contains client side protocol library to communicate with OM.
+ */
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hadoop/blob/061b1685/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/protocol/OzoneManagerProtocol.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/protocol/OzoneManagerProtocol.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/protocol/OzoneManagerProtocol.java
new file mode 100644
index 0000000..b7a099d
--- /dev/null
+++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/protocol/OzoneManagerProtocol.java
@@ -0,0 +1,252 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.ozone.om.protocol;
+
+import org.apache.hadoop.ozone.om.helpers.OmBucketArgs;
+import org.apache.hadoop.ozone.om.helpers.OmBucketInfo;
+import org.apache.hadoop.ozone.om.helpers.OmKeyArgs;
+import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;
+import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfo;
+import org.apache.hadoop.ozone.om.helpers.OmVolumeArgs;
+import org.apache.hadoop.ozone.om.helpers.OpenKeySession;
+import org.apache.hadoop.ozone.om.helpers.ServiceInfo;
+import org.apache.hadoop.ozone.protocol.proto
+    .OzoneManagerProtocolProtos.OzoneAclInfo;
+import java.io.IOException;
+import java.util.List;
+
+/**
+ * Protocol to talk to OM.
+ */
+public interface OzoneManagerProtocol {
+
+  /**
+   * Creates a volume.
+   * @param args - Arguments to create Volume.
+   * @throws IOException
+   */
+  void createVolume(OmVolumeArgs args) throws IOException;
+
+  /**
+   * Changes the owner of a volume.
+   * @param volume  - Name of the volume.
+   * @param owner - Name of the owner.
+   * @throws IOException
+   */
+  void setOwner(String volume, String owner) throws IOException;
+
+  /**
+   * Changes the Quota on a volume.
+   * @param volume - Name of the volume.
+   * @param quota - Quota in bytes.
+   * @throws IOException
+   */
+  void setQuota(String volume, long quota) throws IOException;
+
+  /**
+   * Checks if the specified user can access this volume.
+   * @param volume - volume
+   * @param userAcl - user acls which needs to be checked for access
+   * @return true if the user has required access for the volume,
+   *         false otherwise
+   * @throws IOException
+   */
+  boolean checkVolumeAccess(String volume, OzoneAclInfo userAcl)
+      throws IOException;
+
+  /**
+   * Gets the volume information.
+   * @param volume - Volume name.
+   * @return VolumeArgs or exception is thrown.
+   * @throws IOException
+   */
+  OmVolumeArgs getVolumeInfo(String volume) throws IOException;
+
+  /**
+   * Deletes an existing empty volume.
+   * @param volume - Name of the volume.
+   * @throws IOException
+   */
+  void deleteVolume(String volume) throws IOException;
+
+  /**
+   * Lists volume owned by a specific user.
+   * @param userName - user name
+   * @param prefix  - Filter prefix -- Return only entries that match this.
+   * @param prevKey - Previous key -- List starts from the next from the prevkey
+   * @param maxKeys - Max number of keys to return.
+   * @return List of Volumes.
+   * @throws IOException
+   */
+  List<OmVolumeArgs> listVolumeByUser(String userName, String prefix, String
+      prevKey, int maxKeys) throws IOException;
+
+  /**
+   * Lists volume all volumes in the cluster.
+   * @param prefix  - Filter prefix -- Return only entries that match this.
+   * @param prevKey - Previous key -- List starts from the next from the prevkey
+   * @param maxKeys - Max number of keys to return.
+   * @return List of Volumes.
+   * @throws IOException
+   */
+  List<OmVolumeArgs> listAllVolumes(String prefix, String
+      prevKey, int maxKeys) throws IOException;
+
+  /**
+   * Creates a bucket.
+   * @param bucketInfo - BucketInfo to create Bucket.
+   * @throws IOException
+   */
+  void createBucket(OmBucketInfo bucketInfo) throws IOException;
+
+  /**
+   * Gets the bucket information.
+   * @param volumeName - Volume name.
+   * @param bucketName - Bucket name.
+   * @return OmBucketInfo or exception is thrown.
+   * @throws IOException
+   */
+  OmBucketInfo getBucketInfo(String volumeName, String bucketName)
+      throws IOException;
+
+  /**
+   * Sets bucket property from args.
+   * @param args - BucketArgs.
+   * @throws IOException
+   */
+  void setBucketProperty(OmBucketArgs args) throws IOException;
+
+  /**
+   * Open the given key and return an open key session.
+   *
+   * @param args the args of the key.
+   * @return OpenKeySession instance that client uses to talk to container.
+   * @throws IOException
+   */
+  OpenKeySession openKey(OmKeyArgs args) throws IOException;
+
+  /**
+   * Commit a key. This will make the change from the client visible. The client
+   * is identified by the clientID.
+   *
+   * @param args the key to commit
+   * @param clientID the client identification
+   * @throws IOException
+   */
+  void commitKey(OmKeyArgs args, int clientID) throws IOException;
+
+  /**
+   * Allocate a new block, it is assumed that the client is having an open key
+   * session going on. This block will be appended to this open key session.
+   *
+   * @param args the key to append
+   * @param clientID the client identification
+   * @return an allocated block
+   * @throws IOException
+   */
+  OmKeyLocationInfo allocateBlock(OmKeyArgs args, int clientID)
+      throws IOException;
+
+  /**
+   * Look up for the container of an existing key.
+   *
+   * @param args the args of the key.
+   * @return OmKeyInfo instance that client uses to talk to container.
+   * @throws IOException
+   */
+  OmKeyInfo lookupKey(OmKeyArgs args) throws IOException;
+
+  /**
+   * Rename an existing key within a bucket
+   * @param args the args of the key.
+   * @param toKeyName New name to be used for the Key
+   */
+  void renameKey(OmKeyArgs args, String toKeyName) throws IOException;
+
+  /**
+   * Deletes an existing key.
+   *
+   * @param args the args of the key.
+   * @throws IOException
+   */
+  void deleteKey(OmKeyArgs args) throws IOException;
+
+  /**
+   * Deletes an existing empty bucket from volume.
+   * @param volume - Name of the volume.
+   * @param bucket - Name of the bucket.
+   * @throws IOException
+   */
+  void deleteBucket(String volume, String bucket) throws IOException;
+
+  /**
+   * Returns a list of buckets represented by {@link OmBucketInfo}
+   * in the given volume. Argument volumeName is required, others
+   * are optional.
+   *
+   * @param volumeName
+   *   the name of the volume.
+   * @param startBucketName
+   *   the start bucket name, only the buckets whose name is
+   *   after this value will be included in the result.
+   * @param bucketPrefix
+   *   bucket name prefix, only the buckets whose name has
+   *   this prefix will be included in the result.
+   * @param maxNumOfBuckets
+   *   the maximum number of buckets to return. It ensures
+   *   the size of the result will not exceed this limit.
+   * @return a list of buckets.
+   * @throws IOException
+   */
+  List<OmBucketInfo> listBuckets(String volumeName,
+                                 String startBucketName, String bucketPrefix, int maxNumOfBuckets)
+      throws IOException;
+
+  /**
+   * Returns a list of keys represented by {@link OmKeyInfo}
+   * in the given bucket. Argument volumeName, bucketName is required,
+   * others are optional.
+   *
+   * @param volumeName
+   *   the name of the volume.
+   * @param bucketName
+   *   the name of the bucket.
+   * @param startKeyName
+   *   the start key name, only the keys whose name is
+   *   after this value will be included in the result.
+   * @param keyPrefix
+   *   key name prefix, only the keys whose name has
+   *   this prefix will be included in the result.
+   * @param maxKeys
+   *   the maximum number of keys to return. It ensures
+   *   the size of the result will not exceed this limit.
+   * @return a list of keys.
+   * @throws IOException
+   */
+  List<OmKeyInfo> listKeys(String volumeName,
+                           String bucketName, String startKeyName, String keyPrefix, int maxKeys)
+      throws IOException;
+
+  /**
+   * Returns list of Ozone services with its configuration details.
+   *
+   * @return list of Ozone services
+   * @throws IOException
+   */
+  List<ServiceInfo> getServiceList() throws IOException;
+}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/061b1685/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/protocol/package-info.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/protocol/package-info.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/protocol/package-info.java
new file mode 100644
index 0000000..9c7f388
--- /dev/null
+++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/protocol/package-info.java
@@ -0,0 +1,19 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.ozone.om.protocol;
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hadoop/blob/061b1685/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/protocolPB/OzoneManagerProtocolClientSideTranslatorPB.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/protocolPB/OzoneManagerProtocolClientSideTranslatorPB.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/protocolPB/OzoneManagerProtocolClientSideTranslatorPB.java
new file mode 100644
index 0000000..37151fb
--- /dev/null
+++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/protocolPB/OzoneManagerProtocolClientSideTranslatorPB.java
@@ -0,0 +1,769 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.ozone.om.protocolPB;
+
+import com.google.common.base.Strings;
+import com.google.common.collect.Lists;
+import com.google.protobuf.RpcController;
+import com.google.protobuf.ServiceException;
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.ipc.ProtobufHelper;
+import org.apache.hadoop.ipc.ProtocolTranslator;
+import org.apache.hadoop.ozone.om.helpers.OmBucketArgs;
+import org.apache.hadoop.ozone.om.helpers.OmBucketInfo;
+import org.apache.hadoop.ozone.om.helpers.OmKeyArgs;
+import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;
+import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfo;
+import org.apache.hadoop.ozone.om.helpers.OmVolumeArgs;
+import org.apache.hadoop.ozone.om.helpers.OpenKeySession;
+import org.apache.hadoop.ozone.om.helpers.ServiceInfo;
+import org.apache.hadoop.ozone.om.protocol.OzoneManagerProtocol;
+import org.apache.hadoop.ozone.protocol.proto
+    .OzoneManagerProtocolProtos.AllocateBlockRequest;
+import org.apache.hadoop.ozone.protocol.proto
+    .OzoneManagerProtocolProtos.AllocateBlockResponse;
+import org.apache.hadoop.ozone.protocol.proto
+    .OzoneManagerProtocolProtos.CommitKeyRequest;
+import org.apache.hadoop.ozone.protocol.proto
+    .OzoneManagerProtocolProtos.CommitKeyResponse;
+import org.apache.hadoop.ozone.protocol.proto
+    .OzoneManagerProtocolProtos.BucketArgs;
+import org.apache.hadoop.ozone.protocol.proto
+    .OzoneManagerProtocolProtos.BucketInfo;
+import org.apache.hadoop.ozone.protocol.proto
+    .OzoneManagerProtocolProtos.CreateBucketRequest;
+import org.apache.hadoop.ozone.protocol.proto
+    .OzoneManagerProtocolProtos.CreateBucketResponse;
+import org.apache.hadoop.ozone.protocol.proto
+    .OzoneManagerProtocolProtos.InfoBucketRequest;
+import org.apache.hadoop.ozone.protocol.proto
+    .OzoneManagerProtocolProtos.InfoBucketResponse;
+import org.apache.hadoop.ozone.protocol.proto
+    .OzoneManagerProtocolProtos.SetBucketPropertyRequest;
+import org.apache.hadoop.ozone.protocol.proto
+    .OzoneManagerProtocolProtos.SetBucketPropertyResponse;
+import org.apache.hadoop.ozone.protocol.proto
+    .OzoneManagerProtocolProtos.DeleteBucketRequest;
+import org.apache.hadoop.ozone.protocol.proto
+    .OzoneManagerProtocolProtos.DeleteBucketResponse;
+import org.apache.hadoop.ozone.protocol.proto
+    .OzoneManagerProtocolProtos.CreateVolumeRequest;
+import org.apache.hadoop.ozone.protocol.proto
+    .OzoneManagerProtocolProtos.CreateVolumeResponse;
+import org.apache.hadoop.ozone.protocol.proto
+    .OzoneManagerProtocolProtos.LocateKeyRequest;
+import org.apache.hadoop.ozone.protocol.proto
+    .OzoneManagerProtocolProtos.LocateKeyResponse;
+import org.apache.hadoop.ozone.protocol.proto
+    .OzoneManagerProtocolProtos.RenameKeyRequest;
+import org.apache.hadoop.ozone.protocol.proto
+    .OzoneManagerProtocolProtos.RenameKeyResponse;
+import org.apache.hadoop.ozone.protocol.proto
+    .OzoneManagerProtocolProtos.KeyArgs;
+import org.apache.hadoop.ozone.protocol.proto
+    .OzoneManagerProtocolProtos.SetVolumePropertyRequest;
+import org.apache.hadoop.ozone.protocol.proto
+    .OzoneManagerProtocolProtos.SetVolumePropertyResponse;
+import org.apache.hadoop.ozone.protocol.proto
+    .OzoneManagerProtocolProtos.DeleteVolumeRequest;
+import org.apache.hadoop.ozone.protocol.proto
+    .OzoneManagerProtocolProtos.DeleteVolumeResponse;
+import org.apache.hadoop.ozone.protocol.proto
+    .OzoneManagerProtocolProtos.InfoVolumeRequest;
+import org.apache.hadoop.ozone.protocol.proto
+    .OzoneManagerProtocolProtos.InfoVolumeResponse;
+import org.apache.hadoop.ozone.protocol.proto
+    .OzoneManagerProtocolProtos.CheckVolumeAccessRequest;
+import org.apache.hadoop.ozone.protocol.proto
+    .OzoneManagerProtocolProtos.CheckVolumeAccessResponse;
+import org.apache.hadoop.ozone.protocol.proto
+    .OzoneManagerProtocolProtos.ListBucketsRequest;
+import org.apache.hadoop.ozone.protocol.proto
+    .OzoneManagerProtocolProtos.ListBucketsResponse;
+import org.apache.hadoop.ozone.protocol.proto
+    .OzoneManagerProtocolProtos.ListKeysRequest;
+import org.apache.hadoop.ozone.protocol.proto
+    .OzoneManagerProtocolProtos.ListKeysResponse;
+import org.apache.hadoop.ozone.protocol.proto
+    .OzoneManagerProtocolProtos.VolumeInfo;
+import org.apache.hadoop.ozone.protocol.proto
+    .OzoneManagerProtocolProtos.Status;
+import org.apache.hadoop.ozone.protocol.proto
+    .OzoneManagerProtocolProtos.OzoneAclInfo;
+import org.apache.hadoop.ozone.protocol.proto
+    .OzoneManagerProtocolProtos.ListVolumeRequest;
+import org.apache.hadoop.ozone.protocol.proto
+    .OzoneManagerProtocolProtos.ListVolumeResponse;
+import org.apache.hadoop.ozone.protocol.proto
+    .OzoneManagerProtocolProtos.ServiceListRequest;
+import org.apache.hadoop.ozone.protocol.proto
+    .OzoneManagerProtocolProtos.ServiceListResponse;
+
+import java.io.Closeable;
+import java.io.IOException;
+import java.util.List;
+import java.util.ArrayList;
+import java.util.stream.Collectors;
+
+/**
+ *  The client side implementation of OzoneManagerProtocol.
+ */
+
+@InterfaceAudience.Private
+public final class OzoneManagerProtocolClientSideTranslatorPB
+    implements OzoneManagerProtocol, ProtocolTranslator, Closeable {
+
+  /**
+   * RpcController is not used and hence is set to null.
+   */
+  private static final RpcController NULL_RPC_CONTROLLER = null;
+
+  private final OzoneManagerProtocolPB rpcProxy;
+
+  /**
+   * Constructor for KeySpaceManger Client.
+   * @param rpcProxy
+   */
+  public OzoneManagerProtocolClientSideTranslatorPB(
+      OzoneManagerProtocolPB rpcProxy) {
+    this.rpcProxy = rpcProxy;
+  }
+
+  /**
+   * Closes this stream and releases any system resources associated
+   * with it. If the stream is already closed then invoking this
+   * method has no effect.
+   * <p>
+   * <p> As noted in {@link AutoCloseable#close()}, cases where the
+   * close may fail require careful attention. It is strongly advised
+   * to relinquish the underlying resources and to internally
+   * <em>mark</em> the {@code Closeable} as closed, prior to throwing
+   * the {@code IOException}.
+   *
+   * @throws IOException if an I/O error occurs
+   */
+  @Override
+  public void close() throws IOException {
+
+  }
+
+  /**
+   * Creates a volume.
+   *
+   * @param args - Arguments to create Volume.
+   * @throws IOException
+   */
+  @Override
+  public void createVolume(OmVolumeArgs args) throws IOException {
+    CreateVolumeRequest.Builder req =
+        CreateVolumeRequest.newBuilder();
+    VolumeInfo volumeInfo = args.getProtobuf();
+    req.setVolumeInfo(volumeInfo);
+
+    final CreateVolumeResponse resp;
+    try {
+      resp = rpcProxy.createVolume(NULL_RPC_CONTROLLER,
+          req.build());
+    } catch (ServiceException e) {
+      throw ProtobufHelper.getRemoteException(e);
+    }
+
+    if (resp.getStatus() != Status.OK) {
+      throw new
+          IOException("Volume creation failed, error:" + resp.getStatus());
+    }
+  }
+
+  /**
+   * Changes the owner of a volume.
+   *
+   * @param volume - Name of the volume.
+   * @param owner - Name of the owner.
+   * @throws IOException
+   */
+  @Override
+  public void setOwner(String volume, String owner) throws IOException {
+    SetVolumePropertyRequest.Builder req =
+        SetVolumePropertyRequest.newBuilder();
+    req.setVolumeName(volume).setOwnerName(owner);
+    final SetVolumePropertyResponse resp;
+    try {
+      resp = rpcProxy.setVolumeProperty(NULL_RPC_CONTROLLER, req.build());
+    } catch (ServiceException e) {
+      throw ProtobufHelper.getRemoteException(e);
+    }
+    if (resp.getStatus() != Status.OK) {
+      throw new
+          IOException("Volume owner change failed, error:" + resp.getStatus());
+    }
+  }
+
+  /**
+   * Changes the Quota on a volume.
+   *
+   * @param volume - Name of the volume.
+   * @param quota - Quota in bytes.
+   * @throws IOException
+   */
+  @Override
+  public void setQuota(String volume, long quota) throws IOException {
+    SetVolumePropertyRequest.Builder req =
+        SetVolumePropertyRequest.newBuilder();
+    req.setVolumeName(volume).setQuotaInBytes(quota);
+    final SetVolumePropertyResponse resp;
+    try {
+      resp = rpcProxy.setVolumeProperty(NULL_RPC_CONTROLLER, req.build());
+    } catch (ServiceException e) {
+      throw ProtobufHelper.getRemoteException(e);
+    }
+    if (resp.getStatus() != Status.OK) {
+      throw new
+          IOException("Volume quota change failed, error:" + resp.getStatus());
+    }
+  }
+
+  /**
+   * Checks if the specified user can access this volume.
+   *
+   * @param volume - volume
+   * @param userAcl - user acls which needs to be checked for access
+   * @return true if the user has required access for the volume,
+   *         false otherwise
+   * @throws IOException
+   */
+  @Override
+  public boolean checkVolumeAccess(String volume, OzoneAclInfo userAcl) throws
+      IOException {
+    CheckVolumeAccessRequest.Builder req =
+        CheckVolumeAccessRequest.newBuilder();
+    req.setVolumeName(volume).setUserAcl(userAcl);
+    final CheckVolumeAccessResponse resp;
+    try {
+      resp = rpcProxy.checkVolumeAccess(NULL_RPC_CONTROLLER, req.build());
+    } catch (ServiceException e) {
+      throw ProtobufHelper.getRemoteException(e);
+    }
+
+    if (resp.getStatus() == Status.ACCESS_DENIED) {
+      return false;
+    } else if (resp.getStatus() == Status.OK) {
+      return true;
+    } else {
+      throw new
+          IOException("Check Volume Access failed, error:" + resp.getStatus());
+    }
+  }
+
+  /**
+   * Gets the volume information.
+   *
+   * @param volume - Volume name.
+   * @return OmVolumeArgs or exception is thrown.
+   * @throws IOException
+   */
+  @Override
+  public OmVolumeArgs getVolumeInfo(String volume) throws IOException {
+    InfoVolumeRequest.Builder req = InfoVolumeRequest.newBuilder();
+    req.setVolumeName(volume);
+    final InfoVolumeResponse resp;
+    try {
+      resp = rpcProxy.infoVolume(NULL_RPC_CONTROLLER, req.build());
+    } catch (ServiceException e) {
+      throw ProtobufHelper.getRemoteException(e);
+    }
+    if (resp.getStatus() != Status.OK) {
+      throw new
+          IOException("Info Volume failed, error:" + resp.getStatus());
+    }
+    return OmVolumeArgs.getFromProtobuf(resp.getVolumeInfo());
+  }
+
+  /**
+   * Deletes an existing empty volume.
+   *
+   * @param volume - Name of the volume.
+   * @throws IOException
+   */
+  @Override
+  public void deleteVolume(String volume) throws IOException {
+    DeleteVolumeRequest.Builder req = DeleteVolumeRequest.newBuilder();
+    req.setVolumeName(volume);
+    final DeleteVolumeResponse resp;
+    try {
+      resp = rpcProxy.deleteVolume(NULL_RPC_CONTROLLER, req.build());
+    } catch (ServiceException e) {
+      throw ProtobufHelper.getRemoteException(e);
+    }
+    if (resp.getStatus() != Status.OK) {
+      throw new
+          IOException("Delete Volume failed, error:" + resp.getStatus());
+    }
+  }
+
+  /**
+   * Lists volume owned by a specific user.
+   *
+   * @param userName - user name
+   * @param prefix - Filter prefix -- Return only entries that match this.
+   * @param prevKey - Previous key -- List starts from the next from the
+   * prevkey
+   * @param maxKeys - Max number of keys to return.
+   * @return List of Volumes.
+   * @throws IOException
+   */
+  @Override
+  public List<OmVolumeArgs> listVolumeByUser(String userName, String prefix,
+                                             String prevKey, int maxKeys)
+      throws IOException {
+    ListVolumeRequest.Builder builder = ListVolumeRequest.newBuilder();
+    if (!Strings.isNullOrEmpty(prefix)) {
+      builder.setPrefix(prefix);
+    }
+    if (!Strings.isNullOrEmpty(prevKey)) {
+      builder.setPrevKey(prevKey);
+    }
+    builder.setMaxKeys(maxKeys);
+    builder.setUserName(userName);
+    builder.setScope(ListVolumeRequest.Scope.VOLUMES_BY_USER);
+    return listVolume(builder.build());
+  }
+
+  /**
+   * Lists volume all volumes in the cluster.
+   *
+   * @param prefix - Filter prefix -- Return only entries that match this.
+   * @param prevKey - Previous key -- List starts from the next from the
+   * prevkey
+   * @param maxKeys - Max number of keys to return.
+   * @return List of Volumes.
+   * @throws IOException
+   */
+  @Override
+  public List<OmVolumeArgs> listAllVolumes(String prefix, String prevKey,
+                                           int maxKeys) throws IOException {
+    ListVolumeRequest.Builder builder = ListVolumeRequest.newBuilder();
+    if (!Strings.isNullOrEmpty(prefix)) {
+      builder.setPrefix(prefix);
+    }
+    if (!Strings.isNullOrEmpty(prevKey)) {
+      builder.setPrevKey(prevKey);
+    }
+    builder.setMaxKeys(maxKeys);
+    builder.setScope(ListVolumeRequest.Scope.VOLUMES_BY_CLUSTER);
+    return listVolume(builder.build());
+  }
+
+  private List<OmVolumeArgs> listVolume(ListVolumeRequest request)
+      throws IOException {
+    final ListVolumeResponse resp;
+    try {
+      resp = rpcProxy.listVolumes(NULL_RPC_CONTROLLER, request);
+    } catch (ServiceException e) {
+      throw ProtobufHelper.getRemoteException(e);
+    }
+
+    if (resp.getStatus() != Status.OK) {
+      throw new IOException("List volume failed, error: "
+          + resp.getStatus());
+    }
+
+    List<OmVolumeArgs> result = Lists.newArrayList();
+    for (VolumeInfo volInfo : resp.getVolumeInfoList()) {
+      OmVolumeArgs volArgs = OmVolumeArgs.getFromProtobuf(volInfo);
+      result.add(volArgs);
+    }
+
+    return resp.getVolumeInfoList().stream()
+        .map(item -> OmVolumeArgs.getFromProtobuf(item))
+        .collect(Collectors.toList());
+  }
+
+  /**
+   * Creates a bucket.
+   *
+   * @param bucketInfo - BucketInfo to create bucket.
+   * @throws IOException
+   */
+  @Override
+  public void createBucket(OmBucketInfo bucketInfo) throws IOException {
+    CreateBucketRequest.Builder req =
+        CreateBucketRequest.newBuilder();
+    BucketInfo bucketInfoProtobuf = bucketInfo.getProtobuf();
+    req.setBucketInfo(bucketInfoProtobuf);
+
+    final CreateBucketResponse resp;
+    try {
+      resp = rpcProxy.createBucket(NULL_RPC_CONTROLLER,
+          req.build());
+    } catch (ServiceException e) {
+      throw ProtobufHelper.getRemoteException(e);
+    }
+    if (resp.getStatus() != Status.OK) {
+      throw new IOException("Bucket creation failed, error: "
+          + resp.getStatus());
+    }
+  }
+
+  /**
+   * Gets the bucket information.
+   *
+   * @param volume - Volume name.
+   * @param bucket - Bucket name.
+   * @return OmBucketInfo or exception is thrown.
+   * @throws IOException
+   */
+  @Override
+  public OmBucketInfo getBucketInfo(String volume, String bucket)
+      throws IOException {
+    InfoBucketRequest.Builder req =
+        InfoBucketRequest.newBuilder();
+    req.setVolumeName(volume);
+    req.setBucketName(bucket);
+
+    final InfoBucketResponse resp;
+    try {
+      resp = rpcProxy.infoBucket(NULL_RPC_CONTROLLER,
+          req.build());
+    } catch (ServiceException e) {
+      throw ProtobufHelper.getRemoteException(e);
+    }
+    if (resp.getStatus() == Status.OK) {
+      return OmBucketInfo.getFromProtobuf(resp.getBucketInfo());
+    } else {
+      throw new IOException("Info Bucket failed, error: "
+          + resp.getStatus());
+    }
+  }
+
+  /**
+   * Sets bucket property from args.
+   * @param args - BucketArgs.
+   * @throws IOException
+   */
+  @Override
+  public void setBucketProperty(OmBucketArgs args)
+      throws IOException {
+    SetBucketPropertyRequest.Builder req =
+        SetBucketPropertyRequest.newBuilder();
+    BucketArgs bucketArgs = args.getProtobuf();
+    req.setBucketArgs(bucketArgs);
+    final SetBucketPropertyResponse resp;
+    try {
+      resp = rpcProxy.setBucketProperty(NULL_RPC_CONTROLLER,
+          req.build());
+    } catch (ServiceException e) {
+      throw ProtobufHelper.getRemoteException(e);
+    }
+    if (resp.getStatus() != Status.OK) {
+      throw new IOException("Setting bucket property failed, error: "
+          + resp.getStatus());
+    }
+  }
+
+  /**
+   * List buckets in a volume.
+   *
+   * @param volumeName
+   * @param startKey
+   * @param prefix
+   * @param count
+   * @return
+   * @throws IOException
+   */
+  @Override
+  public List<OmBucketInfo> listBuckets(String volumeName,
+                                        String startKey, String prefix, int count) throws IOException {
+    List<OmBucketInfo> buckets = new ArrayList<>();
+    ListBucketsRequest.Builder reqBuilder = ListBucketsRequest.newBuilder();
+    reqBuilder.setVolumeName(volumeName);
+    reqBuilder.setCount(count);
+    if (startKey != null) {
+      reqBuilder.setStartKey(startKey);
+    }
+    if (prefix != null) {
+      reqBuilder.setPrefix(prefix);
+    }
+    ListBucketsRequest request = reqBuilder.build();
+    final ListBucketsResponse resp;
+    try {
+      resp = rpcProxy.listBuckets(NULL_RPC_CONTROLLER, request);
+    } catch (ServiceException e) {
+      throw ProtobufHelper.getRemoteException(e);
+    }
+
+    if (resp.getStatus() == Status.OK) {
+      buckets.addAll(
+          resp.getBucketInfoList().stream()
+              .map(OmBucketInfo::getFromProtobuf)
+              .collect(Collectors.toList()));
+      return buckets;
+    } else {
+      throw new IOException("List Buckets failed, error: "
+          + resp.getStatus());
+    }
+  }
+
+  /**
+   * Create a new open session of the key, then use the returned meta info to
+   * talk to data node to actually write the key.
+   * @param args the args for the key to be allocated
+   * @return a handler to the key, returned client
+   * @throws IOException
+   */
+  @Override
+  public OpenKeySession openKey(OmKeyArgs args) throws IOException {
+    LocateKeyRequest.Builder req = LocateKeyRequest.newBuilder();
+    KeyArgs.Builder keyArgs = KeyArgs.newBuilder()
+        .setVolumeName(args.getVolumeName())
+        .setBucketName(args.getBucketName())
+        .setFactor(args.getFactor())
+        .setType(args.getType())
+        .setKeyName(args.getKeyName());
+    if (args.getDataSize() > 0) {
+      keyArgs.setDataSize(args.getDataSize());
+    }
+    req.setKeyArgs(keyArgs.build());
+
+    final LocateKeyResponse resp;
+    try {
+      resp = rpcProxy.createKey(NULL_RPC_CONTROLLER, req.build());
+    } catch (ServiceException e) {
+      throw ProtobufHelper.getRemoteException(e);
+    }
+    if (resp.getStatus() != Status.OK) {
+      throw new IOException("Create key failed, error:" + resp.getStatus());
+    }
+    return new OpenKeySession(resp.getID(),
+        OmKeyInfo.getFromProtobuf(resp.getKeyInfo()), resp.getOpenVersion());
+  }
+
+  @Override
+  public OmKeyLocationInfo allocateBlock(OmKeyArgs args, int clientID)
+      throws IOException {
+    AllocateBlockRequest.Builder req = AllocateBlockRequest.newBuilder();
+    KeyArgs keyArgs = KeyArgs.newBuilder()
+        .setVolumeName(args.getVolumeName())
+        .setBucketName(args.getBucketName())
+        .setKeyName(args.getKeyName())
+        .setDataSize(args.getDataSize()).build();
+    req.setKeyArgs(keyArgs);
+    req.setClientID(clientID);
+
+    final AllocateBlockResponse resp;
+    try {
+      resp = rpcProxy.allocateBlock(NULL_RPC_CONTROLLER, req.build());
+    } catch (ServiceException e) {
+      throw ProtobufHelper.getRemoteException(e);
+    }
+    if (resp.getStatus() != Status.OK) {
+      throw new IOException("Allocate block failed, error:" +
+          resp.getStatus());
+    }
+    return OmKeyLocationInfo.getFromProtobuf(resp.getKeyLocation());
+  }
+
+  @Override
+  public void commitKey(OmKeyArgs args, int clientID)
+      throws IOException {
+    CommitKeyRequest.Builder req = CommitKeyRequest.newBuilder();
+    KeyArgs keyArgs = KeyArgs.newBuilder()
+        .setVolumeName(args.getVolumeName())
+        .setBucketName(args.getBucketName())
+        .setKeyName(args.getKeyName())
+        .setDataSize(args.getDataSize()).build();
+    req.setKeyArgs(keyArgs);
+    req.setClientID(clientID);
+
+    final CommitKeyResponse resp;
+    try {
+      resp = rpcProxy.commitKey(NULL_RPC_CONTROLLER, req.build());
+    } catch (ServiceException e) {
+      throw ProtobufHelper.getRemoteException(e);
+    }
+    if (resp.getStatus() != Status.OK) {
+      throw new IOException("Commit key failed, error:" +
+          resp.getStatus());
+    }
+  }
+
+
+  @Override
+  public OmKeyInfo lookupKey(OmKeyArgs args) throws IOException {
+    LocateKeyRequest.Builder req = LocateKeyRequest.newBuilder();
+    KeyArgs keyArgs = KeyArgs.newBuilder()
+        .setVolumeName(args.getVolumeName())
+        .setBucketName(args.getBucketName())
+        .setKeyName(args.getKeyName())
+        .setDataSize(args.getDataSize()).build();
+    req.setKeyArgs(keyArgs);
+
+    final LocateKeyResponse resp;
+    try {
+      resp = rpcProxy.lookupKey(NULL_RPC_CONTROLLER, req.build());
+    } catch (ServiceException e) {
+      throw ProtobufHelper.getRemoteException(e);
+    }
+    if (resp.getStatus() != Status.OK) {
+      throw new IOException("Lookup key failed, error:" +
+          resp.getStatus());
+    }
+    return OmKeyInfo.getFromProtobuf(resp.getKeyInfo());
+  }
+
+  @Override
+  public void renameKey(OmKeyArgs args, String toKeyName) throws IOException {
+    RenameKeyRequest.Builder req = RenameKeyRequest.newBuilder();
+    KeyArgs keyArgs = KeyArgs.newBuilder()
+        .setVolumeName(args.getVolumeName())
+        .setBucketName(args.getBucketName())
+        .setKeyName(args.getKeyName())
+        .setDataSize(args.getDataSize()).build();
+    req.setKeyArgs(keyArgs);
+    req.setToKeyName(toKeyName);
+
+    final RenameKeyResponse resp;
+    try {
+      resp = rpcProxy.renameKey(NULL_RPC_CONTROLLER, req.build());
+    } catch (ServiceException e) {
+      throw ProtobufHelper.getRemoteException(e);
+    }
+    if (resp.getStatus() != Status.OK) {
+      throw new IOException("Rename key failed, error:" +
+          resp.getStatus());
+    }
+  }
+
+  /**
+   * Deletes an existing key.
+   *
+   * @param args the args of the key.
+   * @throws IOException
+   */
+  @Override
+  public void deleteKey(OmKeyArgs args) throws IOException {
+    LocateKeyRequest.Builder req = LocateKeyRequest.newBuilder();
+    KeyArgs keyArgs = KeyArgs.newBuilder()
+        .setVolumeName(args.getVolumeName())
+        .setBucketName(args.getBucketName())
+        .setKeyName(args.getKeyName()).build();
+    req.setKeyArgs(keyArgs);
+
+    final LocateKeyResponse resp;
+    try {
+      resp = rpcProxy.deleteKey(NULL_RPC_CONTROLLER, req.build());
+    } catch (ServiceException e) {
+      throw ProtobufHelper.getRemoteException(e);
+    }
+    if (resp.getStatus() != Status.OK) {
+      throw new IOException("Delete key failed, error:" +
+          resp.getStatus());
+    }
+  }
+
+  /**
+   * Deletes an existing empty bucket from volume.
+   * @param volume - Name of the volume.
+   * @param bucket - Name of the bucket.
+   * @throws IOException
+   */
+  public void deleteBucket(String volume, String bucket) throws IOException {
+    DeleteBucketRequest.Builder req = DeleteBucketRequest.newBuilder();
+    req.setVolumeName(volume);
+    req.setBucketName(bucket);
+    final DeleteBucketResponse resp;
+    try {
+      resp = rpcProxy.deleteBucket(NULL_RPC_CONTROLLER, req.build());
+    } catch (ServiceException e) {
+      throw ProtobufHelper.getRemoteException(e);
+    }
+    if (resp.getStatus() != Status.OK) {
+      throw new
+          IOException("Delete Bucket failed, error:" + resp.getStatus());
+    }
+  }
+
+  /**
+   * List keys in a bucket.
+   */
+  @Override
+  public List<OmKeyInfo> listKeys(String volumeName, String bucketName,
+                                  String startKey, String prefix, int maxKeys) throws IOException {
+    List<OmKeyInfo> keys = new ArrayList<>();
+    ListKeysRequest.Builder reqBuilder = ListKeysRequest.newBuilder();
+    reqBuilder.setVolumeName(volumeName);
+    reqBuilder.setBucketName(bucketName);
+    reqBuilder.setCount(maxKeys);
+
+    if (startKey != null) {
+      reqBuilder.setStartKey(startKey);
+    }
+
+    if (prefix != null) {
+      reqBuilder.setPrefix(prefix);
+    }
+
+    ListKeysRequest request = reqBuilder.build();
+    final ListKeysResponse resp;
+    try {
+      resp = rpcProxy.listKeys(NULL_RPC_CONTROLLER, request);
+    } catch (ServiceException e) {
+      throw ProtobufHelper.getRemoteException(e);
+    }
+
+    if (resp.getStatus() == Status.OK) {
+      keys.addAll(
+          resp.getKeyInfoList().stream()
+              .map(OmKeyInfo::getFromProtobuf)
+              .collect(Collectors.toList()));
+      return keys;
+    } else {
+      throw new IOException("List Keys failed, error: "
+          + resp.getStatus());
+    }
+  }
+
+  @Override
+  public List<ServiceInfo> getServiceList() throws IOException {
+    ServiceListRequest request = ServiceListRequest.newBuilder().build();
+    final ServiceListResponse resp;
+    try {
+      resp = rpcProxy.getServiceList(NULL_RPC_CONTROLLER, request);
+    } catch (ServiceException e) {
+      throw ProtobufHelper.getRemoteException(e);
+    }
+
+    if (resp.getStatus() == Status.OK) {
+      return resp.getServiceInfoList().stream()
+              .map(ServiceInfo::getFromProtobuf)
+              .collect(Collectors.toList());
+    } else {
+      throw new IOException("Getting service list failed, error: "
+          + resp.getStatus());
+    }
+  }
+
+  /**
+   * Return the proxy object underlying this protocol translator.
+   *
+   * @return the proxy object underlying this protocol translator.
+   */
+  @Override
+  public Object getUnderlyingProxyObject() {
+    return null;
+  }
+}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/061b1685/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/protocolPB/OzoneManagerProtocolPB.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/protocolPB/OzoneManagerProtocolPB.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/protocolPB/OzoneManagerProtocolPB.java
new file mode 100644
index 0000000..e0879d6
--- /dev/null
+++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/protocolPB/OzoneManagerProtocolPB.java
@@ -0,0 +1,34 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.ozone.om.protocolPB;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.ipc.ProtocolInfo;
+import org.apache.hadoop.ozone.protocol.proto
+    .OzoneManagerProtocolProtos.OzoneManagerService;
+
+/**
+ * Protocol used to communicate with OM.
+ */
+@ProtocolInfo(protocolName =
+    "org.apache.hadoop.ozone.protocol.OzoneManagerProtocol",
+    protocolVersion = 1)
+@InterfaceAudience.Private
+public interface OzoneManagerProtocolPB
+    extends OzoneManagerService.BlockingInterface {
+}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/061b1685/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/protocolPB/package-info.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/protocolPB/package-info.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/protocolPB/package-info.java
new file mode 100644
index 0000000..d595edf
--- /dev/null
+++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/protocolPB/package-info.java
@@ -0,0 +1,19 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.ozone.om.protocolPB;
\ No newline at end of file


---------------------------------------------------------------------
To unsubscribe, e-mail: common-commits-unsubscribe@hadoop.apache.org
For additional commands, e-mail: common-commits-help@hadoop.apache.org


[05/50] [abbrv] hadoop git commit: yarn.resourcemanager.fail-fast is used inconsistently. Contributed by Yuanbo Liu.

Posted by vi...@apache.org.
yarn.resourcemanager.fail-fast is used inconsistently. Contributed by Yuanbo Liu.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/d9ba6f36
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/d9ba6f36
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/d9ba6f36

Branch: refs/heads/HDFS-12090
Commit: d9ba6f3656e8dc97d2813181e27d12e52dca4328
Parents: 59a3038
Author: Junping Du <ju...@apache.org>
Authored: Tue Jul 3 14:46:44 2018 +0800
Committer: Junping Du <ju...@apache.org>
Committed: Tue Jul 3 14:46:44 2018 +0800

----------------------------------------------------------------------
 .../conf/capacity-scheduler.xml                           | 10 ++++++++++
 .../scheduler/capacity/CapacityScheduler.java             |  6 +++---
 .../capacity/CapacitySchedulerConfiguration.java          | 10 ++++++++++
 .../resourcemanager/TestWorkPreservingRMRestart.java      |  2 ++
 4 files changed, 25 insertions(+), 3 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/d9ba6f36/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/conf/capacity-scheduler.xml
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/conf/capacity-scheduler.xml b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/conf/capacity-scheduler.xml
index 62654ca..38526d1 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/conf/capacity-scheduler.xml
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/conf/capacity-scheduler.xml
@@ -207,4 +207,14 @@
     </description>
   </property>
 
+
+  <property>
+    <name>yarn.scheduler.capacity.application.fail-fast</name>
+    <value>false</value>
+    <description>
+      Whether RM should fail during recovery if previous applications'
+      queue is no longer valid.
+    </description>
+  </property>
+
 </configuration>

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d9ba6f36/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacityScheduler.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacityScheduler.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacityScheduler.java
index 54bbf24..b59636a 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacityScheduler.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacityScheduler.java
@@ -786,7 +786,7 @@ public class CapacityScheduler extends
       if (queue == null) {
         //During a restart, this indicates a queue was removed, which is
         //not presently supported
-        if (!YarnConfiguration.shouldRMFailFast(getConfig())) {
+        if (!getConfiguration().shouldAppFailFast(getConfig())) {
           this.rmContext.getDispatcher().getEventHandler().handle(
               new RMAppEvent(applicationId, RMAppEventType.KILL,
                   "Application killed on recovery as it"
@@ -807,7 +807,7 @@ public class CapacityScheduler extends
       if (!(queue instanceof LeafQueue)) {
         // During RM restart, this means leaf queue was converted to a parent
         // queue, which is not supported for running apps.
-        if (!YarnConfiguration.shouldRMFailFast(getConfig())) {
+        if (!getConfiguration().shouldAppFailFast(getConfig())) {
           this.rmContext.getDispatcher().getEventHandler().handle(
               new RMAppEvent(applicationId, RMAppEventType.KILL,
                   "Application killed on recovery as it was "
@@ -866,7 +866,7 @@ public class CapacityScheduler extends
           return autoCreateLeafQueue(placementContext);
         } catch (YarnException | IOException e) {
           if (isRecovery) {
-            if (!YarnConfiguration.shouldRMFailFast(getConfig())) {
+            if (!getConfiguration().shouldAppFailFast(getConfig())) {
               LOG.error("Could not auto-create leaf queue " + queueName +
                   " due to : ", e);
               this.rmContext.getDispatcher().getEventHandler().handle(

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d9ba6f36/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacitySchedulerConfiguration.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacitySchedulerConfiguration.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacitySchedulerConfiguration.java
index f94654e..e8de096 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacitySchedulerConfiguration.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacitySchedulerConfiguration.java
@@ -250,6 +250,12 @@ public class CapacitySchedulerConfiguration extends ReservationSchedulerConfigur
       SCHEDULE_ASYNCHRONOUSLY_PREFIX + ".maximum-pending-backlogs";
 
   @Private
+  public static final String APP_FAIL_FAST = PREFIX + "application.fail-fast";
+
+  @Private
+  public static final boolean DEFAULT_APP_FAIL_FAST = false;
+
+  @Private
   public static final Integer
       DEFAULT_SCHEDULE_ASYNCHRONOUSLY_MAXIMUM_PENDING_BACKLOGS = 100;
 
@@ -1336,6 +1342,10 @@ public class CapacitySchedulerConfiguration extends ReservationSchedulerConfigur
     return getBoolean(LAZY_PREEMPTION_ENABLED, DEFAULT_LAZY_PREEMPTION_ENABLED);
   }
 
+  public boolean shouldAppFailFast(Configuration conf) {
+    return conf.getBoolean(APP_FAIL_FAST, DEFAULT_APP_FAIL_FAST);
+  }
+
   private static final String PREEMPTION_CONFIG_PREFIX =
       "yarn.resourcemanager.monitor.capacity.preemption.";
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d9ba6f36/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestWorkPreservingRMRestart.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestWorkPreservingRMRestart.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestWorkPreservingRMRestart.java
index e4c83e3..88c19a1 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestWorkPreservingRMRestart.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestWorkPreservingRMRestart.java
@@ -760,6 +760,7 @@ public class TestWorkPreservingRMRestart extends ParameterizedSchedulerTestBase
       MockMemoryRMStateStore memStore, RMState state) throws Exception {
     // Restart RM with fail-fast as false. App should be killed.
     csConf.setBoolean(YarnConfiguration.RM_FAIL_FAST, false);
+    csConf.setBoolean(CapacitySchedulerConfiguration.APP_FAIL_FAST, false);
     rm2 = new MockRM(csConf, memStore);
     rm2.start();
 
@@ -794,6 +795,7 @@ public class TestWorkPreservingRMRestart extends ParameterizedSchedulerTestBase
 
     // Now restart RM with fail-fast as true. QueueException should be thrown.
     csConf.setBoolean(YarnConfiguration.RM_FAIL_FAST, true);
+    csConf.setBoolean(CapacitySchedulerConfiguration.APP_FAIL_FAST, true);
     MockRM rm = new MockRM(csConf, memStore2);
     try {
       rm.start();


---------------------------------------------------------------------
To unsubscribe, e-mail: common-commits-unsubscribe@hadoop.apache.org
For additional commands, e-mail: common-commits-help@hadoop.apache.org


[26/50] [abbrv] hadoop git commit: HADOOP-15571. Multiple FileContexts created with the same configuration object should be allowed to have different umask. Contributed by Vinod Kumar Vavilapalli.

Posted by vi...@apache.org.
HADOOP-15571. Multiple FileContexts created with the same configuration object should be allowed to have different umask. Contributed by Vinod Kumar Vavilapalli.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/498e3bfb
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/498e3bfb
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/498e3bfb

Branch: refs/heads/HDFS-12090
Commit: 498e3bfb6b93bf542e5581d83e64e920983fe87e
Parents: a129e3e
Author: Xiaoyu Yao <xy...@apache.org>
Authored: Thu Jul 5 14:19:05 2018 -0700
Committer: Xiaoyu Yao <xy...@apache.org>
Committed: Fri Jul 6 11:56:09 2018 -0700

----------------------------------------------------------------------
 .../java/org/apache/hadoop/fs/FileContext.java  |  9 ++--
 .../org/apache/hadoop/fs/TestFileContext.java   | 44 +++++++++++++++++++-
 .../logaggregation/AggregatedLogFormat.java     |  6 +--
 3 files changed, 49 insertions(+), 10 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/498e3bfb/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileContext.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileContext.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileContext.java
index 5215c3c..0b3889b 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileContext.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileContext.java
@@ -219,10 +219,12 @@ public class FileContext {
    * The FileContext is defined by.
    *  1) defaultFS (slash)
    *  2) wd
-   *  3) umask (Obtained by FsPermission.getUMask(conf))
+   *  3) umask (explicitly set via setUMask(),
+   *      falling back to FsPermission.getUMask(conf))
    */   
   private final AbstractFileSystem defaultFS; //default FS for this FileContext.
   private Path workingDir;          // Fully qualified
+  private FsPermission umask;
   private final Configuration conf;
   private final UserGroupInformation ugi;
   final boolean resolveSymlinks;
@@ -575,7 +577,7 @@ public class FileContext {
    * @return the umask of this FileContext
    */
   public FsPermission getUMask() {
-    return FsPermission.getUMask(conf);
+    return (umask != null ? umask : FsPermission.getUMask(conf));
   }
   
   /**
@@ -583,10 +585,9 @@ public class FileContext {
    * @param newUmask  the new umask
    */
   public void setUMask(final FsPermission newUmask) {
-    FsPermission.setUMask(conf, newUmask);
+    this.umask = newUmask;
   }
   
-  
   /**
    * Resolve the path following any symlinks or mount points
    * @param f to be resolved

http://git-wip-us.apache.org/repos/asf/hadoop/blob/498e3bfb/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestFileContext.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestFileContext.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestFileContext.java
index f5fb06f..60b24c7 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestFileContext.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestFileContext.java
@@ -17,13 +17,17 @@
  */
 package org.apache.hadoop.fs;
 
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.fail;
+
+import java.net.URI;
+
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.permission.FsPermission;
 import org.junit.Test;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
-import static org.junit.Assert.fail;
-
 public class TestFileContext {
   private static final Logger LOG = LoggerFactory.getLogger(TestFileContext
       .class);
@@ -39,4 +43,40 @@ public class TestFileContext {
       LOG.info("Expected exception: ", ufse);
     }
   }
+
+  @Test
+  public void testConfBasedAndAPIBasedSetUMask() throws Exception {
+
+    Configuration conf = new Configuration();
+
+    String defaultlUMask =
+        conf.get(CommonConfigurationKeys.FS_PERMISSIONS_UMASK_KEY);
+    assertEquals("Default UMask changed!", "022", defaultlUMask);
+
+    URI uri1 = new URI("file://mydfs:50070/");
+    URI uri2 = new URI("file://tmp");
+
+    FileContext fc1 = FileContext.getFileContext(uri1, conf);
+    FileContext fc2 = FileContext.getFileContext(uri2, conf);
+    assertEquals("Umask for fc1 is incorrect", 022, fc1.getUMask().toShort());
+    assertEquals("Umask for fc2 is incorrect", 022, fc2.getUMask().toShort());
+
+    // Till a user explicitly calls FileContext.setUMask(), the updates through
+    // configuration should be reflected..
+    conf.set(CommonConfigurationKeys.FS_PERMISSIONS_UMASK_KEY, "011");
+    assertEquals("Umask for fc1 is incorrect", 011, fc1.getUMask().toShort());
+    assertEquals("Umask for fc2 is incorrect", 011, fc2.getUMask().toShort());
+
+    // Stop reflecting the conf update for specific FileContexts, once an
+    // explicit setUMask is done.
+    conf.set(CommonConfigurationKeys.FS_PERMISSIONS_UMASK_KEY, "066");
+    fc1.setUMask(FsPermission.createImmutable((short) 00033));
+    assertEquals("Umask for fc1 is incorrect", 033, fc1.getUMask().toShort());
+    assertEquals("Umask for fc2 is incorrect", 066, fc2.getUMask().toShort());
+
+    conf.set(CommonConfigurationKeys.FS_PERMISSIONS_UMASK_KEY, "077");
+    fc2.setUMask(FsPermission.createImmutable((short) 00044));
+    assertEquals("Umask for fc1 is incorrect", 033, fc1.getUMask().toShort());
+    assertEquals("Umask for fc2 is incorrect", 044, fc2.getUMask().toShort());
+  }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/498e3bfb/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/logaggregation/AggregatedLogFormat.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/logaggregation/AggregatedLogFormat.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/logaggregation/AggregatedLogFormat.java
index 81d5053..4ee5c8a 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/logaggregation/AggregatedLogFormat.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/logaggregation/AggregatedLogFormat.java
@@ -96,9 +96,6 @@ public class AggregatedLogFormat {
    */
   private static final FsPermission APP_LOG_FILE_UMASK = FsPermission
       .createImmutable((short) (0640 ^ 0777));
-  /** Default permission for the log file. */
-  private static final FsPermission APP_LOG_FILE_PERM =
-      FsPermission.getFileDefault().applyUMask(APP_LOG_FILE_UMASK);
 
   static {
     RESERVED_KEYS = new HashMap<String, AggregatedLogFormat.LogKey>();
@@ -477,10 +474,11 @@ public class AggregatedLogFormat {
               @Override
               public FSDataOutputStream run() throws Exception {
                 fc = FileContext.getFileContext(remoteAppLogFile.toUri(), conf);
+                fc.setUMask(APP_LOG_FILE_UMASK);
                 return fc.create(
                     remoteAppLogFile,
                     EnumSet.of(CreateFlag.CREATE, CreateFlag.OVERWRITE),
-                    Options.CreateOpts.perms(APP_LOG_FILE_PERM));
+                    new Options.CreateOpts[] {});
               }
             });
       } catch (InterruptedException e) {


---------------------------------------------------------------------
To unsubscribe, e-mail: common-commits-unsubscribe@hadoop.apache.org
For additional commands, e-mail: common-commits-help@hadoop.apache.org


[13/50] [abbrv] hadoop git commit: HDFS-13528. RBF: If a directory exceeds quota limit then quota usage is not refreshed for other mount entries. Contributed by Dibyendu Karmakar.

Posted by vi...@apache.org.
HDFS-13528. RBF: If a directory exceeds quota limit then quota usage is not refreshed for other mount entries. Contributed by Dibyendu Karmakar.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/3b637155
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/3b637155
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/3b637155

Branch: refs/heads/HDFS-12090
Commit: 3b637155a47d2aa93284969a96208347a647083d
Parents: 7ca4f0c
Author: Yiqun Lin <yq...@apache.org>
Authored: Wed Jul 4 15:03:24 2018 +0800
Committer: Yiqun Lin <yq...@apache.org>
Committed: Wed Jul 4 15:03:24 2018 +0800

----------------------------------------------------------------------
 .../hdfs/server/federation/router/Quota.java    |   2 +-
 .../router/RouterQuotaUpdateService.java        |  43 +++-
 .../federation/router/TestRouterQuota.java      | 212 ++++++++++++++++++-
 3 files changed, 243 insertions(+), 14 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/3b637155/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/Quota.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/Quota.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/Quota.java
index dbb6ffa..413a4e1 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/Quota.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/Quota.java
@@ -199,7 +199,7 @@ public class Quota {
     if (manager != null) {
       Set<String> childrenPaths = manager.getPaths(path);
       for (String childPath : childrenPaths) {
-        locations.addAll(rpcServer.getLocationsForPath(childPath, true));
+        locations.addAll(rpcServer.getLocationsForPath(childPath, true, false));
       }
     }
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/3b637155/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterQuotaUpdateService.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterQuotaUpdateService.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterQuotaUpdateService.java
index 9fc93c1..506e2ee 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterQuotaUpdateService.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterQuotaUpdateService.java
@@ -27,6 +27,7 @@ import java.util.concurrent.TimeUnit;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.QuotaUsage;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants;
+import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
 import org.apache.hadoop.hdfs.server.federation.store.MountTableStore;
 import org.apache.hadoop.hdfs.server.federation.store.protocol.GetMountTableEntriesRequest;
 import org.apache.hadoop.hdfs.server.federation.store.protocol.GetMountTableEntriesResponse;
@@ -83,13 +84,40 @@ public class RouterQuotaUpdateService extends PeriodicService {
         RouterQuotaUsage oldQuota = entry.getQuota();
         long nsQuota = oldQuota.getQuota();
         long ssQuota = oldQuota.getSpaceQuota();
-        // Call RouterRpcServer#getQuotaUsage for getting current quota usage.
-        QuotaUsage currentQuotaUsage = this.rpcServer.getQuotaModule()
-            .getQuotaUsage(src);
+
+        QuotaUsage currentQuotaUsage = null;
+
+        // Check whether destination path exists in filesystem. If destination
+        // is not present, reset the usage. For other mount entry get current
+        // quota usage
+        HdfsFileStatus ret = this.rpcServer.getFileInfo(src);
+        if (ret == null) {
+          currentQuotaUsage = new RouterQuotaUsage.Builder()
+              .fileAndDirectoryCount(0)
+              .quota(nsQuota)
+              .spaceConsumed(0)
+              .spaceQuota(ssQuota).build();
+        } else {
+          // Call RouterRpcServer#getQuotaUsage for getting current quota usage.
+          // If any exception occurs catch it and proceed with other entries.
+          try {
+            currentQuotaUsage = this.rpcServer.getQuotaModule()
+                .getQuotaUsage(src);
+          } catch (IOException ioe) {
+            LOG.error("Unable to get quota usage for " + src, ioe);
+            continue;
+          }
+        }
+
         // If quota is not set in some subclusters under federation path,
         // set quota for this path.
         if (currentQuotaUsage.getQuota() == HdfsConstants.QUOTA_DONT_SET) {
-          this.rpcServer.setQuota(src, nsQuota, ssQuota, null);
+          try {
+            this.rpcServer.setQuota(src, nsQuota, ssQuota, null);
+          } catch (IOException ioe) {
+            LOG.error("Unable to set quota at remote location for "
+                + src, ioe);
+          }
         }
 
         RouterQuotaUsage newQuota = generateNewQuota(oldQuota,
@@ -221,7 +249,12 @@ public class RouterQuotaUpdateService extends PeriodicService {
     for (MountTable entry : updateMountTables) {
       UpdateMountTableEntryRequest updateRequest = UpdateMountTableEntryRequest
           .newInstance(entry);
-      getMountTableStore().updateMountTableEntry(updateRequest);
+      try {
+        getMountTableStore().updateMountTableEntry(updateRequest);
+      } catch (IOException e) {
+        LOG.error("Quota update error for mount entry "
+            + entry.getSourcePath(), e);
+      }
     }
   }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/3b637155/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterQuota.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterQuota.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterQuota.java
index c331c6b..431b394 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterQuota.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterQuota.java
@@ -20,6 +20,7 @@ package org.apache.hadoop.hdfs.server.federation.router;
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertNotEquals;
 import static org.junit.Assert.assertNull;
+import static org.junit.Assert.fail;
 
 import java.io.IOException;
 import java.util.Collections;
@@ -410,8 +411,7 @@ public class TestRouterQuota {
     updateService.periodicInvoke();
 
     // verify initial quota value
-    List<MountTable> results = getMountTable(path);
-    MountTable updatedMountTable = !results.isEmpty() ? results.get(0) : null;
+    MountTable updatedMountTable = getMountTable(path);
     RouterQuotaUsage quota = updatedMountTable.getQuota();
     assertEquals(nsQuota, quota.getQuota());
     assertEquals(ssQuota, quota.getSpaceQuota());
@@ -426,8 +426,7 @@ public class TestRouterQuota {
     appendData(path + "/file", routerClient, BLOCK_SIZE);
 
     updateService.periodicInvoke();
-    results = getMountTable(path);
-    updatedMountTable = !results.isEmpty() ? results.get(0) : null;
+    updatedMountTable = getMountTable(path);
     quota = updatedMountTable.getQuota();
 
     // verify if quota has been updated in state store
@@ -443,17 +442,18 @@ public class TestRouterQuota {
    * @return If it was successfully got.
    * @throws IOException Problems getting entries.
    */
-  private List<MountTable> getMountTable(String path) throws IOException {
+  private MountTable getMountTable(String path) throws IOException {
     // Reload the Router cache
     resolver.loadCache(true);
     RouterClient client = routerContext.getAdminClient();
     MountTableManager mountTableManager = client.getMountTableManager();
     GetMountTableEntriesRequest getRequest = GetMountTableEntriesRequest
         .newInstance(path);
-    GetMountTableEntriesResponse removeResponse = mountTableManager
+    GetMountTableEntriesResponse response = mountTableManager
         .getMountTableEntries(getRequest);
+    List<MountTable> results = response.getEntries();
 
-    return removeResponse.getEntries();
+    return !results.isEmpty() ? results.get(0) : null;
   }
 
   @Test
@@ -493,4 +493,200 @@ public class TestRouterQuota {
     assertEquals(updateNsQuota, realQuota.getQuota());
     assertEquals(updateSsQuota, realQuota.getSpaceQuota());
   }
-}
+
+  @Test
+  public void testQuotaRefreshAfterQuotaExceed() throws Exception {
+    long nsQuota = 3;
+    long ssQuota = 100;
+    final FileSystem nnFs1 = nnContext1.getFileSystem();
+    final FileSystem nnFs2 = nnContext2.getFileSystem();
+
+    // Add two mount tables:
+    // /setquota1 --> ns0---testdir11
+    // /setquota2 --> ns1---testdir12
+    nnFs1.mkdirs(new Path("/testdir11"));
+    nnFs2.mkdirs(new Path("/testdir12"));
+    MountTable mountTable1 = MountTable.newInstance("/setquota1",
+        Collections.singletonMap("ns0", "/testdir11"));
+    mountTable1
+        .setQuota(new RouterQuotaUsage.Builder().quota(nsQuota)
+        .spaceQuota(ssQuota).build());
+    addMountTable(mountTable1);
+
+    MountTable mountTable2 = MountTable.newInstance("/setquota2",
+        Collections.singletonMap("ns1", "/testdir12"));
+    mountTable2
+        .setQuota(new RouterQuotaUsage.Builder().quota(nsQuota)
+        .spaceQuota(ssQuota).build());
+    addMountTable(mountTable2);
+
+    final FileSystem routerFs = routerContext.getFileSystem();
+    // Create directory to make directory count equals to nsQuota
+    routerFs.mkdirs(new Path("/setquota1/" + UUID.randomUUID()));
+    routerFs.mkdirs(new Path("/setquota1/" + UUID.randomUUID()));
+
+    // create one more directory to exceed the nsQuota
+    routerFs.mkdirs(new Path("/setquota1/" + UUID.randomUUID()));
+
+    RouterQuotaUpdateService updateService = routerContext.getRouter()
+        .getQuotaCacheUpdateService();
+    // Call RouterQuotaUpdateService#periodicInvoke to update quota cache
+    updateService.periodicInvoke();
+    // Reload the Router cache
+    resolver.loadCache(true);
+
+    RouterQuotaManager quotaManager =
+        routerContext.getRouter().getQuotaManager();
+    ClientProtocol client1 = nnContext1.getClient().getNamenode();
+    ClientProtocol client2 = nnContext2.getClient().getNamenode();
+    QuotaUsage quota1 = client1.getQuotaUsage("/testdir11");
+    QuotaUsage quota2 = client2.getQuotaUsage("/testdir12");
+    QuotaUsage cacheQuota1 = quotaManager.getQuotaUsage("/setquota1");
+    QuotaUsage cacheQuota2 = quotaManager.getQuotaUsage("/setquota2");
+
+    // Verify quota usage
+    assertEquals(4, quota1.getFileAndDirectoryCount());
+    assertEquals(4, cacheQuota1.getFileAndDirectoryCount());
+    assertEquals(1, quota2.getFileAndDirectoryCount());
+    assertEquals(1, cacheQuota2.getFileAndDirectoryCount());
+
+    try {
+      // create new directory to trigger NSQuotaExceededException
+      routerFs.mkdirs(new Path("/testdir11/" + UUID.randomUUID()));
+      fail("Mkdir should be failed under dir /testdir11.");
+    } catch (NSQuotaExceededException ignored) {
+    }
+
+    // Create directory under the other mount point
+    routerFs.mkdirs(new Path("/setquota2/" + UUID.randomUUID()));
+    routerFs.mkdirs(new Path("/setquota2/" + UUID.randomUUID()));
+
+    // Call RouterQuotaUpdateService#periodicInvoke to update quota cache
+    updateService.periodicInvoke();
+
+    quota1 = client1.getQuotaUsage("/testdir11");
+    cacheQuota1 = quotaManager.getQuotaUsage("/setquota1");
+    quota2 = client2.getQuotaUsage("/testdir12");
+    cacheQuota2 = quotaManager.getQuotaUsage("/setquota2");
+
+    // Verify whether quota usage cache is update by periodicInvoke().
+    assertEquals(4, quota1.getFileAndDirectoryCount());
+    assertEquals(4, cacheQuota1.getFileAndDirectoryCount());
+    assertEquals(3, quota2.getFileAndDirectoryCount());
+    assertEquals(3, cacheQuota2.getFileAndDirectoryCount());
+  }
+
+  /**
+   * Verify whether mount table and quota usage cache is updated properly.
+   * {@link RouterQuotaUpdateService#periodicInvoke()} should be able to update
+   * the cache and the mount table even if the destination directory for some
+   * mount entry is not present in the filesystem.
+   */
+  @Test
+  public void testQuotaRefreshWhenDestinationNotPresent() throws Exception {
+    long nsQuota = 5;
+    long ssQuota = 3*BLOCK_SIZE;
+    final FileSystem nnFs = nnContext1.getFileSystem();
+
+    // Add three mount tables:
+    // /setdir1 --> ns0---testdir13
+    // /setdir2 --> ns0---testdir14
+    // Create destination directory
+    nnFs.mkdirs(new Path("/testdir13"));
+    nnFs.mkdirs(new Path("/testdir14"));
+
+    MountTable mountTable = MountTable.newInstance("/setdir1",
+        Collections.singletonMap("ns0", "/testdir13"));
+    mountTable
+        .setQuota(new RouterQuotaUsage.Builder().quota(nsQuota)
+        .spaceQuota(ssQuota).build());
+    addMountTable(mountTable);
+
+    mountTable = MountTable.newInstance("/setdir2",
+        Collections.singletonMap("ns0", "/testdir14"));
+    mountTable
+        .setQuota(new RouterQuotaUsage.Builder().quota(nsQuota)
+        .spaceQuota(ssQuota).build());
+    addMountTable(mountTable);
+
+    final DFSClient routerClient = routerContext.getClient();
+    // Create file
+    routerClient.create("/setdir1/file1", true).close();
+    routerClient.create("/setdir2/file2", true).close();
+    // append data to the file
+    appendData("/setdir1/file1", routerClient, BLOCK_SIZE);
+    appendData("/setdir2/file2", routerClient, BLOCK_SIZE);
+
+    RouterQuotaUpdateService updateService =
+        routerContext.getRouter().getQuotaCacheUpdateService();
+    // Update quota cache
+    updateService.periodicInvoke();
+    // Reload the Router cache
+    resolver.loadCache(true);
+
+    ClientProtocol client1 = nnContext1.getClient().getNamenode();
+    RouterQuotaManager quotaManager =
+        routerContext.getRouter().getQuotaManager();
+    QuotaUsage quota1 = client1.getQuotaUsage("/testdir13");
+    QuotaUsage quota2 = client1.getQuotaUsage("/testdir14");
+    QuotaUsage cacheQuota1 = quotaManager.getQuotaUsage("/setdir1");
+    QuotaUsage cacheQuota2 = quotaManager.getQuotaUsage("/setdir2");
+
+    // Get quota details in mount table
+    MountTable updatedMountTable = getMountTable("/setdir1");
+    RouterQuotaUsage mountQuota1 = updatedMountTable.getQuota();
+    updatedMountTable = getMountTable("/setdir2");
+    RouterQuotaUsage mountQuota2 = updatedMountTable.getQuota();
+
+    // Verify quota usage
+    assertEquals(2, quota1.getFileAndDirectoryCount());
+    assertEquals(2, cacheQuota1.getFileAndDirectoryCount());
+    assertEquals(2, mountQuota1.getFileAndDirectoryCount());
+    assertEquals(2, quota2.getFileAndDirectoryCount());
+    assertEquals(2, cacheQuota2.getFileAndDirectoryCount());
+    assertEquals(2, mountQuota2.getFileAndDirectoryCount());
+    assertEquals(BLOCK_SIZE, quota1.getSpaceConsumed());
+    assertEquals(BLOCK_SIZE, cacheQuota1.getSpaceConsumed());
+    assertEquals(BLOCK_SIZE, mountQuota1.getSpaceConsumed());
+    assertEquals(BLOCK_SIZE, quota2.getSpaceConsumed());
+    assertEquals(BLOCK_SIZE, cacheQuota2.getSpaceConsumed());
+    assertEquals(BLOCK_SIZE, mountQuota2.getSpaceConsumed());
+
+    FileSystem routerFs = routerContext.getFileSystem();
+    // Remove destination directory for the mount entry
+    routerFs.delete(new Path("/setdir1"), true);
+
+    // Create file
+    routerClient.create("/setdir2/file3", true).close();
+    // append data to the file
+    appendData("/setdir2/file3", routerClient, BLOCK_SIZE);
+    int updatedSpace = BLOCK_SIZE + BLOCK_SIZE;
+
+    // Update quota cache
+    updateService.periodicInvoke();
+
+    quota2 = client1.getQuotaUsage("/testdir14");
+    cacheQuota1 = quotaManager.getQuotaUsage("/setdir1");
+    cacheQuota2 = quotaManager.getQuotaUsage("/setdir2");
+
+    // Get quota details in mount table
+    updatedMountTable = getMountTable("/setdir1");
+    mountQuota1 = updatedMountTable.getQuota();
+    updatedMountTable = getMountTable("/setdir2");
+    mountQuota2 = updatedMountTable.getQuota();
+
+    // If destination is not present the quota usage should be reset to 0
+    assertEquals(0, cacheQuota1.getFileAndDirectoryCount());
+    assertEquals(0, mountQuota1.getFileAndDirectoryCount());
+    assertEquals(0, cacheQuota1.getSpaceConsumed());
+    assertEquals(0, mountQuota1.getSpaceConsumed());
+
+    // Verify current quota usage for other mount entries
+    assertEquals(3, quota2.getFileAndDirectoryCount());
+    assertEquals(3, cacheQuota2.getFileAndDirectoryCount());
+    assertEquals(3, mountQuota2.getFileAndDirectoryCount());
+    assertEquals(updatedSpace, quota2.getSpaceConsumed());
+    assertEquals(updatedSpace, cacheQuota2.getSpaceConsumed());
+    assertEquals(updatedSpace, mountQuota2.getSpaceConsumed());
+  }
+}
\ No newline at end of file


---------------------------------------------------------------------
To unsubscribe, e-mail: common-commits-unsubscribe@hadoop.apache.org
For additional commands, e-mail: common-commits-help@hadoop.apache.org


[25/50] [abbrv] hadoop git commit: YARN-7451. Add missing tests to verify the presence of custom resources of RM apps and scheduler webservice endpoints (snemeth via rkanter)

Posted by vi...@apache.org.
YARN-7451. Add missing tests to verify the presence of custom resources of RM apps and scheduler webservice endpoints (snemeth via rkanter)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/a129e3e7
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/a129e3e7
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/a129e3e7

Branch: refs/heads/HDFS-12090
Commit: a129e3e74e16ed039d637dc1499dc3e5df317d94
Parents: 9edc74f
Author: Robert Kanter <rk...@apache.org>
Authored: Thu Jul 5 10:54:19 2018 -0700
Committer: Sunil G <su...@apache.org>
Committed: Fri Jul 6 11:04:00 2018 -0700

----------------------------------------------------------------------
 .../resourcemanager/webapp/dao/AppInfo.java     |   2 +-
 .../webapp/dao/SchedulerInfo.java               |   8 +-
 .../fair/TestFairSchedulerConfiguration.java    |   9 +-
 .../webapp/TestRMWebServices.java               |  31 ++-
 .../webapp/TestRMWebServicesApps.java           |  14 +-
 ...estRMWebServicesAppsCustomResourceTypes.java | 242 +++++++++++++++++
 .../webapp/TestRMWebServicesCapacitySched.java  |  30 +-
 .../TestRMWebServicesConfigurationMutation.java |   5 +
 .../webapp/TestRMWebServicesFairScheduler.java  |  95 +++----
 .../TestRMWebServicesSchedulerActivities.java   |   2 +-
 ...ustomResourceTypesConfigurationProvider.java | 138 ++++++++++
 .../FairSchedulerJsonVerifications.java         | 139 ++++++++++
 .../FairSchedulerXmlVerifications.java          | 153 +++++++++++
 ...ervicesFairSchedulerCustomResourceTypes.java | 271 +++++++++++++++++++
 .../webapp/helper/AppInfoJsonVerifications.java | 123 +++++++++
 .../webapp/helper/AppInfoXmlVerifications.java  | 132 +++++++++
 .../webapp/helper/BufferedClientResponse.java   |  57 ++++
 .../helper/JsonCustomResourceTypeTestcase.java  |  77 ++++++
 .../ResourceRequestsJsonVerifications.java      | 252 +++++++++++++++++
 .../ResourceRequestsXmlVerifications.java       | 215 +++++++++++++++
 .../helper/XmlCustomResourceTypeTestCase.java   | 112 ++++++++
 21 files changed, 2020 insertions(+), 87 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/a129e3e7/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/AppInfo.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/AppInfo.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/AppInfo.java
index d47f13d..9d82bc7 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/AppInfo.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/AppInfo.java
@@ -479,7 +479,7 @@ public class AppInfo {
   public int getNumNonAMContainersPreempted() {
     return numNonAMContainerPreempted;
   }
-  
+
   public int getNumAMContainersPreempted() {
     return numAMContainerPreempted;
   }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a129e3e7/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/SchedulerInfo.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/SchedulerInfo.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/SchedulerInfo.java
index 81491b1..163f707 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/SchedulerInfo.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/SchedulerInfo.java
@@ -41,8 +41,9 @@ public class SchedulerInfo {
   protected EnumSet<SchedulerResourceTypes> schedulingResourceTypes;
   protected int maximumClusterPriority;
 
+  // JAXB needs this
   public SchedulerInfo() {
-  } // JAXB needs this
+  }
 
   public SchedulerInfo(final ResourceManager rm) {
     ResourceScheduler rs = rm.getResourceScheduler();
@@ -74,7 +75,10 @@ public class SchedulerInfo {
   }
 
   public String getSchedulerResourceTypes() {
-    return Arrays.toString(minAllocResource.getResource().getResources());
+    if (minAllocResource != null) {
+      return Arrays.toString(minAllocResource.getResource().getResources());
+    }
+    return null;
   }
 
   public int getMaxClusterLevelAppPriority() {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a129e3e7/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestFairSchedulerConfiguration.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestFairSchedulerConfiguration.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestFairSchedulerConfiguration.java
index 76a5af5..70f83ab 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestFairSchedulerConfiguration.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestFairSchedulerConfiguration.java
@@ -48,6 +48,9 @@ import org.apache.log4j.spi.LoggingEvent;
 import org.junit.Assert;
 import org.junit.Test;
 
+/**
+ * Tests fair scheduler configuration.
+ */
 public class TestFairSchedulerConfiguration {
 
   private static final String A_CUSTOM_RESOURCE = "a-custom-resource";
@@ -242,12 +245,12 @@ public class TestFairSchedulerConfiguration {
         parseResourceConfigValue(" vcores = 75 % , memory-mb = 40 % , "
             + "test1 = 50 % ").getResource(clusterResource));
   }
-  
+
   @Test(expected = AllocationConfigurationException.class)
   public void testNoUnits() throws Exception {
     parseResourceConfigValue("1024");
   }
-  
+
   @Test(expected = AllocationConfigurationException.class)
   public void testOnlyMemory() throws Exception {
     parseResourceConfigValue("1024mb");
@@ -257,7 +260,7 @@ public class TestFairSchedulerConfiguration {
   public void testOnlyCPU() throws Exception {
     parseResourceConfigValue("1024vcores");
   }
-  
+
   @Test(expected = AllocationConfigurationException.class)
   public void testGibberish() throws Exception {
     parseResourceConfigValue("1o24vc0res");

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a129e3e7/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServices.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServices.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServices.java
index 0702d65..3902889 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServices.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServices.java
@@ -53,11 +53,7 @@ import org.apache.hadoop.yarn.api.records.ApplicationReport;
 import org.apache.hadoop.yarn.api.records.QueueACL;
 import org.apache.hadoop.yarn.api.records.QueueState;
 import org.apache.hadoop.yarn.conf.YarnConfiguration;
-import org.apache.hadoop.yarn.server.resourcemanager.ClientRMService;
-import org.apache.hadoop.yarn.server.resourcemanager.ClusterMetrics;
-import org.apache.hadoop.yarn.server.resourcemanager.MockRM;
-import org.apache.hadoop.yarn.server.resourcemanager.RMContextImpl;
-import org.apache.hadoop.yarn.server.resourcemanager.ResourceManager;
+import org.apache.hadoop.yarn.server.resourcemanager.*;
 import org.apache.hadoop.yarn.server.resourcemanager.nodelabels.RMNodeLabelsManager;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.QueueMetrics;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.ResourceScheduler;
@@ -76,11 +72,12 @@ import org.apache.hadoop.yarn.webapp.JerseyTestBase;
 import org.apache.hadoop.yarn.webapp.WebServicesTestUtils;
 import org.codehaus.jettison.json.JSONException;
 import org.codehaus.jettison.json.JSONObject;
-import org.eclipse.jetty.server.Response;
 import org.junit.Assert;
 import org.junit.Before;
 import org.junit.BeforeClass;
 import org.junit.Test;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 import org.w3c.dom.Document;
 import org.w3c.dom.Element;
 import org.w3c.dom.NodeList;
@@ -96,6 +93,8 @@ import com.sun.jersey.guice.spi.container.servlet.GuiceContainer;
 import com.sun.jersey.test.framework.WebAppDescriptor;
 
 public class TestRMWebServices extends JerseyTestBase {
+  private static final Logger LOG =
+          LoggerFactory.getLogger(TestRMWebServices.class);
 
   private static MockRM rm;
 
@@ -472,19 +471,19 @@ public class TestRMWebServices extends JerseyTestBase {
     QueueMetrics metrics = rs.getRootQueueMetrics();
     ClusterMetrics clusterMetrics = ClusterMetrics.getMetrics();
 
-    long totalMBExpect = 
+    long totalMBExpect =
         metrics.getAvailableMB() + metrics.getAllocatedMB();
-    long totalVirtualCoresExpect = 
+    long totalVirtualCoresExpect =
         metrics.getAvailableVirtualCores() + metrics.getAllocatedVirtualCores();
-    assertEquals("appsSubmitted doesn't match", 
+    assertEquals("appsSubmitted doesn't match",
         metrics.getAppsSubmitted(), submittedApps);
-    assertEquals("appsCompleted doesn't match", 
+    assertEquals("appsCompleted doesn't match",
         metrics.getAppsCompleted(), completedApps);
     assertEquals("reservedMB doesn't match",
         metrics.getReservedMB(), reservedMB);
-    assertEquals("availableMB doesn't match", 
+    assertEquals("availableMB doesn't match",
         metrics.getAvailableMB(), availableMB);
-    assertEquals("allocatedMB doesn't match", 
+    assertEquals("allocatedMB doesn't match",
         metrics.getAllocatedMB(), allocMB);
     assertEquals("reservedVirtualCores doesn't match",
         metrics.getReservedVirtualCores(), reservedVirtualCores);
@@ -597,11 +596,13 @@ public class TestRMWebServices extends JerseyTestBase {
 
   public void verifyClusterSchedulerFifo(JSONObject json) throws JSONException,
       Exception {
-    assertEquals("incorrect number of elements", 1, json.length());
+    assertEquals("incorrect number of elements in: " + json, 1, json.length());
     JSONObject info = json.getJSONObject("scheduler");
-    assertEquals("incorrect number of elements", 1, info.length());
+    assertEquals("incorrect number of elements in: " + info, 1, info.length());
     info = info.getJSONObject("schedulerInfo");
-    assertEquals("incorrect number of elements", 11, info.length());
+
+    LOG.debug("schedulerInfo: {}", info);
+    assertEquals("incorrect number of elements in: " + info, 11, info.length());
 
     verifyClusterSchedulerFifoGeneric(info.getString("type"),
         info.getString("qstate"), (float) info.getDouble("capacity"),

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a129e3e7/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServicesApps.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServicesApps.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServicesApps.java
index 6c6f400..15f94e1 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServicesApps.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServicesApps.java
@@ -79,7 +79,7 @@ import com.sun.jersey.test.framework.WebAppDescriptor;
 public class TestRMWebServicesApps extends JerseyTestBase {
 
   private static MockRM rm;
-  
+
   private static final int CONTAINER_MB = 1024;
 
   private static class WebServletModule extends ServletModule {
@@ -324,7 +324,7 @@ public class TestRMWebServicesApps extends JerseyTestBase {
     assertEquals("incorrect number of elements", 1, apps.length());
     array = apps.getJSONArray("app");
     assertEquals("incorrect number of elements", 2, array.length());
-    assertTrue("both app states of ACCEPTED and KILLED are not present", 
+    assertTrue("both app states of ACCEPTED and KILLED are not present",
         (array.getJSONObject(0).getString("state").equals("ACCEPTED") &&
         array.getJSONObject(1).getString("state").equals("KILLED")) ||
         (array.getJSONObject(0).getString("state").equals("KILLED") &&
@@ -375,12 +375,12 @@ public class TestRMWebServicesApps extends JerseyTestBase {
     assertEquals("incorrect number of elements", 1, apps.length());
     array = apps.getJSONArray("app");
     assertEquals("incorrect number of elements", 2, array.length());
-    assertTrue("both app states of ACCEPTED and KILLED are not present", 
+    assertTrue("both app states of ACCEPTED and KILLED are not present",
         (array.getJSONObject(0).getString("state").equals("ACCEPTED") &&
         array.getJSONObject(1).getString("state").equals("KILLED")) ||
         (array.getJSONObject(0).getString("state").equals("KILLED") &&
         array.getJSONObject(1).getString("state").equals("ACCEPTED")));
-    
+
     rm.stop();
   }
 
@@ -511,7 +511,8 @@ public class TestRMWebServicesApps extends JerseyTestBase {
     WebResource r = resource();
 
     ClientResponse response = r.path("ws").path("v1").path("cluster")
-        .path("apps").queryParam("finalStatus", FinalApplicationStatus.UNDEFINED.toString())
+        .path("apps").queryParam("finalStatus",
+                    FinalApplicationStatus.UNDEFINED.toString())
         .accept(MediaType.APPLICATION_JSON).get(ClientResponse.class);
     assertEquals(MediaType.APPLICATION_JSON_TYPE + "; " + JettyUtils.UTF_8,
         response.getType().toString());
@@ -1804,7 +1805,8 @@ public class TestRMWebServicesApps extends JerseyTestBase {
     int numAttempt = 1;
     while (true) {
       // fail the AM by sending CONTAINER_FINISHED event without registering.
-      amNodeManager.nodeHeartbeat(am.getApplicationAttemptId(), 1, ContainerState.COMPLETE);
+      amNodeManager.nodeHeartbeat(am.getApplicationAttemptId(), 1,
+              ContainerState.COMPLETE);
       rm.waitForState(am.getApplicationAttemptId(), RMAppAttemptState.FAILED);
       if (numAttempt == maxAppAttempts) {
         rm.waitForState(app1.getApplicationId(), RMAppState.FAILED);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a129e3e7/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServicesAppsCustomResourceTypes.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServicesAppsCustomResourceTypes.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServicesAppsCustomResourceTypes.java
new file mode 100644
index 0000000..83e0056
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServicesAppsCustomResourceTypes.java
@@ -0,0 +1,242 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.server.resourcemanager.webapp;
+
+import com.google.inject.Guice;
+import com.google.inject.servlet.ServletModule;
+import com.sun.jersey.api.client.ClientResponse;
+import com.sun.jersey.api.client.WebResource;
+import com.sun.jersey.guice.spi.container.servlet.GuiceContainer;
+import com.sun.jersey.test.framework.WebAppDescriptor;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.yarn.api.records.ResourceRequest;
+import org.apache.hadoop.yarn.conf.YarnConfiguration;
+import org.apache.hadoop.yarn.server.resourcemanager.MockAM;
+import org.apache.hadoop.yarn.server.resourcemanager.MockNM;
+import org.apache.hadoop.yarn.server.resourcemanager.MockRM;
+import org.apache.hadoop.yarn.server.resourcemanager.ResourceManager;
+import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMApp;
+import org.apache.hadoop.yarn.server.resourcemanager.scheduler.AbstractYarnScheduler;
+import org.apache.hadoop.yarn.server.resourcemanager.scheduler.ResourceScheduler;
+import org.apache.hadoop.yarn.server.resourcemanager.scheduler.fifo.FifoScheduler;
+import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.AppInfo;
+import org.apache.hadoop.yarn.server.resourcemanager.webapp.fairscheduler.CustomResourceTypesConfigurationProvider;
+import org.apache.hadoop.yarn.server.resourcemanager.webapp.helper.AppInfoJsonVerifications;
+import org.apache.hadoop.yarn.server.resourcemanager.webapp.helper.AppInfoXmlVerifications;
+import org.apache.hadoop.yarn.server.resourcemanager.webapp.helper.BufferedClientResponse;
+import org.apache.hadoop.yarn.server.resourcemanager.webapp.helper.JsonCustomResourceTypeTestcase;
+import org.apache.hadoop.yarn.server.resourcemanager.webapp.helper.ResourceRequestsJsonVerifications;
+import org.apache.hadoop.yarn.server.resourcemanager.webapp.helper.ResourceRequestsXmlVerifications;
+import org.apache.hadoop.yarn.server.resourcemanager.webapp.helper.XmlCustomResourceTypeTestCase;
+import org.apache.hadoop.yarn.util.resource.ResourceUtils;
+import org.apache.hadoop.yarn.webapp.GenericExceptionHandler;
+import org.apache.hadoop.yarn.webapp.GuiceServletConfig;
+import org.apache.hadoop.yarn.webapp.JerseyTestBase;
+import org.codehaus.jettison.json.JSONArray;
+import org.codehaus.jettison.json.JSONException;
+import org.codehaus.jettison.json.JSONObject;
+import org.junit.Before;
+import org.junit.Test;
+import org.w3c.dom.Element;
+import org.w3c.dom.Node;
+import org.w3c.dom.NodeList;
+
+import javax.ws.rs.core.MediaType;
+import java.util.ArrayList;
+
+import static org.junit.Assert.assertEquals;
+
+/**
+ * This test verifies that custom resource types are correctly serialized to XML
+ * and JSON when HTTP GET request is sent to the resource: ws/v1/cluster/apps.
+ */
+public class TestRMWebServicesAppsCustomResourceTypes extends JerseyTestBase {
+
+  private static MockRM rm;
+  private static final int CONTAINER_MB = 1024;
+
+  private static class WebServletModule extends ServletModule {
+    @Override
+    protected void configureServlets() {
+      bind(JAXBContextResolver.class);
+      bind(RMWebServices.class);
+      bind(GenericExceptionHandler.class);
+      Configuration conf = new Configuration();
+      conf.setInt(YarnConfiguration.RM_AM_MAX_ATTEMPTS,
+          YarnConfiguration.DEFAULT_RM_AM_MAX_ATTEMPTS);
+      conf.setClass(YarnConfiguration.RM_SCHEDULER, FifoScheduler.class,
+          ResourceScheduler.class);
+      initResourceTypes(conf);
+      rm = new MockRM(conf);
+      bind(ResourceManager.class).toInstance(rm);
+      serve("/*").with(GuiceContainer.class);
+    }
+
+    private void initResourceTypes(Configuration conf) {
+      conf.set(YarnConfiguration.RM_CONFIGURATION_PROVIDER_CLASS,
+          CustomResourceTypesConfigurationProvider.class.getName());
+      ResourceUtils.resetResourceTypes(conf);
+    }
+  }
+
+  @Before
+  @Override
+  public void setUp() throws Exception {
+    super.setUp();
+    createInjectorForWebServletModule();
+  }
+
+  private void createInjectorForWebServletModule() {
+    GuiceServletConfig
+        .setInjector(Guice.createInjector(new WebServletModule()));
+  }
+
+  public TestRMWebServicesAppsCustomResourceTypes() {
+    super(new WebAppDescriptor.Builder(
+        "org.apache.hadoop.yarn.server.resourcemanager.webapp")
+            .contextListenerClass(GuiceServletConfig.class)
+            .filterClass(com.google.inject.servlet.GuiceFilter.class)
+            .contextPath("jersey-guice-filter").servletPath("/").build());
+  }
+
+  @Test
+  public void testRunningAppXml() throws Exception {
+    rm.start();
+    MockNM amNodeManager = rm.registerNode("127.0.0.1:1234", 2048);
+    RMApp app1 = rm.submitApp(CONTAINER_MB, "testwordcount", "user1");
+    MockAM am1 = MockRM.launchAndRegisterAM(app1, rm, amNodeManager);
+    am1.allocate("*", 2048, 1, new ArrayList<>());
+    amNodeManager.nodeHeartbeat(true);
+
+    WebResource r = resource();
+    WebResource path = r.path("ws").path("v1").path("cluster").path("apps");
+    ClientResponse response =
+        path.accept(MediaType.APPLICATION_XML).get(ClientResponse.class);
+
+    XmlCustomResourceTypeTestCase testCase =
+            new XmlCustomResourceTypeTestCase(path,
+                    new BufferedClientResponse(response));
+    testCase.verify(document -> {
+      NodeList apps = document.getElementsByTagName("apps");
+      assertEquals("incorrect number of apps elements", 1, apps.getLength());
+
+      NodeList appArray = ((Element)(apps.item(0)))
+              .getElementsByTagName("app");
+      assertEquals("incorrect number of app elements", 1, appArray.getLength());
+
+      verifyAppsXML(appArray, app1);
+    });
+
+    rm.stop();
+  }
+
+  @Test
+  public void testRunningAppJson() throws Exception {
+    rm.start();
+    MockNM amNodeManager = rm.registerNode("127.0.0.1:1234", 2048);
+    RMApp app1 = rm.submitApp(CONTAINER_MB, "testwordcount", "user1");
+    MockAM am1 = MockRM.launchAndRegisterAM(app1, rm, amNodeManager);
+    am1.allocate("*", 2048, 1, new ArrayList<>());
+    amNodeManager.nodeHeartbeat(true);
+
+    WebResource r = resource();
+    WebResource path = r.path("ws").path("v1").path("cluster").path("apps");
+    ClientResponse response =
+        path.accept(MediaType.APPLICATION_JSON).get(ClientResponse.class);
+
+    JsonCustomResourceTypeTestcase testCase =
+        new JsonCustomResourceTypeTestcase(path,
+            new BufferedClientResponse(response));
+    testCase.verify(json -> {
+      try {
+        assertEquals("incorrect number of apps elements", 1, json.length());
+        JSONObject apps = json.getJSONObject("apps");
+        assertEquals("incorrect number of app elements", 1, apps.length());
+        JSONArray array = apps.getJSONArray("app");
+        assertEquals("incorrect count of app", 1, array.length());
+
+        verifyAppInfoJson(array.getJSONObject(0), app1);
+      } catch (JSONException e) {
+        throw new RuntimeException(e);
+      }
+    });
+
+    rm.stop();
+  }
+
+  private void verifyAppsXML(NodeList appArray, RMApp app) {
+    for (int i = 0; i < appArray.getLength(); i++) {
+      Element element = (Element) appArray.item(i);
+      AppInfoXmlVerifications.verify(element, app);
+
+      NodeList resourceRequests =
+          element.getElementsByTagName("resourceRequests");
+      assertEquals(1, resourceRequests.getLength());
+      Node resourceRequest = resourceRequests.item(0);
+      ResourceRequest rr =
+          ((AbstractYarnScheduler) rm.getRMContext().getScheduler())
+              .getApplicationAttempt(
+                  app.getCurrentAppAttempt().getAppAttemptId())
+              .getAppSchedulingInfo().getAllResourceRequests().get(0);
+      ResourceRequestsXmlVerifications.verifyWithCustomResourceTypes(
+              (Element) resourceRequest, rr,
+          CustomResourceTypesConfigurationProvider.getCustomResourceTypes());
+    }
+  }
+
+  private void verifyAppInfoJson(JSONObject info, RMApp app) throws
+          JSONException {
+    int expectedNumberOfElements = getExpectedNumberOfElements(app);
+
+    assertEquals("incorrect number of elements", expectedNumberOfElements,
+        info.length());
+
+    AppInfoJsonVerifications.verify(info, app);
+
+    JSONArray resourceRequests = info.getJSONArray("resourceRequests");
+    JSONObject requestInfo = resourceRequests.getJSONObject(0);
+    ResourceRequest rr =
+        ((AbstractYarnScheduler) rm.getRMContext().getScheduler())
+            .getApplicationAttempt(app.getCurrentAppAttempt().getAppAttemptId())
+            .getAppSchedulingInfo().getAllResourceRequests().get(0);
+
+    ResourceRequestsJsonVerifications.verifyWithCustomResourceTypes(
+            requestInfo, rr,
+            CustomResourceTypesConfigurationProvider.getCustomResourceTypes());
+  }
+
+  private int getExpectedNumberOfElements(RMApp app) {
+    int expectedNumberOfElements = 40 + 2; // 2 -> resourceRequests
+    if (app.getApplicationSubmissionContext()
+        .getNodeLabelExpression() != null) {
+      expectedNumberOfElements++;
+    }
+
+    if (app.getAMResourceRequests().get(0).getNodeLabelExpression() != null) {
+      expectedNumberOfElements++;
+    }
+
+    if (AppInfo
+        .getAmRPCAddressFromRMAppAttempt(app.getCurrentAppAttempt()) != null) {
+      expectedNumberOfElements++;
+    }
+    return expectedNumberOfElements;
+  }
+
+}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a129e3e7/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServicesCapacitySched.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServicesCapacitySched.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServicesCapacitySched.java
index e37f76f..46d0a66 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServicesCapacitySched.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServicesCapacitySched.java
@@ -146,7 +146,7 @@ public class TestRMWebServicesCapacitySched extends JerseyTestBase {
     config.setUserLimitFactor(B2, 100.0f);
     config.setCapacity(B3, 0.5f);
     config.setUserLimitFactor(B3, 100.0f);
-    
+
     config.setQueues(A1, new String[] {"a1a", "a1b"});
     final String A1A = A1 + ".a1a";
     config.setCapacity(A1A, 85);
@@ -254,7 +254,7 @@ public class TestRMWebServicesCapacitySched extends JerseyTestBase {
     }
   }
 
-  public void verifySubQueueXML(Element qElem, String q, 
+  public void verifySubQueueXML(Element qElem, String q,
       float parentAbsCapacity, float parentAbsMaxCapacity)
       throws Exception {
     NodeList children = qElem.getChildNodes();
@@ -317,30 +317,34 @@ public class TestRMWebServicesCapacitySched extends JerseyTestBase {
 
   private void verifyClusterScheduler(JSONObject json) throws JSONException,
       Exception {
-    assertEquals("incorrect number of elements", 1, json.length());
+    assertEquals("incorrect number of elements in: " + json, 1, json.length());
     JSONObject info = json.getJSONObject("scheduler");
-    assertEquals("incorrect number of elements", 1, info.length());
+    assertEquals("incorrect number of elements in: " + info, 1, info.length());
     info = info.getJSONObject("schedulerInfo");
-    assertEquals("incorrect number of elements", 8, info.length());
+    assertEquals("incorrect number of elements in: " + info, 8, info.length());
     verifyClusterSchedulerGeneric(info.getString("type"),
         (float) info.getDouble("usedCapacity"),
         (float) info.getDouble("capacity"),
         (float) info.getDouble("maxCapacity"), info.getString("queueName"));
     JSONObject health = info.getJSONObject("health");
     assertNotNull(health);
-    assertEquals("incorrect number of elements", 3, health.length());
+    assertEquals("incorrect number of elements in: " + health, 3,
+        health.length());
     JSONArray operationsInfo = health.getJSONArray("operationsInfo");
-    assertEquals("incorrect number of elements", 4, operationsInfo.length());
+    assertEquals("incorrect number of elements in: " + health, 4,
+        operationsInfo.length());
     JSONArray lastRunDetails = health.getJSONArray("lastRunDetails");
-    assertEquals("incorrect number of elements", 3, lastRunDetails.length());
+    assertEquals("incorrect number of elements in: " + health, 3,
+        lastRunDetails.length());
 
     JSONArray arr = info.getJSONObject("queues").getJSONArray("queue");
-    assertEquals("incorrect number of elements", 2, arr.length());
+    assertEquals("incorrect number of elements in: " + arr, 2, arr.length());
 
     // test subqueues
     for (int i = 0; i < arr.length(); i++) {
       JSONObject obj = arr.getJSONObject(i);
-      String q = CapacitySchedulerConfiguration.ROOT + "." + obj.getString("queueName");
+      String q = CapacitySchedulerConfiguration.ROOT + "." +
+              obj.getString("queueName");
       verifySubQueue(obj, q, 100, 100);
     }
   }
@@ -355,7 +359,7 @@ public class TestRMWebServicesCapacitySched extends JerseyTestBase {
     assertTrue("queueName doesn't match", "root".matches(queueName));
   }
 
-  private void verifySubQueue(JSONObject info, String q, 
+  private void verifySubQueue(JSONObject info, String q,
       float parentAbsCapacity, float parentAbsMaxCapacity)
       throws JSONException, Exception {
     int numExpectedElements = 20;
@@ -464,7 +468,7 @@ public class TestRMWebServicesCapacitySched extends JerseyTestBase {
         csConf.getUserLimitFactor(q), info.userLimitFactor, 1e-3f);
   }
 
-  //Return a child Node of node with the tagname or null if none exists 
+  //Return a child Node of node with the tagname or null if none exists
   private Node getChildNodeByName(Node node, String tagname) {
     NodeList nodeList = node.getChildNodes();
     for (int i=0; i < nodeList.getLength(); ++i) {
@@ -514,7 +518,7 @@ public class TestRMWebServicesCapacitySched extends JerseyTestBase {
           for (int j=0; j<users.getLength(); ++j) {
             Node user = users.item(j);
             String username = getChildNodeByName(user, "username")
-              .getTextContent(); 
+                .getTextContent();
             assertTrue(username.equals("user1") || username.equals("user2"));
             //Should be a parsable integer
             Integer.parseInt(getChildNodeByName(getChildNodeByName(user,

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a129e3e7/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServicesConfigurationMutation.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServicesConfigurationMutation.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServicesConfigurationMutation.java
index 3d28f12..99b5648 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServicesConfigurationMutation.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServicesConfigurationMutation.java
@@ -42,6 +42,8 @@ import org.apache.hadoop.yarn.webapp.util.YarnWebServiceUtils;
 import org.junit.After;
 import org.junit.Before;
 import org.junit.Test;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 import javax.ws.rs.core.MediaType;
 import javax.ws.rs.core.Response.Status;
@@ -59,6 +61,8 @@ import static org.junit.Assert.assertNull;
  * Test scheduler configuration mutation via REST API.
  */
 public class TestRMWebServicesConfigurationMutation extends JerseyTestBase {
+  private static final Logger LOG = LoggerFactory
+          .getLogger(TestRMWebServicesConfigurationMutation.class);
 
   private static final File CONF_FILE = new File(new File("target",
       "test-classes"), YarnConfiguration.CS_CONFIGURATION_FILE);
@@ -396,6 +400,7 @@ public class TestRMWebServicesConfigurationMutation extends JerseyTestBase {
             .entity(YarnWebServiceUtils.toJson(updateInfo,
                 SchedConfUpdateInfo.class), MediaType.APPLICATION_JSON)
             .put(ClientResponse.class);
+    LOG.debug("Response headers: " + response.getHeaders());
     assertEquals(Status.OK.getStatusCode(), response.getStatus());
     CapacitySchedulerConfiguration newCSConf = cs.getConfiguration();
     assertEquals(0.2f, newCSConf

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a129e3e7/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServicesFairScheduler.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServicesFairScheduler.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServicesFairScheduler.java
index e77785b..58c72ee 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServicesFairScheduler.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServicesFairScheduler.java
@@ -6,9 +6,9 @@
  * to you under the Apache License, Version 2.0 (the
  * "License"); you may not use this file except in compliance
  * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
  * Unless required by applicable law or agreed to in writing, software
  * distributed under the License is distributed on an "AS IS" BASIS,
  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
@@ -16,13 +16,14 @@
  * limitations under the License.
  */
 
-package org.apache.hadoop.yarn.server.resourcemanager.webapp;
-
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.fail;
-
-import javax.ws.rs.core.MediaType;
+package org.apache.hadoop.yarn.server.resourcemanager.webapp.fairscheduler;
 
+import com.google.inject.Guice;
+import com.google.inject.servlet.ServletModule;
+import com.sun.jersey.api.client.ClientResponse;
+import com.sun.jersey.api.client.WebResource;
+import com.sun.jersey.guice.spi.container.servlet.GuiceContainer;
+import com.sun.jersey.test.framework.WebAppDescriptor;
 import org.apache.hadoop.http.JettyUtils;
 import org.apache.hadoop.yarn.conf.YarnConfiguration;
 import org.apache.hadoop.yarn.server.resourcemanager.MockRM;
@@ -30,6 +31,9 @@ import org.apache.hadoop.yarn.server.resourcemanager.ResourceManager;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.ResourceScheduler;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.fair.FairScheduler;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.fair.QueueManager;
+
+import org.apache.hadoop.yarn.server.resourcemanager.webapp.JAXBContextResolver;
+import org.apache.hadoop.yarn.server.resourcemanager.webapp.RMWebServices;
 import org.apache.hadoop.yarn.webapp.GenericExceptionHandler;
 import org.apache.hadoop.yarn.webapp.GuiceServletConfig;
 import org.apache.hadoop.yarn.webapp.JerseyTestBase;
@@ -38,18 +42,18 @@ import org.codehaus.jettison.json.JSONException;
 import org.codehaus.jettison.json.JSONObject;
 import org.junit.Before;
 import org.junit.Test;
+import javax.ws.rs.core.MediaType;
 
-import com.google.inject.Guice;
-import com.google.inject.servlet.ServletModule;
-import com.sun.jersey.api.client.ClientResponse;
-import com.sun.jersey.api.client.WebResource;
-import com.sun.jersey.guice.spi.container.servlet.GuiceContainer;
-import com.sun.jersey.test.framework.WebAppDescriptor;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.fail;
 
+/**
+ * Tests RM Webservices fair scheduler resources.
+ */
 public class TestRMWebServicesFairScheduler extends JerseyTestBase {
   private static MockRM rm;
   private static YarnConfiguration conf;
-  
+
   private static class WebServletModule extends ServletModule {
     @Override
     protected void configureServlets() {
@@ -58,7 +62,7 @@ public class TestRMWebServicesFairScheduler extends JerseyTestBase {
       bind(GenericExceptionHandler.class);
       conf = new YarnConfiguration();
       conf.setClass(YarnConfiguration.RM_SCHEDULER, FairScheduler.class,
-        ResourceScheduler.class);
+          ResourceScheduler.class);
       rm = new MockRM(conf);
       bind(ResourceManager.class).toInstance(rm);
       serve("/*").with(GuiceContainer.class);
@@ -66,32 +70,32 @@ public class TestRMWebServicesFairScheduler extends JerseyTestBase {
   }
 
   static {
-    GuiceServletConfig.setInjector(
-        Guice.createInjector(new WebServletModule()));
+    GuiceServletConfig
+        .setInjector(Guice.createInjector(new WebServletModule()));
   }
 
   @Before
   @Override
   public void setUp() throws Exception {
     super.setUp();
-    GuiceServletConfig.setInjector(
-        Guice.createInjector(new WebServletModule()));
+    GuiceServletConfig
+        .setInjector(Guice.createInjector(new WebServletModule()));
   }
 
   public TestRMWebServicesFairScheduler() {
     super(new WebAppDescriptor.Builder(
         "org.apache.hadoop.yarn.server.resourcemanager.webapp")
-        .contextListenerClass(GuiceServletConfig.class)
-        .filterClass(com.google.inject.servlet.GuiceFilter.class)
-        .contextPath("jersey-guice-filter").servletPath("/").build());
+            .contextListenerClass(GuiceServletConfig.class)
+            .filterClass(com.google.inject.servlet.GuiceFilter.class)
+            .contextPath("jersey-guice-filter").servletPath("/").build());
   }
-  
+
   @Test
-  public void testClusterScheduler() throws JSONException, Exception {
+  public void testClusterScheduler() throws JSONException {
     WebResource r = resource();
-    ClientResponse response = r.path("ws").path("v1").path("cluster")
-        .path("scheduler").accept(MediaType.APPLICATION_JSON)
-        .get(ClientResponse.class);
+    ClientResponse response =
+        r.path("ws").path("v1").path("cluster").path("scheduler")
+            .accept(MediaType.APPLICATION_JSON).get(ClientResponse.class);
     assertEquals(MediaType.APPLICATION_JSON_TYPE + "; " + JettyUtils.UTF_8,
         response.getType().toString());
     JSONObject json = response.getEntity(JSONObject.class);
@@ -99,52 +103,51 @@ public class TestRMWebServicesFairScheduler extends JerseyTestBase {
   }
 
   @Test
-  public void testClusterSchedulerSlash() throws JSONException, Exception {
+  public void testClusterSchedulerSlash() throws JSONException {
     WebResource r = resource();
-    ClientResponse response = r.path("ws").path("v1").path("cluster")
-        .path("scheduler/").accept(MediaType.APPLICATION_JSON)
-        .get(ClientResponse.class);
+    ClientResponse response =
+        r.path("ws").path("v1").path("cluster").path("scheduler/")
+            .accept(MediaType.APPLICATION_JSON).get(ClientResponse.class);
     assertEquals(MediaType.APPLICATION_JSON_TYPE + "; " + JettyUtils.UTF_8,
         response.getType().toString());
     JSONObject json = response.getEntity(JSONObject.class);
     verifyClusterScheduler(json);
   }
-  
+
   @Test
-  public void testClusterSchedulerWithSubQueues() throws JSONException,
-      Exception {
-    FairScheduler scheduler = (FairScheduler)rm.getResourceScheduler();
+  public void testClusterSchedulerWithSubQueues()
+      throws JSONException {
+    FairScheduler scheduler = (FairScheduler) rm.getResourceScheduler();
     QueueManager queueManager = scheduler.getQueueManager();
     // create LeafQueue
     queueManager.getLeafQueue("root.q.subqueue1", true);
     queueManager.getLeafQueue("root.q.subqueue2", true);
 
     WebResource r = resource();
-    ClientResponse response = r.path("ws").path("v1").path("cluster")
-        .path("scheduler").accept(MediaType.APPLICATION_JSON)
-        .get(ClientResponse.class);
+    ClientResponse response =
+        r.path("ws").path("v1").path("cluster").path("scheduler")
+            .accept(MediaType.APPLICATION_JSON).get(ClientResponse.class);
     assertEquals(MediaType.APPLICATION_JSON_TYPE + "; " + JettyUtils.UTF_8,
         response.getType().toString());
     JSONObject json = response.getEntity(JSONObject.class);
     JSONArray subQueueInfo = json.getJSONObject("scheduler")
         .getJSONObject("schedulerInfo").getJSONObject("rootQueue")
-        .getJSONObject("childQueues").getJSONArray("queue")
-        .getJSONObject(1).getJSONObject("childQueues").getJSONArray("queue");
+        .getJSONObject("childQueues").getJSONArray("queue").getJSONObject(1)
+        .getJSONObject("childQueues").getJSONArray("queue");
     // subQueueInfo is consist of subqueue1 and subqueue2 info
     assertEquals(2, subQueueInfo.length());
 
     // Verify 'childQueues' field is omitted from FairSchedulerLeafQueueInfo.
     try {
       subQueueInfo.getJSONObject(1).getJSONObject("childQueues");
-      fail("FairSchedulerQueueInfo should omit field 'childQueues'" +
-           "if child queue is empty.");
+      fail("FairSchedulerQueueInfo should omit field 'childQueues'"
+          + "if child queue is empty.");
     } catch (JSONException je) {
       assertEquals("JSONObject[\"childQueues\"] not found.", je.getMessage());
     }
   }
 
-  private void verifyClusterScheduler(JSONObject json) throws JSONException,
-      Exception {
+  private void verifyClusterScheduler(JSONObject json) throws JSONException {
     assertEquals("incorrect number of elements", 1, json.length());
     JSONObject info = json.getJSONObject("scheduler");
     assertEquals("incorrect number of elements", 1, info.length());

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a129e3e7/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServicesSchedulerActivities.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServicesSchedulerActivities.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServicesSchedulerActivities.java
index 1e61186..40cf483 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServicesSchedulerActivities.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServicesSchedulerActivities.java
@@ -457,7 +457,7 @@ public class TestRMWebServicesSchedulerActivities
       if (object.getClass() == JSONObject.class) {
         assertEquals("Number of allocations is wrong", 1, realValue);
       } else if (object.getClass() == JSONArray.class) {
-        assertEquals("Number of allocations is wrong",
+        assertEquals("Number of allocations is wrong in: " + object,
             ((JSONArray) object).length(), realValue);
       }
     }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a129e3e7/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/fairscheduler/CustomResourceTypesConfigurationProvider.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/fairscheduler/CustomResourceTypesConfigurationProvider.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/fairscheduler/CustomResourceTypesConfigurationProvider.java
new file mode 100644
index 0000000..bb1fce0
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/fairscheduler/CustomResourceTypesConfigurationProvider.java
@@ -0,0 +1,138 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.server.resourcemanager.webapp.fairscheduler;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.yarn.LocalConfigurationProvider;
+import org.apache.hadoop.yarn.conf.YarnConfiguration;
+import org.apache.hadoop.yarn.exceptions.YarnException;
+
+import java.io.ByteArrayInputStream;
+import java.io.IOException;
+import java.io.InputStream;
+import java.util.List;
+import java.util.stream.Collectors;
+import java.util.stream.IntStream;
+
+import static java.util.stream.Collectors.toList;
+
+/**
+ * This class can generate an XML configuration file of custom resource types.
+ * See createInitialResourceTypes for the default values. All custom resource
+ * type is prefixed with CUSTOM_RESOURCE_PREFIX. Please use the
+ * getConfigurationInputStream method to get an InputStream of the XML. If you
+ * want to have different number of resources in your tests, please see usages
+ * of this class in this test class:
+ * {@link TestRMWebServicesFairSchedulerCustomResourceTypes}
+ *
+ */
+public class CustomResourceTypesConfigurationProvider
+    extends LocalConfigurationProvider {
+
+  private static class CustomResourceTypes {
+    private int count;
+    private String xml;
+
+    CustomResourceTypes(String xml, int count) {
+      this.xml = xml;
+      this.count = count;
+    }
+
+    public int getCount() {
+      return count;
+    }
+
+    public String getXml() {
+      return xml;
+    }
+  }
+
+  private static final String CUSTOM_RESOURCE_PREFIX = "customResource-";
+
+  private static CustomResourceTypes customResourceTypes =
+      createInitialResourceTypes();
+
+  private static CustomResourceTypes createInitialResourceTypes() {
+    return createCustomResourceTypes(2);
+  }
+
+  private static CustomResourceTypes createCustomResourceTypes(int count) {
+    List<String> resourceTypeNames = generateResourceTypeNames(count);
+
+    List<String> resourceUnitXmlElements = IntStream.range(0, count)
+            .boxed()
+            .map(i -> getResourceUnitsXml(resourceTypeNames.get(i)))
+            .collect(toList());
+
+    StringBuilder sb = new StringBuilder("<configuration>\n");
+    sb.append(getResourceTypesXml(resourceTypeNames));
+
+    for (String resourceUnitXml : resourceUnitXmlElements) {
+      sb.append(resourceUnitXml);
+
+    }
+    sb.append("</configuration>");
+
+    return new CustomResourceTypes(sb.toString(), count);
+  }
+
+  private static List<String> generateResourceTypeNames(int count) {
+    return IntStream.range(0, count)
+            .boxed()
+            .map(i -> CUSTOM_RESOURCE_PREFIX + i)
+            .collect(toList());
+  }
+
+  private static String getResourceUnitsXml(String resource) {
+    return "<property>\n" + "<name>yarn.resource-types." + resource
+        + ".units</name>\n" + "<value>k</value>\n" + "</property>\n";
+  }
+
+  private static String getResourceTypesXml(List<String> resources) {
+    final String resourceTypes = makeCommaSeparatedString(resources);
+
+    return "<property>\n" + "<name>yarn.resource-types</name>\n" + "<value>"
+        + resourceTypes + "</value>\n" + "</property>\n";
+  }
+
+  private static String makeCommaSeparatedString(List<String> resources) {
+    return resources.stream().collect(Collectors.joining(","));
+  }
+
+  @Override
+  public InputStream getConfigurationInputStream(Configuration bootstrapConf,
+      String name) throws YarnException, IOException {
+    if (YarnConfiguration.RESOURCE_TYPES_CONFIGURATION_FILE.equals(name)) {
+      return new ByteArrayInputStream(
+          customResourceTypes.getXml().getBytes());
+    } else {
+      return super.getConfigurationInputStream(bootstrapConf, name);
+    }
+  }
+
+  public static void reset() {
+    customResourceTypes = createInitialResourceTypes();
+  }
+
+  public static void setNumberOfResourceTypes(int count) {
+    customResourceTypes = createCustomResourceTypes(count);
+  }
+
+  public static List<String> getCustomResourceTypes() {
+    return generateResourceTypeNames(customResourceTypes.getCount());
+  }
+}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a129e3e7/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/fairscheduler/FairSchedulerJsonVerifications.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/fairscheduler/FairSchedulerJsonVerifications.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/fairscheduler/FairSchedulerJsonVerifications.java
new file mode 100644
index 0000000..924411a
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/fairscheduler/FairSchedulerJsonVerifications.java
@@ -0,0 +1,139 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.server.resourcemanager.webapp.fairscheduler;
+
+import com.google.common.collect.Sets;
+import org.apache.hadoop.yarn.api.protocolrecords.ResourceTypes;
+import org.apache.hadoop.yarn.api.records.ResourceInformation;
+import org.codehaus.jettison.json.JSONArray;
+import org.codehaus.jettison.json.JSONException;
+import org.codehaus.jettison.json.JSONObject;
+
+import java.util.List;
+import java.util.Set;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertNotNull;
+import static org.junit.Assert.assertTrue;
+
+/**
+ * This test helper class is primarily used by
+ * {@link TestRMWebServicesFairSchedulerCustomResourceTypes}.
+ */
+public class FairSchedulerJsonVerifications {
+
+  private static final Set<String> RESOURCE_FIELDS =
+      Sets.newHashSet("minResources", "amUsedResources", "amMaxResources",
+          "fairResources", "clusterResources", "reservedResources",
+              "maxResources", "usedResources", "steadyFairResources",
+              "demandResources");
+  private final Set<String> customResourceTypes;
+
+  FairSchedulerJsonVerifications(List<String> customResourceTypes) {
+    this.customResourceTypes = Sets.newHashSet(customResourceTypes);
+  }
+
+  public void verify(JSONObject jsonObject) {
+    try {
+      verifyResourcesContainDefaultResourceTypes(jsonObject, RESOURCE_FIELDS);
+      verifyResourcesContainCustomResourceTypes(jsonObject, RESOURCE_FIELDS);
+    } catch (JSONException e) {
+      throw new RuntimeException(e);
+    }
+  }
+
+  private void verifyResourcesContainDefaultResourceTypes(JSONObject queue,
+      Set<String> resourceCategories) throws JSONException {
+    for (String resourceCategory : resourceCategories) {
+      boolean hasResourceCategory = queue.has(resourceCategory);
+      assertTrue("Queue " + queue + " does not have resource category key: "
+          + resourceCategory, hasResourceCategory);
+      verifyResourceContainsDefaultResourceTypes(
+          queue.getJSONObject(resourceCategory));
+    }
+  }
+
+  private void verifyResourceContainsDefaultResourceTypes(
+      JSONObject jsonObject) {
+    Object memory = jsonObject.opt("memory");
+    Object vCores = jsonObject.opt("vCores");
+
+    assertNotNull("Key 'memory' not found in: " + jsonObject, memory);
+    assertNotNull("Key 'vCores' not found in: " + jsonObject, vCores);
+  }
+
+  private void verifyResourcesContainCustomResourceTypes(JSONObject queue,
+      Set<String> resourceCategories) throws JSONException {
+    for (String resourceCategory : resourceCategories) {
+      assertTrue("Queue " + queue + " does not have resource category key: "
+          + resourceCategory, queue.has(resourceCategory));
+      verifyResourceContainsAllCustomResourceTypes(
+          queue.getJSONObject(resourceCategory));
+    }
+  }
+
+  private void verifyResourceContainsAllCustomResourceTypes(
+      JSONObject resourceCategory) throws JSONException {
+    assertTrue("resourceCategory does not have resourceInformations: "
+        + resourceCategory, resourceCategory.has("resourceInformations"));
+
+    JSONObject resourceInformations =
+        resourceCategory.getJSONObject("resourceInformations");
+    assertTrue(
+        "resourceInformations does not have resourceInformation object: "
+            + resourceInformations,
+        resourceInformations.has("resourceInformation"));
+    JSONArray customResources =
+        resourceInformations.getJSONArray("resourceInformation");
+
+    // customResources will include vcores / memory as well
+    assertEquals(
+        "Different number of custom resource types found than expected",
+        customResourceTypes.size(), customResources.length() - 2);
+
+    for (int i = 0; i < customResources.length(); i++) {
+      JSONObject customResource = customResources.getJSONObject(i);
+      assertTrue("Resource type does not have name field: " + customResource,
+          customResource.has("name"));
+      assertTrue("Resource type does not have name resourceType field: "
+          + customResource, customResource.has("resourceType"));
+      assertTrue(
+          "Resource type does not have name units field: " + customResource,
+          customResource.has("units"));
+      assertTrue(
+          "Resource type does not have name value field: " + customResource,
+          customResource.has("value"));
+
+      String name = customResource.getString("name");
+      String unit = customResource.getString("units");
+      String resourceType = customResource.getString("resourceType");
+      Long value = customResource.getLong("value");
+
+      if (ResourceInformation.MEMORY_URI.equals(name)
+          || ResourceInformation.VCORES_URI.equals(name)) {
+        continue;
+      }
+
+      assertTrue("Custom resource type " + name + " not found",
+          customResourceTypes.contains(name));
+      assertEquals("k", unit);
+      assertEquals(ResourceTypes.COUNTABLE,
+          ResourceTypes.valueOf(resourceType));
+      assertNotNull("Custom resource value " + value + " is null!", value);
+    }
+  }
+}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a129e3e7/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/fairscheduler/FairSchedulerXmlVerifications.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/fairscheduler/FairSchedulerXmlVerifications.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/fairscheduler/FairSchedulerXmlVerifications.java
new file mode 100644
index 0000000..63ae7b7
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/fairscheduler/FairSchedulerXmlVerifications.java
@@ -0,0 +1,153 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.server.resourcemanager.webapp.fairscheduler;
+
+
+import com.google.common.collect.Sets;
+import org.apache.hadoop.yarn.api.protocolrecords.ResourceTypes;
+import org.apache.hadoop.yarn.api.records.ResourceInformation;
+import org.w3c.dom.Document;
+import org.w3c.dom.Element;
+import org.w3c.dom.Node;
+import org.w3c.dom.NodeList;
+
+import java.util.List;
+import java.util.Set;
+
+import static org.apache.hadoop.yarn.server.resourcemanager.webapp.helper.XmlCustomResourceTypeTestCase.toXml;
+import static org.apache.hadoop.yarn.webapp.WebServicesTestUtils.getXmlLong;
+import static org.apache.hadoop.yarn.webapp.WebServicesTestUtils.getXmlString;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertNotNull;
+import static org.junit.Assert.assertTrue;
+
+/**
+ * This test helper class is primarily used by
+ * {@link TestRMWebServicesFairSchedulerCustomResourceTypes}.
+ */
+public class FairSchedulerXmlVerifications {
+
+  private static final Set<String> RESOURCE_FIELDS = Sets.newHashSet(
+      "minResources", "amUsedResources", "amMaxResources", "fairResources",
+      "clusterResources", "reservedResources", "maxResources", "usedResources",
+      "steadyFairResources", "demandResources");
+  private final Set<String> customResourceTypes;
+
+  FairSchedulerXmlVerifications(List<String> customResourceTypes) {
+    this.customResourceTypes = Sets.newHashSet(customResourceTypes);
+  }
+
+  public void verify(Element element) {
+    verifyResourcesContainDefaultResourceTypes(element, RESOURCE_FIELDS);
+    verifyResourcesContainCustomResourceTypes(element, RESOURCE_FIELDS);
+  }
+
+  private void verifyResourcesContainDefaultResourceTypes(Element queue,
+      Set<String> resourceCategories) {
+    for (String resourceCategory : resourceCategories) {
+      boolean hasResourceCategory = hasChild(queue, resourceCategory);
+      assertTrue("Queue " + queue + " does not have resource category key: "
+          + resourceCategory, hasResourceCategory);
+      verifyResourceContainsDefaultResourceTypes(
+              (Element) queue.getElementsByTagName(resourceCategory).item(0));
+    }
+  }
+
+  private void verifyResourceContainsDefaultResourceTypes(
+      Element element) {
+    Object memory = opt(element, "memory");
+    Object vCores = opt(element, "vCores");
+
+    assertNotNull("Key 'memory' not found in: " + element, memory);
+    assertNotNull("Key 'vCores' not found in: " + element, vCores);
+  }
+
+  private void verifyResourcesContainCustomResourceTypes(Element queue,
+      Set<String> resourceCategories) {
+    for (String resourceCategory : resourceCategories) {
+      assertTrue("Queue " + queue + " does not have key for resourceCategory: "
+          + resourceCategory, hasChild(queue, resourceCategory));
+      verifyResourceContainsCustomResourceTypes(
+              (Element) queue.getElementsByTagName(resourceCategory).item(0));
+    }
+  }
+
+  private void verifyResourceContainsCustomResourceTypes(
+      Element resourceCategory) {
+    assertEquals(
+        toXml(resourceCategory)
+            + " should have only one resourceInformations child!",
+        1, resourceCategory.getElementsByTagName("resourceInformations")
+            .getLength());
+    Element resourceInformations = (Element) resourceCategory
+        .getElementsByTagName("resourceInformations").item(0);
+
+    NodeList customResources =
+        resourceInformations.getElementsByTagName("resourceInformation");
+
+    // customResources will include vcores / memory as well
+    assertEquals(
+        "Different number of custom resource types found than expected",
+        customResourceTypes.size(), customResources.getLength() - 2);
+
+    for (int i = 0; i < customResources.getLength(); i++) {
+      Element customResource = (Element) customResources.item(i);
+      String name = getXmlString(customResource, "name");
+      String unit = getXmlString(customResource, "units");
+      String resourceType = getXmlString(customResource, "resourceType");
+      Long value = getXmlLong(customResource, "value");
+
+      if (ResourceInformation.MEMORY_URI.equals(name)
+          || ResourceInformation.VCORES_URI.equals(name)) {
+        continue;
+      }
+
+      assertTrue("Custom resource type " + name + " not found",
+          customResourceTypes.contains(name));
+      assertEquals("k", unit);
+      assertEquals(ResourceTypes.COUNTABLE,
+          ResourceTypes.valueOf(resourceType));
+      assertNotNull("Resource value should not be null for resource type "
+          + resourceType + ", listing xml contents: " + toXml(customResource),
+          value);
+    }
+  }
+
+  private Object opt(Node node, String child) {
+    NodeList nodes = getElementsByTagNameInternal(node, child);
+    if (nodes.getLength() > 0) {
+      return nodes.item(0);
+    }
+
+    return null;
+  }
+
+  private boolean hasChild(Node node, String child) {
+    return getElementsByTagNameInternal(node, child).getLength() > 0;
+  }
+
+  private NodeList getElementsByTagNameInternal(Node node, String child) {
+    if (node instanceof Element) {
+      return ((Element) node).getElementsByTagName(child);
+    } else if (node instanceof Document) {
+      return ((Document) node).getElementsByTagName(child);
+    } else {
+      throw new IllegalStateException("Unknown type of wrappedObject: " + node
+          + ", type: " + node.getClass());
+    }
+  }
+}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a129e3e7/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/fairscheduler/TestRMWebServicesFairSchedulerCustomResourceTypes.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/fairscheduler/TestRMWebServicesFairSchedulerCustomResourceTypes.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/fairscheduler/TestRMWebServicesFairSchedulerCustomResourceTypes.java
new file mode 100644
index 0000000..de4d5a1
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/fairscheduler/TestRMWebServicesFairSchedulerCustomResourceTypes.java
@@ -0,0 +1,271 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.server.resourcemanager.webapp.fairscheduler;
+
+import com.google.inject.Guice;
+import com.google.inject.servlet.ServletModule;
+import com.sun.jersey.api.client.ClientResponse;
+import com.sun.jersey.api.client.WebResource;
+import com.sun.jersey.guice.spi.container.servlet.GuiceContainer;
+import com.sun.jersey.test.framework.WebAppDescriptor;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.yarn.api.records.Resource;
+import org.apache.hadoop.yarn.conf.YarnConfiguration;
+import org.apache.hadoop.yarn.server.resourcemanager.MockRM;
+import org.apache.hadoop.yarn.server.resourcemanager.ResourceManager;
+import org.apache.hadoop.yarn.server.resourcemanager.scheduler.ResourceScheduler;
+import org.apache.hadoop.yarn.server.resourcemanager.scheduler.fair.FSLeafQueue;
+import org.apache.hadoop.yarn.server.resourcemanager.scheduler.fair.FairScheduler;
+import org.apache.hadoop.yarn.server.resourcemanager.scheduler.fair.QueueManager;
+import org.apache.hadoop.yarn.server.resourcemanager.webapp.JAXBContextResolver;
+import org.apache.hadoop.yarn.server.resourcemanager.webapp.RMWebServices;
+import org.apache.hadoop.yarn.server.resourcemanager.webapp.helper.*;
+import org.apache.hadoop.yarn.util.resource.ResourceUtils;
+import org.apache.hadoop.yarn.webapp.GenericExceptionHandler;
+import org.apache.hadoop.yarn.webapp.GuiceServletConfig;
+import org.apache.hadoop.yarn.webapp.JerseyTestBase;
+import org.codehaus.jettison.json.JSONArray;
+import org.codehaus.jettison.json.JSONException;
+import org.codehaus.jettison.json.JSONObject;
+import org.junit.After;
+import org.junit.Before;
+import org.junit.Test;
+import org.w3c.dom.Element;
+import javax.ws.rs.core.MediaType;
+import java.lang.reflect.Method;
+import java.util.List;
+import java.util.Map;
+import java.util.function.Function;
+import java.util.stream.Collectors;
+
+import static org.junit.Assert.assertEquals;
+
+/**
+ * This class is to test response representations of queue resources,
+ * explicitly setting custom resource types. with the help of
+ * {@link CustomResourceTypesConfigurationProvider}
+ */
+public class TestRMWebServicesFairSchedulerCustomResourceTypes
+    extends JerseyTestBase {
+  private static MockRM rm;
+  private static YarnConfiguration conf;
+
+  private static class WebServletModule extends ServletModule {
+    @Override
+    protected void configureServlets() {
+      bind(JAXBContextResolver.class);
+      bind(RMWebServices.class);
+      bind(GenericExceptionHandler.class);
+      conf = new YarnConfiguration();
+      conf.setClass(YarnConfiguration.RM_SCHEDULER, FairScheduler.class,
+          ResourceScheduler.class);
+      initResourceTypes(conf);
+      rm = new MockRM(conf);
+      bind(ResourceManager.class).toInstance(rm);
+      serve("/*").with(GuiceContainer.class);
+    }
+
+    private void initResourceTypes(YarnConfiguration conf) {
+      conf.set(YarnConfiguration.RM_CONFIGURATION_PROVIDER_CLASS,
+          CustomResourceTypesConfigurationProvider.class.getName());
+      ResourceUtils.resetResourceTypes(conf);
+    }
+  }
+
+  @Before
+  @Override
+  public void setUp() throws Exception {
+    super.setUp();
+    createInjectorForWebServletModule();
+  }
+
+  @After
+  public void tearDown() {
+    ResourceUtils.resetResourceTypes(new Configuration());
+  }
+
+  private void createInjectorForWebServletModule() {
+    GuiceServletConfig
+        .setInjector(Guice.createInjector(new WebServletModule()));
+  }
+
+  @After
+  public void teardown() {
+    CustomResourceTypesConfigurationProvider.reset();
+  }
+
+  public TestRMWebServicesFairSchedulerCustomResourceTypes() {
+    super(new WebAppDescriptor.Builder(
+        "org.apache.hadoop.yarn.server.resourcemanager.webapp")
+            .contextListenerClass(GuiceServletConfig.class)
+            .filterClass(com.google.inject.servlet.GuiceFilter.class)
+            .contextPath("jersey-guice-filter").servletPath("/").build());
+  }
+
+  @Test
+  public void testClusterSchedulerWithCustomResourceTypesJson() {
+    FairScheduler scheduler = (FairScheduler) rm.getResourceScheduler();
+    QueueManager queueManager = scheduler.getQueueManager();
+    // create LeafQueues
+    queueManager.getLeafQueue("root.q.subqueue1", true);
+    queueManager.getLeafQueue("root.q.subqueue2", true);
+
+    FSLeafQueue subqueue1 =
+        queueManager.getLeafQueue("root.q.subqueue1", false);
+    incrementUsedResourcesOnQueue(subqueue1, 33L);
+
+    WebResource path =
+        resource().path("ws").path("v1").path("cluster").path("scheduler");
+    ClientResponse response =
+        path.accept(MediaType.APPLICATION_JSON).get(ClientResponse.class);
+
+    verifyJsonResponse(path, response,
+            CustomResourceTypesConfigurationProvider.getCustomResourceTypes());
+  }
+
+  @Test
+  public void testClusterSchedulerWithCustomResourceTypesXml() {
+    FairScheduler scheduler = (FairScheduler) rm.getResourceScheduler();
+    QueueManager queueManager = scheduler.getQueueManager();
+    // create LeafQueues
+    queueManager.getLeafQueue("root.q.subqueue1", true);
+    queueManager.getLeafQueue("root.q.subqueue2", true);
+
+    FSLeafQueue subqueue1 =
+        queueManager.getLeafQueue("root.q.subqueue1", false);
+    incrementUsedResourcesOnQueue(subqueue1, 33L);
+
+    WebResource path =
+        resource().path("ws").path("v1").path("cluster").path("scheduler");
+    ClientResponse response =
+        path.accept(MediaType.APPLICATION_XML).get(ClientResponse.class);
+
+    verifyXmlResponse(path, response,
+        CustomResourceTypesConfigurationProvider.getCustomResourceTypes());
+  }
+
+  @Test
+  public void testClusterSchedulerWithElevenCustomResourceTypesXml() {
+    CustomResourceTypesConfigurationProvider.setNumberOfResourceTypes(11);
+    createInjectorForWebServletModule();
+
+    FairScheduler scheduler = (FairScheduler) rm.getResourceScheduler();
+    QueueManager queueManager = scheduler.getQueueManager();
+    // create LeafQueues
+    queueManager.getLeafQueue("root.q.subqueue1", true);
+    queueManager.getLeafQueue("root.q.subqueue2", true);
+
+    FSLeafQueue subqueue1 =
+        queueManager.getLeafQueue("root.q.subqueue1", false);
+    incrementUsedResourcesOnQueue(subqueue1, 33L);
+
+    WebResource path =
+        resource().path("ws").path("v1").path("cluster").path("scheduler");
+    ClientResponse response =
+        path.accept(MediaType.APPLICATION_XML).get(ClientResponse.class);
+
+    verifyXmlResponse(path, response,
+        CustomResourceTypesConfigurationProvider.getCustomResourceTypes());
+  }
+
+  @Test
+  public void testClusterSchedulerElevenWithCustomResourceTypesJson() {
+    CustomResourceTypesConfigurationProvider.setNumberOfResourceTypes(11);
+    createInjectorForWebServletModule();
+
+    FairScheduler scheduler = (FairScheduler) rm.getResourceScheduler();
+    QueueManager queueManager = scheduler.getQueueManager();
+    // create LeafQueues
+    queueManager.getLeafQueue("root.q.subqueue1", true);
+    queueManager.getLeafQueue("root.q.subqueue2", true);
+
+    FSLeafQueue subqueue1 =
+        queueManager.getLeafQueue("root.q.subqueue1", false);
+    incrementUsedResourcesOnQueue(subqueue1, 33L);
+
+    WebResource path =
+        resource().path("ws").path("v1").path("cluster").path("scheduler");
+    ClientResponse response =
+        path.accept(MediaType.APPLICATION_JSON).get(ClientResponse.class);
+
+    verifyJsonResponse(path, response,
+        CustomResourceTypesConfigurationProvider.getCustomResourceTypes());
+  }
+
+  private void verifyJsonResponse(WebResource path, ClientResponse response,
+      List<String> customResourceTypes) {
+    JsonCustomResourceTypeTestcase testCase =
+        new JsonCustomResourceTypeTestcase(path,
+            new BufferedClientResponse(response));
+    testCase.verify(json -> {
+      try {
+        JSONArray queues = json.getJSONObject("scheduler")
+            .getJSONObject("schedulerInfo").getJSONObject("rootQueue")
+            .getJSONObject("childQueues").getJSONArray("queue");
+
+        // childQueueInfo consists of subqueue1 and subqueue2 info
+        assertEquals(2, queues.length());
+        JSONObject firstChildQueue = queues.getJSONObject(0);
+        new FairSchedulerJsonVerifications(customResourceTypes)
+            .verify(firstChildQueue);
+      } catch (JSONException e) {
+        throw new RuntimeException(e);
+      }
+    });
+  }
+
+  private void verifyXmlResponse(WebResource path, ClientResponse response,
+          List<String> customResourceTypes) {
+    XmlCustomResourceTypeTestCase testCase = new XmlCustomResourceTypeTestCase(
+        path, new BufferedClientResponse(response));
+
+    testCase.verify(xml -> {
+      Element scheduler =
+          (Element) xml.getElementsByTagName("scheduler").item(0);
+      Element schedulerInfo =
+          (Element) scheduler.getElementsByTagName("schedulerInfo").item(0);
+      Element rootQueue =
+          (Element) schedulerInfo.getElementsByTagName("rootQueue").item(0);
+
+      Element childQueues =
+          (Element) rootQueue.getElementsByTagName("childQueues").item(0);
+      Element queue =
+          (Element) childQueues.getElementsByTagName("queue").item(0);
+      new FairSchedulerXmlVerifications(customResourceTypes).verify(queue);
+    });
+  }
+
+  private void incrementUsedResourcesOnQueue(final FSLeafQueue queue,
+      final long value) {
+    try {
+      Method incUsedResourceMethod = queue.getClass().getSuperclass()
+          .getDeclaredMethod("incUsedResource", Resource.class);
+      incUsedResourceMethod.setAccessible(true);
+
+      Map<String, Long> customResources =
+          CustomResourceTypesConfigurationProvider.getCustomResourceTypes()
+              .stream()
+              .collect(Collectors.toMap(Function.identity(), v -> value));
+
+      incUsedResourceMethod.invoke(queue,
+          Resource.newInstance(20, 30, customResources));
+    } catch (Exception e) {
+      throw new RuntimeException(e);
+    }
+  }
+
+}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a129e3e7/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/helper/AppInfoJsonVerifications.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/helper/AppInfoJsonVerifications.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/helper/AppInfoJsonVerifications.java
new file mode 100644
index 0000000..4ab1443
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/helper/AppInfoJsonVerifications.java
@@ -0,0 +1,123 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.server.resourcemanager.webapp.helper;
+
+import org.apache.hadoop.yarn.server.resourcemanager.ResourceManager;
+import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMApp;
+import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.AppInfo;
+import org.codehaus.jettison.json.JSONException;
+import org.codehaus.jettison.json.JSONObject;
+import static org.apache.hadoop.yarn.webapp.WebServicesTestUtils.checkStringEqual;
+import static org.apache.hadoop.yarn.webapp.WebServicesTestUtils.checkStringMatch;
+import static org.junit.Assert.*;
+
+/**
+ * Contains all value verifications that are needed to verify {@link AppInfo}
+ * JSON objects.
+ */
+public final class AppInfoJsonVerifications {
+
+  private AppInfoJsonVerifications() {
+    //utility class
+  }
+
+  /**
+   * Tests whether {@link AppInfo} representation object contains the required
+   * values as per defined in the specified app parameter.
+   * @param  app  an RMApp instance that contains the required values
+   *              to test against.
+   */
+  public static void verify(JSONObject info, RMApp app) throws JSONException {
+    checkStringMatch("id", app.getApplicationId().toString(),
+        info.getString("id"));
+    checkStringMatch("user", app.getUser(), info.getString("user"));
+    checkStringMatch("name", app.getName(), info.getString("name"));
+    checkStringMatch("applicationType", app.getApplicationType(),
+        info.getString("applicationType"));
+    checkStringMatch("queue", app.getQueue(), info.getString("queue"));
+    assertEquals("priority doesn't match", 0, info.getInt("priority"));
+    checkStringMatch("state", app.getState().toString(),
+        info.getString("state"));
+    checkStringMatch("finalStatus", app.getFinalApplicationStatus().toString(),
+        info.getString("finalStatus"));
+    assertEquals("progress doesn't match", 0,
+        (float) info.getDouble("progress"), 0.0);
+    if ("UNASSIGNED".equals(info.getString("trackingUI"))) {
+      checkStringMatch("trackingUI", "UNASSIGNED",
+          info.getString("trackingUI"));
+    }
+    checkStringEqual("diagnostics", app.getDiagnostics().toString(),
+        info.getString("diagnostics"));
+    assertEquals("clusterId doesn't match",
+        ResourceManager.getClusterTimeStamp(), info.getLong("clusterId"));
+    assertEquals("startedTime doesn't match", app.getStartTime(),
+        info.getLong("startedTime"));
+    assertEquals("finishedTime doesn't match", app.getFinishTime(),
+        info.getLong("finishedTime"));
+    assertTrue("elapsed time not greater than 0",
+        info.getLong("elapsedTime") > 0);
+    checkStringMatch("amHostHttpAddress",
+        app.getCurrentAppAttempt().getMasterContainer().getNodeHttpAddress(),
+        info.getString("amHostHttpAddress"));
+    assertTrue("amContainerLogs doesn't match",
+        info.getString("amContainerLogs").startsWith("http://"));
+    assertTrue("amContainerLogs doesn't contain user info",
+        info.getString("amContainerLogs").endsWith("/" + app.getUser()));
+    assertEquals("allocatedMB doesn't match", 1024, info.getInt("allocatedMB"));
+    assertEquals("allocatedVCores doesn't match", 1,
+        info.getInt("allocatedVCores"));
+    assertEquals("queueUsagePerc doesn't match", 50.0f,
+        (float) info.getDouble("queueUsagePercentage"), 0.01f);
+    assertEquals("clusterUsagePerc doesn't match", 50.0f,
+        (float) info.getDouble("clusterUsagePercentage"), 0.01f);
+    assertEquals("numContainers doesn't match", 1,
+        info.getInt("runningContainers"));
+    assertNotNull("preemptedResourceSecondsMap should not be null",
+        info.getJSONObject("preemptedResourceSecondsMap"));
+    assertEquals("preemptedResourceMB doesn't match",
+        app.getRMAppMetrics().getResourcePreempted().getMemorySize(),
+        info.getInt("preemptedResourceMB"));
+    assertEquals("preemptedResourceVCores doesn't match",
+        app.getRMAppMetrics().getResourcePreempted().getVirtualCores(),
+        info.getInt("preemptedResourceVCores"));
+    assertEquals("numNonAMContainerPreempted doesn't match",
+        app.getRMAppMetrics().getNumNonAMContainersPreempted(),
+        info.getInt("numNonAMContainerPreempted"));
+    assertEquals("numAMContainerPreempted doesn't match",
+        app.getRMAppMetrics().getNumAMContainersPreempted(),
+        info.getInt("numAMContainerPreempted"));
+    assertEquals("Log aggregation Status doesn't match",
+        app.getLogAggregationStatusForAppReport().toString(),
+        info.getString("logAggregationStatus"));
+    assertEquals("unmanagedApplication doesn't match",
+        app.getApplicationSubmissionContext().getUnmanagedAM(),
+        info.getBoolean("unmanagedApplication"));
+
+    if (app.getApplicationSubmissionContext()
+        .getNodeLabelExpression() != null) {
+      assertEquals("appNodeLabelExpression doesn't match",
+          app.getApplicationSubmissionContext().getNodeLabelExpression(),
+          info.getString("appNodeLabelExpression"));
+    }
+    assertEquals("amNodeLabelExpression doesn't match",
+        app.getAMResourceRequests().get(0).getNodeLabelExpression(),
+        info.getString("amNodeLabelExpression"));
+    assertEquals("amRPCAddress",
+        AppInfo.getAmRPCAddressFromRMAppAttempt(app.getCurrentAppAttempt()),
+        info.getString("amRPCAddress"));
+  }
+}


---------------------------------------------------------------------
To unsubscribe, e-mail: common-commits-unsubscribe@hadoop.apache.org
For additional commands, e-mail: common-commits-help@hadoop.apache.org


[42/50] [abbrv] hadoop git commit: HDFS-13121. NPE when request file descriptors when SC read. Contributed by Zsolt Venczel.

Posted by vi...@apache.org.
HDFS-13121. NPE when request file descriptors when SC read. Contributed by Zsolt Venczel.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/0247cb63
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/0247cb63
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/0247cb63

Branch: refs/heads/HDFS-12090
Commit: 0247cb6318507afe06816e337a19f396afc53efa
Parents: 061b168
Author: Wei-Chiu Chuang <we...@apache.org>
Authored: Fri Jul 6 14:59:49 2018 -0700
Committer: Wei-Chiu Chuang <we...@apache.org>
Committed: Fri Jul 6 14:59:49 2018 -0700

----------------------------------------------------------------------
 .../hdfs/client/impl/BlockReaderFactory.java    |  5 ++
 .../shortcircuit/TestShortCircuitCache.java     | 89 ++++++++++++++++++++
 2 files changed, 94 insertions(+)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/0247cb63/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/client/impl/BlockReaderFactory.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/client/impl/BlockReaderFactory.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/client/impl/BlockReaderFactory.java
index 1003b95..ce43185 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/client/impl/BlockReaderFactory.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/client/impl/BlockReaderFactory.java
@@ -598,6 +598,11 @@ public class BlockReaderFactory implements ShortCircuitReplicaCreator {
       sock.recvFileInputStreams(fis, buf, 0, buf.length);
       ShortCircuitReplica replica = null;
       try {
+        if (fis[0] == null || fis[1] == null) {
+          throw new IOException("the datanode " + datanode + " failed to " +
+              "pass a file descriptor (might have reached open file limit).");
+        }
+
         ExtendedBlockId key =
             new ExtendedBlockId(block.getBlockId(), block.getBlockPoolId());
         if (buf[0] == USE_RECEIPT_VERIFICATION.getNumber()) {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/0247cb63/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/shortcircuit/TestShortCircuitCache.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/shortcircuit/TestShortCircuitCache.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/shortcircuit/TestShortCircuitCache.java
index 4e2cede..ac29c3c 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/shortcircuit/TestShortCircuitCache.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/shortcircuit/TestShortCircuitCache.java
@@ -42,6 +42,10 @@ import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FSDataInputStream;
 import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hdfs.ClientContext;
+import org.apache.hadoop.hdfs.DFSClient;
+import org.apache.hadoop.hdfs.DFSUtilClient;
+import org.apache.hadoop.hdfs.PeerCache;
 import org.apache.hadoop.hdfs.client.impl.BlockReaderFactory;
 import org.apache.hadoop.hdfs.client.impl.BlockReaderTestUtil;
 import org.apache.hadoop.hdfs.DFSInputStream;
@@ -50,10 +54,12 @@ import org.apache.hadoop.hdfs.DistributedFileSystem;
 import org.apache.hadoop.hdfs.ExtendedBlockId;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys;
+import org.apache.hadoop.hdfs.client.impl.DfsClientConf;
 import org.apache.hadoop.hdfs.net.DomainPeer;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo.DatanodeInfoBuilder;
 import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
+import org.apache.hadoop.hdfs.protocol.LocatedBlock;
 import org.apache.hadoop.hdfs.server.datanode.BlockMetadataHeader;
 import org.apache.hadoop.hdfs.server.datanode.DataNodeFaultInjector;
 import org.apache.hadoop.hdfs.server.datanode.ShortCircuitRegistry;
@@ -66,9 +72,11 @@ import org.apache.hadoop.hdfs.shortcircuit.ShortCircuitShm.ShmId;
 import org.apache.hadoop.hdfs.shortcircuit.ShortCircuitShm.Slot;
 import org.apache.hadoop.io.IOUtils;
 import org.apache.hadoop.ipc.RetriableException;
+import org.apache.hadoop.net.NetUtils;
 import org.apache.hadoop.net.unix.DomainSocket;
 import org.apache.hadoop.net.unix.TemporarySocketDirectory;
 import org.apache.hadoop.security.token.SecretManager.InvalidToken;
+import org.apache.hadoop.security.token.Token;
 import org.apache.hadoop.test.GenericTestUtils;
 import org.apache.hadoop.util.DataChecksum;
 import org.apache.hadoop.util.Time;
@@ -819,4 +827,85 @@ public class TestShortCircuitCache {
         .fetch(Mockito.eq(extendedBlockId), Mockito.any());
     }
   }
+
+  @Test
+  public void testRequestFileDescriptorsWhenULimit() throws Exception {
+    TemporarySocketDirectory sockDir = new TemporarySocketDirectory();
+    Configuration conf = createShortCircuitConf(
+        "testRequestFileDescriptorsWhenULimit", sockDir);
+
+    final short replicas = 1;
+    final int fileSize = 3;
+    final String testFile = "/testfile";
+
+    try (MiniDFSCluster cluster =
+        new MiniDFSCluster.Builder(conf).numDataNodes(replicas).build()) {
+
+      cluster.waitActive();
+
+      DistributedFileSystem fs = cluster.getFileSystem();
+      DFSTestUtil.createFile(fs, new Path(testFile), fileSize, replicas, 0L);
+
+      LocatedBlock blk = new DFSClient(DFSUtilClient.getNNAddress(conf), conf)
+          .getLocatedBlocks(testFile, 0, fileSize).get(0);
+
+      ClientContext clientContext = Mockito.mock(ClientContext.class);
+      Mockito.when(clientContext.getPeerCache()).thenAnswer(
+          (Answer<PeerCache>) peerCacheCall -> {
+            PeerCache peerCache = new PeerCache(10, Long.MAX_VALUE);
+            DomainPeer peer = Mockito.spy(getDomainPeerToDn(conf));
+            peerCache.put(blk.getLocations()[0], peer);
+
+            Mockito.when(peer.getDomainSocket()).thenAnswer(
+                (Answer<DomainSocket>) domainSocketCall -> {
+                  DomainSocket domainSocket = Mockito.mock(DomainSocket.class);
+                  Mockito.when(domainSocket
+                      .recvFileInputStreams(
+                          Mockito.any(FileInputStream[].class),
+                          Mockito.any(byte[].class),
+                          Mockito.anyInt(),
+                          Mockito.anyInt())
+                  ).thenAnswer(
+                      // we are mocking the FileOutputStream array with nulls
+                      (Answer<Void>) recvFileInputStreamsCall -> null
+                  );
+                  return domainSocket;
+                }
+            );
+
+            return peerCache;
+          });
+
+      Mockito.when(clientContext.getShortCircuitCache()).thenAnswer(
+          (Answer<ShortCircuitCache>) shortCircuitCacheCall -> {
+            ShortCircuitCache cache = Mockito.mock(ShortCircuitCache.class);
+            Mockito.when(cache.allocShmSlot(
+                Mockito.any(DatanodeInfo.class),
+                Mockito.any(DomainPeer.class),
+                Mockito.any(MutableBoolean.class),
+                Mockito.any(ExtendedBlockId.class),
+                Mockito.anyString()))
+                .thenAnswer((Answer<Slot>) call -> null);
+
+            return cache;
+          }
+      );
+
+      DatanodeInfo[] nodes = blk.getLocations();
+
+      try {
+        Assert.assertNull(new BlockReaderFactory(new DfsClientConf(conf))
+            .setInetSocketAddress(NetUtils.createSocketAddr(nodes[0]
+                .getXferAddr()))
+            .setClientCacheContext(clientContext)
+            .setDatanodeInfo(blk.getLocations()[0])
+            .setBlock(blk.getBlock())
+            .setBlockToken(new Token())
+            .createShortCircuitReplicaInfo());
+      } catch (NullPointerException ex) {
+        Assert.fail("Should not throw NPE when the native library is unable " +
+            "to create new files!");
+      }
+    }
+  }
 }


---------------------------------------------------------------------
To unsubscribe, e-mail: common-commits-unsubscribe@hadoop.apache.org
For additional commands, e-mail: common-commits-help@hadoop.apache.org


[06/50] [abbrv] hadoop git commit: HDFS-13712. BlockReaderRemote.read() logging improvement. Contributed by Gergo Repas.

Posted by vi...@apache.org.
HDFS-13712. BlockReaderRemote.read() logging improvement. Contributed by Gergo Repas.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/344f3247
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/344f3247
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/344f3247

Branch: refs/heads/HDFS-12090
Commit: 344f324710522ffb27852c1a673c4f7d3d6eac4b
Parents: d9ba6f3
Author: Andrew Wang <wa...@apache.org>
Authored: Tue Jul 3 11:07:45 2018 +0200
Committer: Andrew Wang <wa...@apache.org>
Committed: Tue Jul 3 11:07:45 2018 +0200

----------------------------------------------------------------------
 .../hadoop/hdfs/client/impl/BlockReaderRemote.java    | 14 ++++++++++----
 1 file changed, 10 insertions(+), 4 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/344f3247/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/client/impl/BlockReaderRemote.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/client/impl/BlockReaderRemote.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/client/impl/BlockReaderRemote.java
index caf15e4..ea1baed 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/client/impl/BlockReaderRemote.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/client/impl/BlockReaderRemote.java
@@ -129,16 +129,22 @@ public class BlockReaderRemote implements BlockReader {
   @Override
   public synchronized int read(byte[] buf, int off, int len)
       throws IOException {
-    UUID randomId = (LOG.isTraceEnabled() ? UUID.randomUUID() : null);
-    LOG.trace("Starting read #{} file {} from datanode {}",
-        randomId, filename, datanodeID.getHostName());
+    boolean logTraceEnabled = LOG.isTraceEnabled();
+    UUID randomId = null;
+    if (logTraceEnabled) {
+      randomId = UUID.randomUUID();
+      LOG.trace("Starting read #{} file {} from datanode {}",
+          randomId, filename, datanodeID.getHostName());
+    }
 
     if (curDataSlice == null ||
         curDataSlice.remaining() == 0 && bytesNeededToFinish > 0) {
       readNextPacket();
     }
 
-    LOG.trace("Finishing read #{}", randomId);
+    if (logTraceEnabled) {
+      LOG.trace("Finishing read #{}", randomId);
+    }
 
     if (curDataSlice.remaining() == 0) {
       // we're at EOF now


---------------------------------------------------------------------
To unsubscribe, e-mail: common-commits-unsubscribe@hadoop.apache.org
For additional commands, e-mail: common-commits-help@hadoop.apache.org


[34/50] [abbrv] hadoop git commit: HDDS-167. Rename KeySpaceManager to OzoneManager. Contributed by Arpit Agarwal.

Posted by vi...@apache.org.
http://git-wip-us.apache.org/repos/asf/hadoop/blob/061b1685/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOzoneManager.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOzoneManager.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOzoneManager.java
new file mode 100644
index 0000000..7c8595c
--- /dev/null
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOzoneManager.java
@@ -0,0 +1,1349 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with this
+ * work for additional information regarding copyright ownership.  The ASF
+ * licenses this file to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations under
+ * the License.
+ */
+package org.apache.hadoop.ozone.om;
+
+
+import org.apache.commons.lang3.RandomStringUtils;
+import org.apache.hadoop.fs.StorageType;
+import org.apache.hadoop.hdds.scm.client.HddsClientUtils;
+import org.apache.hadoop.hdfs.DFSUtil;
+import org.apache.hadoop.hdfs.server.datanode.ObjectStoreHandler;
+import org.apache.hadoop.net.NetUtils;
+import org.apache.hadoop.ozone.MiniOzoneCluster;
+import org.apache.hadoop.ozone.OzoneConfigKeys;
+import org.apache.hadoop.hdds.conf.OzoneConfiguration;
+import org.apache.hadoop.ozone.OzoneConsts;
+import org.apache.hadoop.ozone.common.BlockGroup;
+import org.apache.hadoop.ozone.client.rest.OzoneException;
+import org.apache.hadoop.ozone.om.exceptions.OMException;
+import org.apache.hadoop.hdds.scm.server.SCMStorage;
+import org.apache.hadoop.ozone.om.helpers.ServiceInfo;
+import org.apache.hadoop.ozone.protocol.proto
+    .OzoneManagerProtocolProtos.ServicePort;
+import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
+import org.apache.hadoop.ozone.web.handlers.BucketArgs;
+import org.apache.hadoop.ozone.web.handlers.KeyArgs;
+import org.apache.hadoop.ozone.web.handlers.UserArgs;
+import org.apache.hadoop.ozone.web.handlers.VolumeArgs;
+import org.apache.hadoop.ozone.web.interfaces.StorageHandler;
+import org.apache.hadoop.ozone.OzoneAcl;
+import org.apache.hadoop.ozone.web.request.OzoneQuota;
+import org.apache.hadoop.ozone.web.response.BucketInfo;
+import org.apache.hadoop.ozone.web.response.KeyInfo;
+import org.apache.hadoop.ozone.web.response.VolumeInfo;
+import org.apache.hadoop.ozone.web.utils.OzoneUtils;
+import org.apache.hadoop.hdds.scm.ScmConfigKeys;
+import org.apache.hadoop.hdds.scm.ScmInfo;
+import org.apache.hadoop.test.GenericTestUtils;
+import org.apache.hadoop.ozone.protocol.proto
+    .OzoneManagerProtocolProtos.Status;
+import org.apache.hadoop.ozone.web.handlers.ListArgs;
+import org.apache.hadoop.ozone.web.response.ListBuckets;
+import org.apache.hadoop.ozone.web.response.ListKeys;
+import org.apache.hadoop.ozone.web.response.ListVolumes;
+import org.apache.hadoop.util.Time;
+import org.apache.hadoop.utils.BackgroundService;
+import org.apache.hadoop.utils.MetadataKeyFilters;
+import org.apache.hadoop.utils.MetadataStore;
+import org.junit.AfterClass;
+import org.junit.Assert;
+import org.junit.BeforeClass;
+import org.junit.Rule;
+import org.junit.Test;
+import org.junit.rules.ExpectedException;
+
+
+import java.io.IOException;
+import java.io.InputStream;
+import java.io.OutputStream;
+import java.nio.file.Path;
+import java.nio.file.Paths;
+import java.net.InetSocketAddress;
+import java.text.ParseException;
+import java.util.LinkedList;
+import java.util.Map;
+import java.util.Random;
+import java.util.Set;
+import java.util.List;
+import java.util.UUID;
+import java.util.stream.Collectors;
+import java.util.stream.Stream;
+
+import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_OPEN_KEY_EXPIRE_THRESHOLD_SECONDS;
+import static org.apache.hadoop.ozone.OzoneConsts.DELETING_KEY_PREFIX;
+import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_ADDRESS_KEY;
+import static org.apache.hadoop.hdds.scm.ScmConfigKeys
+    .OZONE_SCM_CLIENT_ADDRESS_KEY;
+
+/**
+ * Test Ozone Manager operation in distributed handler scenario.
+ */
+public class TestOzoneManager {
+  private static MiniOzoneCluster cluster = null;
+  private static StorageHandler storageHandler;
+  private static UserArgs userArgs;
+  private static OMMetrics omMetrics;
+  private static OzoneConfiguration conf;
+  private static String clusterId;
+  private static String scmId;
+  private static String omId;
+
+  @Rule
+  public ExpectedException exception = ExpectedException.none();
+
+  /**
+   * Create a MiniDFSCluster for testing.
+   * <p>
+   * Ozone is made active by setting OZONE_ENABLED = true and
+   * OZONE_HANDLER_TYPE_KEY = "distributed"
+   *
+   * @throws IOException
+   */
+  @BeforeClass
+  public static void init() throws Exception {
+    conf = new OzoneConfiguration();
+    clusterId = UUID.randomUUID().toString();
+    scmId = UUID.randomUUID().toString();
+    omId = UUID.randomUUID().toString();
+    conf.set(OzoneConfigKeys.OZONE_HANDLER_TYPE_KEY,
+        OzoneConsts.OZONE_HANDLER_DISTRIBUTED);
+    conf.setInt(OZONE_OPEN_KEY_EXPIRE_THRESHOLD_SECONDS, 2);
+    cluster =  MiniOzoneCluster.newBuilder(conf)
+        .setClusterId(clusterId)
+        .setScmId(scmId)
+        .setOmId(omId)
+        .build();
+    cluster.waitForClusterToBeReady();
+    storageHandler = new ObjectStoreHandler(conf).getStorageHandler();
+    userArgs = new UserArgs(null, OzoneUtils.getRequestID(),
+        null, null, null, null);
+    omMetrics = cluster.getOzoneManager().getMetrics();
+  }
+
+  /**
+   * Shutdown MiniDFSCluster.
+   */
+  @AfterClass
+  public static void shutdown() {
+    if (cluster != null) {
+      cluster.shutdown();
+    }
+  }
+
+  // Create a volume and test its attribute after creating them
+  @Test(timeout = 60000)
+  public void testCreateVolume() throws IOException, OzoneException {
+    long volumeCreateFailCount = omMetrics.getNumVolumeCreateFails();
+    String userName = "user" + RandomStringUtils.randomNumeric(5);
+    String adminName = "admin" + RandomStringUtils.randomNumeric(5);
+    String volumeName = "volume" + RandomStringUtils.randomNumeric(5);
+
+    VolumeArgs createVolumeArgs = new VolumeArgs(volumeName, userArgs);
+    createVolumeArgs.setUserName(userName);
+    createVolumeArgs.setAdminName(adminName);
+    storageHandler.createVolume(createVolumeArgs);
+
+    VolumeArgs getVolumeArgs = new VolumeArgs(volumeName, userArgs);
+    VolumeInfo retVolumeinfo = storageHandler.getVolumeInfo(getVolumeArgs);
+    Assert.assertTrue(retVolumeinfo.getVolumeName().equals(volumeName));
+    Assert.assertTrue(retVolumeinfo.getOwner().getName().equals(userName));
+    Assert.assertEquals(volumeCreateFailCount,
+        omMetrics.getNumVolumeCreateFails());
+  }
+
+  // Create a volume and modify the volume owner and then test its attributes
+  @Test(timeout = 60000)
+  public void testChangeVolumeOwner() throws IOException, OzoneException {
+    long volumeCreateFailCount = omMetrics.getNumVolumeCreateFails();
+    long volumeInfoFailCount = omMetrics.getNumVolumeInfoFails();
+    String userName = "user" + RandomStringUtils.randomNumeric(5);
+    String adminName = "admin" + RandomStringUtils.randomNumeric(5);
+    String volumeName = "volume" + RandomStringUtils.randomNumeric(5);
+
+    VolumeArgs createVolumeArgs = new VolumeArgs(volumeName, userArgs);
+    createVolumeArgs.setUserName(userName);
+    createVolumeArgs.setAdminName(adminName);
+    storageHandler.createVolume(createVolumeArgs);
+
+    String newUserName = "user" + RandomStringUtils.randomNumeric(5);
+    createVolumeArgs.setUserName(newUserName);
+    storageHandler.setVolumeOwner(createVolumeArgs);
+
+    VolumeArgs getVolumeArgs = new VolumeArgs(volumeName, userArgs);
+    VolumeInfo retVolumeInfo = storageHandler.getVolumeInfo(getVolumeArgs);
+
+    Assert.assertTrue(retVolumeInfo.getVolumeName().equals(volumeName));
+    Assert.assertFalse(retVolumeInfo.getOwner().getName().equals(userName));
+    Assert.assertTrue(retVolumeInfo.getOwner().getName().equals(newUserName));
+    Assert.assertEquals(volumeCreateFailCount,
+        omMetrics.getNumVolumeCreateFails());
+    Assert.assertEquals(volumeInfoFailCount,
+        omMetrics.getNumVolumeInfoFails());
+  }
+
+  // Create a volume and modify the volume owner and then test its attributes
+  @Test(timeout = 60000)
+  public void testChangeVolumeQuota() throws IOException, OzoneException {
+    long numVolumeCreateFail = omMetrics.getNumVolumeCreateFails();
+    long numVolumeInfoFail = omMetrics.getNumVolumeInfoFails();
+    String userName = "user" + RandomStringUtils.randomNumeric(5);
+    String adminName = "admin" + RandomStringUtils.randomNumeric(5);
+    String volumeName = "volume" + RandomStringUtils.randomNumeric(5);
+    Random rand = new Random();
+
+    // Create a new volume with a quota
+    OzoneQuota createQuota =
+        new OzoneQuota(rand.nextInt(100), OzoneQuota.Units.GB);
+    VolumeArgs createVolumeArgs = new VolumeArgs(volumeName, userArgs);
+    createVolumeArgs.setUserName(userName);
+    createVolumeArgs.setAdminName(adminName);
+    createVolumeArgs.setQuota(createQuota);
+    storageHandler.createVolume(createVolumeArgs);
+
+    VolumeArgs getVolumeArgs = new VolumeArgs(volumeName, userArgs);
+    VolumeInfo retVolumeInfo = storageHandler.getVolumeInfo(getVolumeArgs);
+    Assert.assertEquals(createQuota.sizeInBytes(),
+        retVolumeInfo.getQuota().sizeInBytes());
+
+    // Set a new quota and test it
+    OzoneQuota setQuota =
+        new OzoneQuota(rand.nextInt(100), OzoneQuota.Units.GB);
+    createVolumeArgs.setQuota(setQuota);
+    storageHandler.setVolumeQuota(createVolumeArgs, false);
+    getVolumeArgs = new VolumeArgs(volumeName, userArgs);
+    retVolumeInfo = storageHandler.getVolumeInfo(getVolumeArgs);
+    Assert.assertEquals(setQuota.sizeInBytes(),
+        retVolumeInfo.getQuota().sizeInBytes());
+
+    // Remove the quota and test it again
+    storageHandler.setVolumeQuota(createVolumeArgs, true);
+    getVolumeArgs = new VolumeArgs(volumeName, userArgs);
+    retVolumeInfo = storageHandler.getVolumeInfo(getVolumeArgs);
+    Assert.assertEquals(OzoneConsts.MAX_QUOTA_IN_BYTES,
+        retVolumeInfo.getQuota().sizeInBytes());
+    Assert.assertEquals(numVolumeCreateFail,
+        omMetrics.getNumVolumeCreateFails());
+    Assert.assertEquals(numVolumeInfoFail,
+        omMetrics.getNumVolumeInfoFails());
+  }
+
+  // Create a volume and then delete it and then check for deletion
+  @Test(timeout = 60000)
+  public void testDeleteVolume() throws IOException, OzoneException {
+    long volumeCreateFailCount = omMetrics.getNumVolumeCreateFails();
+    String userName = "user" + RandomStringUtils.randomNumeric(5);
+    String adminName = "admin" + RandomStringUtils.randomNumeric(5);
+    String volumeName = "volume" + RandomStringUtils.randomNumeric(5);
+    String volumeName1 = volumeName + "_A";
+    String volumeName2 = volumeName + "_AA";
+    VolumeArgs volumeArgs = null;
+    VolumeInfo volumeInfo = null;
+
+    // Create 2 empty volumes with same prefix.
+    volumeArgs = new VolumeArgs(volumeName1, userArgs);
+    volumeArgs.setUserName(userName);
+    volumeArgs.setAdminName(adminName);
+    storageHandler.createVolume(volumeArgs);
+
+    volumeArgs = new VolumeArgs(volumeName2, userArgs);
+    volumeArgs.setUserName(userName);
+    volumeArgs.setAdminName(adminName);
+    storageHandler.createVolume(volumeArgs);
+
+    volumeArgs  = new VolumeArgs(volumeName1, userArgs);
+    volumeInfo = storageHandler.getVolumeInfo(volumeArgs);
+    Assert.assertTrue(volumeInfo.getVolumeName().equals(volumeName1));
+    Assert.assertTrue(volumeInfo.getOwner().getName().equals(userName));
+    Assert.assertEquals(volumeCreateFailCount,
+        omMetrics.getNumVolumeCreateFails());
+
+    // Volume with _A should be able to delete as it is empty.
+    storageHandler.deleteVolume(volumeArgs);
+
+    // Make sure volume with _AA suffix still exists.
+    volumeArgs = new VolumeArgs(volumeName2, userArgs);
+    volumeInfo = storageHandler.getVolumeInfo(volumeArgs);
+    Assert.assertTrue(volumeInfo.getVolumeName().equals(volumeName2));
+
+    // Make sure volume with _A suffix is successfully deleted.
+    exception.expect(IOException.class);
+    exception.expectMessage("Info Volume failed, error:VOLUME_NOT_FOUND");
+    volumeArgs = new VolumeArgs(volumeName1, userArgs);
+    storageHandler.getVolumeInfo(volumeArgs);
+  }
+
+  // Create a volume and a bucket inside the volume,
+  // then delete it and then check for deletion failure
+  @Test(timeout = 60000)
+  public void testFailedDeleteVolume() throws IOException, OzoneException {
+    long numVolumeCreateFails = omMetrics.getNumVolumeCreateFails();
+    String userName = "user" + RandomStringUtils.randomNumeric(5);
+    String adminName = "admin" + RandomStringUtils.randomNumeric(5);
+    String volumeName = "volume" + RandomStringUtils.randomNumeric(5);
+    String bucketName = "bucket" + RandomStringUtils.randomNumeric(5);
+
+    VolumeArgs createVolumeArgs = new VolumeArgs(volumeName, userArgs);
+    createVolumeArgs.setUserName(userName);
+    createVolumeArgs.setAdminName(adminName);
+    storageHandler.createVolume(createVolumeArgs);
+
+    VolumeArgs getVolumeArgs = new VolumeArgs(volumeName, userArgs);
+    VolumeInfo retVolumeInfo = storageHandler.getVolumeInfo(getVolumeArgs);
+    Assert.assertTrue(retVolumeInfo.getVolumeName().equals(volumeName));
+    Assert.assertTrue(retVolumeInfo.getOwner().getName().equals(userName));
+    Assert.assertEquals(numVolumeCreateFails,
+        omMetrics.getNumVolumeCreateFails());
+
+    BucketArgs bucketArgs = new BucketArgs(volumeName, bucketName, userArgs);
+    storageHandler.createBucket(bucketArgs);
+
+    try {
+      storageHandler.deleteVolume(createVolumeArgs);
+      Assert.fail("Expecting deletion should fail "
+          + "because volume is not empty");
+    } catch (IOException ex) {
+      Assert.assertEquals(ex.getMessage(),
+          "Delete Volume failed, error:VOLUME_NOT_EMPTY");
+    }
+    retVolumeInfo = storageHandler.getVolumeInfo(getVolumeArgs);
+    Assert.assertTrue(retVolumeInfo.getVolumeName().equals(volumeName));
+    Assert.assertTrue(retVolumeInfo.getOwner().getName().equals(userName));
+  }
+
+  // Create a volume and test Volume access for a different user
+  @Test(timeout = 60000)
+  public void testAccessVolume() throws IOException, OzoneException {
+    String userName = "user" + RandomStringUtils.randomNumeric(5);
+    String adminName = "admin" + RandomStringUtils.randomNumeric(5);
+    String volumeName = "volume" + RandomStringUtils.randomNumeric(5);
+    String[] groupName =
+        {"group" + RandomStringUtils.randomNumeric(5)};
+
+    VolumeArgs createVolumeArgs = new VolumeArgs(volumeName, userArgs);
+    createVolumeArgs.setUserName(userName);
+    createVolumeArgs.setAdminName(adminName);
+    createVolumeArgs.setGroups(groupName);
+    storageHandler.createVolume(createVolumeArgs);
+
+    OzoneAcl userAcl = new OzoneAcl(OzoneAcl.OzoneACLType.USER, userName,
+        OzoneAcl.OzoneACLRights.READ_WRITE);
+    Assert.assertTrue(storageHandler.checkVolumeAccess(volumeName, userAcl));
+    OzoneAcl group = new OzoneAcl(OzoneAcl.OzoneACLType.GROUP, groupName[0],
+        OzoneAcl.OzoneACLRights.READ);
+    Assert.assertTrue(storageHandler.checkVolumeAccess(volumeName, group));
+
+    // Create a different user and access should fail
+    String falseUserName = "user" + RandomStringUtils.randomNumeric(5);
+    OzoneAcl falseUserAcl =
+        new OzoneAcl(OzoneAcl.OzoneACLType.USER, falseUserName,
+            OzoneAcl.OzoneACLRights.READ_WRITE);
+    Assert.assertFalse(storageHandler
+        .checkVolumeAccess(volumeName, falseUserAcl));
+    // Checking access with user name and Group Type should fail
+    OzoneAcl falseGroupAcl = new OzoneAcl(OzoneAcl.OzoneACLType.GROUP, userName,
+        OzoneAcl.OzoneACLRights.READ_WRITE);
+    Assert.assertFalse(storageHandler
+        .checkVolumeAccess(volumeName, falseGroupAcl));
+
+    // Access for acl type world should also fail
+    OzoneAcl worldAcl =
+        new OzoneAcl(OzoneAcl.OzoneACLType.WORLD, "",
+            OzoneAcl.OzoneACLRights.READ);
+    Assert.assertFalse(storageHandler.checkVolumeAccess(volumeName, worldAcl));
+
+    Assert.assertEquals(0, omMetrics.getNumVolumeCheckAccessFails());
+    Assert.assertEquals(0, omMetrics.getNumVolumeCreateFails());
+  }
+
+  @Test(timeout = 60000)
+  public void testCreateBucket() throws IOException, OzoneException {
+    long numVolumeCreateFail = omMetrics.getNumVolumeCreateFails();
+    long numBucketCreateFail = omMetrics.getNumBucketCreateFails();
+    long numBucketInfoFail = omMetrics.getNumBucketInfoFails();
+    String userName = "user" + RandomStringUtils.randomNumeric(5);
+    String adminName = "admin" + RandomStringUtils.randomNumeric(5);
+    String volumeName = "volume" + RandomStringUtils.randomNumeric(5);
+    String bucketName = "bucket" + RandomStringUtils.randomNumeric(5);
+
+    VolumeArgs volumeArgs = new VolumeArgs(volumeName, userArgs);
+    volumeArgs.setUserName(userName);
+    volumeArgs.setAdminName(adminName);
+    storageHandler.createVolume(volumeArgs);
+
+    BucketArgs bucketArgs = new BucketArgs(volumeName, bucketName, userArgs);
+    storageHandler.createBucket(bucketArgs);
+
+    BucketArgs getBucketArgs = new BucketArgs(volumeName, bucketName,
+        userArgs);
+    BucketInfo bucketInfo = storageHandler.getBucketInfo(getBucketArgs);
+    Assert.assertTrue(bucketInfo.getVolumeName().equals(volumeName));
+    Assert.assertTrue(bucketInfo.getBucketName().equals(bucketName));
+    Assert.assertEquals(numVolumeCreateFail,
+        omMetrics.getNumVolumeCreateFails());
+    Assert.assertEquals(numBucketCreateFail,
+        omMetrics.getNumBucketCreateFails());
+    Assert.assertEquals(numBucketInfoFail,
+        omMetrics.getNumBucketInfoFails());
+  }
+
+  @Test(timeout = 60000)
+  public void testDeleteBucket() throws IOException, OzoneException {
+    String userName = "user" + RandomStringUtils.randomNumeric(5);
+    String adminName = "admin" + RandomStringUtils.randomNumeric(5);
+    String volumeName = "volume" + RandomStringUtils.randomNumeric(5);
+    String bucketName = "bucket" + RandomStringUtils.randomNumeric(5);
+    VolumeArgs volumeArgs = new VolumeArgs(volumeName, userArgs);
+    volumeArgs.setUserName(userName);
+    volumeArgs.setAdminName(adminName);
+    storageHandler.createVolume(volumeArgs);
+    BucketArgs bucketArgs = new BucketArgs(volumeName, bucketName, userArgs);
+    storageHandler.createBucket(bucketArgs);
+    BucketArgs getBucketArgs = new BucketArgs(volumeName, bucketName,
+        userArgs);
+    BucketInfo bucketInfo = storageHandler.getBucketInfo(getBucketArgs);
+    Assert.assertTrue(bucketInfo.getVolumeName().equals(volumeName));
+    Assert.assertTrue(bucketInfo.getBucketName().equals(bucketName));
+    storageHandler.deleteBucket(bucketArgs);
+    exception.expect(IOException.class);
+    exception.expectMessage("Info Bucket failed, error: BUCKET_NOT_FOUND");
+    storageHandler.getBucketInfo(getBucketArgs);
+  }
+
+  @Test(timeout = 60000)
+  public void testDeleteNonExistingBucket() throws IOException, OzoneException {
+    String userName = "user" + RandomStringUtils.randomNumeric(5);
+    String adminName = "admin" + RandomStringUtils.randomNumeric(5);
+    String volumeName = "volume" + RandomStringUtils.randomNumeric(5);
+    String bucketName = "bucket" + RandomStringUtils.randomNumeric(5);
+    VolumeArgs volumeArgs = new VolumeArgs(volumeName, userArgs);
+    volumeArgs.setUserName(userName);
+    volumeArgs.setAdminName(adminName);
+    storageHandler.createVolume(volumeArgs);
+    BucketArgs bucketArgs = new BucketArgs(volumeName, bucketName, userArgs);
+    storageHandler.createBucket(bucketArgs);
+    BucketArgs getBucketArgs = new BucketArgs(volumeName, bucketName,
+        userArgs);
+    BucketInfo bucketInfo = storageHandler.getBucketInfo(getBucketArgs);
+    Assert.assertTrue(bucketInfo.getVolumeName().equals(volumeName));
+    Assert.assertTrue(bucketInfo.getBucketName().equals(bucketName));
+    BucketArgs newBucketArgs = new BucketArgs(
+        volumeName, bucketName + "_invalid", userArgs);
+    exception.expect(IOException.class);
+    exception.expectMessage("Delete Bucket failed, error:BUCKET_NOT_FOUND");
+    storageHandler.deleteBucket(newBucketArgs);
+  }
+
+
+  @Test(timeout = 60000)
+  public void testDeleteNonEmptyBucket() throws IOException, OzoneException {
+    String userName = "user" + RandomStringUtils.randomNumeric(5);
+    String adminName = "admin" + RandomStringUtils.randomNumeric(5);
+    String volumeName = "volume" + RandomStringUtils.randomNumeric(5);
+    String bucketName = "bucket" + RandomStringUtils.randomNumeric(5);
+    String keyName = "key" + RandomStringUtils.randomNumeric(5);
+    VolumeArgs volumeArgs = new VolumeArgs(volumeName, userArgs);
+    volumeArgs.setUserName(userName);
+    volumeArgs.setAdminName(adminName);
+    storageHandler.createVolume(volumeArgs);
+    BucketArgs bucketArgs = new BucketArgs(volumeName, bucketName, userArgs);
+    storageHandler.createBucket(bucketArgs);
+    BucketArgs getBucketArgs = new BucketArgs(volumeName, bucketName,
+        userArgs);
+    BucketInfo bucketInfo = storageHandler.getBucketInfo(getBucketArgs);
+    Assert.assertTrue(bucketInfo.getVolumeName().equals(volumeName));
+    Assert.assertTrue(bucketInfo.getBucketName().equals(bucketName));
+    String dataString = RandomStringUtils.randomAscii(100);
+    KeyArgs keyArgs = new KeyArgs(volumeName, bucketName, keyName, userArgs);
+    keyArgs.setSize(100);
+    try (OutputStream stream = storageHandler.newKeyWriter(keyArgs)) {
+      stream.write(dataString.getBytes());
+    }
+    exception.expect(IOException.class);
+    exception.expectMessage("Delete Bucket failed, error:BUCKET_NOT_EMPTY");
+    storageHandler.deleteBucket(bucketArgs);
+  }
+
+  /**
+   * Basic test of both putKey and getKey from OM, as one can not be tested
+   * without the other.
+   *
+   * @throws IOException
+   * @throws OzoneException
+   */
+  @Test
+  public void testGetKeyWriterReader() throws IOException, OzoneException {
+    String userName = "user" + RandomStringUtils.randomNumeric(5);
+    String adminName = "admin" + RandomStringUtils.randomNumeric(5);
+    String volumeName = "volume" + RandomStringUtils.randomNumeric(5);
+    String bucketName = "bucket" + RandomStringUtils.randomNumeric(5);
+    String keyName = "key" + RandomStringUtils.randomNumeric(5);
+    long numKeyAllocates = omMetrics.getNumKeyAllocates();
+    long numKeyLookups = omMetrics.getNumKeyLookups();
+
+    VolumeArgs createVolumeArgs = new VolumeArgs(volumeName, userArgs);
+    createVolumeArgs.setUserName(userName);
+    createVolumeArgs.setAdminName(adminName);
+    storageHandler.createVolume(createVolumeArgs);
+
+    BucketArgs bucketArgs = new BucketArgs(bucketName, createVolumeArgs);
+    bucketArgs.setAddAcls(new LinkedList<>());
+    bucketArgs.setRemoveAcls(new LinkedList<>());
+    bucketArgs.setStorageType(StorageType.DISK);
+    storageHandler.createBucket(bucketArgs);
+
+    String dataString = RandomStringUtils.randomAscii(100);
+    KeyArgs keyArgs = new KeyArgs(volumeName, bucketName, keyName, userArgs);
+    keyArgs.setSize(100);
+    try (OutputStream stream = storageHandler.newKeyWriter(keyArgs)) {
+      stream.write(dataString.getBytes());
+    }
+    Assert.assertEquals(1 + numKeyAllocates, omMetrics.getNumKeyAllocates());
+
+    byte[] data = new byte[dataString.length()];
+    try (InputStream in = storageHandler.newKeyReader(keyArgs)) {
+      in.read(data);
+    }
+    Assert.assertEquals(dataString, DFSUtil.bytes2String(data));
+    Assert.assertEquals(1 + numKeyLookups, omMetrics.getNumKeyLookups());
+  }
+
+  /**
+   * Test write the same key twice, the second write should fail, as currently
+   * key overwrite is not supported.
+   *
+   * @throws IOException
+   * @throws OzoneException
+   */
+  @Test
+  public void testKeyOverwrite() throws IOException, OzoneException {
+    String userName = "user" + RandomStringUtils.randomNumeric(5);
+    String adminName = "admin" + RandomStringUtils.randomNumeric(5);
+    String volumeName = "volume" + RandomStringUtils.randomNumeric(5);
+    String bucketName = "bucket" + RandomStringUtils.randomNumeric(5);
+    String keyName = "key" + RandomStringUtils.randomNumeric(5);
+    long numKeyAllocateFails = omMetrics.getNumKeyAllocateFails();
+
+    VolumeArgs createVolumeArgs = new VolumeArgs(volumeName, userArgs);
+    createVolumeArgs.setUserName(userName);
+    createVolumeArgs.setAdminName(adminName);
+    storageHandler.createVolume(createVolumeArgs);
+
+    BucketArgs bucketArgs = new BucketArgs(bucketName, createVolumeArgs);
+    bucketArgs.setAddAcls(new LinkedList<>());
+    bucketArgs.setRemoveAcls(new LinkedList<>());
+    bucketArgs.setStorageType(StorageType.DISK);
+    storageHandler.createBucket(bucketArgs);
+
+    KeyArgs keyArgs = new KeyArgs(volumeName, bucketName, keyName, userArgs);
+    keyArgs.setSize(100);
+    String dataString = RandomStringUtils.randomAscii(100);
+    try (OutputStream stream = storageHandler.newKeyWriter(keyArgs)) {
+      stream.write(dataString.getBytes());
+    }
+
+    // We allow the key overwrite to be successful. Please note : Till
+    // HDFS-11922 is fixed this causes a data block leak on the data node side.
+    // That is this overwrite only overwrites the keys on OM. We need to
+    // garbage collect those blocks from datanode.
+    KeyArgs keyArgs2 = new KeyArgs(volumeName, bucketName, keyName, userArgs);
+    storageHandler.newKeyWriter(keyArgs2);
+    Assert
+        .assertEquals(numKeyAllocateFails, omMetrics.getNumKeyAllocateFails());
+  }
+
+  /**
+   * Test get a non-exiting key.
+   *
+   * @throws IOException
+   * @throws OzoneException
+   */
+  @Test
+  public void testGetNonExistKey() throws IOException, OzoneException {
+    String userName = "user" + RandomStringUtils.randomNumeric(5);
+    String adminName = "admin" + RandomStringUtils.randomNumeric(5);
+    String volumeName = "volume" + RandomStringUtils.randomNumeric(5);
+    String bucketName = "bucket" + RandomStringUtils.randomNumeric(5);
+    String keyName = "key" + RandomStringUtils.randomNumeric(5);
+    long numKeyLookupFails = omMetrics.getNumKeyLookupFails();
+
+    VolumeArgs createVolumeArgs = new VolumeArgs(volumeName, userArgs);
+    createVolumeArgs.setUserName(userName);
+    createVolumeArgs.setAdminName(adminName);
+    storageHandler.createVolume(createVolumeArgs);
+
+    BucketArgs bucketArgs = new BucketArgs(bucketName, createVolumeArgs);
+    bucketArgs.setAddAcls(new LinkedList<>());
+    bucketArgs.setRemoveAcls(new LinkedList<>());
+    bucketArgs.setStorageType(StorageType.DISK);
+    storageHandler.createBucket(bucketArgs);
+
+    KeyArgs keyArgs = new KeyArgs(volumeName, bucketName, keyName, userArgs);
+    // try to get the key, should fail as it hasn't been created
+    exception.expect(IOException.class);
+    exception.expectMessage("KEY_NOT_FOUND");
+    storageHandler.newKeyReader(keyArgs);
+    Assert.assertEquals(1 + numKeyLookupFails,
+        omMetrics.getNumKeyLookupFails());
+  }
+
+  /**
+   * Test delete keys for om.
+   *
+   * @throws IOException
+   * @throws OzoneException
+   */
+  @Test
+  public void testDeleteKey() throws IOException, OzoneException {
+    String userName = "user" + RandomStringUtils.randomNumeric(5);
+    String adminName = "admin" + RandomStringUtils.randomNumeric(5);
+    String volumeName = "volume" + RandomStringUtils.randomNumeric(5);
+    String bucketName = "bucket" + RandomStringUtils.randomNumeric(5);
+    String keyName = "key" + RandomStringUtils.randomNumeric(5);
+    long numKeyDeletes = omMetrics.getNumKeyDeletes();
+    long numKeyDeleteFails = omMetrics.getNumKeyDeletesFails();
+
+    VolumeArgs createVolumeArgs = new VolumeArgs(volumeName, userArgs);
+    createVolumeArgs.setUserName(userName);
+    createVolumeArgs.setAdminName(adminName);
+    storageHandler.createVolume(createVolumeArgs);
+
+    BucketArgs bucketArgs = new BucketArgs(bucketName, createVolumeArgs);
+    storageHandler.createBucket(bucketArgs);
+
+    KeyArgs keyArgs = new KeyArgs(keyName, bucketArgs);
+    keyArgs.setSize(100);
+    String dataString = RandomStringUtils.randomAscii(100);
+    try (OutputStream stream = storageHandler.newKeyWriter(keyArgs)) {
+      stream.write(dataString.getBytes());
+    }
+
+    storageHandler.deleteKey(keyArgs);
+    Assert.assertEquals(1 + numKeyDeletes, omMetrics.getNumKeyDeletes());
+
+    // Make sure the deleted key has been renamed.
+    MetadataStore store = cluster.getOzoneManager().
+        getMetadataManager().getStore();
+    List<Map.Entry<byte[], byte[]>> list = store.getRangeKVs(null, 10,
+        new MetadataKeyFilters.KeyPrefixFilter()
+            .addFilter(DELETING_KEY_PREFIX));
+    Assert.assertEquals(1, list.size());
+
+    // Delete the key again to test deleting non-existing key.
+    try {
+      storageHandler.deleteKey(keyArgs);
+      Assert.fail("Expected exception not thrown.");
+    } catch (IOException ioe) {
+      Assert.assertTrue(ioe.getMessage().contains("KEY_NOT_FOUND"));
+    }
+    Assert.assertEquals(1 + numKeyDeleteFails,
+        omMetrics.getNumKeyDeletesFails());
+  }
+
+  /**
+   * Test rename key for om.
+   *
+   * @throws IOException
+   * @throws OzoneException
+   */
+  @Test
+  public void testRenameKey() throws IOException, OzoneException {
+    String userName = "user" + RandomStringUtils.randomNumeric(5);
+    String adminName = "admin" + RandomStringUtils.randomNumeric(5);
+    String volumeName = "volume" + RandomStringUtils.randomNumeric(5);
+    String bucketName = "bucket" + RandomStringUtils.randomNumeric(5);
+    String keyName = "key" + RandomStringUtils.randomNumeric(5);
+    long numKeyRenames = omMetrics.getNumKeyRenames();
+    long numKeyRenameFails = omMetrics.getNumKeyRenameFails();
+    int testRenameFails = 0;
+    int testRenames = 0;
+    IOException ioe = null;
+
+    VolumeArgs createVolumeArgs = new VolumeArgs(volumeName, userArgs);
+    createVolumeArgs.setUserName(userName);
+    createVolumeArgs.setAdminName(adminName);
+    storageHandler.createVolume(createVolumeArgs);
+
+    BucketArgs bucketArgs = new BucketArgs(bucketName, createVolumeArgs);
+    storageHandler.createBucket(bucketArgs);
+
+    KeyArgs keyArgs = new KeyArgs(keyName, bucketArgs);
+    keyArgs.setSize(100);
+    String toKeyName = "key" + RandomStringUtils.randomNumeric(5);
+
+    // Rename from non-existent key should fail
+    try {
+      testRenames++;
+      storageHandler.renameKey(keyArgs, toKeyName);
+    } catch (IOException e) {
+      testRenameFails++;
+      ioe = e;
+    }
+    Assert.assertTrue(ioe.getMessage().contains("Rename key failed, error"));
+
+    // Write the contents of the key to be renamed
+    String dataString = RandomStringUtils.randomAscii(100);
+    try (OutputStream stream = storageHandler.newKeyWriter(keyArgs)) {
+      stream.write(dataString.getBytes());
+    }
+
+    // Rename the key
+    toKeyName = "key" + RandomStringUtils.randomNumeric(5);
+    testRenames++;
+    storageHandler.renameKey(keyArgs, toKeyName);
+    Assert.assertEquals(numKeyRenames + testRenames,
+        omMetrics.getNumKeyRenames());
+    Assert.assertEquals(numKeyRenameFails + testRenameFails,
+        omMetrics.getNumKeyRenameFails());
+
+    // Try to get the key, should fail as it has been renamed
+    try {
+      storageHandler.newKeyReader(keyArgs);
+    } catch (IOException e) {
+      ioe = e;
+    }
+    Assert.assertTrue(ioe.getMessage().contains("KEY_NOT_FOUND"));
+
+    // Verify the contents of the renamed key
+    keyArgs = new KeyArgs(toKeyName, bucketArgs);
+    InputStream in = storageHandler.newKeyReader(keyArgs);
+    byte[] b = new byte[dataString.getBytes().length];
+    in.read(b);
+    Assert.assertEquals(new String(b), dataString);
+
+    // Rewrite the renamed key. Rename to key which already exists should fail.
+    keyArgs = new KeyArgs(keyName, bucketArgs);
+    keyArgs.setSize(100);
+    dataString = RandomStringUtils.randomAscii(100);
+    try (OutputStream stream = storageHandler.newKeyWriter(keyArgs)) {
+      stream.write(dataString.getBytes());
+      stream.close();
+      testRenames++;
+      storageHandler.renameKey(keyArgs, toKeyName);
+    } catch (IOException e) {
+      testRenameFails++;
+      ioe = e;
+    }
+    Assert.assertTrue(ioe.getMessage().contains("Rename key failed, error"));
+
+    // Rename to empty string should fail
+    toKeyName = "";
+    try {
+      testRenames++;
+      storageHandler.renameKey(keyArgs, toKeyName);
+    } catch (IOException e) {
+      testRenameFails++;
+      ioe = e;
+    }
+    Assert.assertTrue(ioe.getMessage().contains("Rename key failed, error"));
+
+    // Rename from empty string should fail
+    keyArgs = new KeyArgs("", bucketArgs);
+    toKeyName = "key" + RandomStringUtils.randomNumeric(5);
+    try {
+      testRenames++;
+      storageHandler.renameKey(keyArgs, toKeyName);
+    } catch (IOException e) {
+      testRenameFails++;
+      ioe = e;
+    }
+    Assert.assertTrue(ioe.getMessage().contains("Rename key failed, error"));
+
+    Assert.assertEquals(numKeyRenames + testRenames,
+        omMetrics.getNumKeyRenames());
+    Assert.assertEquals(numKeyRenameFails + testRenameFails,
+        omMetrics.getNumKeyRenameFails());
+  }
+
+  @Test(timeout = 60000)
+  public void testListBuckets() throws IOException, OzoneException {
+    ListBuckets result = null;
+    ListArgs listBucketArgs = null;
+
+    // Create volume - volA.
+    final String volAname = "volA";
+    VolumeArgs volAArgs = new VolumeArgs(volAname, userArgs);
+    volAArgs.setUserName("userA");
+    volAArgs.setAdminName("adminA");
+    storageHandler.createVolume(volAArgs);
+
+    // Create 20 buckets in volA for tests.
+    for (int i=0; i<10; i++) {
+      // Create "/volA/aBucket_0" to "/volA/aBucket_9" buckets in volA volume.
+      BucketArgs aBuckets = new BucketArgs(volAname,
+          "aBucket_" + i, userArgs);
+      if(i % 3 == 0) {
+        aBuckets.setStorageType(StorageType.ARCHIVE);
+      } else {
+        aBuckets.setStorageType(StorageType.DISK);
+      }
+      storageHandler.createBucket(aBuckets);
+
+      // Create "/volA/bBucket_0" to "/volA/bBucket_9" buckets in volA volume.
+      BucketArgs bBuckets = new BucketArgs(volAname,
+          "bBucket_" + i, userArgs);
+      if(i % 3 == 0) {
+        bBuckets.setStorageType(StorageType.RAM_DISK);
+      } else {
+        bBuckets.setStorageType(StorageType.SSD);
+      }
+      storageHandler.createBucket(bBuckets);
+    }
+
+    VolumeArgs volArgs = new VolumeArgs(volAname, userArgs);
+
+    // List all buckets in volA.
+    listBucketArgs = new ListArgs(volArgs, null, 100, null);
+    result = storageHandler.listBuckets(listBucketArgs);
+    Assert.assertEquals(20, result.getBuckets().size());
+    List<BucketInfo> archiveBuckets = result.getBuckets().stream()
+        .filter(item -> item.getStorageType() == StorageType.ARCHIVE)
+        .collect(Collectors.toList());
+    Assert.assertEquals(4, archiveBuckets.size());
+
+    // List buckets with prefix "aBucket".
+    listBucketArgs = new ListArgs(volArgs, "aBucket", 100, null);
+    result = storageHandler.listBuckets(listBucketArgs);
+    Assert.assertEquals(10, result.getBuckets().size());
+    Assert.assertTrue(result.getBuckets().stream()
+        .allMatch(entry -> entry.getBucketName().startsWith("aBucket")));
+
+    // List a certain number of buckets.
+    listBucketArgs = new ListArgs(volArgs, null, 3, null);
+    result = storageHandler.listBuckets(listBucketArgs);
+    Assert.assertEquals(3, result.getBuckets().size());
+    Assert.assertEquals("aBucket_0",
+        result.getBuckets().get(0).getBucketName());
+    Assert.assertEquals("aBucket_1",
+        result.getBuckets().get(1).getBucketName());
+    Assert.assertEquals("aBucket_2",
+        result.getBuckets().get(2).getBucketName());
+
+    // List a certain number of buckets from the startKey.
+    listBucketArgs = new ListArgs(volArgs, null, 2, "bBucket_3");
+    result = storageHandler.listBuckets(listBucketArgs);
+    Assert.assertEquals(2, result.getBuckets().size());
+    Assert.assertEquals("bBucket_4",
+        result.getBuckets().get(0).getBucketName());
+    Assert.assertEquals("bBucket_5",
+        result.getBuckets().get(1).getBucketName());
+
+    // Provide an invalid bucket name as start key.
+    listBucketArgs = new ListArgs(volArgs, null, 100, "unknown_bucket_name");
+    ListBuckets buckets = storageHandler.listBuckets(listBucketArgs);
+    Assert.assertEquals(buckets.getBuckets().size(), 0);
+
+    // Use all arguments.
+    listBucketArgs = new ListArgs(volArgs, "b", 5, "bBucket_7");
+    result = storageHandler.listBuckets(listBucketArgs);
+    Assert.assertEquals(2, result.getBuckets().size());
+    Assert.assertEquals("bBucket_8",
+        result.getBuckets().get(0).getBucketName());
+    Assert.assertEquals("bBucket_9",
+        result.getBuckets().get(1).getBucketName());
+
+    // Provide an invalid maxKeys argument.
+    try {
+      listBucketArgs = new ListArgs(volArgs, null, -1, null);
+      storageHandler.listBuckets(listBucketArgs);
+      Assert.fail("Expecting an error when the given"
+          + " maxKeys argument is invalid.");
+    } catch (Exception e) {
+      Assert.assertTrue(e.getMessage()
+          .contains(String.format("the value must be in range (0, %d]",
+              OzoneConsts.MAX_LISTBUCKETS_SIZE)));
+    }
+
+    // Provide an invalid volume name.
+    VolumeArgs invalidVolArgs = new VolumeArgs("invalid_name", userArgs);
+    try {
+      listBucketArgs = new ListArgs(invalidVolArgs, null, 100, null);
+      storageHandler.listBuckets(listBucketArgs);
+      Assert.fail("Expecting an error when the given volume name is invalid.");
+    } catch (Exception e) {
+      Assert.assertTrue(e instanceof IOException);
+      Assert.assertTrue(e.getMessage()
+          .contains(Status.VOLUME_NOT_FOUND.name()));
+    }
+  }
+
+  /**
+   * Test list keys.
+   * @throws IOException
+   * @throws OzoneException
+   */
+  @Test
+  public void testListKeys() throws IOException, OzoneException {
+    ListKeys result = null;
+    ListArgs listKeyArgs = null;
+
+    String userName = "user" + RandomStringUtils.randomNumeric(5);
+    String adminName = "admin" + RandomStringUtils.randomNumeric(5);
+    String volumeName = "volume" + RandomStringUtils.randomNumeric(5);
+    String bucketName = "bucket" + RandomStringUtils.randomNumeric(5);
+
+    VolumeArgs createVolumeArgs = new VolumeArgs(volumeName, userArgs);
+    createVolumeArgs.setUserName(userName);
+    createVolumeArgs.setAdminName(adminName);
+    storageHandler.createVolume(createVolumeArgs);
+
+    BucketArgs bucketArgs = new BucketArgs(bucketName, createVolumeArgs);
+    bucketArgs.setAddAcls(new LinkedList<>());
+    bucketArgs.setRemoveAcls(new LinkedList<>());
+    bucketArgs.setStorageType(StorageType.DISK);
+    storageHandler.createBucket(bucketArgs);
+
+    // Write 20 keys in bucket.
+    int numKeys = 20;
+    String keyName = "Key";
+    KeyArgs keyArgs = null;
+    for (int i = 0; i < numKeys; i++) {
+      if (i % 2 == 0) {
+        // Create /volume/bucket/aKey[0,2,4,...,18] in bucket.
+        keyArgs = new KeyArgs("a" + keyName + i, bucketArgs);
+      } else {
+        // Create /volume/bucket/bKey[1,3,5,...,19] in bucket.
+        keyArgs = new KeyArgs("b" + keyName + i, bucketArgs);
+      }
+      keyArgs.setSize(4096);
+
+      // Just for testing list keys call, so no need to write real data.
+      OutputStream stream = storageHandler.newKeyWriter(keyArgs);
+      stream.close();
+    }
+
+    // List all keys in bucket.
+    bucketArgs = new BucketArgs(volumeName, bucketName, userArgs);
+    listKeyArgs = new ListArgs(bucketArgs, null, 100, null);
+    result = storageHandler.listKeys(listKeyArgs);
+    Assert.assertEquals(numKeys, result.getKeyList().size());
+
+    // List keys with prefix "aKey".
+    listKeyArgs = new ListArgs(bucketArgs, "aKey", 100, null);
+    result = storageHandler.listKeys(listKeyArgs);
+    Assert.assertEquals(numKeys / 2, result.getKeyList().size());
+    Assert.assertTrue(result.getKeyList().stream()
+        .allMatch(entry -> entry.getKeyName().startsWith("aKey")));
+
+    // List a certain number of keys.
+    listKeyArgs = new ListArgs(bucketArgs, null, 3, null);
+    result = storageHandler.listKeys(listKeyArgs);
+    Assert.assertEquals(3, result.getKeyList().size());
+    Assert.assertEquals("aKey0",
+        result.getKeyList().get(0).getKeyName());
+    Assert.assertEquals("aKey10",
+        result.getKeyList().get(1).getKeyName());
+    Assert.assertEquals("aKey12",
+        result.getKeyList().get(2).getKeyName());
+
+    // List a certain number of keys from the startKey.
+    listKeyArgs = new ListArgs(bucketArgs, null, 2, "bKey1");
+    result = storageHandler.listKeys(listKeyArgs);
+    Assert.assertEquals(2, result.getKeyList().size());
+    Assert.assertEquals("bKey11",
+        result.getKeyList().get(0).getKeyName());
+    Assert.assertEquals("bKey13",
+        result.getKeyList().get(1).getKeyName());
+
+    // Provide an invalid key name as start key.
+    listKeyArgs = new ListArgs(bucketArgs, null, 100, "invalid_start_key");
+    ListKeys keys = storageHandler.listKeys(listKeyArgs);
+    Assert.assertEquals(keys.getKeyList().size(), 0);
+
+    // Provide an invalid maxKeys argument.
+    try {
+      listKeyArgs = new ListArgs(bucketArgs, null, -1, null);
+      storageHandler.listBuckets(listKeyArgs);
+      Assert.fail("Expecting an error when the given"
+          + " maxKeys argument is invalid.");
+    } catch (Exception e) {
+      GenericTestUtils.assertExceptionContains(
+          String.format("the value must be in range (0, %d]",
+              OzoneConsts.MAX_LISTKEYS_SIZE), e);
+    }
+
+    // Provide an invalid bucket name.
+    bucketArgs = new BucketArgs("invalid_bucket", createVolumeArgs);
+    try {
+      listKeyArgs = new ListArgs(bucketArgs, null, numKeys, null);
+      storageHandler.listKeys(listKeyArgs);
+      Assert.fail(
+          "Expecting an error when the given bucket name is invalid.");
+    } catch (IOException e) {
+      GenericTestUtils.assertExceptionContains(
+          Status.BUCKET_NOT_FOUND.name(), e);
+    }
+  }
+
+  @Test
+  public void testListVolumes() throws IOException, OzoneException {
+
+    String user0 = "testListVolumes-user-0";
+    String user1 = "testListVolumes-user-1";
+    String adminUser = "testListVolumes-admin";
+    ListArgs listVolumeArgs;
+    ListVolumes volumes;
+
+    // Create 10 volumes by user0 and user1
+    String[] user0vols = new String[10];
+    String[] user1vols = new String[10];
+    for (int i =0; i<10; i++) {
+      VolumeArgs createVolumeArgs;
+      String user0VolName = "Vol-" + user0 + "-" + i;
+      user0vols[i] = user0VolName;
+      createVolumeArgs = new VolumeArgs(user0VolName, userArgs);
+      createVolumeArgs.setUserName(user0);
+      createVolumeArgs.setAdminName(adminUser);
+      createVolumeArgs.setQuota(new OzoneQuota(i, OzoneQuota.Units.GB));
+      storageHandler.createVolume(createVolumeArgs);
+
+      String user1VolName = "Vol-" + user1 + "-" + i;
+      user1vols[i] = user1VolName;
+      createVolumeArgs = new VolumeArgs(user1VolName, userArgs);
+      createVolumeArgs.setUserName(user1);
+      createVolumeArgs.setAdminName(adminUser);
+      createVolumeArgs.setQuota(new OzoneQuota(i, OzoneQuota.Units.GB));
+      storageHandler.createVolume(createVolumeArgs);
+    }
+
+    // Test list all volumes
+    UserArgs userArgs0 = new UserArgs(user0, OzoneUtils.getRequestID(),
+        null, null, null, null);
+    listVolumeArgs = new ListArgs(userArgs0, "Vol-testListVolumes", 100, null);
+    listVolumeArgs.setRootScan(true);
+    volumes = storageHandler.listVolumes(listVolumeArgs);
+    Assert.assertEquals(20, volumes.getVolumes().size());
+
+    // Test list all volumes belongs to an user
+    listVolumeArgs = new ListArgs(userArgs0, null, 100, null);
+    listVolumeArgs.setRootScan(false);
+    volumes = storageHandler.listVolumes(listVolumeArgs);
+    Assert.assertEquals(10, volumes.getVolumes().size());
+
+    // Test prefix
+    listVolumeArgs = new ListArgs(userArgs0,
+        "Vol-" + user0 + "-3", 100, null);
+    volumes = storageHandler.listVolumes(listVolumeArgs);
+    Assert.assertEquals(1, volumes.getVolumes().size());
+    Assert.assertEquals(user0vols[3],
+        volumes.getVolumes().get(0).getVolumeName());
+    Assert.assertEquals(user0,
+        volumes.getVolumes().get(0).getOwner().getName());
+
+    // Test list volumes by user
+    UserArgs userArgs1 = new UserArgs(user1, OzoneUtils.getRequestID(),
+        null, null, null, null);
+    listVolumeArgs = new ListArgs(userArgs1, null, 100, null);
+    listVolumeArgs.setRootScan(false);
+    volumes = storageHandler.listVolumes(listVolumeArgs);
+    Assert.assertEquals(10, volumes.getVolumes().size());
+    Assert.assertEquals(user1,
+        volumes.getVolumes().get(3).getOwner().getName());
+
+    // Make sure all available fields are returned
+    final String user0vol4 = "Vol-" + user0 + "-4";
+    final String user0vol5 = "Vol-" + user0 + "-5";
+    listVolumeArgs = new ListArgs(userArgs0, null, 1, user0vol4);
+    listVolumeArgs.setRootScan(false);
+    volumes = storageHandler.listVolumes(listVolumeArgs);
+    Assert.assertEquals(1, volumes.getVolumes().size());
+    Assert.assertEquals(user0,
+        volumes.getVolumes().get(0).getOwner().getName());
+    Assert.assertEquals(user0vol5,
+        volumes.getVolumes().get(0).getVolumeName());
+    Assert.assertEquals(5,
+        volumes.getVolumes().get(0).getQuota().getSize());
+    Assert.assertEquals(OzoneQuota.Units.GB,
+        volumes.getVolumes().get(0).getQuota().getUnit());
+
+    // User doesn't have volumes
+    UserArgs userArgsX = new UserArgs("unknwonUser", OzoneUtils.getRequestID(),
+        null, null, null, null);
+    listVolumeArgs = new ListArgs(userArgsX, null, 100, null);
+    listVolumeArgs.setRootScan(false);
+    volumes = storageHandler.listVolumes(listVolumeArgs);
+    Assert.assertEquals(0, volumes.getVolumes().size());
+  }
+
+  /**
+   * Test get key information.
+   *
+   * @throws IOException
+   * @throws OzoneException
+   */
+  @Test
+  public void testGetKeyInfo() throws IOException,
+      OzoneException, ParseException {
+    String userName = "user" + RandomStringUtils.randomNumeric(5);
+    String adminName = "admin" + RandomStringUtils.randomNumeric(5);
+    String volumeName = "volume" + RandomStringUtils.randomNumeric(5);
+    String bucketName = "bucket" + RandomStringUtils.randomNumeric(5);
+    long currentTime = Time.now();
+
+    VolumeArgs createVolumeArgs = new VolumeArgs(volumeName, userArgs);
+    createVolumeArgs.setUserName(userName);
+    createVolumeArgs.setAdminName(adminName);
+    storageHandler.createVolume(createVolumeArgs);
+
+    BucketArgs bucketArgs = new BucketArgs(bucketName, createVolumeArgs);
+    bucketArgs.setAddAcls(new LinkedList<>());
+    bucketArgs.setRemoveAcls(new LinkedList<>());
+    bucketArgs.setStorageType(StorageType.DISK);
+    storageHandler.createBucket(bucketArgs);
+
+    String keyName = "testKey";
+    KeyArgs keyArgs = new KeyArgs(keyName, bucketArgs);
+    keyArgs.setSize(4096);
+
+
+    OutputStream stream = storageHandler.newKeyWriter(keyArgs);
+    stream.close();
+
+    KeyInfo keyInfo = storageHandler.getKeyInfo(keyArgs);
+    // Compare the time in second unit since the date string reparsed to
+    // millisecond will lose precision.
+    Assert.assertTrue(
+        (HddsClientUtils.formatDateTime(keyInfo.getCreatedOn()) / 1000) >= (
+            currentTime / 1000));
+    Assert.assertTrue(
+        (HddsClientUtils.formatDateTime(keyInfo.getModifiedOn()) / 1000) >= (
+            currentTime / 1000));
+    Assert.assertEquals(keyName, keyInfo.getKeyName());
+    // with out data written, the size would be 0
+    Assert.assertEquals(0, keyInfo.getSize());
+  }
+
+  /**
+   * Test that the write can proceed without having to set the right size.
+   *
+   * @throws IOException
+   */
+  @Test
+  public void testWriteSize() throws IOException, OzoneException {
+    String userName = "user" + RandomStringUtils.randomNumeric(5);
+    String adminName = "admin" + RandomStringUtils.randomNumeric(5);
+    String volumeName = "volume" + RandomStringUtils.randomNumeric(5);
+    String bucketName = "bucket" + RandomStringUtils.randomNumeric(5);
+
+    VolumeArgs createVolumeArgs = new VolumeArgs(volumeName, userArgs);
+    createVolumeArgs.setUserName(userName);
+    createVolumeArgs.setAdminName(adminName);
+    storageHandler.createVolume(createVolumeArgs);
+
+    BucketArgs bucketArgs = new BucketArgs(bucketName, createVolumeArgs);
+    bucketArgs.setAddAcls(new LinkedList<>());
+    bucketArgs.setRemoveAcls(new LinkedList<>());
+    bucketArgs.setStorageType(StorageType.DISK);
+    storageHandler.createBucket(bucketArgs);
+
+    String dataString = RandomStringUtils.randomAscii(100);
+    // write a key without specifying size at all
+    String keyName = "testKey";
+    KeyArgs keyArgs = new KeyArgs(keyName, bucketArgs);
+    try (OutputStream stream = storageHandler.newKeyWriter(keyArgs)) {
+      stream.write(dataString.getBytes());
+    }
+    byte[] data = new byte[dataString.length()];
+    try (InputStream in = storageHandler.newKeyReader(keyArgs)) {
+      in.read(data);
+    }
+    Assert.assertEquals(dataString, DFSUtil.bytes2String(data));
+
+    // write a key with a size, but write above it.
+    String keyName1 = "testKey1";
+    KeyArgs keyArgs1 = new KeyArgs(keyName1, bucketArgs);
+    keyArgs1.setSize(30);
+    try (OutputStream stream = storageHandler.newKeyWriter(keyArgs1)) {
+      stream.write(dataString.getBytes());
+    }
+    byte[] data1 = new byte[dataString.length()];
+    try (InputStream in = storageHandler.newKeyReader(keyArgs1)) {
+      in.read(data1);
+    }
+    Assert.assertEquals(dataString, DFSUtil.bytes2String(data1));
+  }
+
+  /**
+   * Tests the RPC call for getting scmId and clusterId from SCM.
+   * @throws IOException
+   */
+  @Test
+  public void testGetScmInfo() throws IOException {
+    ScmInfo info = cluster.getOzoneManager().getScmInfo();
+    Assert.assertEquals(clusterId, info.getClusterId());
+    Assert.assertEquals(scmId, info.getScmId());
+  }
+
+
+  @Test
+  public void testExpiredOpenKey() throws Exception {
+    BackgroundService openKeyCleanUpService = ((KeyManagerImpl)cluster
+        .getOzoneManager().getKeyManager()).getOpenKeyCleanupService();
+
+    String userName = "user" + RandomStringUtils.randomNumeric(5);
+    String adminName = "admin" + RandomStringUtils.randomNumeric(5);
+    String volumeName = "volume" + RandomStringUtils.randomNumeric(5);
+    String bucketName = "bucket" + RandomStringUtils.randomNumeric(5);
+
+    VolumeArgs createVolumeArgs = new VolumeArgs(volumeName, userArgs);
+    createVolumeArgs.setUserName(userName);
+    createVolumeArgs.setAdminName(adminName);
+    storageHandler.createVolume(createVolumeArgs);
+
+    BucketArgs bucketArgs = new BucketArgs(bucketName, createVolumeArgs);
+    bucketArgs.setAddAcls(new LinkedList<>());
+    bucketArgs.setRemoveAcls(new LinkedList<>());
+    bucketArgs.setStorageType(StorageType.DISK);
+    storageHandler.createBucket(bucketArgs);
+
+    // open some keys.
+
+    KeyArgs keyArgs1 = new KeyArgs("testKey1", bucketArgs);
+    KeyArgs keyArgs2 = new KeyArgs("testKey2", bucketArgs);
+    KeyArgs keyArgs3 = new KeyArgs("testKey3", bucketArgs);
+    KeyArgs keyArgs4 = new KeyArgs("testKey4", bucketArgs);
+    List<BlockGroup> openKeys;
+    storageHandler.newKeyWriter(keyArgs1);
+    storageHandler.newKeyWriter(keyArgs2);
+    storageHandler.newKeyWriter(keyArgs3);
+    storageHandler.newKeyWriter(keyArgs4);
+
+    Set<String> expected = Stream.of(
+        "testKey1", "testKey2", "testKey3", "testKey4")
+        .collect(Collectors.toSet());
+
+    // Now all k1-k4 should be in open state, so ExpiredOpenKeys should not
+    // contain these values.
+    openKeys = cluster.getOzoneManager()
+        .getMetadataManager().getExpiredOpenKeys();
+
+    for (BlockGroup bg : openKeys) {
+      String[] subs = bg.getGroupID().split("/");
+      String keyName = subs[subs.length - 1];
+      Assert.assertFalse(expected.contains(keyName));
+    }
+
+    Thread.sleep(2000);
+    // Now all k1-k4 should be in ExpiredOpenKeys
+    openKeys = cluster.getOzoneManager()
+        .getMetadataManager().getExpiredOpenKeys();
+    for (BlockGroup bg : openKeys) {
+      String[] subs = bg.getGroupID().split("/");
+      String keyName = subs[subs.length - 1];
+      if (expected.contains(keyName)) {
+        expected.remove(keyName);
+      }
+    }
+    Assert.assertEquals(0, expected.size());
+
+    KeyArgs keyArgs5 = new KeyArgs("testKey5", bucketArgs);
+    storageHandler.newKeyWriter(keyArgs5);
+
+    openKeyCleanUpService.triggerBackgroundTaskForTesting();
+    Thread.sleep(2000);
+    // now all k1-k4 should have been removed by the clean-up task, only k5
+    // should be present in ExpiredOpenKeys.
+    openKeys =
+        cluster.getOzoneManager().getMetadataManager().getExpiredOpenKeys();
+    System.out.println(openKeys);
+    boolean key5found = false;
+    Set<String> removed = Stream.of(
+        "testKey1", "testKey2", "testKey3", "testKey4")
+        .collect(Collectors.toSet());
+    for (BlockGroup bg : openKeys) {
+      String[] subs = bg.getGroupID().split("/");
+      String keyName = subs[subs.length - 1];
+      Assert.assertFalse(removed.contains(keyName));
+      if (keyName.equals("testKey5")) {
+        key5found = true;
+      }
+    }
+    Assert.assertTrue(key5found);
+  }
+
+  /**
+   * Tests the OM Initialization.
+   * @throws IOException
+   */
+  @Test
+  public void testOmInitialization() throws IOException {
+    // Read the version file info from OM version file
+    OMStorage omStorage = cluster.getOzoneManager().getOmStorage();
+    SCMStorage scmStorage = new SCMStorage(conf);
+    // asserts whether cluster Id and SCM ID are properly set in SCM Version
+    // file.
+    Assert.assertEquals(clusterId, scmStorage.getClusterID());
+    Assert.assertEquals(scmId, scmStorage.getScmId());
+    // asserts whether OM Id is properly set in OM Version file.
+    Assert.assertEquals(omId, omStorage.getOmId());
+    // asserts whether the SCM info is correct in OM Version file.
+    Assert.assertEquals(clusterId, omStorage.getClusterID());
+    Assert.assertEquals(scmId, omStorage.getScmId());
+  }
+
+  /**
+   * Tests the OM Initialization Failure.
+   * @throws IOException
+   */
+  @Test
+  public void testOmInitializationFailure() throws Exception {
+    OzoneConfiguration config = new OzoneConfiguration();
+    final String path =
+        GenericTestUtils.getTempPath(UUID.randomUUID().toString());
+    Path metaDirPath = Paths.get(path, "om-meta");
+    config.set(OzoneConfigKeys.OZONE_METADATA_DIRS, metaDirPath.toString());
+    config.setBoolean(OzoneConfigKeys.OZONE_ENABLED, true);
+    config.set(ScmConfigKeys.OZONE_SCM_CLIENT_ADDRESS_KEY, "127.0.0.1:0");
+    config.set(ScmConfigKeys.OZONE_SCM_BLOCK_CLIENT_ADDRESS_KEY,
+        conf.get(ScmConfigKeys.OZONE_SCM_BLOCK_CLIENT_ADDRESS_KEY));
+    exception.expect(OMException.class);
+    exception.expectMessage("OM not initialized.");
+    OzoneManager.createOm(null, config);
+    OMStorage omStore = new OMStorage(config);
+    omStore.setClusterId("testClusterId");
+    omStore.setScmId("testScmId");
+    // writes the version file properties
+    omStore.initialize();
+    exception.expect(OMException.class);
+    exception.expectMessage("SCM version info mismatch.");
+    OzoneManager.createOm(null, conf);
+  }
+
+  @Test
+  public void testGetServiceList() throws IOException {
+    long numGetServiceListCalls = omMetrics.getNumGetServiceLists();
+    List<ServiceInfo> services = cluster.getOzoneManager().getServiceList();
+
+    Assert.assertEquals(numGetServiceListCalls + 1,
+        omMetrics.getNumGetServiceLists());
+
+    ServiceInfo omInfo = services.stream().filter(
+        a -> a.getNodeType().equals(HddsProtos.NodeType.OM))
+        .collect(Collectors.toList()).get(0);
+    InetSocketAddress omAddress = new InetSocketAddress(omInfo.getHostname(),
+        omInfo.getPort(ServicePort.Type.RPC));
+    Assert.assertEquals(NetUtils.createSocketAddr(
+        conf.get(OZONE_OM_ADDRESS_KEY)), omAddress);
+
+    ServiceInfo scmInfo = services.stream().filter(
+        a -> a.getNodeType().equals(HddsProtos.NodeType.SCM))
+        .collect(Collectors.toList()).get(0);
+    InetSocketAddress scmAddress = new InetSocketAddress(scmInfo.getHostname(),
+        scmInfo.getPort(ServicePort.Type.RPC));
+    Assert.assertEquals(NetUtils.createSocketAddr(
+        conf.get(OZONE_SCM_CLIENT_ADDRESS_KEY)), scmAddress);
+  }
+}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/061b1685/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOzoneManagerRestInterface.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOzoneManagerRestInterface.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOzoneManagerRestInterface.java
new file mode 100644
index 0000000..8168d27
--- /dev/null
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOzoneManagerRestInterface.java
@@ -0,0 +1,135 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.ozone.om;
+
+import com.fasterxml.jackson.databind.ObjectMapper;
+import com.fasterxml.jackson.core.type.TypeReference;
+import org.apache.hadoop.hdds.conf.OzoneConfiguration;
+import org.apache.hadoop.hdds.protocol.DatanodeDetails;
+import org.apache.hadoop.net.NetUtils;
+import org.apache.hadoop.ozone.MiniOzoneCluster;
+import org.apache.hadoop.ozone.om.helpers.ServiceInfo;
+import org.apache.hadoop.ozone.protocol.proto
+    .OzoneManagerProtocolProtos.ServicePort;
+import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
+import org.apache.http.HttpResponse;
+import org.apache.http.client.HttpClient;
+import org.apache.http.client.methods.HttpGet;
+import org.apache.http.impl.client.HttpClients;
+import org.apache.http.util.EntityUtils;
+import org.junit.AfterClass;
+import org.junit.Assert;
+import org.junit.BeforeClass;
+import org.junit.Test;
+
+import java.net.InetSocketAddress;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+
+import static org.apache.hadoop.hdds.HddsUtils.getScmAddressForClients;
+import static org.apache.hadoop.ozone.OmUtils.getOmAddressForClients;
+
+/**
+ * This class is to test the REST interface exposed by OzoneManager.
+ */
+public class TestOzoneManagerRestInterface {
+
+  private static MiniOzoneCluster cluster;
+  private static OzoneConfiguration conf;
+
+  @BeforeClass
+  public static void setUp() throws Exception {
+    conf = new OzoneConfiguration();
+    cluster = MiniOzoneCluster.newBuilder(conf).build();
+    cluster.waitForClusterToBeReady();
+  }
+
+  @AfterClass
+  public static void tearDown() throws Exception {
+    if (cluster != null) {
+      cluster.shutdown();
+    }
+  }
+
+  @Test
+  public void testGetServiceList() throws Exception {
+    OzoneManagerHttpServer server =
+        cluster.getOzoneManager().getHttpServer();
+    HttpClient client = HttpClients.createDefault();
+    String connectionUri = "http://" +
+        NetUtils.getHostPortString(server.getHttpAddress());
+    HttpGet httpGet = new HttpGet(connectionUri + "/serviceList");
+    HttpResponse response = client.execute(httpGet);
+    String serviceListJson = EntityUtils.toString(response.getEntity());
+
+    ObjectMapper objectMapper = new ObjectMapper();
+    TypeReference<List<ServiceInfo>> serviceInfoReference =
+        new TypeReference<List<ServiceInfo>>() {};
+    List<ServiceInfo> serviceInfos = objectMapper.readValue(
+        serviceListJson, serviceInfoReference);
+    Map<HddsProtos.NodeType, ServiceInfo> serviceMap = new HashMap<>();
+    for (ServiceInfo serviceInfo : serviceInfos) {
+      serviceMap.put(serviceInfo.getNodeType(), serviceInfo);
+    }
+
+    InetSocketAddress omAddress =
+        getOmAddressForClients(conf);
+    ServiceInfo omInfo = serviceMap.get(HddsProtos.NodeType.OM);
+
+    Assert.assertEquals(omAddress.getHostName(), omInfo.getHostname());
+    Assert.assertEquals(omAddress.getPort(),
+        omInfo.getPort(ServicePort.Type.RPC));
+    Assert.assertEquals(server.getHttpAddress().getPort(),
+        omInfo.getPort(ServicePort.Type.HTTP));
+
+    InetSocketAddress scmAddress =
+        getScmAddressForClients(conf);
+    ServiceInfo scmInfo = serviceMap.get(HddsProtos.NodeType.SCM);
+
+    Assert.assertEquals(scmAddress.getHostName(), scmInfo.getHostname());
+    Assert.assertEquals(scmAddress.getPort(),
+        scmInfo.getPort(ServicePort.Type.RPC));
+
+    ServiceInfo datanodeInfo = serviceMap.get(HddsProtos.NodeType.DATANODE);
+    DatanodeDetails datanodeDetails = cluster.getHddsDatanodes().get(0)
+        .getDatanodeDetails();
+    Assert.assertEquals(datanodeDetails.getHostName(),
+        datanodeInfo.getHostname());
+
+    Map<ServicePort.Type, Integer> ports = datanodeInfo.getPorts();
+    for(ServicePort.Type type : ports.keySet()) {
+      switch (type) {
+      case HTTP:
+      case HTTPS:
+        Assert.assertEquals(
+            datanodeDetails.getPort(DatanodeDetails.Port.Name.REST).getValue(),
+            ports.get(type));
+        break;
+      default:
+        // OM only sends Datanode's info port details
+        // i.e. HTTP or HTTPS
+        // Other ports are not expected as of now.
+        Assert.fail();
+        break;
+      }
+    }
+  }
+
+}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/061b1685/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/ozShell/TestOzoneShell.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/ozShell/TestOzoneShell.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/ozShell/TestOzoneShell.java
index ed8f0d5..5082870 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/ozShell/TestOzoneShell.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/ozShell/TestOzoneShell.java
@@ -59,7 +59,7 @@ import org.apache.hadoop.ozone.client.protocol.ClientProtocol;
 import org.apache.hadoop.ozone.client.rest.OzoneException;
 import org.apache.hadoop.ozone.client.rest.RestClient;
 import org.apache.hadoop.ozone.client.rpc.RpcClient;
-import org.apache.hadoop.ozone.ksm.helpers.ServiceInfo;
+import org.apache.hadoop.ozone.om.helpers.ServiceInfo;
 import org.apache.hadoop.ozone.web.ozShell.Shell;
 import org.apache.hadoop.ozone.web.request.OzoneQuota;
 import org.apache.hadoop.ozone.web.response.BucketInfo;
@@ -167,23 +167,23 @@ public class TestOzoneShell {
     System.setOut(new PrintStream(out));
     System.setErr(new PrintStream(err));
     if(clientProtocol.equals(RestClient.class)) {
-      String hostName = cluster.getKeySpaceManager().getHttpServer()
+      String hostName = cluster.getOzoneManager().getHttpServer()
           .getHttpAddress().getHostName();
       int port = cluster
-          .getKeySpaceManager().getHttpServer().getHttpAddress().getPort();
+          .getOzoneManager().getHttpServer().getHttpAddress().getPort();
       url = String.format("http://" + hostName + ":" + port);
     } else {
       List<ServiceInfo> services = null;
       try {
-        services = cluster.getKeySpaceManager().getServiceList();
+        services = cluster.getOzoneManager().getServiceList();
       } catch (IOException e) {
-        LOG.error("Could not get service list from KSM");
+        LOG.error("Could not get service list from OM");
       }
       String hostName = services.stream().filter(
-          a -> a.getNodeType().equals(HddsProtos.NodeType.KSM))
+          a -> a.getNodeType().equals(HddsProtos.NodeType.OM))
           .collect(Collectors.toList()).get(0).getHostname();
 
-      String port = cluster.getKeySpaceManager().getRpcPort();
+      String port = cluster.getOzoneManager().getRpcPort();
       url = String.format("o3://" + hostName + ":" + port);
     }
   }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/061b1685/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestContainerSQLCli.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestContainerSQLCli.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestContainerSQLCli.java
index b4ed2b1..1a1f37c 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestContainerSQLCli.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestContainerSQLCli.java
@@ -29,7 +29,6 @@ import org.apache.hadoop.hdds.scm.container.placement.algorithms.ContainerPlacem
 import org.apache.hadoop.hdds.scm.container.placement.algorithms.SCMContainerPlacementCapacity;
 import org.apache.hadoop.hdds.scm.ScmConfigKeys;
 import org.apache.hadoop.hdds.scm.container.common.helpers.AllocatedBlock;
-import org.apache.hadoop.hdds.scm.container.common.helpers.Pipeline;
 import org.apache.hadoop.ozone.scm.cli.SQLCLI;
 import org.apache.hadoop.test.GenericTestUtils;
 import org.junit.After;
@@ -113,7 +112,7 @@ public class TestContainerSQLCli {
     cluster.waitForClusterToBeReady();
     datanodeIpAddress = cluster.getHddsDatanodes().get(0)
         .getDatanodeDetails().getIpAddress();
-    cluster.getKeySpaceManager().stop();
+    cluster.getOzoneManager().stop();
     cluster.getStorageContainerManager().stop();
 
     nodeManager = cluster.getStorageContainerManager().getScmNodeManager();

http://git-wip-us.apache.org/repos/asf/hadoop/blob/061b1685/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/web/TestDistributedOzoneVolumes.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/web/TestDistributedOzoneVolumes.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/web/TestDistributedOzoneVolumes.java
index 0e61391..e592d56 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/web/TestDistributedOzoneVolumes.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/web/TestDistributedOzoneVolumes.java
@@ -90,7 +90,7 @@ public class TestDistributedOzoneVolumes extends TestOzoneHelper {
   @Test
   public void testCreateVolumes() throws IOException {
     super.testCreateVolumes(port);
-    Assert.assertEquals(0, cluster.getKeySpaceManager()
+    Assert.assertEquals(0, cluster.getOzoneManager()
         .getMetrics().getNumVolumeCreateFails());
   }
 
@@ -102,7 +102,7 @@ public class TestDistributedOzoneVolumes extends TestOzoneHelper {
   @Test
   public void testCreateVolumesWithQuota() throws IOException {
     super.testCreateVolumesWithQuota(port);
-    Assert.assertEquals(0, cluster.getKeySpaceManager()
+    Assert.assertEquals(0, cluster.getOzoneManager()
         .getMetrics().getNumVolumeCreateFails());
   }
 
@@ -114,7 +114,7 @@ public class TestDistributedOzoneVolumes extends TestOzoneHelper {
   @Test
   public void testCreateVolumesWithInvalidQuota() throws IOException {
     super.testCreateVolumesWithInvalidQuota(port);
-    Assert.assertEquals(0, cluster.getKeySpaceManager()
+    Assert.assertEquals(0, cluster.getOzoneManager()
         .getMetrics().getNumVolumeCreateFails());
   }
 
@@ -128,7 +128,7 @@ public class TestDistributedOzoneVolumes extends TestOzoneHelper {
   @Test
   public void testCreateVolumesWithInvalidUser() throws IOException {
     super.testCreateVolumesWithInvalidUser(port);
-    Assert.assertEquals(0, cluster.getKeySpaceManager()
+    Assert.assertEquals(0, cluster.getOzoneManager()
         .getMetrics().getNumVolumeCreateFails());
   }
 
@@ -143,7 +143,7 @@ public class TestDistributedOzoneVolumes extends TestOzoneHelper {
   @Test
   public void testCreateVolumesWithOutAdminRights() throws IOException {
     super.testCreateVolumesWithOutAdminRights(port);
-    Assert.assertEquals(0, cluster.getKeySpaceManager()
+    Assert.assertEquals(0, cluster.getOzoneManager()
         .getMetrics().getNumVolumeCreateFails());
   }
 
@@ -155,7 +155,7 @@ public class TestDistributedOzoneVolumes extends TestOzoneHelper {
   @Test
   public void testCreateVolumesInLoop() throws IOException {
     super.testCreateVolumesInLoop(port);
-    Assert.assertEquals(0, cluster.getKeySpaceManager()
+    Assert.assertEquals(0, cluster.getOzoneManager()
         .getMetrics().getNumVolumeCreateFails());
   }
   /**

http://git-wip-us.apache.org/repos/asf/hadoop/blob/061b1685/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/web/client/TestKeys.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/web/client/TestKeys.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/web/client/TestKeys.java
index b86c577..a95bd0e 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/web/client/TestKeys.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/web/client/TestKeys.java
@@ -48,13 +48,13 @@ import org.apache.hadoop.ozone.container.common.helpers.ContainerData;
 import org.apache.hadoop.ozone.container.common.helpers.ContainerUtils;
 import org.apache.hadoop.ozone.container.common.helpers.KeyData;
 import org.apache.hadoop.ozone.container.ozoneimpl.OzoneContainer;
-import org.apache.hadoop.ozone.ksm.KeySpaceManager;
-import org.apache.hadoop.ozone.ksm.helpers.KsmKeyArgs;
-import org.apache.hadoop.ozone.ksm.helpers.KsmKeyInfo;
-import org.apache.hadoop.ozone.ksm.helpers.KsmVolumeArgs;
-import org.apache.hadoop.ozone.ksm.helpers.KsmBucketInfo;
-import org.apache.hadoop.ozone.ksm.helpers.KsmKeyLocationInfo;
-import org.apache.hadoop.ozone.protocol.proto.KeySpaceManagerProtocolProtos
+import org.apache.hadoop.ozone.om.OzoneManager;
+import org.apache.hadoop.ozone.om.helpers.OmKeyArgs;
+import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;
+import org.apache.hadoop.ozone.om.helpers.OmVolumeArgs;
+import org.apache.hadoop.ozone.om.helpers.OmBucketInfo;
+import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfo;
+import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos
     .Status;
 import org.apache.hadoop.ozone.client.rest.OzoneException;
 import org.apache.hadoop.ozone.web.utils.OzoneUtils;
@@ -644,15 +644,15 @@ public class TestKeys {
     }
   }
 
-  private int countKsmKeys(KeySpaceManager ksm) throws IOException {
+  private int countOmKeys(OzoneManager om) throws IOException {
     int totalCount = 0;
-    List<KsmVolumeArgs> volumes =
-        ksm.listAllVolumes(null, null, Integer.MAX_VALUE);
-    for (KsmVolumeArgs volume : volumes) {
-      List<KsmBucketInfo> buckets =
-          ksm.listBuckets(volume.getVolume(), null, null, Integer.MAX_VALUE);
-      for (KsmBucketInfo bucket : buckets) {
-        List<KsmKeyInfo> keys = ksm.listKeys(bucket.getVolumeName(),
+    List<OmVolumeArgs> volumes =
+        om.listAllVolumes(null, null, Integer.MAX_VALUE);
+    for (OmVolumeArgs volume : volumes) {
+      List<OmBucketInfo> buckets =
+          om.listBuckets(volume.getVolume(), null, null, Integer.MAX_VALUE);
+      for (OmBucketInfo bucket : buckets) {
+        List<OmKeyInfo> keys = om.listKeys(bucket.getVolumeName(),
             bucket.getBucketName(), null, null, Integer.MAX_VALUE);
         totalCount += keys.size();
       }
@@ -662,10 +662,10 @@ public class TestKeys {
 
   @Test
   public void testDeleteKey() throws Exception {
-    KeySpaceManager ksm = ozoneCluster.getKeySpaceManager();
+    OzoneManager ozoneManager = ozoneCluster.getOzoneManager();
     // To avoid interference from other test cases,
     // we collect number of existing keys at the beginning
-    int numOfExistedKeys = countKsmKeys(ksm);
+    int numOfExistedKeys = countOmKeys(ozoneManager);
 
     // Keep tracking bucket keys info while creating them
     PutHelper helper = new PutHelper(client, path);
@@ -689,15 +689,15 @@ public class TestKeys {
     // count the total number of created keys.
     Set<Pair<String, String>> buckets = bucketKeys.getAllBuckets();
     for (Pair<String, String> buk : buckets) {
-      List<KsmKeyInfo> createdKeys =
-          ksm.listKeys(buk.getKey(), buk.getValue(), null, null, 20);
+      List<OmKeyInfo> createdKeys =
+          ozoneManager.listKeys(buk.getKey(), buk.getValue(), null, null, 20);
 
       // Memorize chunks that has been created,
       // so we can verify actual deletions at DN side later.
-      for (KsmKeyInfo keyInfo : createdKeys) {
-        List<KsmKeyLocationInfo> locations =
+      for (OmKeyInfo keyInfo : createdKeys) {
+        List<OmKeyLocationInfo> locations =
             keyInfo.getLatestVersionLocations().getLocationList();
-        for (KsmKeyLocationInfo location : locations) {
+        for (OmKeyLocationInfo location : locations) {
           KeyData keyData = new KeyData(location.getBlockID());
           KeyData blockInfo = cm.getContainerManager()
               .getKeyManager().getKey(keyData);
@@ -721,9 +721,9 @@ public class TestKeys {
     // Ensure all keys are created.
     Assert.assertEquals(20, numOfCreatedKeys);
 
-    // Ensure all keys are visible from KSM.
+    // Ensure all keys are visible from OM.
     // Total number should be numOfCreated + numOfExisted
-    Assert.assertEquals(20 + numOfExistedKeys, countKsmKeys(ksm));
+    Assert.assertEquals(20 + numOfExistedKeys, countOmKeys(ozoneManager));
 
     // Delete 10 keys
     int delCount = 20;
@@ -732,21 +732,21 @@ public class TestKeys {
       List<String> bks = bucketKeys.getBucketKeys(bucketInfo.getValue());
       for (String keyName : bks) {
         if (delCount > 0) {
-          KsmKeyArgs arg =
-              new KsmKeyArgs.Builder().setVolumeName(bucketInfo.getKey())
+          OmKeyArgs arg =
+              new OmKeyArgs.Builder().setVolumeName(bucketInfo.getKey())
                   .setBucketName(bucketInfo.getValue()).setKeyName(keyName)
                   .build();
-          ksm.deleteKey(arg);
+          ozoneManager.deleteKey(arg);
           delCount--;
         }
       }
     }
 
-    // It should be pretty quick that keys are removed from KSM namespace,
+    // It should be pretty quick that keys are removed from OM namespace,
     // because actual deletion happens in async mode.
     GenericTestUtils.waitFor(() -> {
       try {
-        int num = countKsmKeys(ksm);
+        int num = countOmKeys(ozoneManager);
         return num == (numOfExistedKeys);
       } catch (IOException e) {
         return false;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/061b1685/hadoop-ozone/integration-test/src/test/resources/webapps/ksm/.gitkeep
----------------------------------------------------------------------
diff --git a/hadoop-ozone/integration-test/src/test/resources/webapps/ksm/.gitkeep b/hadoop-ozone/integration-test/src/test/resources/webapps/ksm/.gitkeep
deleted file mode 100644
index 09697dc..0000000
--- a/hadoop-ozone/integration-test/src/test/resources/webapps/ksm/.gitkeep
+++ /dev/null
@@ -1,15 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one or more
-# contributor license agreements.  See the NOTICE file distributed with
-# this work for additional information regarding copyright ownership.
-# The ASF licenses this file to You under the Apache License, Version 2.0
-# (the "License"); you may not use this file except in compliance with
-# the License.  You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-

http://git-wip-us.apache.org/repos/asf/hadoop/blob/061b1685/hadoop-ozone/integration-test/src/test/resources/webapps/ozoneManager/.gitkeep
----------------------------------------------------------------------
diff --git a/hadoop-ozone/integration-test/src/test/resources/webapps/ozoneManager/.gitkeep b/hadoop-ozone/integration-test/src/test/resources/webapps/ozoneManager/.gitkeep
new file mode 100644
index 0000000..09697dc
--- /dev/null
+++ b/hadoop-ozone/integration-test/src/test/resources/webapps/ozoneManager/.gitkeep
@@ -0,0 +1,15 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements.  See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+

http://git-wip-us.apache.org/repos/asf/hadoop/blob/061b1685/hadoop-ozone/objectstore-service/src/main/java/org/apache/hadoop/hdfs/server/datanode/ObjectStoreHandler.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/objectstore-service/src/main/java/org/apache/hadoop/hdfs/server/datanode/ObjectStoreHandler.java b/hadoop-ozone/objectstore-service/src/main/java/org/apache/hadoop/hdfs/server/datanode/ObjectStoreHandler.java
index 3128d31..2200cd8 100644
--- a/hadoop-ozone/objectstore-service/src/main/java/org/apache/hadoop/hdfs/server/datanode/ObjectStoreHandler.java
+++ b/hadoop-ozone/objectstore-service/src/main/java/org/apache/hadoop/hdfs/server/datanode/ObjectStoreHandler.java
@@ -19,7 +19,7 @@ package org.apache.hadoop.hdfs.server.datanode;
 
 import static org.apache.hadoop.hdds.HddsUtils.getScmAddressForBlockClients;
 import static org.apache.hadoop.hdds.HddsUtils.getScmAddressForClients;
-import static org.apache.hadoop.ozone.KsmUtils.getKsmAddress;
+import static org.apache.hadoop.ozone.OmUtils.getOmAddress;
 import static org.apache.hadoop.ozone.OzoneConfigKeys.*;
 import static com.sun.jersey.api.core.ResourceConfig.PROPERTY_CONTAINER_REQUEST_FILTERS;
 import static com.sun.jersey.api.core.ResourceConfig.FEATURE_TRACE;
@@ -34,9 +34,8 @@ import com.sun.jersey.api.container.ContainerFactory;
 import com.sun.jersey.api.core.ApplicationAdapter;
 
 import org.apache.hadoop.io.IOUtils;
-import org.apache.hadoop.ozone.ksm.protocolPB
-    .KeySpaceManagerProtocolClientSideTranslatorPB;
-import org.apache.hadoop.ozone.ksm.protocolPB.KeySpaceManagerProtocolPB;
+import org.apache.hadoop.ozone.om.protocolPB.OzoneManagerProtocolClientSideTranslatorPB;
+import org.apache.hadoop.ozone.om.protocolPB.OzoneManagerProtocolPB;
 import org.apache.hadoop.ozone.OzoneConsts;
 import org.apache.hadoop.ozone.web.ObjectStoreApplication;
 import org.apache.hadoop.ozone.web.handlers.ServiceFilter;
@@ -72,8 +71,8 @@ public final class ObjectStoreHandler implements Closeable {
       LoggerFactory.getLogger(ObjectStoreHandler.class);
 
   private final ObjectStoreJerseyContainer objectStoreJerseyContainer;
-  private final KeySpaceManagerProtocolClientSideTranslatorPB
-      keySpaceManagerClient;
+  private final OzoneManagerProtocolClientSideTranslatorPB
+      ozoneManagerClient;
   private final StorageContainerLocationProtocolClientSideTranslatorPB
       storageContainerLocationClient;
   private final ScmBlockLocationProtocolClientSideTranslatorPB
@@ -119,28 +118,28 @@ public final class ObjectStoreHandler implements Closeable {
                   NetUtils.getDefaultSocketFactory(conf),
                   Client.getRpcTimeout(conf)));
 
-      RPC.setProtocolEngine(conf, KeySpaceManagerProtocolPB.class,
+      RPC.setProtocolEngine(conf, OzoneManagerProtocolPB.class,
           ProtobufRpcEngine.class);
-      long ksmVersion =
-          RPC.getProtocolVersion(KeySpaceManagerProtocolPB.class);
-      InetSocketAddress ksmAddress = getKsmAddress(conf);
-      this.keySpaceManagerClient =
-          new KeySpaceManagerProtocolClientSideTranslatorPB(
-              RPC.getProxy(KeySpaceManagerProtocolPB.class, ksmVersion,
-              ksmAddress, UserGroupInformation.getCurrentUser(), conf,
+      long omVersion =
+          RPC.getProtocolVersion(OzoneManagerProtocolPB.class);
+      InetSocketAddress omAddress = getOmAddress(conf);
+      this.ozoneManagerClient =
+          new OzoneManagerProtocolClientSideTranslatorPB(
+              RPC.getProxy(OzoneManagerProtocolPB.class, omVersion,
+                  omAddress, UserGroupInformation.getCurrentUser(), conf,
               NetUtils.getDefaultSocketFactory(conf),
               Client.getRpcTimeout(conf)));
 
       storageHandler = new DistributedStorageHandler(
           new OzoneConfiguration(conf),
           this.storageContainerLocationClient,
-          this.keySpaceManagerClient);
+          this.ozoneManagerClient);
     } else {
       if (OzoneConsts.OZONE_HANDLER_LOCAL.equalsIgnoreCase(shType)) {
         storageHandler = new LocalStorageHandler(conf);
         this.storageContainerLocationClient = null;
         this.scmBlockLocationClient = null;
-        this.keySpaceManagerClient = null;
+        this.ozoneManagerClient = null;
       } else {
         throw new IllegalArgumentException(
             String.format("Unrecognized value for %s: %s,"
@@ -186,6 +185,6 @@ public final class ObjectStoreHandler implements Closeable {
     storageHandler.close();
     IOUtils.cleanupWithLogger(LOG, storageContainerLocationClient);
     IOUtils.cleanupWithLogger(LOG, scmBlockLocationClient);
-    IOUtils.cleanupWithLogger(LOG, keySpaceManagerClient);
+    IOUtils.cleanupWithLogger(LOG, ozoneManagerClient);
   }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/061b1685/hadoop-ozone/objectstore-service/src/main/java/org/apache/hadoop/ozone/web/handlers/KeyProcessTemplate.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/objectstore-service/src/main/java/org/apache/hadoop/ozone/web/handlers/KeyProcessTemplate.java b/hadoop-ozone/objectstore-service/src/main/java/org/apache/hadoop/ozone/web/handlers/KeyProcessTemplate.java
index ef0293e..ad48787 100644
--- a/hadoop-ozone/objectstore-service/src/main/java/org/apache/hadoop/ozone/web/handlers/KeyProcessTemplate.java
+++ b/hadoop-ozone/objectstore-service/src/main/java/org/apache/hadoop/ozone/web/handlers/KeyProcessTemplate.java
@@ -21,7 +21,7 @@ package org.apache.hadoop.ozone.web.handlers;
 import org.apache.commons.codec.binary.Base64;
 
 import org.apache.hadoop.ozone.OzoneRestUtils;
-import org.apache.hadoop.ozone.protocol.proto.KeySpaceManagerProtocolProtos;
+import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos;
 import org.apache.hadoop.ozone.web.exceptions.ErrorTable;
 import org.apache.hadoop.ozone.client.rest.OzoneException;
 import org.apache.hadoop.ozone.client.rest.headers.Header;
@@ -102,7 +102,7 @@ public abstract class KeyProcessTemplate {
       LOG.error("IOException:", fsExp);
       // Map KEY_NOT_FOUND to INVALID_KEY
       if (fsExp.getMessage().endsWith(
-          KeySpaceManagerProtocolProtos.Status.KEY_NOT_FOUND.name())) {
+          OzoneManagerProtocolProtos.Status.KEY_NOT_FOUND.name())) {
         throw ErrorTable.newError(ErrorTable.INVALID_KEY, userArgs, fsExp);
       }
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/061b1685/hadoop-ozone/objectstore-service/src/main/java/org/apache/hadoop/ozone/web/handlers/VolumeProcessTemplate.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/objectstore-service/src/main/java/org/apache/hadoop/ozone/web/handlers/VolumeProcessTemplate.java b/hadoop-ozone/objectstore-service/src/main/java/org/apache/hadoop/ozone/web/handlers/VolumeProcessTemplate.java
index 1d98400..fb95bb9 100644
--- a/hadoop-ozone/objectstore-service/src/main/java/org/apache/hadoop/ozone/web/handlers/VolumeProcessTemplate.java
+++ b/hadoop-ozone/objectstore-service/src/main/java/org/apache/hadoop/ozone/web/handlers/VolumeProcessTemplate.java
@@ -30,7 +30,7 @@ import java.nio.file.NoSuchFileException;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.ozone.OzoneRestUtils;
 import org.apache.hadoop.ozone.client.rest.OzoneException;
-import org.apache.hadoop.ozone.protocol.proto.KeySpaceManagerProtocolProtos;
+import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos;
 import org.apache.hadoop.ozone.web.exceptions.ErrorTable;
 import org.apache.hadoop.ozone.web.interfaces.StorageHandler;
 import org.apache.hadoop.ozone.web.interfaces.UserAuth;
@@ -135,7 +135,7 @@ public abstract class VolumeProcessTemplate {
     OzoneException exp = null;
 
     if ((fsExp != null && fsExp.getMessage().endsWith(
-        KeySpaceManagerProtocolProtos.Status.VOLUME_ALREADY_EXISTS.name()))
+        OzoneManagerProtocolProtos.Status.VOLUME_ALREADY_EXISTS.name()))
         || fsExp instanceof FileAlreadyExistsException) {
       exp = ErrorTable
           .newError(ErrorTable.VOLUME_ALREADY_EXISTS, reqID, volume, hostName);


---------------------------------------------------------------------
To unsubscribe, e-mail: common-commits-unsubscribe@hadoop.apache.org
For additional commands, e-mail: common-commits-help@hadoop.apache.org


[15/50] [abbrv] hadoop git commit: HDDS-212. Introduce NodeStateManager to manage the state of Datanodes in SCM. Contributed by Nanda kumar.

Posted by vi...@apache.org.
HDDS-212. Introduce NodeStateManager to manage the state of Datanodes in SCM. Contributed by Nanda kumar.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/71df8c27
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/71df8c27
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/71df8c27

Branch: refs/heads/HDFS-12090
Commit: 71df8c27c9a0e326232d3baf16414a63b5ea5a4b
Parents: 3b63715
Author: Nanda kumar <na...@apache.org>
Authored: Thu Jul 5 02:11:10 2018 +0530
Committer: Nanda kumar <na...@apache.org>
Committed: Thu Jul 5 02:11:10 2018 +0530

----------------------------------------------------------------------
 .../scm/client/ContainerOperationClient.java    |   8 +-
 .../hadoop/hdds/protocol/DatanodeDetails.java   |  13 +-
 .../apache/hadoop/hdds/scm/ScmConfigKeys.java   |   4 -
 .../hadoop/hdds/scm/client/ScmClient.java       |   5 +-
 .../StorageContainerLocationProtocol.java       |   5 +-
 ...rLocationProtocolClientSideTranslatorPB.java |   8 +-
 ...rLocationProtocolServerSideTranslatorPB.java |   8 +-
 .../StorageContainerLocationProtocol.proto      |  19 +-
 hadoop-hdds/common/src/main/proto/hdds.proto    |  13 +-
 .../common/src/main/resources/ozone-default.xml |  11 -
 .../apache/hadoop/hdds/scm/HddsServerUtil.java  |  11 -
 .../protocol/StorageContainerNodeProtocol.java  |   4 +-
 .../hadoop/hdds/scm/node/DatanodeInfo.java      | 109 ++++
 .../hdds/scm/node/HeartbeatQueueItem.java       |  98 ----
 .../hadoop/hdds/scm/node/NodeManager.java       |  16 +-
 .../hadoop/hdds/scm/node/NodeStateManager.java  | 575 +++++++++++++++++++
 .../hadoop/hdds/scm/node/SCMNodeManager.java    | 506 ++--------------
 .../node/states/NodeAlreadyExistsException.java |  45 ++
 .../hdds/scm/node/states/NodeException.java     |  44 ++
 .../scm/node/states/NodeNotFoundException.java  |  49 ++
 .../hdds/scm/node/states/NodeStateMap.java      | 281 +++++++++
 .../scm/server/SCMClientProtocolServer.java     |  60 +-
 .../server/SCMDatanodeHeartbeatDispatcher.java  |   2 +-
 .../scm/server/SCMDatanodeProtocolServer.java   |   2 +-
 .../hdds/scm/container/MockNodeManager.java     |  58 +-
 .../hdds/scm/node/TestContainerPlacement.java   |  10 +-
 .../hadoop/hdds/scm/node/TestNodeManager.java   | 176 ++----
 .../testutils/ReplicationNodeManagerMock.java   |  37 +-
 .../ozone/TestStorageContainerManager.java      |   4 +-
 .../hadoop/ozone/scm/node/TestQueryNode.java    |  19 +-
 .../hadoop/ozone/ksm/KeySpaceManager.java       |   6 +-
 31 files changed, 1288 insertions(+), 918 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/71df8c27/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/client/ContainerOperationClient.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/client/ContainerOperationClient.java b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/client/ContainerOperationClient.java
index b04f8c4..e7bdaf0 100644
--- a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/client/ContainerOperationClient.java
+++ b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/client/ContainerOperationClient.java
@@ -37,7 +37,6 @@ import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
 import java.io.IOException;
-import java.util.EnumSet;
 import java.util.List;
 import java.util.UUID;
 
@@ -234,14 +233,14 @@ public class ContainerOperationClient implements ScmClient {
   /**
    * Returns a set of Nodes that meet a query criteria.
    *
-   * @param nodeStatuses - A set of criteria that we want the node to have.
+   * @param nodeStatuses - Criteria that we want the node to have.
    * @param queryScope - Query scope - Cluster or pool.
    * @param poolName - if it is pool, a pool name is required.
    * @return A set of nodes that meet the requested criteria.
    * @throws IOException
    */
   @Override
-  public HddsProtos.NodePool queryNode(EnumSet<HddsProtos.NodeState>
+  public List<HddsProtos.Node> queryNode(HddsProtos.NodeState
       nodeStatuses, HddsProtos.QueryScope queryScope, String poolName)
       throws IOException {
     return storageContainerLocationClient.queryNode(nodeStatuses, queryScope,
@@ -458,7 +457,8 @@ public class ContainerOperationClient implements ScmClient {
    */
   @Override
   public long getContainerSize(long containerID) throws IOException {
-    // TODO : Fix this, it currently returns the capacity but not the current usage.
+    // TODO : Fix this, it currently returns the capacity
+    // but not the current usage.
     long size = getContainerSizeB();
     if (size == -1) {
       throw new IOException("Container size unknown!");

http://git-wip-us.apache.org/repos/asf/hadoop/blob/71df8c27/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/protocol/DatanodeDetails.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/protocol/DatanodeDetails.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/protocol/DatanodeDetails.java
index c373e22..bae22a2 100644
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/protocol/DatanodeDetails.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/protocol/DatanodeDetails.java
@@ -35,7 +35,7 @@ import java.util.UUID;
  */
 @InterfaceAudience.Private
 @InterfaceStability.Evolving
-public final class DatanodeDetails implements Comparable<DatanodeDetails> {
+public class DatanodeDetails implements Comparable<DatanodeDetails> {
 
   /**
    * DataNode's unique identifier in the cluster.
@@ -63,6 +63,13 @@ public final class DatanodeDetails implements Comparable<DatanodeDetails> {
     this.ports = ports;
   }
 
+  protected DatanodeDetails(DatanodeDetails datanodeDetails) {
+    this.uuid = datanodeDetails.uuid;
+    this.ipAddress = datanodeDetails.ipAddress;
+    this.hostName = datanodeDetails.hostName;
+    this.ports = datanodeDetails.ports;
+  }
+
   /**
    * Returns the DataNode UUID.
    *
@@ -238,7 +245,7 @@ public final class DatanodeDetails implements Comparable<DatanodeDetails> {
   /**
    * Builder class for building DatanodeDetails.
    */
-  public static class Builder {
+  public static final class Builder {
     private String id;
     private String ipAddress;
     private String hostName;
@@ -324,7 +331,7 @@ public final class DatanodeDetails implements Comparable<DatanodeDetails> {
   /**
    * Container to hold DataNode Port details.
    */
-  public static class Port {
+  public static final class Port {
 
     /**
      * Ports that are supported in DataNode.

http://git-wip-us.apache.org/repos/asf/hadoop/blob/71df8c27/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/ScmConfigKeys.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/ScmConfigKeys.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/ScmConfigKeys.java
index df6fbf0..ad326dc 100644
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/ScmConfigKeys.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/ScmConfigKeys.java
@@ -165,10 +165,6 @@ public final class ScmConfigKeys {
   public static final String OZONE_SCM_DEADNODE_INTERVAL_DEFAULT =
       "10m";
 
-  public static final String OZONE_SCM_MAX_HB_COUNT_TO_PROCESS =
-      "ozone.scm.max.hb.count.to.process";
-  public static final int OZONE_SCM_MAX_HB_COUNT_TO_PROCESS_DEFAULT = 5000;
-
   public static final String OZONE_SCM_HEARTBEAT_PROCESS_INTERVAL =
       "ozone.scm.heartbeat.thread.interval";
   public static final String OZONE_SCM_HEARTBEAT_PROCESS_INTERVAL_DEFAULT =

http://git-wip-us.apache.org/repos/asf/hadoop/blob/71df8c27/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/client/ScmClient.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/client/ScmClient.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/client/ScmClient.java
index ecb2173..7955179 100644
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/client/ScmClient.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/client/ScmClient.java
@@ -26,7 +26,6 @@ import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos
 import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
 
 import java.io.IOException;
-import java.util.EnumSet;
 import java.util.List;
 
 /**
@@ -150,13 +149,13 @@ public interface ScmClient {
 
   /**
    * Returns a set of Nodes that meet a query criteria.
-   * @param nodeStatuses - A set of criteria that we want the node to have.
+   * @param nodeStatuses - Criteria that we want the node to have.
    * @param queryScope - Query scope - Cluster or pool.
    * @param poolName - if it is pool, a pool name is required.
    * @return A set of nodes that meet the requested criteria.
    * @throws IOException
    */
-  HddsProtos.NodePool queryNode(EnumSet<HddsProtos.NodeState> nodeStatuses,
+  List<HddsProtos.Node> queryNode(HddsProtos.NodeState nodeStatuses,
       HddsProtos.QueryScope queryScope, String poolName) throws IOException;
 
   /**

http://git-wip-us.apache.org/repos/asf/hadoop/blob/71df8c27/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/protocol/StorageContainerLocationProtocol.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/protocol/StorageContainerLocationProtocol.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/protocol/StorageContainerLocationProtocol.java
index b787409..581fbd0 100644
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/protocol/StorageContainerLocationProtocol.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/protocol/StorageContainerLocationProtocol.java
@@ -26,7 +26,6 @@ import org.apache.hadoop.hdds.protocol.proto
     .StorageContainerLocationProtocolProtos.ObjectStageChangeRequestProto;
 
 import java.io.IOException;
-import java.util.EnumSet;
 import java.util.List;
 
 /**
@@ -94,10 +93,10 @@ public interface StorageContainerLocationProtocol {
 
   /**
    *  Queries a list of Node Statuses.
-   * @param nodeStatuses
+   * @param state
    * @return List of Datanodes.
    */
-  HddsProtos.NodePool queryNode(EnumSet<HddsProtos.NodeState> nodeStatuses,
+  List<HddsProtos.Node> queryNode(HddsProtos.NodeState state,
       HddsProtos.QueryScope queryScope, String poolName) throws IOException;
 
   /**

http://git-wip-us.apache.org/repos/asf/hadoop/blob/71df8c27/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/protocolPB/StorageContainerLocationProtocolClientSideTranslatorPB.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/protocolPB/StorageContainerLocationProtocolClientSideTranslatorPB.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/protocolPB/StorageContainerLocationProtocolClientSideTranslatorPB.java
index 4b03d12..ac12ea2 100644
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/protocolPB/StorageContainerLocationProtocolClientSideTranslatorPB.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/protocolPB/StorageContainerLocationProtocolClientSideTranslatorPB.java
@@ -59,7 +59,6 @@ import org.apache.hadoop.ipc.RPC;
 import java.io.Closeable;
 import java.io.IOException;
 import java.util.ArrayList;
-import java.util.EnumSet;
 import java.util.List;
 
 /**
@@ -215,20 +214,19 @@ public final class StorageContainerLocationProtocolClientSideTranslatorPB
    * @return List of Datanodes.
    */
   @Override
-  public HddsProtos.NodePool queryNode(EnumSet<HddsProtos.NodeState>
+  public List<HddsProtos.Node> queryNode(HddsProtos.NodeState
       nodeStatuses, HddsProtos.QueryScope queryScope, String poolName)
       throws IOException {
     // TODO : We support only cluster wide query right now. So ignoring checking
     // queryScope and poolName
     Preconditions.checkNotNull(nodeStatuses);
-    Preconditions.checkState(nodeStatuses.size() > 0);
     NodeQueryRequestProto request = NodeQueryRequestProto.newBuilder()
-        .addAllQuery(nodeStatuses)
+        .setState(nodeStatuses)
         .setScope(queryScope).setPoolName(poolName).build();
     try {
       NodeQueryResponseProto response =
           rpcProxy.queryNode(NULL_RPC_CONTROLLER, request);
-      return response.getDatanodes();
+      return response.getDatanodesList();
     } catch (ServiceException e) {
       throw  ProtobufHelper.getRemoteException(e);
     }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/71df8c27/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/protocolPB/StorageContainerLocationProtocolServerSideTranslatorPB.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/protocolPB/StorageContainerLocationProtocolServerSideTranslatorPB.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/protocolPB/StorageContainerLocationProtocolServerSideTranslatorPB.java
index d66919f..9175ebf 100644
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/protocolPB/StorageContainerLocationProtocolServerSideTranslatorPB.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/protocolPB/StorageContainerLocationProtocolServerSideTranslatorPB.java
@@ -57,7 +57,6 @@ import org.apache.hadoop.hdds.protocol.proto
     .StorageContainerLocationProtocolProtos.SCMListContainerResponseProto;
 
 import java.io.IOException;
-import java.util.EnumSet;
 import java.util.List;
 
 /**
@@ -171,13 +170,12 @@ public final class StorageContainerLocationProtocolServerSideTranslatorPB
       StorageContainerLocationProtocolProtos.NodeQueryRequestProto request)
       throws ServiceException {
     try {
-      EnumSet<HddsProtos.NodeState> nodeStateEnumSet = EnumSet.copyOf(request
-          .getQueryList());
-      HddsProtos.NodePool datanodes = impl.queryNode(nodeStateEnumSet,
+      HddsProtos.NodeState nodeState = request.getState();
+      List<HddsProtos.Node> datanodes = impl.queryNode(nodeState,
           request.getScope(), request.getPoolName());
       return StorageContainerLocationProtocolProtos
           .NodeQueryResponseProto.newBuilder()
-          .setDatanodes(datanodes)
+          .addAllDatanodes(datanodes)
           .build();
     } catch (Exception e) {
       throw new ServiceException(e);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/71df8c27/hadoop-hdds/common/src/main/proto/StorageContainerLocationProtocol.proto
----------------------------------------------------------------------
diff --git a/hadoop-hdds/common/src/main/proto/StorageContainerLocationProtocol.proto b/hadoop-hdds/common/src/main/proto/StorageContainerLocationProtocol.proto
index 143c2ae..68cc35f 100644
--- a/hadoop-hdds/common/src/main/proto/StorageContainerLocationProtocol.proto
+++ b/hadoop-hdds/common/src/main/proto/StorageContainerLocationProtocol.proto
@@ -118,26 +118,13 @@ message ObjectStageChangeResponseProto {
  match the NodeState that we are requesting.
 */
 message NodeQueryRequestProto {
-
-
-  // Repeated, So we can specify more than one status type.
-  // These NodeState types are additive for now, in the sense that
-  // if you specify HEALTHY and FREE_NODE members --
-  // Then you get all healthy node which are not raft members.
-  //
-  // if you specify all healthy and dead nodes, you will get nothing
-  // back. Server is not going to dictate what combinations make sense,
-  // it is entirely up to the caller.
-  // TODO: Support operators like OR and NOT. Currently it is always an
-  // implied AND.
-
-  repeated NodeState query = 1;
+  required NodeState state = 1;
   required QueryScope scope = 2;
   optional string poolName = 3; // if scope is pool, then pool name is needed.
 }
 
 message NodeQueryResponseProto {
-  required NodePool datanodes = 1;
+  repeated Node datanodes = 1;
 }
 
 /**
@@ -194,7 +181,7 @@ service StorageContainerLocationProtocolService {
   /**
   * Returns a set of Nodes that meet a criteria.
   */
-  rpc queryNode(NodeQueryRequestProto)  returns (NodeQueryResponseProto);
+  rpc queryNode(NodeQueryRequestProto) returns (NodeQueryResponseProto);
 
   /**
   * Notify from client when begin or finish container or pipeline operations on datanodes.

http://git-wip-us.apache.org/repos/asf/hadoop/blob/71df8c27/hadoop-hdds/common/src/main/proto/hdds.proto
----------------------------------------------------------------------
diff --git a/hadoop-hdds/common/src/main/proto/hdds.proto b/hadoop-hdds/common/src/main/proto/hdds.proto
index 1c9ee19..b9def2a 100644
--- a/hadoop-hdds/common/src/main/proto/hdds.proto
+++ b/hadoop-hdds/common/src/main/proto/hdds.proto
@@ -69,14 +69,11 @@ enum NodeType {
  * and getNodeCount.
  */
 enum NodeState {
-    HEALTHY             = 1;
-    STALE               = 2;
-    DEAD                = 3;
-    DECOMMISSIONING     = 4;
-    DECOMMISSIONED      = 5;
-    RAFT_MEMBER         = 6;
-    FREE_NODE           = 7; // Not a member in raft.
-    INVALID             = 8;
+    HEALTHY = 1;
+    STALE = 2;
+    DEAD = 3;
+    DECOMMISSIONING = 4;
+    DECOMMISSIONED = 5;
 }
 
 enum QueryScope {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/71df8c27/hadoop-hdds/common/src/main/resources/ozone-default.xml
----------------------------------------------------------------------
diff --git a/hadoop-hdds/common/src/main/resources/ozone-default.xml b/hadoop-hdds/common/src/main/resources/ozone-default.xml
index 25365c8..568d267 100644
--- a/hadoop-hdds/common/src/main/resources/ozone-default.xml
+++ b/hadoop-hdds/common/src/main/resources/ozone-default.xml
@@ -774,17 +774,6 @@
     </description>
   </property>
   <property>
-    <name>ozone.scm.max.hb.count.to.process</name>
-    <value>5000</value>
-    <tag>OZONE, MANAGEMENT, PERFORMANCE</tag>
-    <description>
-      The maximum number of heartbeat to process per loop of the
-      heartbeat process thread. Please see
-      ozone.scm.heartbeat.thread.interval
-      for more info.
-    </description>
-  </property>
-  <property>
     <name>ozone.scm.names</name>
     <value/>
     <tag>OZONE</tag>

http://git-wip-us.apache.org/repos/asf/hadoop/blob/71df8c27/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/hdds/scm/HddsServerUtil.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/hdds/scm/HddsServerUtil.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/hdds/scm/HddsServerUtil.java
index c734d9b..cc7adbf 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/hdds/scm/HddsServerUtil.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/hdds/scm/HddsServerUtil.java
@@ -259,17 +259,6 @@ public final class HddsServerUtil {
   }
 
   /**
-   * Returns the maximum number of heartbeat to process per loop of the process
-   * thread.
-   * @param conf Configuration
-   * @return - int -- Number of HBs to process
-   */
-  public static int getMaxHBToProcessPerLoop(Configuration conf) {
-    return conf.getInt(ScmConfigKeys.OZONE_SCM_MAX_HB_COUNT_TO_PROCESS,
-        ScmConfigKeys.OZONE_SCM_MAX_HB_COUNT_TO_PROCESS_DEFAULT);
-  }
-
-  /**
    * Timeout value for the RPC from Datanode to SCM, primarily used for
    * Heartbeats and container reports.
    *

http://git-wip-us.apache.org/repos/asf/hadoop/blob/71df8c27/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/protocol/StorageContainerNodeProtocol.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/protocol/StorageContainerNodeProtocol.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/protocol/StorageContainerNodeProtocol.java
index 790f58a..c9ef43f 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/protocol/StorageContainerNodeProtocol.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/protocol/StorageContainerNodeProtocol.java
@@ -59,10 +59,8 @@ public interface StorageContainerNodeProtocol {
   /**
    * Send heartbeat to indicate the datanode is alive and doing well.
    * @param datanodeDetails - Datanode ID.
-   * @param nodeReport - node report.
    * @return SCMheartbeat response list
    */
-  List<SCMCommand> sendHeartbeat(DatanodeDetails datanodeDetails,
-      NodeReportProto nodeReport);
+  List<SCMCommand> processHeartbeat(DatanodeDetails datanodeDetails);
 
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/71df8c27/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/DatanodeInfo.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/DatanodeInfo.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/DatanodeInfo.java
new file mode 100644
index 0000000..51465ee
--- /dev/null
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/DatanodeInfo.java
@@ -0,0 +1,109 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hdds.scm.node;
+
+import org.apache.hadoop.hdds.protocol.DatanodeDetails;
+import org.apache.hadoop.hdds.protocol.proto
+    .StorageContainerDatanodeProtocolProtos.StorageReportProto;
+import org.apache.hadoop.util.Time;
+
+import java.util.List;
+import java.util.concurrent.locks.ReadWriteLock;
+import java.util.concurrent.locks.ReentrantReadWriteLock;
+
+/**
+ * This class extends the primary identifier of a Datanode with ephemeral
+ * state, eg last reported time, usage information etc.
+ */
+public class DatanodeInfo extends DatanodeDetails {
+
+  private final ReadWriteLock lock;
+
+  private volatile long lastHeartbeatTime;
+  private long lastStatsUpdatedTime;
+
+  // If required we can dissect StorageReportProto and store the raw data
+  private List<StorageReportProto> storageReports;
+
+  /**
+   * Constructs DatanodeInfo from DatanodeDetails.
+   *
+   * @param datanodeDetails Details about the datanode
+   */
+  public DatanodeInfo(DatanodeDetails datanodeDetails) {
+    super(datanodeDetails);
+    lock = new ReentrantReadWriteLock();
+    lastHeartbeatTime = Time.monotonicNow();
+  }
+
+  /**
+   * Updates the last heartbeat time with current time.
+   */
+  public void updateLastHeartbeatTime() {
+    try {
+      lock.writeLock().lock();
+      lastHeartbeatTime = Time.monotonicNow();
+    } finally {
+      lock.writeLock().unlock();
+    }
+  }
+
+  /**
+   * Returns the last heartbeat time.
+   *
+   * @return last heartbeat time.
+   */
+  public long getLastHeartbeatTime() {
+    try {
+      lock.readLock().lock();
+      return lastHeartbeatTime;
+    } finally {
+      lock.readLock().unlock();
+    }
+  }
+
+  /**
+   * Updates the datanode storage reports.
+   *
+   * @param reports list of storage report
+   */
+  public void updateStorageReports(List<StorageReportProto> reports) {
+    try {
+      lock.writeLock().lock();
+      lastStatsUpdatedTime = Time.monotonicNow();
+      storageReports = reports;
+    } finally {
+      lock.writeLock().unlock();
+    }
+  }
+
+  /**
+   * Returns the storage reports associated with this datanode.
+   *
+   * @return list of storage report
+   */
+  public List<StorageReportProto> getStorageReports() {
+    try {
+      lock.readLock().lock();
+      return storageReports;
+    } finally {
+      lock.readLock().unlock();
+    }
+  }
+}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/71df8c27/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/HeartbeatQueueItem.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/HeartbeatQueueItem.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/HeartbeatQueueItem.java
deleted file mode 100644
index 04658bd..0000000
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/HeartbeatQueueItem.java
+++ /dev/null
@@ -1,98 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.hdds.scm.node;
-
-import com.google.common.annotations.VisibleForTesting;
-import org.apache.hadoop.hdds.protocol.DatanodeDetails;
-import org.apache.hadoop.hdds.protocol.proto
-    .StorageContainerDatanodeProtocolProtos.NodeReportProto;
-
-import static org.apache.hadoop.util.Time.monotonicNow;
-
-/**
- * This class represents the item in SCM heartbeat queue.
- */
-public class HeartbeatQueueItem {
-  private DatanodeDetails datanodeDetails;
-  private long recvTimestamp;
-  private NodeReportProto nodeReport;
-
-  /**
-   *
-   * @param datanodeDetails - datanode ID of the heartbeat.
-   * @param recvTimestamp - heartbeat receive timestamp.
-   * @param nodeReport - node report associated with the heartbeat if any.
-   */
-  HeartbeatQueueItem(DatanodeDetails datanodeDetails, long recvTimestamp,
-      NodeReportProto nodeReport) {
-    this.datanodeDetails = datanodeDetails;
-    this.recvTimestamp = recvTimestamp;
-    this.nodeReport = nodeReport;
-  }
-
-  /**
-   * @return datanode ID.
-   */
-  public DatanodeDetails getDatanodeDetails() {
-    return datanodeDetails;
-  }
-
-  /**
-   * @return node report.
-   */
-  public NodeReportProto getNodeReport() {
-    return nodeReport;
-  }
-
-  /**
-   * @return heartbeat receive timestamp.
-   */
-  public long getRecvTimestamp() {
-    return recvTimestamp;
-  }
-
-  /**
-   * Builder for HeartbeatQueueItem.
-   */
-  public static class Builder {
-    private DatanodeDetails datanodeDetails;
-    private NodeReportProto nodeReport;
-    private long recvTimestamp = monotonicNow();
-
-    public Builder setDatanodeDetails(DatanodeDetails dnDetails) {
-      this.datanodeDetails = dnDetails;
-      return this;
-    }
-
-    public Builder setNodeReport(NodeReportProto report) {
-      this.nodeReport = report;
-      return this;
-    }
-
-    @VisibleForTesting
-    public Builder setRecvTimestamp(long recvTime) {
-      this.recvTimestamp = recvTime;
-      return this;
-    }
-
-    public HeartbeatQueueItem build() {
-      return new HeartbeatQueueItem(datanodeDetails, recvTimestamp, nodeReport);
-    }
-  }
-}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hadoop/blob/71df8c27/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/NodeManager.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/NodeManager.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/NodeManager.java
index 72d7e94..c13c37c 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/NodeManager.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/NodeManager.java
@@ -17,10 +17,9 @@
  */
 package org.apache.hadoop.hdds.scm.node;
 
-import com.google.common.annotations.VisibleForTesting;
 import org.apache.hadoop.hdds.scm.container.placement.metrics.SCMNodeMetric;
 import org.apache.hadoop.hdds.scm.container.placement.metrics.SCMNodeStat;
-import org.apache.hadoop.hdfs.protocol.UnregisteredNodeException;
+import org.apache.hadoop.hdds.scm.node.states.NodeNotFoundException;
 import org.apache.hadoop.hdds.protocol.DatanodeDetails;
 import org.apache.hadoop.hdds.protocol.proto.HddsProtos.NodeState;
 import org.apache.hadoop.ozone.protocol.StorageContainerNodeProtocol;
@@ -54,14 +53,14 @@ import java.util.UUID;
  * list, by calling removeNode. We will throw away this nodes info soon.
  */
 public interface NodeManager extends StorageContainerNodeProtocol,
-    NodeManagerMXBean, Closeable, Runnable {
+    NodeManagerMXBean, Closeable {
   /**
    * Removes a data node from the management of this Node Manager.
    *
    * @param node - DataNode.
-   * @throws UnregisteredNodeException
+   * @throws NodeNotFoundException
    */
-  void removeNode(DatanodeDetails node) throws UnregisteredNodeException;
+  void removeNode(DatanodeDetails node) throws NodeNotFoundException;
 
   /**
    * Gets all Live Datanodes that is currently communicating with SCM.
@@ -124,13 +123,6 @@ public interface NodeManager extends StorageContainerNodeProtocol,
   SCMNodeMetric getNodeStat(DatanodeDetails datanodeDetails);
 
   /**
-   * Wait for the heartbeat is processed by NodeManager.
-   * @return true if heartbeat has been processed.
-   */
-  @VisibleForTesting
-  boolean waitForHeartbeatProcessed();
-
-  /**
    * Returns the node state of a specific node.
    * @param datanodeDetails DatanodeDetails
    * @return Healthy/Stale/Dead.

http://git-wip-us.apache.org/repos/asf/hadoop/blob/71df8c27/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/NodeStateManager.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/NodeStateManager.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/NodeStateManager.java
new file mode 100644
index 0000000..5543c04
--- /dev/null
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/NodeStateManager.java
@@ -0,0 +1,575 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hdds.scm.node;
+
+import com.google.common.base.Preconditions;
+import com.google.common.util.concurrent.ThreadFactoryBuilder;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hdds.protocol.DatanodeDetails;
+import org.apache.hadoop.hdds.protocol.proto.HddsProtos.NodeState;
+import org.apache.hadoop.hdds.scm.HddsServerUtil;
+import org.apache.hadoop.hdds.scm.node.states.NodeAlreadyExistsException;
+import org.apache.hadoop.hdds.scm.node.states.NodeNotFoundException;
+import org.apache.hadoop.hdds.scm.node.states.NodeStateMap;
+import org.apache.hadoop.ozone.common.statemachine
+    .InvalidStateTransitionException;
+import org.apache.hadoop.ozone.common.statemachine.StateMachine;
+import org.apache.hadoop.util.Time;
+import org.apache.hadoop.util.concurrent.HadoopExecutors;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.io.Closeable;
+import java.util.HashSet;
+import java.util.LinkedList;
+import java.util.List;
+import java.util.Set;
+import java.util.UUID;
+import java.util.concurrent.ScheduledExecutorService;
+import java.util.concurrent.TimeUnit;
+import java.util.function.Predicate;
+
+import static org.apache.hadoop.hdds.scm.ScmConfigKeys
+    .OZONE_SCM_DEADNODE_INTERVAL;
+import static org.apache.hadoop.hdds.scm.ScmConfigKeys
+    .OZONE_SCM_HEARTBEAT_PROCESS_INTERVAL;
+import static org.apache.hadoop.hdds.scm.ScmConfigKeys
+    .OZONE_SCM_STALENODE_INTERVAL;
+
+/**
+ * NodeStateManager maintains the state of all the datanodes in the cluster. All
+ * the node state change should happen only via NodeStateManager. It also
+ * runs a heartbeat thread which periodically updates the node state.
+ * <p>
+ * The getNode(byState) functions make copy of node maps and then creates a list
+ * based on that. It should be assumed that these get functions always report
+ * *stale* information. For example, getting the deadNodeCount followed by
+ * getNodes(DEAD) could very well produce totally different count. Also
+ * getNodeCount(HEALTHY) + getNodeCount(DEAD) + getNodeCode(STALE), is not
+ * guaranteed to add up to the total nodes that we know off. Please treat all
+ * get functions in this file as a snap-shot of information that is inconsistent
+ * as soon as you read it.
+ */
+public class NodeStateManager implements Runnable, Closeable {
+
+  /**
+   * Node's life cycle events.
+   */
+  private enum NodeLifeCycleEvent {
+    TIMEOUT, RESTORE, RESURRECT, DECOMMISSION, DECOMMISSIONED
+  }
+
+  private static final Logger LOG = LoggerFactory
+      .getLogger(NodeStateManager.class);
+
+  /**
+   * StateMachine for node lifecycle.
+   */
+  private final StateMachine<NodeState, NodeLifeCycleEvent> stateMachine;
+  /**
+   * This is the map which maintains the current state of all datanodes.
+   */
+  private final NodeStateMap nodeStateMap;
+  /**
+   * ExecutorService used for scheduling heartbeat processing thread.
+   */
+  private final ScheduledExecutorService executorService;
+  /**
+   * The frequency in which we have run the heartbeat processing thread.
+   */
+  private final long heartbeatCheckerIntervalMs;
+  /**
+   * The timeout value which will be used for marking a datanode as stale.
+   */
+  private final long staleNodeIntervalMs;
+  /**
+   * The timeout value which will be used for marking a datanode as dead.
+   */
+  private final long deadNodeIntervalMs;
+
+  /**
+   * Constructs a NodeStateManager instance with the given configuration.
+   *
+   * @param conf Configuration
+   */
+  public NodeStateManager(Configuration conf) {
+    nodeStateMap = new NodeStateMap();
+    Set<NodeState> finalStates = new HashSet<>();
+    finalStates.add(NodeState.DECOMMISSIONED);
+    this.stateMachine = new StateMachine<>(NodeState.HEALTHY, finalStates);
+    initializeStateMachine();
+    heartbeatCheckerIntervalMs = HddsServerUtil
+        .getScmheartbeatCheckerInterval(conf);
+    staleNodeIntervalMs = HddsServerUtil.getStaleNodeInterval(conf);
+    deadNodeIntervalMs = HddsServerUtil.getDeadNodeInterval(conf);
+    Preconditions.checkState(heartbeatCheckerIntervalMs > 0,
+        OZONE_SCM_HEARTBEAT_PROCESS_INTERVAL + " should be greater than 0.");
+    Preconditions.checkState(staleNodeIntervalMs < deadNodeIntervalMs,
+        OZONE_SCM_STALENODE_INTERVAL + " should be less than" +
+            OZONE_SCM_DEADNODE_INTERVAL);
+    executorService = HadoopExecutors.newScheduledThreadPool(1,
+        new ThreadFactoryBuilder().setDaemon(true)
+            .setNameFormat("SCM Heartbeat Processing Thread - %d").build());
+    executorService.schedule(this, heartbeatCheckerIntervalMs,
+        TimeUnit.MILLISECONDS);
+  }
+
+  /*
+   *
+   * Node and State Transition Mapping:
+   *
+   * State: HEALTHY         -------------------> STALE
+   * Event:                       TIMEOUT
+   *
+   * State: STALE           -------------------> DEAD
+   * Event:                       TIMEOUT
+   *
+   * State: STALE           -------------------> HEALTHY
+   * Event:                       RESTORE
+   *
+   * State: DEAD            -------------------> HEALTHY
+   * Event:                       RESURRECT
+   *
+   * State: HEALTHY         -------------------> DECOMMISSIONING
+   * Event:                     DECOMMISSION
+   *
+   * State: STALE           -------------------> DECOMMISSIONING
+   * Event:                     DECOMMISSION
+   *
+   * State: DEAD            -------------------> DECOMMISSIONING
+   * Event:                     DECOMMISSION
+   *
+   * State: DECOMMISSIONING -------------------> DECOMMISSIONED
+   * Event:                     DECOMMISSIONED
+   *
+   *  Node State Flow
+   *
+   *  +--------------------------------------------------------+
+   *  |                                     (RESURRECT)        |
+   *  |   +--------------------------+                         |
+   *  |   |      (RESTORE)           |                         |
+   *  |   |                          |                         |
+   *  V   V                          |                         |
+   * [HEALTHY]------------------->[STALE]------------------->[DEAD]
+   *    |         (TIMEOUT)          |         (TIMEOUT)       |
+   *    |                            |                         |
+   *    |                            |                         |
+   *    |                            |                         |
+   *    |                            |                         |
+   *    | (DECOMMISSION)             | (DECOMMISSION)          | (DECOMMISSION)
+   *    |                            V                         |
+   *    +------------------->[DECOMMISSIONING]<----------------+
+   *                                 |
+   *                                 | (DECOMMISSIONED)
+   *                                 |
+   *                                 V
+   *                          [DECOMMISSIONED]
+   *
+   */
+
+  /**
+   * Initializes the lifecycle of node state machine.
+   */
+  private void initializeStateMachine() {
+    stateMachine.addTransition(
+        NodeState.HEALTHY, NodeState.STALE, NodeLifeCycleEvent.TIMEOUT);
+    stateMachine.addTransition(
+        NodeState.STALE, NodeState.DEAD, NodeLifeCycleEvent.TIMEOUT);
+    stateMachine.addTransition(
+        NodeState.STALE, NodeState.HEALTHY, NodeLifeCycleEvent.RESTORE);
+    stateMachine.addTransition(
+        NodeState.DEAD, NodeState.HEALTHY, NodeLifeCycleEvent.RESURRECT);
+    stateMachine.addTransition(
+        NodeState.HEALTHY, NodeState.DECOMMISSIONING,
+        NodeLifeCycleEvent.DECOMMISSION);
+    stateMachine.addTransition(
+        NodeState.STALE, NodeState.DECOMMISSIONING,
+        NodeLifeCycleEvent.DECOMMISSION);
+    stateMachine.addTransition(
+        NodeState.DEAD, NodeState.DECOMMISSIONING,
+        NodeLifeCycleEvent.DECOMMISSION);
+    stateMachine.addTransition(
+        NodeState.DECOMMISSIONING, NodeState.DECOMMISSIONED,
+        NodeLifeCycleEvent.DECOMMISSIONED);
+
+  }
+
+  /**
+   * Adds a new node to the state manager.
+   *
+   * @param datanodeDetails DatanodeDetails
+   *
+   * @throws NodeAlreadyExistsException if the node is already present
+   */
+  public void addNode(DatanodeDetails datanodeDetails)
+      throws NodeAlreadyExistsException {
+    nodeStateMap.addNode(datanodeDetails, stateMachine.getInitialState());
+  }
+
+  /**
+   * Get information about the node.
+   *
+   * @param datanodeDetails DatanodeDetails
+   *
+   * @return DatanodeInfo
+   *
+   * @throws NodeNotFoundException if the node is not present
+   */
+  public DatanodeInfo getNode(DatanodeDetails datanodeDetails)
+      throws NodeNotFoundException {
+    return nodeStateMap.getNodeInfo(datanodeDetails.getUuid());
+  }
+
+  /**
+   * Updates the last heartbeat time of the node.
+   *
+   * @throws NodeNotFoundException if the node is not present
+   */
+  public void updateLastHeartbeatTime(DatanodeDetails datanodeDetails)
+      throws NodeNotFoundException {
+    nodeStateMap.getNodeInfo(datanodeDetails.getUuid())
+        .updateLastHeartbeatTime();
+  }
+
+  /**
+   * Returns the current state of the node.
+   *
+   * @param datanodeDetails DatanodeDetails
+   *
+   * @return NodeState
+   *
+   * @throws NodeNotFoundException if the node is not present
+   */
+  public NodeState getNodeState(DatanodeDetails datanodeDetails)
+      throws NodeNotFoundException {
+    return nodeStateMap.getNodeState(datanodeDetails.getUuid());
+  }
+
+  /**
+   * Returns all the node which are in healthy state.
+   *
+   * @return list of healthy nodes
+   */
+  public List<DatanodeDetails> getHealthyNodes() {
+    return getNodes(NodeState.HEALTHY);
+  }
+
+  /**
+   * Returns all the node which are in stale state.
+   *
+   * @return list of stale nodes
+   */
+  public List<DatanodeDetails> getStaleNodes() {
+    return getNodes(NodeState.STALE);
+  }
+
+  /**
+   * Returns all the node which are in dead state.
+   *
+   * @return list of dead nodes
+   */
+  public List<DatanodeDetails> getDeadNodes() {
+    return getNodes(NodeState.DEAD);
+  }
+
+  /**
+   * Returns all the node which are in the specified state.
+   *
+   * @param state NodeState
+   *
+   * @return list of nodes
+   */
+  public List<DatanodeDetails> getNodes(NodeState state) {
+    List<DatanodeDetails> nodes = new LinkedList<>();
+    nodeStateMap.getNodes(state).forEach(
+        uuid -> {
+          try {
+            nodes.add(nodeStateMap.getNodeDetails(uuid));
+          } catch (NodeNotFoundException e) {
+            // This should not happen unless someone else other than
+            // NodeStateManager is directly modifying NodeStateMap and removed
+            // the node entry after we got the list of UUIDs.
+            LOG.error("Inconsistent NodeStateMap! " + nodeStateMap);
+          }
+        });
+    return nodes;
+  }
+
+  /**
+   * Returns all the nodes which have registered to NodeStateManager.
+   *
+   * @return all the managed nodes
+   */
+  public List<DatanodeDetails> getAllNodes() {
+    List<DatanodeDetails> nodes = new LinkedList<>();
+    nodeStateMap.getAllNodes().forEach(
+        uuid -> {
+          try {
+            nodes.add(nodeStateMap.getNodeDetails(uuid));
+          } catch (NodeNotFoundException e) {
+            // This should not happen unless someone else other than
+            // NodeStateManager is directly modifying NodeStateMap and removed
+            // the node entry after we got the list of UUIDs.
+            LOG.error("Inconsistent NodeStateMap! " + nodeStateMap);
+          }
+        });
+    return nodes;
+  }
+
+  /**
+   * Returns the count of healthy nodes.
+   *
+   * @return healthy node count
+   */
+  public int getHealthyNodeCount() {
+    return getNodeCount(NodeState.HEALTHY);
+  }
+
+  /**
+   * Returns the count of stale nodes.
+   *
+   * @return stale node count
+   */
+  public int getStaleNodeCount() {
+    return getNodeCount(NodeState.STALE);
+  }
+
+  /**
+   * Returns the count of dead nodes.
+   *
+   * @return dead node count
+   */
+  public int getDeadNodeCount() {
+    return getNodeCount(NodeState.DEAD);
+  }
+
+  /**
+   * Returns the count of nodes in specified state.
+   *
+   * @param state NodeState
+   *
+   * @return node count
+   */
+  public int getNodeCount(NodeState state) {
+    return nodeStateMap.getNodeCount(state);
+  }
+
+  /**
+   * Returns the count of all nodes managed by NodeStateManager.
+   *
+   * @return node count
+   */
+  public int getTotalNodeCount() {
+    return nodeStateMap.getTotalNodeCount();
+  }
+
+  /**
+   * Removes a node from NodeStateManager.
+   *
+   * @param datanodeDetails DatanodeDetails
+   *
+   * @throws NodeNotFoundException if the node is not present
+   */
+  public void removeNode(DatanodeDetails datanodeDetails)
+      throws NodeNotFoundException {
+    nodeStateMap.removeNode(datanodeDetails.getUuid());
+  }
+
+  /**
+   * Move Stale or Dead node to healthy if we got a heartbeat from them.
+   * Move healthy nodes to stale nodes if it is needed.
+   * Move Stales node to dead if needed.
+   *
+   * @see Thread#run()
+   */
+  @Override
+  public void run() {
+
+    /*
+     *
+     *          staleNodeDeadline                healthyNodeDeadline
+     *                 |                                  |
+     *      Dead       |             Stale                |     Healthy
+     *      Node       |             Node                 |     Node
+     *      Window     |             Window               |     Window
+     * ----------------+----------------------------------+------------------->
+     *                      >>-->> time-line >>-->>
+     *
+     * Here is the logic of computing the health of a node.
+     *
+     * 1. We get the current time and look back that the time
+     *    when we got a heartbeat from a node.
+     * 
+     * 2. If the last heartbeat was within the window of healthy node we mark
+     *    it as healthy.
+     * 
+     * 3. If the last HB Time stamp is longer and falls within the window of
+     *    Stale Node time, we will mark it as Stale.
+     * 
+     * 4. If the last HB time is older than the Stale Window, then the node is
+     *    marked as dead.
+     *
+     * The Processing starts from current time and looks backwards in time.
+     */
+    long processingStartTime = Time.monotonicNow();
+    // After this time node is considered to be stale.
+    long healthyNodeDeadline = processingStartTime - staleNodeIntervalMs;
+    // After this time node is considered to be dead.
+    long staleNodeDeadline = processingStartTime - deadNodeIntervalMs;
+
+    Predicate<Long> healthyNodeCondition =
+        (lastHbTime) -> lastHbTime >= healthyNodeDeadline;
+    // staleNodeCondition is superset of stale and dead node
+    Predicate<Long> staleNodeCondition =
+        (lastHbTime) -> lastHbTime < healthyNodeDeadline;
+    Predicate<Long> deadNodeCondition =
+        (lastHbTime) -> lastHbTime < staleNodeDeadline;
+    try {
+      for (NodeState state : NodeState.values()) {
+        List<UUID> nodes = nodeStateMap.getNodes(state);
+        for (UUID id : nodes) {
+          DatanodeInfo node = nodeStateMap.getNodeInfo(id);
+          switch (state) {
+          case HEALTHY:
+            // Move the node to STALE if the last heartbeat time is less than
+            // configured stale-node interval.
+            updateNodeState(node, staleNodeCondition, state,
+                  NodeLifeCycleEvent.TIMEOUT);
+            break;
+          case STALE:
+            // Move the node to DEAD if the last heartbeat time is less than
+            // configured dead-node interval.
+            updateNodeState(node, deadNodeCondition, state,
+                NodeLifeCycleEvent.TIMEOUT);
+            // Restore the node if we have received heartbeat before configured
+            // stale-node interval.
+            updateNodeState(node, healthyNodeCondition, state,
+                NodeLifeCycleEvent.RESTORE);
+            break;
+          case DEAD:
+            // Resurrect the node if we have received heartbeat before
+            // configured stale-node interval.
+            updateNodeState(node, healthyNodeCondition, state,
+                NodeLifeCycleEvent.RESURRECT);
+            break;
+            // We don't do anything for DECOMMISSIONING and DECOMMISSIONED in
+            // heartbeat processing.
+          case DECOMMISSIONING:
+          case DECOMMISSIONED:
+          default:
+          }
+        }
+      }
+    } catch (NodeNotFoundException e) {
+      // This should not happen unless someone else other than
+      // NodeStateManager is directly modifying NodeStateMap and removed
+      // the node entry after we got the list of UUIDs.
+      LOG.error("Inconsistent NodeStateMap! " + nodeStateMap);
+    }
+    long processingEndTime = Time.monotonicNow();
+    //If we have taken too much time for HB processing, log that information.
+    if ((processingEndTime - processingStartTime) >
+        heartbeatCheckerIntervalMs) {
+      LOG.error("Total time spend processing datanode HB's is greater than " +
+              "configured values for datanode heartbeats. Please adjust the" +
+              " heartbeat configs. Time Spend on HB processing: {} seconds " +
+              "Datanode heartbeat Interval: {} seconds.",
+          TimeUnit.MILLISECONDS
+              .toSeconds(processingEndTime - processingStartTime),
+          heartbeatCheckerIntervalMs);
+    }
+
+    // we purposefully make this non-deterministic. Instead of using a
+    // scheduleAtFixedFrequency  we will just go to sleep
+    // and wake up at the next rendezvous point, which is currentTime +
+    // heartbeatCheckerIntervalMs. This leads to the issue that we are now
+    // heart beating not at a fixed cadence, but clock tick + time taken to
+    // work.
+    //
+    // This time taken to work can skew the heartbeat processor thread.
+    // The reason why we don't care is because of the following reasons.
+    //
+    // 1. checkerInterval is general many magnitudes faster than datanode HB
+    // frequency.
+    //
+    // 2. if we have too much nodes, the SCM would be doing only HB
+    // processing, this could lead to SCM's CPU starvation. With this
+    // approach we always guarantee that  HB thread sleeps for a little while.
+    //
+    // 3. It is possible that we will never finish processing the HB's in the
+    // thread. But that means we have a mis-configured system. We will warn
+    // the users by logging that information.
+    //
+    // 4. And the most important reason, heartbeats are not blocked even if
+    // this thread does not run, they will go into the processing queue.
+
+    if (!Thread.currentThread().isInterrupted() &&
+        !executorService.isShutdown()) {
+      executorService.schedule(this, heartbeatCheckerIntervalMs,
+          TimeUnit.MILLISECONDS);
+    } else {
+      LOG.info("Current Thread is interrupted, shutting down HB processing " +
+          "thread for Node Manager.");
+    }
+
+  }
+
+  /**
+   * Updates the node state if the condition satisfies.
+   *
+   * @param node DatanodeInfo
+   * @param condition condition to check
+   * @param state current state of node
+   * @param lifeCycleEvent NodeLifeCycleEvent to be applied if condition
+   *                       matches
+   *
+   * @throws NodeNotFoundException if the node is not present
+   */
+  private void updateNodeState(DatanodeInfo node, Predicate<Long> condition,
+      NodeState state, NodeLifeCycleEvent lifeCycleEvent)
+      throws NodeNotFoundException {
+    try {
+      if (condition.test(node.getLastHeartbeatTime())) {
+        NodeState newState = stateMachine.getNextState(state, lifeCycleEvent);
+        nodeStateMap.updateNodeState(node.getUuid(), state, newState);
+      }
+    } catch (InvalidStateTransitionException e) {
+      LOG.warn("Invalid state transition of node {}." +
+              " Current state: {}, life cycle event: {}",
+          node, state, lifeCycleEvent);
+    }
+  }
+
+  @Override
+  public void close() {
+    executorService.shutdown();
+    try {
+      if (!executorService.awaitTermination(5, TimeUnit.SECONDS)) {
+        executorService.shutdownNow();
+      }
+
+      if (!executorService.awaitTermination(5, TimeUnit.SECONDS)) {
+        LOG.error("Unable to shutdown NodeStateManager properly.");
+      }
+    } catch (InterruptedException e) {
+      executorService.shutdownNow();
+      Thread.currentThread().interrupt();
+    }
+  }
+}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/71df8c27/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/SCMNodeManager.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/SCMNodeManager.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/SCMNodeManager.java
index adca8ea..15ac3f2 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/SCMNodeManager.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/SCMNodeManager.java
@@ -19,8 +19,8 @@ package org.apache.hadoop.hdds.scm.node;
 
 import com.google.common.annotations.VisibleForTesting;
 import com.google.common.base.Preconditions;
-import com.google.common.util.concurrent.ThreadFactoryBuilder;
-import org.apache.hadoop.hdds.scm.HddsServerUtil;
+import org.apache.hadoop.hdds.scm.node.states.NodeAlreadyExistsException;
+import org.apache.hadoop.hdds.scm.node.states.NodeNotFoundException;
 import org.apache.hadoop.hdds.scm.server.StorageContainerManager;
 import org.apache.hadoop.hdds.scm.VersionInfo;
 import org.apache.hadoop.hdds.scm.container.placement.metrics.SCMNodeMetric;
@@ -29,7 +29,6 @@ import org.apache.hadoop.hdds.server.events.Event;
 import org.apache.hadoop.hdds.server.events.EventHandler;
 import org.apache.hadoop.hdds.server.events.EventPublisher;
 import org.apache.hadoop.hdds.server.events.TypedEvent;
-import org.apache.hadoop.hdfs.protocol.UnregisteredNodeException;
 import org.apache.hadoop.hdds.conf.OzoneConfiguration;
 import org.apache.hadoop.hdds.protocol.DatanodeDetails;
 import org.apache.hadoop.hdds.protocol.proto.HddsProtos.NodeState;
@@ -50,8 +49,6 @@ import org.apache.hadoop.ozone.protocol.commands.CommandForDatanode;
 import org.apache.hadoop.ozone.protocol.commands.RegisteredCommand;
 import org.apache.hadoop.ozone.protocol.commands.ReregisterCommand;
 import org.apache.hadoop.ozone.protocol.commands.SCMCommand;
-import org.apache.hadoop.util.Time;
-import org.apache.hadoop.util.concurrent.HadoopExecutors;
 
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
@@ -63,39 +60,15 @@ import java.util.Collections;
 import java.util.HashMap;
 import java.util.List;
 import java.util.Map;
-import java.util.Queue;
 import java.util.UUID;
 import java.util.concurrent.ConcurrentHashMap;
-import java.util.concurrent.ConcurrentLinkedQueue;
-import java.util.concurrent.ScheduledExecutorService;
-import java.util.concurrent.TimeUnit;
 import java.util.concurrent.atomic.AtomicBoolean;
-import java.util.concurrent.atomic.AtomicInteger;
-import java.util.stream.Collectors;
-
-import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.NodeState.DEAD;
-import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.NodeState
-    .HEALTHY;
-import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.NodeState
-    .INVALID;
-import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.NodeState.STALE;
-import static org.apache.hadoop.util.Time.monotonicNow;
 
 /**
  * Maintains information about the Datanodes on SCM side.
  * <p>
  * Heartbeats under SCM is very simple compared to HDFS heartbeatManager.
  * <p>
- * Here we maintain 3 maps, and we propagate a node from healthyNodesMap to
- * staleNodesMap to deadNodesMap. This moving of a node from one map to another
- * is controlled by 4 configuration variables. These variables define how many
- * heartbeats must go missing for the node to move from one map to another.
- * <p>
- * Each heartbeat that SCMNodeManager receives is  put into heartbeatQueue. The
- * worker thread wakes up and grabs that heartbeat from the queue. The worker
- * thread will lookup the healthynodes map and set the timestamp if the entry
- * is there. if not it will look up stale and deadnodes map.
- * <p>
  * The getNode(byState) functions make copy of node maps and then creates a list
  * based on that. It should be assumed that these get functions always report
  * *stale* information. For example, getting the deadNodeCount followed by
@@ -113,33 +86,18 @@ public class SCMNodeManager
   static final Logger LOG =
       LoggerFactory.getLogger(SCMNodeManager.class);
 
-  /**
-   * Key = NodeID, value = timestamp.
-   */
-  private final ConcurrentHashMap<UUID, Long> healthyNodes;
-  private final ConcurrentHashMap<UUID, Long> staleNodes;
-  private final ConcurrentHashMap<UUID, Long> deadNodes;
-  private final Queue<HeartbeatQueueItem> heartbeatQueue;
-  private final ConcurrentHashMap<UUID, DatanodeDetails> nodes;
+
+  private final NodeStateManager nodeStateManager;
   // Individual live node stats
+  // TODO: NodeStat should be moved to NodeStatemanager (NodeStateMap)
   private final ConcurrentHashMap<UUID, SCMNodeStat> nodeStats;
+  // Should we maintain aggregated stats? If this is not frequently used, we
+  // can always calculate it from nodeStats whenever required.
   // Aggregated node stats
   private SCMNodeStat scmStat;
-  // TODO: expose nodeStats and scmStat as metrics
-  private final AtomicInteger healthyNodeCount;
-  private final AtomicInteger staleNodeCount;
-  private final AtomicInteger deadNodeCount;
-  private final AtomicInteger totalNodes;
-  private long staleNodeIntervalMs;
-  private final long deadNodeIntervalMs;
-  private final long heartbeatCheckerIntervalMs;
-  private final long datanodeHBIntervalSeconds;
-  private final ScheduledExecutorService executorService;
-  private long lastHBcheckStart;
-  private long lastHBcheckFinished = 0;
-  private long lastHBProcessedCount;
+  // Should we create ChillModeManager and extract all the chill mode logic
+  // to a new class?
   private int chillModeNodeCount;
-  private final int maxHBToProcessPerLoop;
   private final String clusterID;
   private final VersionInfo version;
   /**
@@ -168,47 +126,19 @@ public class SCMNodeManager
    */
   public SCMNodeManager(OzoneConfiguration conf, String clusterID,
       StorageContainerManager scmManager) throws IOException {
-    heartbeatQueue = new ConcurrentLinkedQueue<>();
-    healthyNodes = new ConcurrentHashMap<>();
-    deadNodes = new ConcurrentHashMap<>();
-    staleNodes = new ConcurrentHashMap<>();
-    nodes = new ConcurrentHashMap<>();
-    nodeStats = new ConcurrentHashMap<>();
-    scmStat = new SCMNodeStat();
-
-    healthyNodeCount = new AtomicInteger(0);
-    staleNodeCount = new AtomicInteger(0);
-    deadNodeCount = new AtomicInteger(0);
-    totalNodes = new AtomicInteger(0);
+    this.nodeStateManager = new NodeStateManager(conf);
+    this.nodeStats = new ConcurrentHashMap<>();
+    this.scmStat = new SCMNodeStat();
     this.clusterID = clusterID;
     this.version = VersionInfo.getLatestVersion();
-    commandQueue = new CommandQueue();
-
+    this.commandQueue = new CommandQueue();
     // TODO: Support this value as a Percentage of known machines.
-    chillModeNodeCount = 1;
-
-    staleNodeIntervalMs = HddsServerUtil.getStaleNodeInterval(conf);
-    deadNodeIntervalMs = HddsServerUtil.getDeadNodeInterval(conf);
-    heartbeatCheckerIntervalMs =
-        HddsServerUtil.getScmheartbeatCheckerInterval(conf);
-    datanodeHBIntervalSeconds = HddsServerUtil.getScmHeartbeatInterval(conf);
-    maxHBToProcessPerLoop = HddsServerUtil.getMaxHBToProcessPerLoop(conf);
-
-    executorService = HadoopExecutors.newScheduledThreadPool(1,
-        new ThreadFactoryBuilder().setDaemon(true)
-            .setNameFormat("SCM Heartbeat Processing Thread - %d").build());
-
-    LOG.info("Entering startup chill mode.");
+    this.chillModeNodeCount = 1;
     this.inStartupChillMode = new AtomicBoolean(true);
     this.inManualChillMode = new AtomicBoolean(false);
-
-    Preconditions.checkState(heartbeatCheckerIntervalMs > 0);
-    executorService.schedule(this, heartbeatCheckerIntervalMs,
-        TimeUnit.MILLISECONDS);
-
-    registerMXBean();
-
     this.scmManager = scmManager;
+    LOG.info("Entering startup chill mode.");
+    registerMXBean();
   }
 
   private void registerMXBean() {
@@ -227,12 +157,11 @@ public class SCMNodeManager
    * Removes a data node from the management of this Node Manager.
    *
    * @param node - DataNode.
-   * @throws UnregisteredNodeException
+   * @throws NodeNotFoundException
    */
   @Override
-  public void removeNode(DatanodeDetails node) {
-    // TODO : Fix me when adding the SCM CLI.
-
+  public void removeNode(DatanodeDetails node) throws NodeNotFoundException {
+    nodeStateManager.removeNode(node);
   }
 
   /**
@@ -244,31 +173,8 @@ public class SCMNodeManager
    * @return List of Datanodes that are known to SCM in the requested state.
    */
   @Override
-  public List<DatanodeDetails> getNodes(NodeState nodestate)
-      throws IllegalArgumentException {
-    Map<UUID, Long> set;
-    switch (nodestate) {
-    case HEALTHY:
-      synchronized (this) {
-        set = Collections.unmodifiableMap(new HashMap<>(healthyNodes));
-      }
-      break;
-    case STALE:
-      synchronized (this) {
-        set = Collections.unmodifiableMap(new HashMap<>(staleNodes));
-      }
-      break;
-    case DEAD:
-      synchronized (this) {
-        set = Collections.unmodifiableMap(new HashMap<>(deadNodes));
-      }
-      break;
-    default:
-      throw new IllegalArgumentException("Unknown node state requested.");
-    }
-
-    return set.entrySet().stream().map(entry -> nodes.get(entry.getKey()))
-        .collect(Collectors.toList());
+  public List<DatanodeDetails> getNodes(NodeState nodestate) {
+    return nodeStateManager.getNodes(nodestate);
   }
 
   /**
@@ -278,12 +184,7 @@ public class SCMNodeManager
    */
   @Override
   public List<DatanodeDetails> getAllNodes() {
-    Map<UUID, DatanodeDetails> set;
-    synchronized (this) {
-      set = Collections.unmodifiableMap(new HashMap<>(nodes));
-    }
-    return set.entrySet().stream().map(entry -> nodes.get(entry.getKey()))
-        .collect(Collectors.toList());
+    return nodeStateManager.getAllNodes();
   }
 
   /**
@@ -315,14 +216,16 @@ public class SCMNodeManager
     if (inStartupChillMode.get()) {
       return "Still in chill mode, waiting on nodes to report in." +
           String.format(" %d nodes reported, minimal %d nodes required.",
-              totalNodes.get(), getMinimumChillModeNodes());
+              nodeStateManager.getTotalNodeCount(), getMinimumChillModeNodes());
     }
     if (inManualChillMode.get()) {
       return "Out of startup chill mode, but in manual chill mode." +
-          String.format(" %d nodes have reported in.", totalNodes.get());
+          String.format(" %d nodes have reported in.",
+              nodeStateManager.getTotalNodeCount());
     }
     return "Out of chill mode." +
-        String.format(" %d nodes have reported in.", totalNodes.get());
+        String.format(" %d nodes have reported in.",
+            nodeStateManager.getTotalNodeCount());
   }
 
   /**
@@ -376,33 +279,7 @@ public class SCMNodeManager
    */
   @Override
   public int getNodeCount(NodeState nodestate) {
-    switch (nodestate) {
-    case HEALTHY:
-      return healthyNodeCount.get();
-    case STALE:
-      return staleNodeCount.get();
-    case DEAD:
-      return deadNodeCount.get();
-    case INVALID:
-      // This is unknown due to the fact that some nodes can be in
-      // transit between the other states. Returning a count for that is not
-      // possible. The fact that we have such state is to deal with the fact
-      // that this information might not be consistent always.
-      return 0;
-    default:
-      return 0;
-    }
-  }
-
-  /**
-   * Used for testing.
-   *
-   * @return true if the HB check is done.
-   */
-  @VisibleForTesting
-  @Override
-  public boolean waitForHeartbeatProcessed() {
-    return lastHBcheckFinished != 0;
+    return nodeStateManager.getNodeCount(nodestate);
   }
 
   /**
@@ -413,236 +290,14 @@ public class SCMNodeManager
    */
   @Override
   public NodeState getNodeState(DatanodeDetails datanodeDetails) {
-    // There is a subtle race condition here, hence we also support
-    // the NODEState.UNKNOWN. It is possible that just before we check the
-    // healthyNodes, we have removed the node from the healthy list but stil
-    // not added it to Stale Nodes list.
-    // We can fix that by adding the node to stale list before we remove, but
-    // then the node is in 2 states to avoid this race condition. Instead we
-    // just deal with the possibilty of getting a state called unknown.
-
-    UUID id = datanodeDetails.getUuid();
-    if(healthyNodes.containsKey(id)) {
-      return HEALTHY;
-    }
-
-    if(staleNodes.containsKey(id)) {
-      return STALE;
-    }
-
-    if(deadNodes.containsKey(id)) {
-      return DEAD;
-    }
-
-    return INVALID;
-  }
-
-  /**
-   * This is the real worker thread that processes the HB queue. We do the
-   * following things in this thread.
-   * <p>
-   * Process the Heartbeats that are in the HB Queue. Move Stale or Dead node to
-   * healthy if we got a heartbeat from them. Move Stales Node to dead node
-   * table if it is needed. Move healthy nodes to stale nodes if it is needed.
-   * <p>
-   * if it is a new node, we call register node and add it to the list of nodes.
-   * This will be replaced when we support registration of a node in SCM.
-   *
-   * @see Thread#run()
-   */
-  @Override
-  public void run() {
-    lastHBcheckStart = monotonicNow();
-    lastHBProcessedCount = 0;
-
-    // Process the whole queue.
-    while (!heartbeatQueue.isEmpty() &&
-        (lastHBProcessedCount < maxHBToProcessPerLoop)) {
-      HeartbeatQueueItem hbItem = heartbeatQueue.poll();
-      synchronized (this) {
-        handleHeartbeat(hbItem);
-      }
-      // we are shutting down or something give up processing the rest of
-      // HBs. This will terminate the HB processing thread.
-      if (Thread.currentThread().isInterrupted()) {
-        LOG.info("Current Thread is isInterrupted, shutting down HB " +
-            "processing thread for Node Manager.");
-        return;
-      }
-    }
-
-    if (lastHBProcessedCount >= maxHBToProcessPerLoop) {
-      LOG.error("SCM is being flooded by heartbeats. Not able to keep up with" +
-          " the heartbeat counts. Processed {} heartbeats. Breaking out of" +
-          " loop. Leaving rest to be processed later. ", lastHBProcessedCount);
-    }
-
-    // Iterate over the Stale nodes and decide if we need to move any node to
-    // dead State.
-    long currentTime = monotonicNow();
-    for (Map.Entry<UUID, Long> entry : staleNodes.entrySet()) {
-      if (currentTime - entry.getValue() > deadNodeIntervalMs) {
-        synchronized (this) {
-          moveStaleNodeToDead(entry);
-        }
-      }
-    }
-
-    // Iterate over the healthy nodes and decide if we need to move any node to
-    // Stale State.
-    currentTime = monotonicNow();
-    for (Map.Entry<UUID, Long> entry : healthyNodes.entrySet()) {
-      if (currentTime - entry.getValue() > staleNodeIntervalMs) {
-        synchronized (this) {
-          moveHealthyNodeToStale(entry);
-        }
-      }
-    }
-    lastHBcheckFinished = monotonicNow();
-
-    monitorHBProcessingTime();
-
-    // we purposefully make this non-deterministic. Instead of using a
-    // scheduleAtFixedFrequency  we will just go to sleep
-    // and wake up at the next rendezvous point, which is currentTime +
-    // heartbeatCheckerIntervalMs. This leads to the issue that we are now
-    // heart beating not at a fixed cadence, but clock tick + time taken to
-    // work.
-    //
-    // This time taken to work can skew the heartbeat processor thread.
-    // The reason why we don't care is because of the following reasons.
-    //
-    // 1. checkerInterval is general many magnitudes faster than datanode HB
-    // frequency.
-    //
-    // 2. if we have too much nodes, the SCM would be doing only HB
-    // processing, this could lead to SCM's CPU starvation. With this
-    // approach we always guarantee that  HB thread sleeps for a little while.
-    //
-    // 3. It is possible that we will never finish processing the HB's in the
-    // thread. But that means we have a mis-configured system. We will warn
-    // the users by logging that information.
-    //
-    // 4. And the most important reason, heartbeats are not blocked even if
-    // this thread does not run, they will go into the processing queue.
-
-    if (!Thread.currentThread().isInterrupted() &&
-        !executorService.isShutdown()) {
-      executorService.schedule(this, heartbeatCheckerIntervalMs, TimeUnit
-          .MILLISECONDS);
-    } else {
-      LOG.info("Current Thread is interrupted, shutting down HB processing " +
-          "thread for Node Manager.");
-    }
-  }
-
-  /**
-   * If we have taken too much time for HB processing, log that information.
-   */
-  private void monitorHBProcessingTime() {
-    if (TimeUnit.MILLISECONDS.toSeconds(lastHBcheckFinished -
-        lastHBcheckStart) > datanodeHBIntervalSeconds) {
-      LOG.error("Total time spend processing datanode HB's is greater than " +
-              "configured values for datanode heartbeats. Please adjust the" +
-              " heartbeat configs. Time Spend on HB processing: {} seconds " +
-              "Datanode heartbeat Interval: {} seconds , heartbeats " +
-              "processed: {}",
-          TimeUnit.MILLISECONDS
-              .toSeconds(lastHBcheckFinished - lastHBcheckStart),
-          datanodeHBIntervalSeconds, lastHBProcessedCount);
-    }
-  }
-
-  /**
-   * Moves a Healthy node to a Stale node state.
-   *
-   * @param entry - Map Entry
-   */
-  private void moveHealthyNodeToStale(Map.Entry<UUID, Long> entry) {
-    LOG.trace("Moving healthy node to stale: {}", entry.getKey());
-    healthyNodes.remove(entry.getKey());
-    healthyNodeCount.decrementAndGet();
-    staleNodes.put(entry.getKey(), entry.getValue());
-    staleNodeCount.incrementAndGet();
-
-    if (scmManager != null) {
-      // remove stale node's container report
-      scmManager.removeContainerReport(entry.getKey().toString());
+    try {
+      return nodeStateManager.getNodeState(datanodeDetails);
+    } catch (NodeNotFoundException e) {
+      // TODO: should we throw NodeNotFoundException?
+      return null;
     }
   }
 
-  /**
-   * Moves a Stale node to a dead node state.
-   *
-   * @param entry - Map Entry
-   */
-  private void moveStaleNodeToDead(Map.Entry<UUID, Long> entry) {
-    LOG.trace("Moving stale node to dead: {}", entry.getKey());
-    staleNodes.remove(entry.getKey());
-    staleNodeCount.decrementAndGet();
-    deadNodes.put(entry.getKey(), entry.getValue());
-    deadNodeCount.incrementAndGet();
-
-    // Update SCM node stats
-    SCMNodeStat deadNodeStat = nodeStats.get(entry.getKey());
-    scmStat.subtract(deadNodeStat);
-    nodeStats.remove(entry.getKey());
-  }
-
-  /**
-   * Handles a single heartbeat from a datanode.
-   *
-   * @param hbItem - heartbeat item from a datanode.
-   */
-  private void handleHeartbeat(HeartbeatQueueItem hbItem) {
-    lastHBProcessedCount++;
-
-    DatanodeDetails datanodeDetails = hbItem.getDatanodeDetails();
-    UUID datanodeUuid = datanodeDetails.getUuid();
-    NodeReportProto nodeReport = hbItem.getNodeReport();
-    long recvTimestamp = hbItem.getRecvTimestamp();
-    long processTimestamp = Time.monotonicNow();
-    if (LOG.isTraceEnabled()) {
-      //TODO: add average queue time of heartbeat request as metrics
-      LOG.trace("Processing Heartbeat from datanode {}: queueing time {}",
-          datanodeUuid, processTimestamp - recvTimestamp);
-    }
-
-    // If this node is already in the list of known and healthy nodes
-    // just set the last timestamp and return.
-    if (healthyNodes.containsKey(datanodeUuid)) {
-      healthyNodes.put(datanodeUuid, processTimestamp);
-      updateNodeStat(datanodeUuid, nodeReport);
-      return;
-    }
-
-    // A stale node has heartbeat us we need to remove the node from stale
-    // list and move to healthy list.
-    if (staleNodes.containsKey(datanodeUuid)) {
-      staleNodes.remove(datanodeUuid);
-      healthyNodes.put(datanodeUuid, processTimestamp);
-      healthyNodeCount.incrementAndGet();
-      staleNodeCount.decrementAndGet();
-      updateNodeStat(datanodeUuid, nodeReport);
-      return;
-    }
-
-    // A dead node has heartbeat us, we need to remove that node from dead
-    // node list and move it to the healthy list.
-    if (deadNodes.containsKey(datanodeUuid)) {
-      deadNodes.remove(datanodeUuid);
-      healthyNodes.put(datanodeUuid, processTimestamp);
-      deadNodeCount.decrementAndGet();
-      healthyNodeCount.incrementAndGet();
-      updateNodeStat(datanodeUuid, nodeReport);
-      return;
-    }
-
-    LOG.warn("SCM receive heartbeat from unregistered datanode {}",
-        datanodeUuid);
-    this.commandQueue.addCommand(datanodeUuid,
-        new ReregisterCommand());
-  }
 
   private void updateNodeStat(UUID dnId, NodeReportProto nodeReport) {
     SCMNodeStat stat = nodeStats.get(dnId);
@@ -679,24 +334,6 @@ public class SCMNodeManager
   @Override
   public void close() throws IOException {
     unregisterMXBean();
-    executorService.shutdown();
-    try {
-      if (!executorService.awaitTermination(5, TimeUnit.SECONDS)) {
-        executorService.shutdownNow();
-      }
-
-      if (!executorService.awaitTermination(5, TimeUnit.SECONDS)) {
-        LOG.error("Unable to shutdown NodeManager properly.");
-      }
-    } catch (InterruptedException e) {
-      executorService.shutdownNow();
-      Thread.currentThread().interrupt();
-    }
-  }
-
-  @VisibleForTesting
-  long getLastHBProcessedCount() {
-    return lastHBProcessedCount;
   }
 
   /**
@@ -739,27 +376,22 @@ public class SCMNodeManager
       datanodeDetails.setHostName(hostname);
       datanodeDetails.setIpAddress(ip);
     }
-    RegisteredCommand responseCommand = verifyDatanodeUUID(datanodeDetails);
-    if (responseCommand != null) {
-      return responseCommand;
-    }
     UUID dnId = datanodeDetails.getUuid();
-    nodes.put(dnId, datanodeDetails);
-    totalNodes.incrementAndGet();
-    healthyNodes.put(dnId, monotonicNow());
-    healthyNodeCount.incrementAndGet();
-    nodeStats.put(dnId, new SCMNodeStat());
-
-    if(inStartupChillMode.get() &&
-        totalNodes.get() >= getMinimumChillModeNodes()) {
-      inStartupChillMode.getAndSet(false);
-      LOG.info("Leaving startup chill mode.");
+    try {
+      nodeStateManager.addNode(datanodeDetails);
+      nodeStats.put(dnId, new SCMNodeStat());
+      if(inStartupChillMode.get() &&
+          nodeStateManager.getTotalNodeCount() >= getMinimumChillModeNodes()) {
+        inStartupChillMode.getAndSet(false);
+        LOG.info("Leaving startup chill mode.");
+      }
+      // Updating Node Report, as registration is successful
+      updateNodeStat(datanodeDetails.getUuid(), nodeReport);
+      LOG.info("Data node with ID: {} Registered.", datanodeDetails.getUuid());
+    } catch (NodeAlreadyExistsException e) {
+      LOG.trace("Datanode is already registered. Datanode: {}",
+          datanodeDetails.toString());
     }
-
-    // Updating Node Report, as registration is successful
-    updateNodeStat(datanodeDetails.getUuid(), nodeReport);
-    LOG.info("Data node with ID: {} Registered.",
-        datanodeDetails.getUuid());
     RegisteredCommand.Builder builder =
         RegisteredCommand.newBuilder().setErrorCode(ErrorCode.success)
             .setDatanodeUUID(datanodeDetails.getUuidString())
@@ -771,45 +403,24 @@ public class SCMNodeManager
   }
 
   /**
-   * Verifies the datanode does not have a valid UUID already.
-   *
-   * @param datanodeDetails - Datanode Details.
-   * @return SCMCommand
-   */
-  private RegisteredCommand verifyDatanodeUUID(
-      DatanodeDetails datanodeDetails) {
-    if (datanodeDetails.getUuid() != null &&
-        nodes.containsKey(datanodeDetails.getUuid())) {
-      LOG.trace("Datanode is already registered. Datanode: {}",
-          datanodeDetails.toString());
-      return RegisteredCommand.newBuilder()
-          .setErrorCode(ErrorCode.success)
-          .setClusterID(this.clusterID)
-          .setDatanodeUUID(datanodeDetails.getUuidString())
-          .build();
-    }
-    return null;
-  }
-
-  /**
    * Send heartbeat to indicate the datanode is alive and doing well.
    *
    * @param datanodeDetails - DatanodeDetailsProto.
-   * @param nodeReport - node report.
    * @return SCMheartbeat response.
    * @throws IOException
    */
   @Override
-  public List<SCMCommand> sendHeartbeat(
-      DatanodeDetails datanodeDetails, NodeReportProto nodeReport) {
-
+  public List<SCMCommand> processHeartbeat(DatanodeDetails datanodeDetails) {
     Preconditions.checkNotNull(datanodeDetails, "Heartbeat is missing " +
         "DatanodeDetails.");
-    heartbeatQueue.add(
-        new HeartbeatQueueItem.Builder()
-            .setDatanodeDetails(datanodeDetails)
-            .setNodeReport(nodeReport)
-            .build());
+    try {
+      nodeStateManager.updateLastHeartbeatTime(datanodeDetails);
+    } catch (NodeNotFoundException e) {
+      LOG.warn("SCM receive heartbeat from unregistered datanode {}",
+          datanodeDetails);
+      commandQueue.addCommand(datanodeDetails.getUuid(),
+          new ReregisterCommand());
+    }
     return commandQueue.getCommand(datanodeDetails.getUuid());
   }
 
@@ -855,11 +466,6 @@ public class SCMNodeManager
     this.commandQueue.addCommand(dnId, command);
   }
 
-  @VisibleForTesting
-  public void setStaleNodeIntervalMs(long interval) {
-    this.staleNodeIntervalMs = interval;
-  }
-
   @Override
   public void onMessage(CommandForDatanode commandForDatanode,
       EventPublisher publisher) {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/71df8c27/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/states/NodeAlreadyExistsException.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/states/NodeAlreadyExistsException.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/states/NodeAlreadyExistsException.java
new file mode 100644
index 0000000..aa5c382
--- /dev/null
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/states/NodeAlreadyExistsException.java
@@ -0,0 +1,45 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with this
+ * work for additional information regarding copyright ownership.  The ASF
+ * licenses this file to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations under
+ * the License.
+ */
+
+package org.apache.hadoop.hdds.scm.node.states;
+
+/**
+ * This exception represents that there is already a node added to NodeStateMap
+ * with same UUID.
+ */
+public class NodeAlreadyExistsException extends NodeException {
+
+  /**
+   * Constructs an {@code NodeAlreadyExistsException} with {@code null}
+   * as its error detail message.
+   */
+  public NodeAlreadyExistsException() {
+    super();
+  }
+
+  /**
+   * Constructs an {@code NodeAlreadyExistsException} with the specified
+   * detail message.
+   *
+   * @param message
+   *        The detail message (which is saved for later retrieval
+   *        by the {@link #getMessage()} method)
+   */
+  public NodeAlreadyExistsException(String message) {
+    super(message);
+  }
+}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/71df8c27/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/states/NodeException.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/states/NodeException.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/states/NodeException.java
new file mode 100644
index 0000000..c67b55d
--- /dev/null
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/states/NodeException.java
@@ -0,0 +1,44 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with this
+ * work for additional information regarding copyright ownership.  The ASF
+ * licenses this file to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations under
+ * the License.
+ */
+
+package org.apache.hadoop.hdds.scm.node.states;
+
+/**
+ * This exception represents all node related exceptions in NodeStateMap.
+ */
+public class NodeException extends Exception {
+
+  /**
+   * Constructs an {@code NodeException} with {@code null}
+   * as its error detail message.
+   */
+  public NodeException() {
+    super();
+  }
+
+  /**
+   * Constructs an {@code NodeException} with the specified
+   * detail message.
+   *
+   * @param message
+   *        The detail message (which is saved for later retrieval
+   *        by the {@link #getMessage()} method)
+   */
+  public NodeException(String message) {
+    super(message);
+  }
+}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/71df8c27/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/states/NodeNotFoundException.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/states/NodeNotFoundException.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/states/NodeNotFoundException.java
new file mode 100644
index 0000000..52a352e
--- /dev/null
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/states/NodeNotFoundException.java
@@ -0,0 +1,49 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with this
+ * work for additional information regarding copyright ownership.  The ASF
+ * licenses this file to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations under
+ * the License.
+ */
+
+package org.apache.hadoop.hdds.scm.node.states;
+
+import java.io.IOException;
+
+/**
+ * This exception represents that the node that is being accessed does not
+ * exist in NodeStateMap.
+ */
+public class NodeNotFoundException extends NodeException {
+
+
+  /**
+   * Constructs an {@code NodeNotFoundException} with {@code null}
+   * as its error detail message.
+   */
+  public NodeNotFoundException() {
+    super();
+  }
+
+  /**
+   * Constructs an {@code NodeNotFoundException} with the specified
+   * detail message.
+   *
+   * @param message
+   *        The detail message (which is saved for later retrieval
+   *        by the {@link #getMessage()} method)
+   */
+  public NodeNotFoundException(String message) {
+    super(message);
+  }
+
+}


---------------------------------------------------------------------
To unsubscribe, e-mail: common-commits-unsubscribe@hadoop.apache.org
For additional commands, e-mail: common-commits-help@hadoop.apache.org


[22/50] [abbrv] hadoop git commit: Revert "Merge branch 'trunk' of https://git-wip-us.apache.org/repos/asf/hadoop into trunk"

Posted by vi...@apache.org.
Revert "Merge branch 'trunk' of https://git-wip-us.apache.org/repos/asf/hadoop into trunk"

This reverts commit c163d1797ade0f47d35b4a44381b8ef1dfec5b60, reversing
changes made to 0d9804dcef2eab5ebf84667d9ca49bb035d9a731.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/39ad9890
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/39ad9890
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/39ad9890

Branch: refs/heads/HDFS-12090
Commit: 39ad98903a5f042573b97a2e5438bc57af7cc7a1
Parents: c163d17
Author: Anu Engineer <ae...@apache.org>
Authored: Thu Jul 5 12:22:18 2018 -0700
Committer: Anu Engineer <ae...@apache.org>
Committed: Thu Jul 5 12:22:18 2018 -0700

----------------------------------------------------------------------
 .../dev-support/findbugs-exclude.xml            |  17 +-
 .../hadoop/yarn/api/records/Resource.java       |  13 -
 .../api/records/impl/LightWeightResource.java   |  23 +-
 .../scheduler/fair/ConfigurableResource.java    |  69 +----
 .../fair/FairSchedulerConfiguration.java        | 174 ++----------
 .../allocation/AllocationFileQueueParser.java   |   2 +-
 .../resourcemanager/webapp/dao/AppInfo.java     |   2 +-
 .../webapp/dao/SchedulerInfo.java               |   8 +-
 .../fair/TestFairSchedulerConfiguration.java    | 160 +++--------
 .../webapp/TestRMWebServices.java               |  31 +--
 .../webapp/TestRMWebServicesApps.java           |  14 +-
 ...estRMWebServicesAppsCustomResourceTypes.java | 242 -----------------
 .../webapp/TestRMWebServicesCapacitySched.java  |  30 +-
 .../TestRMWebServicesConfigurationMutation.java |   5 -
 .../webapp/TestRMWebServicesFairScheduler.java  |  95 ++++---
 .../TestRMWebServicesSchedulerActivities.java   |   2 +-
 ...ustomResourceTypesConfigurationProvider.java | 138 ----------
 .../FairSchedulerJsonVerifications.java         | 139 ----------
 .../FairSchedulerXmlVerifications.java          | 153 -----------
 ...ervicesFairSchedulerCustomResourceTypes.java | 271 -------------------
 .../webapp/helper/AppInfoJsonVerifications.java | 123 ---------
 .../webapp/helper/AppInfoXmlVerifications.java  | 132 ---------
 .../webapp/helper/BufferedClientResponse.java   |  57 ----
 .../helper/JsonCustomResourceTypeTestcase.java  |  77 ------
 .../ResourceRequestsJsonVerifications.java      | 252 -----------------
 .../ResourceRequestsXmlVerifications.java       | 215 ---------------
 .../helper/XmlCustomResourceTypeTestCase.java   | 112 --------
 .../src/site/markdown/FairScheduler.md          |   6 +-
 28 files changed, 157 insertions(+), 2405 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/39ad9890/hadoop-yarn-project/hadoop-yarn/dev-support/findbugs-exclude.xml
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/dev-support/findbugs-exclude.xml b/hadoop-yarn-project/hadoop-yarn/dev-support/findbugs-exclude.xml
index 5cc81e5..5841361 100644
--- a/hadoop-yarn-project/hadoop-yarn/dev-support/findbugs-exclude.xml
+++ b/hadoop-yarn-project/hadoop-yarn/dev-support/findbugs-exclude.xml
@@ -67,6 +67,11 @@
   </Match>
   <Match>
     <Class name="org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.RMAppAttemptMetrics" />
+    <Method name="getLocalityStatistics" />
+    <Bug pattern="EI_EXPOSE_REP" />
+  </Match>
+  <Match>
+    <Class name="org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.RMAppAttemptMetrics" />
     <Method name="incNumAllocatedContainers"/>
     <Bug pattern="VO_VOLATILE_INCREMENT" />
   </Match>
@@ -113,18 +118,6 @@
     <Bug pattern="BC_UNCONFIRMED_CAST" />
   </Match>
 
-  <!-- Ignore exposed internal representations -->
-  <Match>
-    <Class name="org.apache.hadoop.yarn.api.records.Resource" />
-    <Method name="getResources" />
-    <Bug pattern="EI_EXPOSE_REP" />
-  </Match>
-  <Match>
-    <Class name="org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.RMAppAttemptMetrics" />
-    <Method name="getLocalityStatistics" />
-    <Bug pattern="EI_EXPOSE_REP" />
-  </Match>
-
   <!-- Object cast is based on the event type -->
   <Match>
     <Class name="org.apache.hadoop.yarn.server.nodemanager.timelineservice.NMTimelinePublisher" />

http://git-wip-us.apache.org/repos/asf/hadoop/blob/39ad9890/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/Resource.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/Resource.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/Resource.java
index 173d4c9..71a6b54 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/Resource.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/Resource.java
@@ -30,7 +30,6 @@ import org.apache.hadoop.classification.InterfaceAudience.Public;
 import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.classification.InterfaceStability.Evolving;
 import org.apache.hadoop.classification.InterfaceStability.Stable;
-import org.apache.hadoop.classification.InterfaceStability.Unstable;
 import org.apache.hadoop.yarn.api.ApplicationMasterProtocol;
 import org.apache.hadoop.yarn.api.protocolrecords.ResourceTypes;
 import org.apache.hadoop.yarn.api.records.impl.LightWeightResource;
@@ -76,18 +75,6 @@ public abstract class Resource implements Comparable<Resource> {
   @Private
   public static final int VCORES_INDEX = 1;
 
-  /**
-   * Return a new {@link Resource} instance with all resource values
-   * initialized to {@code value}.
-   * @param value the value to use for all resources
-   * @return a new {@link Resource} instance
-   */
-  @Private
-  @Unstable
-  public static Resource newInstance(long value) {
-    return new LightWeightResource(value);
-  }
-
   @Public
   @Stable
   public static Resource newInstance(int memory, int vCores) {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/39ad9890/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/impl/LightWeightResource.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/impl/LightWeightResource.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/impl/LightWeightResource.java
index 77f77f3..a6e6432 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/impl/LightWeightResource.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/impl/LightWeightResource.java
@@ -18,8 +18,9 @@
 
 package org.apache.hadoop.yarn.api.records.impl;
 
-import org.apache.hadoop.classification.InterfaceAudience.Private;
+import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability.Unstable;
+import org.apache.hadoop.yarn.api.protocolrecords.ResourceTypes;
 import org.apache.hadoop.yarn.api.records.Resource;
 import org.apache.hadoop.yarn.api.records.ResourceInformation;
 import org.apache.hadoop.yarn.util.resource.ResourceUtils;
@@ -57,29 +58,13 @@ import static org.apache.hadoop.yarn.api.records.ResourceInformation.*;
  *
  * @see Resource
  */
-@Private
+@InterfaceAudience.Private
 @Unstable
 public class LightWeightResource extends Resource {
 
   private ResourceInformation memoryResInfo;
   private ResourceInformation vcoresResInfo;
 
-  /**
-   * Create a new {@link LightWeightResource} instance with all resource values
-   * initialized to {@code value}.
-   * @param value the value to use for all resources
-   */
-  public LightWeightResource(long value) {
-    ResourceInformation[] types = ResourceUtils.getResourceTypesArray();
-    initResourceInformations(value, value, types.length);
-
-    for (int i = 2; i < types.length; i++) {
-      resources[i] = new ResourceInformation();
-      ResourceInformation.copy(types[i], resources[i]);
-      resources[i].setValue(value);
-    }
-  }
-
   public LightWeightResource(long memory, int vcores) {
     int numberOfKnownResourceTypes = ResourceUtils
         .getNumberOfKnownResourceTypes();
@@ -106,7 +91,7 @@ public class LightWeightResource extends Resource {
     }
   }
 
-  private void initResourceInformations(long memory, long vcores,
+  private void initResourceInformations(long memory, int vcores,
       int numberOfKnownResourceTypes) {
     this.memoryResInfo = newDefaultInformation(MEMORY_URI, MEMORY_MB.getUnits(),
         memory);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/39ad9890/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/ConfigurableResource.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/ConfigurableResource.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/ConfigurableResource.java
index 0c3b0dd..ecdd011 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/ConfigurableResource.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/ConfigurableResource.java
@@ -18,13 +18,9 @@
 
 package org.apache.hadoop.yarn.server.resourcemanager.scheduler.fair;
 
-import java.util.Arrays;
 import org.apache.hadoop.classification.InterfaceAudience.Private;
 import org.apache.hadoop.classification.InterfaceStability.Unstable;
 import org.apache.hadoop.yarn.api.records.Resource;
-import org.apache.hadoop.yarn.api.records.ResourceInformation;
-import org.apache.hadoop.yarn.exceptions.ResourceNotFoundException;
-import org.apache.hadoop.yarn.util.resource.ResourceUtils;
 
 /**
  * A {@code ConfigurableResource} object represents an entity that is used to
@@ -37,53 +33,29 @@ public class ConfigurableResource {
   private final Resource resource;
   private final double[] percentages;
 
-  ConfigurableResource() {
-    this(getOneHundredPercentArray());
-  }
-
-  ConfigurableResource(double[] percentages) {
+  public ConfigurableResource(double[] percentages) {
     this.percentages = percentages.clone();
     this.resource = null;
   }
 
-  ConfigurableResource(long value) {
-    this(Resource.newInstance(value));
-  }
-
   public ConfigurableResource(Resource resource) {
     this.percentages = null;
     this.resource = resource;
   }
 
-  private static double[] getOneHundredPercentArray() {
-    double[] resourcePercentages =
-        new double[ResourceUtils.getNumberOfKnownResourceTypes()];
-    Arrays.fill(resourcePercentages, 1.0);
-
-    return resourcePercentages;
-  }
-
   /**
    * Get resource by multiplying the cluster resource and the percentage of
    * each resource respectively. Return the absolute resource if either
    * {@code percentages} or {@code clusterResource} is null.
    *
    * @param clusterResource the cluster resource
-   * @return resource the resulting resource
+   * @return resource
    */
   public Resource getResource(Resource clusterResource) {
     if (percentages != null && clusterResource != null) {
       long memory = (long) (clusterResource.getMemorySize() * percentages[0]);
       int vcore = (int) (clusterResource.getVirtualCores() * percentages[1]);
-      Resource res = Resource.newInstance(memory, vcore);
-      ResourceInformation[] clusterInfo = clusterResource.getResources();
-
-      for (int i = 2; i < clusterInfo.length; i++) {
-        res.setResourceValue(i,
-            (long)(clusterInfo[i].getValue() * percentages[i]));
-      }
-
-      return res;
+      return Resource.newInstance(memory, vcore);
     } else {
       return resource;
     }
@@ -97,39 +69,4 @@ public class ConfigurableResource {
   public Resource getResource() {
     return resource;
   }
-
-  /**
-   * Set the value of the wrapped resource if this object isn't setup to use
-   * percentages. If this object is set to use percentages, this method has
-   * no effect.
-   *
-   * @param name the name of the resource
-   * @param value the value
-   */
-  void setValue(String name, long value) {
-    if (resource != null) {
-      resource.setResourceValue(name, value);
-    }
-  }
-
-  /**
-   * Set the percentage of the resource if this object is setup to use
-   * percentages. If this object is set to use percentages, this method has
-   * no effect.
-   *
-   * @param name the name of the resource
-   * @param value the percentage
-   */
-  void setPercentage(String name, double value) {
-    if (percentages != null) {
-      Integer index = ResourceUtils.getResourceTypeIndex().get(name);
-
-      if (index != null) {
-        percentages[index] = value;
-      } else {
-        throw new ResourceNotFoundException("The requested resource, \""
-            + name + "\", could not be found.");
-      }
-    }
-  }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/39ad9890/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FairSchedulerConfiguration.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FairSchedulerConfiguration.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FairSchedulerConfiguration.java
index 8c4932b..b50e4bb 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FairSchedulerConfiguration.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FairSchedulerConfiguration.java
@@ -33,7 +33,6 @@ import org.apache.hadoop.util.StringUtils;
 import org.apache.hadoop.yarn.api.records.Resource;
 import org.apache.hadoop.yarn.api.records.ResourceInformation;
 import org.apache.hadoop.yarn.conf.YarnConfiguration;
-import org.apache.hadoop.yarn.exceptions.ResourceNotFoundException;
 import org.apache.hadoop.yarn.server.utils.BuilderUtils;
 import org.apache.hadoop.yarn.util.UnitsConversionUtil;
 import org.apache.hadoop.yarn.util.resource.ResourceUtils;
@@ -214,9 +213,6 @@ public class FairSchedulerConfiguration extends Configuration {
           CONF_PREFIX + "reservable-nodes";
   public static final float RESERVABLE_NODES_DEFAULT = 0.05f;
 
-  private static final String INVALID_RESOURCE_DEFINITION_PREFIX =
-          "Error reading resource config--invalid resource definition: ";
-
   public FairSchedulerConfiguration() {
     super();
   }
@@ -411,167 +407,54 @@ public class FairSchedulerConfiguration extends Configuration {
   }
 
   /**
-   * Parses a resource config value in one of three forms:
-   * <ol>
-   * <li>Percentage: &quot;50%&quot; or &quot;40% memory, 60% cpu&quot;</li>
-   * <li>New style resources: &quot;vcores=10, memory-mb=1024&quot;
-   * or &quot;vcores=60%, memory-mb=40%&quot;</li>
-   * <li>Old style resources: &quot;1024 mb, 10 vcores&quot;</li>
-   * </ol>
-   * In new style resources, any resource that is not specified will be
-   * set to {@link Long#MAX_VALUE} or 100%, as appropriate. Also, in the new
-   * style resources, units are not allowed. Units are assumed from the resource
-   * manager's settings for the resources when the value isn't a percentage.
-   *
-   * @param value the resource definition to parse
-   * @return a {@link ConfigurableResource} that represents the parsed value
-   * @throws AllocationConfigurationException if the raw value is not a valid
-   * resource definition
+   * Parses a resource config value of a form like "1024", "1024 mb",
+   * or "1024 mb, 3 vcores". If no units are given, megabytes are assumed.
+   * 
+   * @throws AllocationConfigurationException
    */
-  public static ConfigurableResource parseResourceConfigValue(String value)
+  public static ConfigurableResource parseResourceConfigValue(String val)
       throws AllocationConfigurationException {
-    return parseResourceConfigValue(value, Long.MAX_VALUE);
-  }
-
-  /**
-   * Parses a resource config value in one of three forms:
-   * <ol>
-   * <li>Percentage: &quot;50%&quot; or &quot;40% memory, 60% cpu&quot;</li>
-   * <li>New style resources: &quot;vcores=10, memory-mb=1024&quot;
-   * or &quot;vcores=60%, memory-mb=40%&quot;</li>
-   * <li>Old style resources: &quot;1024 mb, 10 vcores&quot;</li>
-   * </ol>
-   * In new style resources, any resource that is not specified will be
-   * set to {@code missing} or 0%, as appropriate. Also, in the new style
-   * resources, units are not allowed. Units are assumed from the resource
-   * manager's settings for the resources when the value isn't a percentage.
-   *
-   * The {@code missing} parameter is only used in the case of new style
-   * resources without percentages. With new style resources with percentages,
-   * any missing resources will be assumed to be 100% because percentages are
-   * only used with maximum resource limits.
-   *
-   * @param value the resource definition to parse
-   * @param missing the value to use for any unspecified resources
-   * @return a {@link ConfigurableResource} that represents the parsed value
-   * @throws AllocationConfigurationException if the raw value is not a valid
-   * resource definition
-   */
-  public static ConfigurableResource parseResourceConfigValue(String value,
-      long missing) throws AllocationConfigurationException {
     ConfigurableResource configurableResource;
-
-    if (value.trim().isEmpty()) {
-      throw new AllocationConfigurationException("Error reading resource "
-          + "config--the resource string is empty.");
-    }
-
     try {
-      if (value.contains("=")) {
-        configurableResource = parseNewStyleResource(value, missing);
-      } else if (value.contains("%")) {
-        configurableResource = parseOldStyleResourceAsPercentage(value);
+      val = StringUtils.toLowerCase(val);
+      if (val.contains("%")) {
+        configurableResource = new ConfigurableResource(
+            getResourcePercentage(val));
       } else {
-        configurableResource = parseOldStyleResource(value);
+        int memory = findResource(val, "mb");
+        int vcores = findResource(val, "vcores");
+        configurableResource = new ConfigurableResource(
+            BuilderUtils.newResource(memory, vcores));
       }
-    } catch (RuntimeException ex) {
+    } catch (AllocationConfigurationException ex) {
+      throw ex;
+    } catch (Exception ex) {
       throw new AllocationConfigurationException(
           "Error reading resource config", ex);
     }
-
-    return configurableResource;
-  }
-
-  private static ConfigurableResource parseNewStyleResource(String value,
-          long missing) throws AllocationConfigurationException {
-
-    final ConfigurableResource configurableResource;
-    boolean asPercent = value.contains("%");
-    if (asPercent) {
-      configurableResource = new ConfigurableResource();
-    } else {
-      configurableResource = new ConfigurableResource(missing);
-    }
-
-    String[] resources = value.split(",");
-    for (String resource : resources) {
-      String[] parts = resource.split("=");
-
-      if (parts.length != 2) {
-        throw createConfigException(value,
-                        "Every resource must be of the form: name=value.");
-      }
-
-      String resourceName = parts[0].trim();
-      String resourceValue = parts[1].trim();
-      try {
-        if (asPercent) {
-          configurableResource.setPercentage(resourceName,
-              findPercentage(resourceValue, ""));
-        } else {
-          configurableResource.setValue(resourceName,
-              Long.parseLong(resourceValue));
-        }
-      } catch (ResourceNotFoundException ex) {
-        throw createConfigException(value, "The "
-            + "resource name, \"" + resourceName + "\" was not "
-            + "recognized. Please check the value of "
-            + YarnConfiguration.RESOURCE_TYPES + " in the Resource "
-            + "Manager's configuration files.", ex);
-      } catch (NumberFormatException ex) {
-        // This only comes from Long.parseLong()
-        throw createConfigException(value, "The "
-            + "resource values must all be integers. \"" + resourceValue
-            + "\" is not an integer.", ex);
-      } catch (AllocationConfigurationException ex) {
-        // This only comes from findPercentage()
-        throw createConfigException(value, "The "
-            + "resource values must all be percentages. \""
-            + resourceValue + "\" is either not a number or does not "
-            + "include the '%' symbol.", ex);
-      }
-    }
     return configurableResource;
   }
 
-  private static ConfigurableResource parseOldStyleResourceAsPercentage(
-          String value) throws AllocationConfigurationException {
-    return new ConfigurableResource(
-            getResourcePercentage(StringUtils.toLowerCase(value)));
-  }
-
-  private static ConfigurableResource parseOldStyleResource(String value)
-          throws AllocationConfigurationException {
-    final String lCaseValue = StringUtils.toLowerCase(value);
-    int memory = findResource(lCaseValue, "mb");
-    int vcores = findResource(lCaseValue, "vcores");
-
-    return new ConfigurableResource(
-            BuilderUtils.newResource(memory, vcores));
-  }
-
   private static double[] getResourcePercentage(
       String val) throws AllocationConfigurationException {
     int numberOfKnownResourceTypes = ResourceUtils
         .getNumberOfKnownResourceTypes();
     double[] resourcePercentage = new double[numberOfKnownResourceTypes];
     String[] strings = val.split(",");
-
     if (strings.length == 1) {
       double percentage = findPercentage(strings[0], "");
       for (int i = 0; i < numberOfKnownResourceTypes; i++) {
-        resourcePercentage[i] = percentage;
+        resourcePercentage[i] = percentage/100;
       }
     } else {
-      resourcePercentage[0] = findPercentage(val, "memory");
-      resourcePercentage[1] = findPercentage(val, "cpu");
+      resourcePercentage[0] = findPercentage(val, "memory")/100;
+      resourcePercentage[1] = findPercentage(val, "cpu")/100;
     }
-
     return resourcePercentage;
   }
 
   private static double findPercentage(String val, String units)
-      throws AllocationConfigurationException {
+    throws AllocationConfigurationException {
     final Pattern pattern =
         Pattern.compile("((\\d+)(\\.\\d*)?)\\s*%\\s*" + units);
     Matcher matcher = pattern.matcher(val);
@@ -584,22 +467,7 @@ public class FairSchedulerConfiguration extends Configuration {
             units);
       }
     }
-    return Double.parseDouble(matcher.group(1)) / 100.0;
-  }
-
-  private static AllocationConfigurationException createConfigException(
-          String value, String message) {
-    return createConfigException(value, message, null);
-  }
-
-  private static AllocationConfigurationException createConfigException(
-      String value, String message, Throwable t) {
-    String msg = INVALID_RESOURCE_DEFINITION_PREFIX + value + ". " + message;
-    if (t != null) {
-      return new AllocationConfigurationException(msg, t);
-    } else {
-      return new AllocationConfigurationException(msg);
-    }
+    return Double.parseDouble(matcher.group(1));
   }
 
   public long getUpdateInterval() {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/39ad9890/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/allocation/AllocationFileQueueParser.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/allocation/AllocationFileQueueParser.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/allocation/AllocationFileQueueParser.java
index 441c34a..d5a436e 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/allocation/AllocationFileQueueParser.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/allocation/AllocationFileQueueParser.java
@@ -134,7 +134,7 @@ public class AllocationFileQueueParser {
       if (MIN_RESOURCES.equals(field.getTagName())) {
         String text = getTrimmedTextData(field);
         ConfigurableResource val =
-            FairSchedulerConfiguration.parseResourceConfigValue(text, 0L);
+            FairSchedulerConfiguration.parseResourceConfigValue(text);
         builder.minQueueResources(queueName, val.getResource());
       } else if (MAX_RESOURCES.equals(field.getTagName())) {
         String text = getTrimmedTextData(field);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/39ad9890/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/AppInfo.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/AppInfo.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/AppInfo.java
index 9d82bc7..d47f13d 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/AppInfo.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/AppInfo.java
@@ -479,7 +479,7 @@ public class AppInfo {
   public int getNumNonAMContainersPreempted() {
     return numNonAMContainerPreempted;
   }
-
+  
   public int getNumAMContainersPreempted() {
     return numAMContainerPreempted;
   }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/39ad9890/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/SchedulerInfo.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/SchedulerInfo.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/SchedulerInfo.java
index 163f707..81491b1 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/SchedulerInfo.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/SchedulerInfo.java
@@ -41,9 +41,8 @@ public class SchedulerInfo {
   protected EnumSet<SchedulerResourceTypes> schedulingResourceTypes;
   protected int maximumClusterPriority;
 
-  // JAXB needs this
   public SchedulerInfo() {
-  }
+  } // JAXB needs this
 
   public SchedulerInfo(final ResourceManager rm) {
     ResourceScheduler rs = rm.getResourceScheduler();
@@ -75,10 +74,7 @@ public class SchedulerInfo {
   }
 
   public String getSchedulerResourceTypes() {
-    if (minAllocResource != null) {
-      return Arrays.toString(minAllocResource.getResource().getResources());
-    }
-    return null;
+    return Arrays.toString(minAllocResource.getResource().getResources());
   }
 
   public int getMaxClusterLevelAppPriority() {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/39ad9890/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestFairSchedulerConfiguration.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestFairSchedulerConfiguration.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestFairSchedulerConfiguration.java
index 70f83ab..481645b 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestFairSchedulerConfiguration.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestFairSchedulerConfiguration.java
@@ -48,9 +48,6 @@ import org.apache.log4j.spi.LoggingEvent;
 import org.junit.Assert;
 import org.junit.Test;
 
-/**
- * Tests fair scheduler configuration.
- */
 public class TestFairSchedulerConfiguration {
 
   private static final String A_CUSTOM_RESOURCE = "a-custom-resource";
@@ -105,152 +102,67 @@ public class TestFairSchedulerConfiguration {
 
   @Test
   public void testParseResourceConfigValue() throws Exception {
-    Resource expected = BuilderUtils.newResource(5 * 1024, 2);
-    Resource clusterResource = BuilderUtils.newResource(10 * 1024, 4);
-
-    assertEquals(expected,
-        parseResourceConfigValue("2 vcores, 5120 mb").getResource());
-    assertEquals(expected,
-        parseResourceConfigValue("5120 mb, 2 vcores").getResource());
-    assertEquals(expected,
-        parseResourceConfigValue("2vcores,5120mb").getResource());
-    assertEquals(expected,
-        parseResourceConfigValue("5120mb,2vcores").getResource());
-    assertEquals(expected,
-        parseResourceConfigValue("5120mb   mb, 2    vcores").getResource());
-    assertEquals(expected,
-        parseResourceConfigValue("5120 Mb, 2 vCores").getResource());
-    assertEquals(expected,
-        parseResourceConfigValue("  5120 mb, 2 vcores  ").getResource());
-    assertEquals(expected,
-        parseResourceConfigValue("  5120.3 mb, 2.35 vcores  ").getResource());
-    assertEquals(expected,
-        parseResourceConfigValue("  5120. mb, 2. vcores  ").getResource());
-
-    assertEquals(expected,
+    assertEquals(BuilderUtils.newResource(1024, 2),
+        parseResourceConfigValue("2 vcores, 1024 mb").getResource());
+    assertEquals(BuilderUtils.newResource(1024, 2),
+        parseResourceConfigValue("1024 mb, 2 vcores").getResource());
+    assertEquals(BuilderUtils.newResource(1024, 2),
+        parseResourceConfigValue("2vcores,1024mb").getResource());
+    assertEquals(BuilderUtils.newResource(1024, 2),
+        parseResourceConfigValue("1024mb,2vcores").getResource());
+    assertEquals(BuilderUtils.newResource(1024, 2),
+        parseResourceConfigValue("1024   mb, 2    vcores").getResource());
+    assertEquals(BuilderUtils.newResource(1024, 2),
+        parseResourceConfigValue("1024 Mb, 2 vCores").getResource());
+    assertEquals(BuilderUtils.newResource(1024, 2),
+        parseResourceConfigValue("  1024 mb, 2 vcores  ").getResource());
+    assertEquals(BuilderUtils.newResource(1024, 2),
+        parseResourceConfigValue("  1024.3 mb, 2.35 vcores  ").getResource());
+    assertEquals(BuilderUtils.newResource(1024, 2),
+        parseResourceConfigValue("  1024. mb, 2. vcores  ").getResource());
+
+    Resource clusterResource = BuilderUtils.newResource(2048, 4);
+    assertEquals(BuilderUtils.newResource(1024, 2),
         parseResourceConfigValue("50% memory, 50% cpu").
             getResource(clusterResource));
-    assertEquals(expected,
+    assertEquals(BuilderUtils.newResource(1024, 2),
         parseResourceConfigValue("50% Memory, 50% CpU").
             getResource(clusterResource));
-    assertEquals(BuilderUtils.newResource(5 * 1024, 4),
+    assertEquals(BuilderUtils.newResource(1024, 2),
+        parseResourceConfigValue("50%").getResource(clusterResource));
+    assertEquals(BuilderUtils.newResource(1024, 4),
         parseResourceConfigValue("50% memory, 100% cpu").
         getResource(clusterResource));
-    assertEquals(BuilderUtils.newResource(5 * 1024, 4),
+    assertEquals(BuilderUtils.newResource(1024, 4),
         parseResourceConfigValue(" 100% cpu, 50% memory").
         getResource(clusterResource));
-    assertEquals(BuilderUtils.newResource(5 * 1024, 0),
+    assertEquals(BuilderUtils.newResource(1024, 0),
         parseResourceConfigValue("50% memory, 0% cpu").
             getResource(clusterResource));
-    assertEquals(expected,
+    assertEquals(BuilderUtils.newResource(1024, 2),
         parseResourceConfigValue("50 % memory, 50 % cpu").
             getResource(clusterResource));
-    assertEquals(expected,
+    assertEquals(BuilderUtils.newResource(1024, 2),
         parseResourceConfigValue("50%memory,50%cpu").
             getResource(clusterResource));
-    assertEquals(expected,
+    assertEquals(BuilderUtils.newResource(1024, 2),
         parseResourceConfigValue("  50  %  memory,  50  %  cpu  ").
             getResource(clusterResource));
-    assertEquals(expected,
+    assertEquals(BuilderUtils.newResource(1024, 2),
         parseResourceConfigValue("50.% memory, 50.% cpu").
             getResource(clusterResource));
+
+    clusterResource =  BuilderUtils.newResource(1024 * 10, 4);
     assertEquals(BuilderUtils.newResource((int)(1024 * 10 * 0.109), 2),
         parseResourceConfigValue("10.9% memory, 50.6% cpu").
             getResource(clusterResource));
-    assertEquals(expected,
-        parseResourceConfigValue("50%").getResource(clusterResource));
-
-    Configuration conf = new Configuration();
-
-    conf.set(YarnConfiguration.RESOURCE_TYPES, "test1");
-    ResourceUtils.resetResourceTypes(conf);
-
-    clusterResource = BuilderUtils.newResource(10 * 1024, 4);
-    expected = BuilderUtils.newResource(5 * 1024, 2);
-    expected.setResourceValue("test1", Long.MAX_VALUE);
-
-    assertEquals(expected,
-        parseResourceConfigValue("vcores=2, memory-mb=5120").getResource());
-    assertEquals(expected,
-        parseResourceConfigValue("memory-mb=5120, vcores=2").getResource());
-    assertEquals(expected,
-        parseResourceConfigValue("vcores=2,memory-mb=5120").getResource());
-    assertEquals(expected, parseResourceConfigValue(" vcores = 2 , "
-            + "memory-mb = 5120 ").getResource());
-
-    expected.setResourceValue("test1", 0L);
-
-    assertEquals(expected,
-        parseResourceConfigValue("vcores=2, memory-mb=5120", 0L).getResource());
-    assertEquals(expected,
-        parseResourceConfigValue("memory-mb=5120, vcores=2", 0L).getResource());
-    assertEquals(expected,
-        parseResourceConfigValue("vcores=2,memory-mb=5120", 0L).getResource());
-    assertEquals(expected,
-        parseResourceConfigValue(" vcores = 2 , memory-mb = 5120 ",
-            0L).getResource());
-
-    clusterResource.setResourceValue("test1", 8L);
-    expected.setResourceValue("test1", 4L);
-
-    assertEquals(expected,
-        parseResourceConfigValue("50%").getResource(clusterResource));
-    assertEquals(expected,
-        parseResourceConfigValue("vcores=2, memory-mb=5120, "
-            + "test1=4").getResource());
-    assertEquals(expected,
-        parseResourceConfigValue("test1=4, vcores=2, "
-            + "memory-mb=5120").getResource());
-    assertEquals(expected,
-        parseResourceConfigValue("memory-mb=5120, test1=4, "
-            + "vcores=2").getResource());
-    assertEquals(expected,
-        parseResourceConfigValue("vcores=2,memory-mb=5120,"
-            + "test1=4").getResource());
-    assertEquals(expected,
-        parseResourceConfigValue(" vcores = 2 , memory-mb = 5120 , "
-            + "test1 = 4 ").getResource());
-
-    expected = BuilderUtils.newResource(4 * 1024, 3);
-    expected.setResourceValue("test1", 8L);
-
-    assertEquals(expected,
-        parseResourceConfigValue("vcores=75%, "
-            + "memory-mb=40%").getResource(clusterResource));
-    assertEquals(expected,
-        parseResourceConfigValue("memory-mb=40%, "
-            + "vcores=75%").getResource(clusterResource));
-    assertEquals(expected,
-        parseResourceConfigValue("vcores=75%,"
-            + "memory-mb=40%").getResource(clusterResource));
-    assertEquals(expected,
-        parseResourceConfigValue(" vcores = 75 % , "
-            + "memory-mb = 40 % ").getResource(clusterResource));
-
-    expected.setResourceValue("test1", 4L);
-
-    assertEquals(expected,
-        parseResourceConfigValue("vcores=75%, memory-mb=40%, "
-            + "test1=50%").getResource(clusterResource));
-    assertEquals(expected,
-        parseResourceConfigValue("test1=50%, vcores=75%, "
-            + "memory-mb=40%").getResource(clusterResource));
-    assertEquals(expected,
-        parseResourceConfigValue("memory-mb=40%, test1=50%, "
-            + "vcores=75%").getResource(clusterResource));
-    assertEquals(expected,
-        parseResourceConfigValue("vcores=75%,memory-mb=40%,"
-            + "test1=50%").getResource(clusterResource));
-    assertEquals(expected,
-        parseResourceConfigValue(" vcores = 75 % , memory-mb = 40 % , "
-            + "test1 = 50 % ").getResource(clusterResource));
   }
-
+  
   @Test(expected = AllocationConfigurationException.class)
   public void testNoUnits() throws Exception {
     parseResourceConfigValue("1024");
   }
-
+  
   @Test(expected = AllocationConfigurationException.class)
   public void testOnlyMemory() throws Exception {
     parseResourceConfigValue("1024mb");
@@ -260,7 +172,7 @@ public class TestFairSchedulerConfiguration {
   public void testOnlyCPU() throws Exception {
     parseResourceConfigValue("1024vcores");
   }
-
+  
   @Test(expected = AllocationConfigurationException.class)
   public void testGibberish() throws Exception {
     parseResourceConfigValue("1o24vc0res");

http://git-wip-us.apache.org/repos/asf/hadoop/blob/39ad9890/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServices.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServices.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServices.java
index 3902889..0702d65 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServices.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServices.java
@@ -53,7 +53,11 @@ import org.apache.hadoop.yarn.api.records.ApplicationReport;
 import org.apache.hadoop.yarn.api.records.QueueACL;
 import org.apache.hadoop.yarn.api.records.QueueState;
 import org.apache.hadoop.yarn.conf.YarnConfiguration;
-import org.apache.hadoop.yarn.server.resourcemanager.*;
+import org.apache.hadoop.yarn.server.resourcemanager.ClientRMService;
+import org.apache.hadoop.yarn.server.resourcemanager.ClusterMetrics;
+import org.apache.hadoop.yarn.server.resourcemanager.MockRM;
+import org.apache.hadoop.yarn.server.resourcemanager.RMContextImpl;
+import org.apache.hadoop.yarn.server.resourcemanager.ResourceManager;
 import org.apache.hadoop.yarn.server.resourcemanager.nodelabels.RMNodeLabelsManager;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.QueueMetrics;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.ResourceScheduler;
@@ -72,12 +76,11 @@ import org.apache.hadoop.yarn.webapp.JerseyTestBase;
 import org.apache.hadoop.yarn.webapp.WebServicesTestUtils;
 import org.codehaus.jettison.json.JSONException;
 import org.codehaus.jettison.json.JSONObject;
+import org.eclipse.jetty.server.Response;
 import org.junit.Assert;
 import org.junit.Before;
 import org.junit.BeforeClass;
 import org.junit.Test;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
 import org.w3c.dom.Document;
 import org.w3c.dom.Element;
 import org.w3c.dom.NodeList;
@@ -93,8 +96,6 @@ import com.sun.jersey.guice.spi.container.servlet.GuiceContainer;
 import com.sun.jersey.test.framework.WebAppDescriptor;
 
 public class TestRMWebServices extends JerseyTestBase {
-  private static final Logger LOG =
-          LoggerFactory.getLogger(TestRMWebServices.class);
 
   private static MockRM rm;
 
@@ -471,19 +472,19 @@ public class TestRMWebServices extends JerseyTestBase {
     QueueMetrics metrics = rs.getRootQueueMetrics();
     ClusterMetrics clusterMetrics = ClusterMetrics.getMetrics();
 
-    long totalMBExpect =
+    long totalMBExpect = 
         metrics.getAvailableMB() + metrics.getAllocatedMB();
-    long totalVirtualCoresExpect =
+    long totalVirtualCoresExpect = 
         metrics.getAvailableVirtualCores() + metrics.getAllocatedVirtualCores();
-    assertEquals("appsSubmitted doesn't match",
+    assertEquals("appsSubmitted doesn't match", 
         metrics.getAppsSubmitted(), submittedApps);
-    assertEquals("appsCompleted doesn't match",
+    assertEquals("appsCompleted doesn't match", 
         metrics.getAppsCompleted(), completedApps);
     assertEquals("reservedMB doesn't match",
         metrics.getReservedMB(), reservedMB);
-    assertEquals("availableMB doesn't match",
+    assertEquals("availableMB doesn't match", 
         metrics.getAvailableMB(), availableMB);
-    assertEquals("allocatedMB doesn't match",
+    assertEquals("allocatedMB doesn't match", 
         metrics.getAllocatedMB(), allocMB);
     assertEquals("reservedVirtualCores doesn't match",
         metrics.getReservedVirtualCores(), reservedVirtualCores);
@@ -596,13 +597,11 @@ public class TestRMWebServices extends JerseyTestBase {
 
   public void verifyClusterSchedulerFifo(JSONObject json) throws JSONException,
       Exception {
-    assertEquals("incorrect number of elements in: " + json, 1, json.length());
+    assertEquals("incorrect number of elements", 1, json.length());
     JSONObject info = json.getJSONObject("scheduler");
-    assertEquals("incorrect number of elements in: " + info, 1, info.length());
+    assertEquals("incorrect number of elements", 1, info.length());
     info = info.getJSONObject("schedulerInfo");
-
-    LOG.debug("schedulerInfo: {}", info);
-    assertEquals("incorrect number of elements in: " + info, 11, info.length());
+    assertEquals("incorrect number of elements", 11, info.length());
 
     verifyClusterSchedulerFifoGeneric(info.getString("type"),
         info.getString("qstate"), (float) info.getDouble("capacity"),

http://git-wip-us.apache.org/repos/asf/hadoop/blob/39ad9890/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServicesApps.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServicesApps.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServicesApps.java
index 15f94e1..6c6f400 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServicesApps.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServicesApps.java
@@ -79,7 +79,7 @@ import com.sun.jersey.test.framework.WebAppDescriptor;
 public class TestRMWebServicesApps extends JerseyTestBase {
 
   private static MockRM rm;
-
+  
   private static final int CONTAINER_MB = 1024;
 
   private static class WebServletModule extends ServletModule {
@@ -324,7 +324,7 @@ public class TestRMWebServicesApps extends JerseyTestBase {
     assertEquals("incorrect number of elements", 1, apps.length());
     array = apps.getJSONArray("app");
     assertEquals("incorrect number of elements", 2, array.length());
-    assertTrue("both app states of ACCEPTED and KILLED are not present",
+    assertTrue("both app states of ACCEPTED and KILLED are not present", 
         (array.getJSONObject(0).getString("state").equals("ACCEPTED") &&
         array.getJSONObject(1).getString("state").equals("KILLED")) ||
         (array.getJSONObject(0).getString("state").equals("KILLED") &&
@@ -375,12 +375,12 @@ public class TestRMWebServicesApps extends JerseyTestBase {
     assertEquals("incorrect number of elements", 1, apps.length());
     array = apps.getJSONArray("app");
     assertEquals("incorrect number of elements", 2, array.length());
-    assertTrue("both app states of ACCEPTED and KILLED are not present",
+    assertTrue("both app states of ACCEPTED and KILLED are not present", 
         (array.getJSONObject(0).getString("state").equals("ACCEPTED") &&
         array.getJSONObject(1).getString("state").equals("KILLED")) ||
         (array.getJSONObject(0).getString("state").equals("KILLED") &&
         array.getJSONObject(1).getString("state").equals("ACCEPTED")));
-
+    
     rm.stop();
   }
 
@@ -511,8 +511,7 @@ public class TestRMWebServicesApps extends JerseyTestBase {
     WebResource r = resource();
 
     ClientResponse response = r.path("ws").path("v1").path("cluster")
-        .path("apps").queryParam("finalStatus",
-                    FinalApplicationStatus.UNDEFINED.toString())
+        .path("apps").queryParam("finalStatus", FinalApplicationStatus.UNDEFINED.toString())
         .accept(MediaType.APPLICATION_JSON).get(ClientResponse.class);
     assertEquals(MediaType.APPLICATION_JSON_TYPE + "; " + JettyUtils.UTF_8,
         response.getType().toString());
@@ -1805,8 +1804,7 @@ public class TestRMWebServicesApps extends JerseyTestBase {
     int numAttempt = 1;
     while (true) {
       // fail the AM by sending CONTAINER_FINISHED event without registering.
-      amNodeManager.nodeHeartbeat(am.getApplicationAttemptId(), 1,
-              ContainerState.COMPLETE);
+      amNodeManager.nodeHeartbeat(am.getApplicationAttemptId(), 1, ContainerState.COMPLETE);
       rm.waitForState(am.getApplicationAttemptId(), RMAppAttemptState.FAILED);
       if (numAttempt == maxAppAttempts) {
         rm.waitForState(app1.getApplicationId(), RMAppState.FAILED);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/39ad9890/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServicesAppsCustomResourceTypes.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServicesAppsCustomResourceTypes.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServicesAppsCustomResourceTypes.java
deleted file mode 100644
index 83e0056..0000000
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServicesAppsCustomResourceTypes.java
+++ /dev/null
@@ -1,242 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.yarn.server.resourcemanager.webapp;
-
-import com.google.inject.Guice;
-import com.google.inject.servlet.ServletModule;
-import com.sun.jersey.api.client.ClientResponse;
-import com.sun.jersey.api.client.WebResource;
-import com.sun.jersey.guice.spi.container.servlet.GuiceContainer;
-import com.sun.jersey.test.framework.WebAppDescriptor;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.yarn.api.records.ResourceRequest;
-import org.apache.hadoop.yarn.conf.YarnConfiguration;
-import org.apache.hadoop.yarn.server.resourcemanager.MockAM;
-import org.apache.hadoop.yarn.server.resourcemanager.MockNM;
-import org.apache.hadoop.yarn.server.resourcemanager.MockRM;
-import org.apache.hadoop.yarn.server.resourcemanager.ResourceManager;
-import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMApp;
-import org.apache.hadoop.yarn.server.resourcemanager.scheduler.AbstractYarnScheduler;
-import org.apache.hadoop.yarn.server.resourcemanager.scheduler.ResourceScheduler;
-import org.apache.hadoop.yarn.server.resourcemanager.scheduler.fifo.FifoScheduler;
-import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.AppInfo;
-import org.apache.hadoop.yarn.server.resourcemanager.webapp.fairscheduler.CustomResourceTypesConfigurationProvider;
-import org.apache.hadoop.yarn.server.resourcemanager.webapp.helper.AppInfoJsonVerifications;
-import org.apache.hadoop.yarn.server.resourcemanager.webapp.helper.AppInfoXmlVerifications;
-import org.apache.hadoop.yarn.server.resourcemanager.webapp.helper.BufferedClientResponse;
-import org.apache.hadoop.yarn.server.resourcemanager.webapp.helper.JsonCustomResourceTypeTestcase;
-import org.apache.hadoop.yarn.server.resourcemanager.webapp.helper.ResourceRequestsJsonVerifications;
-import org.apache.hadoop.yarn.server.resourcemanager.webapp.helper.ResourceRequestsXmlVerifications;
-import org.apache.hadoop.yarn.server.resourcemanager.webapp.helper.XmlCustomResourceTypeTestCase;
-import org.apache.hadoop.yarn.util.resource.ResourceUtils;
-import org.apache.hadoop.yarn.webapp.GenericExceptionHandler;
-import org.apache.hadoop.yarn.webapp.GuiceServletConfig;
-import org.apache.hadoop.yarn.webapp.JerseyTestBase;
-import org.codehaus.jettison.json.JSONArray;
-import org.codehaus.jettison.json.JSONException;
-import org.codehaus.jettison.json.JSONObject;
-import org.junit.Before;
-import org.junit.Test;
-import org.w3c.dom.Element;
-import org.w3c.dom.Node;
-import org.w3c.dom.NodeList;
-
-import javax.ws.rs.core.MediaType;
-import java.util.ArrayList;
-
-import static org.junit.Assert.assertEquals;
-
-/**
- * This test verifies that custom resource types are correctly serialized to XML
- * and JSON when HTTP GET request is sent to the resource: ws/v1/cluster/apps.
- */
-public class TestRMWebServicesAppsCustomResourceTypes extends JerseyTestBase {
-
-  private static MockRM rm;
-  private static final int CONTAINER_MB = 1024;
-
-  private static class WebServletModule extends ServletModule {
-    @Override
-    protected void configureServlets() {
-      bind(JAXBContextResolver.class);
-      bind(RMWebServices.class);
-      bind(GenericExceptionHandler.class);
-      Configuration conf = new Configuration();
-      conf.setInt(YarnConfiguration.RM_AM_MAX_ATTEMPTS,
-          YarnConfiguration.DEFAULT_RM_AM_MAX_ATTEMPTS);
-      conf.setClass(YarnConfiguration.RM_SCHEDULER, FifoScheduler.class,
-          ResourceScheduler.class);
-      initResourceTypes(conf);
-      rm = new MockRM(conf);
-      bind(ResourceManager.class).toInstance(rm);
-      serve("/*").with(GuiceContainer.class);
-    }
-
-    private void initResourceTypes(Configuration conf) {
-      conf.set(YarnConfiguration.RM_CONFIGURATION_PROVIDER_CLASS,
-          CustomResourceTypesConfigurationProvider.class.getName());
-      ResourceUtils.resetResourceTypes(conf);
-    }
-  }
-
-  @Before
-  @Override
-  public void setUp() throws Exception {
-    super.setUp();
-    createInjectorForWebServletModule();
-  }
-
-  private void createInjectorForWebServletModule() {
-    GuiceServletConfig
-        .setInjector(Guice.createInjector(new WebServletModule()));
-  }
-
-  public TestRMWebServicesAppsCustomResourceTypes() {
-    super(new WebAppDescriptor.Builder(
-        "org.apache.hadoop.yarn.server.resourcemanager.webapp")
-            .contextListenerClass(GuiceServletConfig.class)
-            .filterClass(com.google.inject.servlet.GuiceFilter.class)
-            .contextPath("jersey-guice-filter").servletPath("/").build());
-  }
-
-  @Test
-  public void testRunningAppXml() throws Exception {
-    rm.start();
-    MockNM amNodeManager = rm.registerNode("127.0.0.1:1234", 2048);
-    RMApp app1 = rm.submitApp(CONTAINER_MB, "testwordcount", "user1");
-    MockAM am1 = MockRM.launchAndRegisterAM(app1, rm, amNodeManager);
-    am1.allocate("*", 2048, 1, new ArrayList<>());
-    amNodeManager.nodeHeartbeat(true);
-
-    WebResource r = resource();
-    WebResource path = r.path("ws").path("v1").path("cluster").path("apps");
-    ClientResponse response =
-        path.accept(MediaType.APPLICATION_XML).get(ClientResponse.class);
-
-    XmlCustomResourceTypeTestCase testCase =
-            new XmlCustomResourceTypeTestCase(path,
-                    new BufferedClientResponse(response));
-    testCase.verify(document -> {
-      NodeList apps = document.getElementsByTagName("apps");
-      assertEquals("incorrect number of apps elements", 1, apps.getLength());
-
-      NodeList appArray = ((Element)(apps.item(0)))
-              .getElementsByTagName("app");
-      assertEquals("incorrect number of app elements", 1, appArray.getLength());
-
-      verifyAppsXML(appArray, app1);
-    });
-
-    rm.stop();
-  }
-
-  @Test
-  public void testRunningAppJson() throws Exception {
-    rm.start();
-    MockNM amNodeManager = rm.registerNode("127.0.0.1:1234", 2048);
-    RMApp app1 = rm.submitApp(CONTAINER_MB, "testwordcount", "user1");
-    MockAM am1 = MockRM.launchAndRegisterAM(app1, rm, amNodeManager);
-    am1.allocate("*", 2048, 1, new ArrayList<>());
-    amNodeManager.nodeHeartbeat(true);
-
-    WebResource r = resource();
-    WebResource path = r.path("ws").path("v1").path("cluster").path("apps");
-    ClientResponse response =
-        path.accept(MediaType.APPLICATION_JSON).get(ClientResponse.class);
-
-    JsonCustomResourceTypeTestcase testCase =
-        new JsonCustomResourceTypeTestcase(path,
-            new BufferedClientResponse(response));
-    testCase.verify(json -> {
-      try {
-        assertEquals("incorrect number of apps elements", 1, json.length());
-        JSONObject apps = json.getJSONObject("apps");
-        assertEquals("incorrect number of app elements", 1, apps.length());
-        JSONArray array = apps.getJSONArray("app");
-        assertEquals("incorrect count of app", 1, array.length());
-
-        verifyAppInfoJson(array.getJSONObject(0), app1);
-      } catch (JSONException e) {
-        throw new RuntimeException(e);
-      }
-    });
-
-    rm.stop();
-  }
-
-  private void verifyAppsXML(NodeList appArray, RMApp app) {
-    for (int i = 0; i < appArray.getLength(); i++) {
-      Element element = (Element) appArray.item(i);
-      AppInfoXmlVerifications.verify(element, app);
-
-      NodeList resourceRequests =
-          element.getElementsByTagName("resourceRequests");
-      assertEquals(1, resourceRequests.getLength());
-      Node resourceRequest = resourceRequests.item(0);
-      ResourceRequest rr =
-          ((AbstractYarnScheduler) rm.getRMContext().getScheduler())
-              .getApplicationAttempt(
-                  app.getCurrentAppAttempt().getAppAttemptId())
-              .getAppSchedulingInfo().getAllResourceRequests().get(0);
-      ResourceRequestsXmlVerifications.verifyWithCustomResourceTypes(
-              (Element) resourceRequest, rr,
-          CustomResourceTypesConfigurationProvider.getCustomResourceTypes());
-    }
-  }
-
-  private void verifyAppInfoJson(JSONObject info, RMApp app) throws
-          JSONException {
-    int expectedNumberOfElements = getExpectedNumberOfElements(app);
-
-    assertEquals("incorrect number of elements", expectedNumberOfElements,
-        info.length());
-
-    AppInfoJsonVerifications.verify(info, app);
-
-    JSONArray resourceRequests = info.getJSONArray("resourceRequests");
-    JSONObject requestInfo = resourceRequests.getJSONObject(0);
-    ResourceRequest rr =
-        ((AbstractYarnScheduler) rm.getRMContext().getScheduler())
-            .getApplicationAttempt(app.getCurrentAppAttempt().getAppAttemptId())
-            .getAppSchedulingInfo().getAllResourceRequests().get(0);
-
-    ResourceRequestsJsonVerifications.verifyWithCustomResourceTypes(
-            requestInfo, rr,
-            CustomResourceTypesConfigurationProvider.getCustomResourceTypes());
-  }
-
-  private int getExpectedNumberOfElements(RMApp app) {
-    int expectedNumberOfElements = 40 + 2; // 2 -> resourceRequests
-    if (app.getApplicationSubmissionContext()
-        .getNodeLabelExpression() != null) {
-      expectedNumberOfElements++;
-    }
-
-    if (app.getAMResourceRequests().get(0).getNodeLabelExpression() != null) {
-      expectedNumberOfElements++;
-    }
-
-    if (AppInfo
-        .getAmRPCAddressFromRMAppAttempt(app.getCurrentAppAttempt()) != null) {
-      expectedNumberOfElements++;
-    }
-    return expectedNumberOfElements;
-  }
-
-}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/39ad9890/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServicesCapacitySched.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServicesCapacitySched.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServicesCapacitySched.java
index 46d0a66..e37f76f 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServicesCapacitySched.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServicesCapacitySched.java
@@ -146,7 +146,7 @@ public class TestRMWebServicesCapacitySched extends JerseyTestBase {
     config.setUserLimitFactor(B2, 100.0f);
     config.setCapacity(B3, 0.5f);
     config.setUserLimitFactor(B3, 100.0f);
-
+    
     config.setQueues(A1, new String[] {"a1a", "a1b"});
     final String A1A = A1 + ".a1a";
     config.setCapacity(A1A, 85);
@@ -254,7 +254,7 @@ public class TestRMWebServicesCapacitySched extends JerseyTestBase {
     }
   }
 
-  public void verifySubQueueXML(Element qElem, String q,
+  public void verifySubQueueXML(Element qElem, String q, 
       float parentAbsCapacity, float parentAbsMaxCapacity)
       throws Exception {
     NodeList children = qElem.getChildNodes();
@@ -317,34 +317,30 @@ public class TestRMWebServicesCapacitySched extends JerseyTestBase {
 
   private void verifyClusterScheduler(JSONObject json) throws JSONException,
       Exception {
-    assertEquals("incorrect number of elements in: " + json, 1, json.length());
+    assertEquals("incorrect number of elements", 1, json.length());
     JSONObject info = json.getJSONObject("scheduler");
-    assertEquals("incorrect number of elements in: " + info, 1, info.length());
+    assertEquals("incorrect number of elements", 1, info.length());
     info = info.getJSONObject("schedulerInfo");
-    assertEquals("incorrect number of elements in: " + info, 8, info.length());
+    assertEquals("incorrect number of elements", 8, info.length());
     verifyClusterSchedulerGeneric(info.getString("type"),
         (float) info.getDouble("usedCapacity"),
         (float) info.getDouble("capacity"),
         (float) info.getDouble("maxCapacity"), info.getString("queueName"));
     JSONObject health = info.getJSONObject("health");
     assertNotNull(health);
-    assertEquals("incorrect number of elements in: " + health, 3,
-        health.length());
+    assertEquals("incorrect number of elements", 3, health.length());
     JSONArray operationsInfo = health.getJSONArray("operationsInfo");
-    assertEquals("incorrect number of elements in: " + health, 4,
-        operationsInfo.length());
+    assertEquals("incorrect number of elements", 4, operationsInfo.length());
     JSONArray lastRunDetails = health.getJSONArray("lastRunDetails");
-    assertEquals("incorrect number of elements in: " + health, 3,
-        lastRunDetails.length());
+    assertEquals("incorrect number of elements", 3, lastRunDetails.length());
 
     JSONArray arr = info.getJSONObject("queues").getJSONArray("queue");
-    assertEquals("incorrect number of elements in: " + arr, 2, arr.length());
+    assertEquals("incorrect number of elements", 2, arr.length());
 
     // test subqueues
     for (int i = 0; i < arr.length(); i++) {
       JSONObject obj = arr.getJSONObject(i);
-      String q = CapacitySchedulerConfiguration.ROOT + "." +
-              obj.getString("queueName");
+      String q = CapacitySchedulerConfiguration.ROOT + "." + obj.getString("queueName");
       verifySubQueue(obj, q, 100, 100);
     }
   }
@@ -359,7 +355,7 @@ public class TestRMWebServicesCapacitySched extends JerseyTestBase {
     assertTrue("queueName doesn't match", "root".matches(queueName));
   }
 
-  private void verifySubQueue(JSONObject info, String q,
+  private void verifySubQueue(JSONObject info, String q, 
       float parentAbsCapacity, float parentAbsMaxCapacity)
       throws JSONException, Exception {
     int numExpectedElements = 20;
@@ -468,7 +464,7 @@ public class TestRMWebServicesCapacitySched extends JerseyTestBase {
         csConf.getUserLimitFactor(q), info.userLimitFactor, 1e-3f);
   }
 
-  //Return a child Node of node with the tagname or null if none exists
+  //Return a child Node of node with the tagname or null if none exists 
   private Node getChildNodeByName(Node node, String tagname) {
     NodeList nodeList = node.getChildNodes();
     for (int i=0; i < nodeList.getLength(); ++i) {
@@ -518,7 +514,7 @@ public class TestRMWebServicesCapacitySched extends JerseyTestBase {
           for (int j=0; j<users.getLength(); ++j) {
             Node user = users.item(j);
             String username = getChildNodeByName(user, "username")
-                .getTextContent();
+              .getTextContent(); 
             assertTrue(username.equals("user1") || username.equals("user2"));
             //Should be a parsable integer
             Integer.parseInt(getChildNodeByName(getChildNodeByName(user,

http://git-wip-us.apache.org/repos/asf/hadoop/blob/39ad9890/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServicesConfigurationMutation.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServicesConfigurationMutation.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServicesConfigurationMutation.java
index 99b5648..3d28f12 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServicesConfigurationMutation.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServicesConfigurationMutation.java
@@ -42,8 +42,6 @@ import org.apache.hadoop.yarn.webapp.util.YarnWebServiceUtils;
 import org.junit.After;
 import org.junit.Before;
 import org.junit.Test;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
 
 import javax.ws.rs.core.MediaType;
 import javax.ws.rs.core.Response.Status;
@@ -61,8 +59,6 @@ import static org.junit.Assert.assertNull;
  * Test scheduler configuration mutation via REST API.
  */
 public class TestRMWebServicesConfigurationMutation extends JerseyTestBase {
-  private static final Logger LOG = LoggerFactory
-          .getLogger(TestRMWebServicesConfigurationMutation.class);
 
   private static final File CONF_FILE = new File(new File("target",
       "test-classes"), YarnConfiguration.CS_CONFIGURATION_FILE);
@@ -400,7 +396,6 @@ public class TestRMWebServicesConfigurationMutation extends JerseyTestBase {
             .entity(YarnWebServiceUtils.toJson(updateInfo,
                 SchedConfUpdateInfo.class), MediaType.APPLICATION_JSON)
             .put(ClientResponse.class);
-    LOG.debug("Response headers: " + response.getHeaders());
     assertEquals(Status.OK.getStatusCode(), response.getStatus());
     CapacitySchedulerConfiguration newCSConf = cs.getConfiguration();
     assertEquals(0.2f, newCSConf

http://git-wip-us.apache.org/repos/asf/hadoop/blob/39ad9890/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServicesFairScheduler.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServicesFairScheduler.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServicesFairScheduler.java
index 58c72ee..e77785b 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServicesFairScheduler.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServicesFairScheduler.java
@@ -6,9 +6,9 @@
  * to you under the Apache License, Version 2.0 (the
  * "License"); you may not use this file except in compliance
  * with the License.  You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
  * Unless required by applicable law or agreed to in writing, software
  * distributed under the License is distributed on an "AS IS" BASIS,
  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
@@ -16,14 +16,13 @@
  * limitations under the License.
  */
 
-package org.apache.hadoop.yarn.server.resourcemanager.webapp.fairscheduler;
+package org.apache.hadoop.yarn.server.resourcemanager.webapp;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.fail;
+
+import javax.ws.rs.core.MediaType;
 
-import com.google.inject.Guice;
-import com.google.inject.servlet.ServletModule;
-import com.sun.jersey.api.client.ClientResponse;
-import com.sun.jersey.api.client.WebResource;
-import com.sun.jersey.guice.spi.container.servlet.GuiceContainer;
-import com.sun.jersey.test.framework.WebAppDescriptor;
 import org.apache.hadoop.http.JettyUtils;
 import org.apache.hadoop.yarn.conf.YarnConfiguration;
 import org.apache.hadoop.yarn.server.resourcemanager.MockRM;
@@ -31,9 +30,6 @@ import org.apache.hadoop.yarn.server.resourcemanager.ResourceManager;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.ResourceScheduler;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.fair.FairScheduler;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.fair.QueueManager;
-
-import org.apache.hadoop.yarn.server.resourcemanager.webapp.JAXBContextResolver;
-import org.apache.hadoop.yarn.server.resourcemanager.webapp.RMWebServices;
 import org.apache.hadoop.yarn.webapp.GenericExceptionHandler;
 import org.apache.hadoop.yarn.webapp.GuiceServletConfig;
 import org.apache.hadoop.yarn.webapp.JerseyTestBase;
@@ -42,18 +38,18 @@ import org.codehaus.jettison.json.JSONException;
 import org.codehaus.jettison.json.JSONObject;
 import org.junit.Before;
 import org.junit.Test;
-import javax.ws.rs.core.MediaType;
 
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.fail;
+import com.google.inject.Guice;
+import com.google.inject.servlet.ServletModule;
+import com.sun.jersey.api.client.ClientResponse;
+import com.sun.jersey.api.client.WebResource;
+import com.sun.jersey.guice.spi.container.servlet.GuiceContainer;
+import com.sun.jersey.test.framework.WebAppDescriptor;
 
-/**
- * Tests RM Webservices fair scheduler resources.
- */
 public class TestRMWebServicesFairScheduler extends JerseyTestBase {
   private static MockRM rm;
   private static YarnConfiguration conf;
-
+  
   private static class WebServletModule extends ServletModule {
     @Override
     protected void configureServlets() {
@@ -62,7 +58,7 @@ public class TestRMWebServicesFairScheduler extends JerseyTestBase {
       bind(GenericExceptionHandler.class);
       conf = new YarnConfiguration();
       conf.setClass(YarnConfiguration.RM_SCHEDULER, FairScheduler.class,
-          ResourceScheduler.class);
+        ResourceScheduler.class);
       rm = new MockRM(conf);
       bind(ResourceManager.class).toInstance(rm);
       serve("/*").with(GuiceContainer.class);
@@ -70,32 +66,32 @@ public class TestRMWebServicesFairScheduler extends JerseyTestBase {
   }
 
   static {
-    GuiceServletConfig
-        .setInjector(Guice.createInjector(new WebServletModule()));
+    GuiceServletConfig.setInjector(
+        Guice.createInjector(new WebServletModule()));
   }
 
   @Before
   @Override
   public void setUp() throws Exception {
     super.setUp();
-    GuiceServletConfig
-        .setInjector(Guice.createInjector(new WebServletModule()));
+    GuiceServletConfig.setInjector(
+        Guice.createInjector(new WebServletModule()));
   }
 
   public TestRMWebServicesFairScheduler() {
     super(new WebAppDescriptor.Builder(
         "org.apache.hadoop.yarn.server.resourcemanager.webapp")
-            .contextListenerClass(GuiceServletConfig.class)
-            .filterClass(com.google.inject.servlet.GuiceFilter.class)
-            .contextPath("jersey-guice-filter").servletPath("/").build());
+        .contextListenerClass(GuiceServletConfig.class)
+        .filterClass(com.google.inject.servlet.GuiceFilter.class)
+        .contextPath("jersey-guice-filter").servletPath("/").build());
   }
-
+  
   @Test
-  public void testClusterScheduler() throws JSONException {
+  public void testClusterScheduler() throws JSONException, Exception {
     WebResource r = resource();
-    ClientResponse response =
-        r.path("ws").path("v1").path("cluster").path("scheduler")
-            .accept(MediaType.APPLICATION_JSON).get(ClientResponse.class);
+    ClientResponse response = r.path("ws").path("v1").path("cluster")
+        .path("scheduler").accept(MediaType.APPLICATION_JSON)
+        .get(ClientResponse.class);
     assertEquals(MediaType.APPLICATION_JSON_TYPE + "; " + JettyUtils.UTF_8,
         response.getType().toString());
     JSONObject json = response.getEntity(JSONObject.class);
@@ -103,51 +99,52 @@ public class TestRMWebServicesFairScheduler extends JerseyTestBase {
   }
 
   @Test
-  public void testClusterSchedulerSlash() throws JSONException {
+  public void testClusterSchedulerSlash() throws JSONException, Exception {
     WebResource r = resource();
-    ClientResponse response =
-        r.path("ws").path("v1").path("cluster").path("scheduler/")
-            .accept(MediaType.APPLICATION_JSON).get(ClientResponse.class);
+    ClientResponse response = r.path("ws").path("v1").path("cluster")
+        .path("scheduler/").accept(MediaType.APPLICATION_JSON)
+        .get(ClientResponse.class);
     assertEquals(MediaType.APPLICATION_JSON_TYPE + "; " + JettyUtils.UTF_8,
         response.getType().toString());
     JSONObject json = response.getEntity(JSONObject.class);
     verifyClusterScheduler(json);
   }
-
+  
   @Test
-  public void testClusterSchedulerWithSubQueues()
-      throws JSONException {
-    FairScheduler scheduler = (FairScheduler) rm.getResourceScheduler();
+  public void testClusterSchedulerWithSubQueues() throws JSONException,
+      Exception {
+    FairScheduler scheduler = (FairScheduler)rm.getResourceScheduler();
     QueueManager queueManager = scheduler.getQueueManager();
     // create LeafQueue
     queueManager.getLeafQueue("root.q.subqueue1", true);
     queueManager.getLeafQueue("root.q.subqueue2", true);
 
     WebResource r = resource();
-    ClientResponse response =
-        r.path("ws").path("v1").path("cluster").path("scheduler")
-            .accept(MediaType.APPLICATION_JSON).get(ClientResponse.class);
+    ClientResponse response = r.path("ws").path("v1").path("cluster")
+        .path("scheduler").accept(MediaType.APPLICATION_JSON)
+        .get(ClientResponse.class);
     assertEquals(MediaType.APPLICATION_JSON_TYPE + "; " + JettyUtils.UTF_8,
         response.getType().toString());
     JSONObject json = response.getEntity(JSONObject.class);
     JSONArray subQueueInfo = json.getJSONObject("scheduler")
         .getJSONObject("schedulerInfo").getJSONObject("rootQueue")
-        .getJSONObject("childQueues").getJSONArray("queue").getJSONObject(1)
-        .getJSONObject("childQueues").getJSONArray("queue");
+        .getJSONObject("childQueues").getJSONArray("queue")
+        .getJSONObject(1).getJSONObject("childQueues").getJSONArray("queue");
     // subQueueInfo is consist of subqueue1 and subqueue2 info
     assertEquals(2, subQueueInfo.length());
 
     // Verify 'childQueues' field is omitted from FairSchedulerLeafQueueInfo.
     try {
       subQueueInfo.getJSONObject(1).getJSONObject("childQueues");
-      fail("FairSchedulerQueueInfo should omit field 'childQueues'"
-          + "if child queue is empty.");
+      fail("FairSchedulerQueueInfo should omit field 'childQueues'" +
+           "if child queue is empty.");
     } catch (JSONException je) {
       assertEquals("JSONObject[\"childQueues\"] not found.", je.getMessage());
     }
   }
 
-  private void verifyClusterScheduler(JSONObject json) throws JSONException {
+  private void verifyClusterScheduler(JSONObject json) throws JSONException,
+      Exception {
     assertEquals("incorrect number of elements", 1, json.length());
     JSONObject info = json.getJSONObject("scheduler");
     assertEquals("incorrect number of elements", 1, info.length());

http://git-wip-us.apache.org/repos/asf/hadoop/blob/39ad9890/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServicesSchedulerActivities.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServicesSchedulerActivities.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServicesSchedulerActivities.java
index 40cf483..1e61186 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServicesSchedulerActivities.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServicesSchedulerActivities.java
@@ -457,7 +457,7 @@ public class TestRMWebServicesSchedulerActivities
       if (object.getClass() == JSONObject.class) {
         assertEquals("Number of allocations is wrong", 1, realValue);
       } else if (object.getClass() == JSONArray.class) {
-        assertEquals("Number of allocations is wrong in: " + object,
+        assertEquals("Number of allocations is wrong",
             ((JSONArray) object).length(), realValue);
       }
     }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/39ad9890/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/fairscheduler/CustomResourceTypesConfigurationProvider.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/fairscheduler/CustomResourceTypesConfigurationProvider.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/fairscheduler/CustomResourceTypesConfigurationProvider.java
deleted file mode 100644
index bb1fce0..0000000
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/fairscheduler/CustomResourceTypesConfigurationProvider.java
+++ /dev/null
@@ -1,138 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *     http://www.apache.org/licenses/LICENSE-2.0
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.yarn.server.resourcemanager.webapp.fairscheduler;
-
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.yarn.LocalConfigurationProvider;
-import org.apache.hadoop.yarn.conf.YarnConfiguration;
-import org.apache.hadoop.yarn.exceptions.YarnException;
-
-import java.io.ByteArrayInputStream;
-import java.io.IOException;
-import java.io.InputStream;
-import java.util.List;
-import java.util.stream.Collectors;
-import java.util.stream.IntStream;
-
-import static java.util.stream.Collectors.toList;
-
-/**
- * This class can generate an XML configuration file of custom resource types.
- * See createInitialResourceTypes for the default values. All custom resource
- * type is prefixed with CUSTOM_RESOURCE_PREFIX. Please use the
- * getConfigurationInputStream method to get an InputStream of the XML. If you
- * want to have different number of resources in your tests, please see usages
- * of this class in this test class:
- * {@link TestRMWebServicesFairSchedulerCustomResourceTypes}
- *
- */
-public class CustomResourceTypesConfigurationProvider
-    extends LocalConfigurationProvider {
-
-  private static class CustomResourceTypes {
-    private int count;
-    private String xml;
-
-    CustomResourceTypes(String xml, int count) {
-      this.xml = xml;
-      this.count = count;
-    }
-
-    public int getCount() {
-      return count;
-    }
-
-    public String getXml() {
-      return xml;
-    }
-  }
-
-  private static final String CUSTOM_RESOURCE_PREFIX = "customResource-";
-
-  private static CustomResourceTypes customResourceTypes =
-      createInitialResourceTypes();
-
-  private static CustomResourceTypes createInitialResourceTypes() {
-    return createCustomResourceTypes(2);
-  }
-
-  private static CustomResourceTypes createCustomResourceTypes(int count) {
-    List<String> resourceTypeNames = generateResourceTypeNames(count);
-
-    List<String> resourceUnitXmlElements = IntStream.range(0, count)
-            .boxed()
-            .map(i -> getResourceUnitsXml(resourceTypeNames.get(i)))
-            .collect(toList());
-
-    StringBuilder sb = new StringBuilder("<configuration>\n");
-    sb.append(getResourceTypesXml(resourceTypeNames));
-
-    for (String resourceUnitXml : resourceUnitXmlElements) {
-      sb.append(resourceUnitXml);
-
-    }
-    sb.append("</configuration>");
-
-    return new CustomResourceTypes(sb.toString(), count);
-  }
-
-  private static List<String> generateResourceTypeNames(int count) {
-    return IntStream.range(0, count)
-            .boxed()
-            .map(i -> CUSTOM_RESOURCE_PREFIX + i)
-            .collect(toList());
-  }
-
-  private static String getResourceUnitsXml(String resource) {
-    return "<property>\n" + "<name>yarn.resource-types." + resource
-        + ".units</name>\n" + "<value>k</value>\n" + "</property>\n";
-  }
-
-  private static String getResourceTypesXml(List<String> resources) {
-    final String resourceTypes = makeCommaSeparatedString(resources);
-
-    return "<property>\n" + "<name>yarn.resource-types</name>\n" + "<value>"
-        + resourceTypes + "</value>\n" + "</property>\n";
-  }
-
-  private static String makeCommaSeparatedString(List<String> resources) {
-    return resources.stream().collect(Collectors.joining(","));
-  }
-
-  @Override
-  public InputStream getConfigurationInputStream(Configuration bootstrapConf,
-      String name) throws YarnException, IOException {
-    if (YarnConfiguration.RESOURCE_TYPES_CONFIGURATION_FILE.equals(name)) {
-      return new ByteArrayInputStream(
-          customResourceTypes.getXml().getBytes());
-    } else {
-      return super.getConfigurationInputStream(bootstrapConf, name);
-    }
-  }
-
-  public static void reset() {
-    customResourceTypes = createInitialResourceTypes();
-  }
-
-  public static void setNumberOfResourceTypes(int count) {
-    customResourceTypes = createCustomResourceTypes(count);
-  }
-
-  public static List<String> getCustomResourceTypes() {
-    return generateResourceTypeNames(customResourceTypes.getCount());
-  }
-}


---------------------------------------------------------------------
To unsubscribe, e-mail: common-commits-unsubscribe@hadoop.apache.org
For additional commands, e-mail: common-commits-help@hadoop.apache.org


[19/50] [abbrv] hadoop git commit: YARN-8435. Fix NPE when the same client simultaneously contact for the first time Yarn Router. Contributed by Rang Jiaheng.

Posted by vi...@apache.org.
YARN-8435. Fix NPE when the same client simultaneously contact for the first time Yarn Router. Contributed by Rang Jiaheng.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/0d9804dc
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/0d9804dc
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/0d9804dc

Branch: refs/heads/HDFS-12090
Commit: 0d9804dcef2eab5ebf84667d9ca49bb035d9a731
Parents: 71df8c2
Author: Giovanni Matteo Fumarola <gi...@apache.com>
Authored: Thu Jul 5 10:54:31 2018 -0700
Committer: Giovanni Matteo Fumarola <gi...@apache.com>
Committed: Thu Jul 5 10:54:31 2018 -0700

----------------------------------------------------------------------
 .../router/clientrm/RouterClientRMService.java  | 53 ++++++++--------
 .../router/rmadmin/RouterRMAdminService.java    | 51 ++++++++-------
 .../server/router/webapp/RouterWebServices.java | 48 +++++++--------
 .../clientrm/TestRouterClientRMService.java     | 60 ++++++++++++++++++
 .../rmadmin/TestRouterRMAdminService.java       | 60 ++++++++++++++++++
 .../router/webapp/TestRouterWebServices.java    | 65 ++++++++++++++++++++
 6 files changed, 259 insertions(+), 78 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/0d9804dc/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-router/src/main/java/org/apache/hadoop/yarn/server/router/clientrm/RouterClientRMService.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-router/src/main/java/org/apache/hadoop/yarn/server/router/clientrm/RouterClientRMService.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-router/src/main/java/org/apache/hadoop/yarn/server/router/clientrm/RouterClientRMService.java
index 73cc185..bbb8047 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-router/src/main/java/org/apache/hadoop/yarn/server/router/clientrm/RouterClientRMService.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-router/src/main/java/org/apache/hadoop/yarn/server/router/clientrm/RouterClientRMService.java
@@ -430,13 +430,15 @@ public class RouterClientRMService extends AbstractService
     return pipeline.getRootInterceptor().getResourceTypeInfo(request);
   }
 
-  private RequestInterceptorChainWrapper getInterceptorChain()
+  @VisibleForTesting
+  protected RequestInterceptorChainWrapper getInterceptorChain()
       throws IOException {
     String user = UserGroupInformation.getCurrentUser().getUserName();
-    if (!userPipelineMap.containsKey(user)) {
-      initializePipeline(user);
+    RequestInterceptorChainWrapper chain = userPipelineMap.get(user);
+    if (chain != null && chain.getRootInterceptor() != null) {
+      return chain;
     }
-    return userPipelineMap.get(user);
+    return initializePipeline(user);
   }
 
   /**
@@ -503,36 +505,33 @@ public class RouterClientRMService extends AbstractService
    *
    * @param user
    */
-  private void initializePipeline(String user) {
-    RequestInterceptorChainWrapper chainWrapper = null;
+  private RequestInterceptorChainWrapper initializePipeline(String user) {
     synchronized (this.userPipelineMap) {
       if (this.userPipelineMap.containsKey(user)) {
         LOG.info("Request to start an already existing user: {}"
             + " was received, so ignoring.", user);
-        return;
+        return userPipelineMap.get(user);
       }
 
-      chainWrapper = new RequestInterceptorChainWrapper();
-      this.userPipelineMap.put(user, chainWrapper);
-    }
-
-    // We register the pipeline instance in the map first and then initialize it
-    // later because chain initialization can be expensive and we would like to
-    // release the lock as soon as possible to prevent other applications from
-    // blocking when one application's chain is initializing
-    LOG.info("Initializing request processing pipeline for application "
-        + "for the user: {}", user);
-
-    try {
-      ClientRequestInterceptor interceptorChain =
-          this.createRequestInterceptorChain();
-      interceptorChain.init(user);
-      chainWrapper.init(interceptorChain);
-    } catch (Exception e) {
-      synchronized (this.userPipelineMap) {
-        this.userPipelineMap.remove(user);
+      RequestInterceptorChainWrapper chainWrapper =
+          new RequestInterceptorChainWrapper();
+      try {
+        // We should init the pipeline instance after it is created and then
+        // add to the map, to ensure thread safe.
+        LOG.info("Initializing request processing pipeline for application "
+            + "for the user: {}", user);
+
+        ClientRequestInterceptor interceptorChain =
+            this.createRequestInterceptorChain();
+        interceptorChain.init(user);
+        chainWrapper.init(interceptorChain);
+      } catch (Exception e) {
+        LOG.error("Init ClientRequestInterceptor error for user: " + user, e);
+        throw e;
       }
-      throw e;
+
+      this.userPipelineMap.put(user, chainWrapper);
+      return chainWrapper;
     }
   }
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/0d9804dc/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-router/src/main/java/org/apache/hadoop/yarn/server/router/rmadmin/RouterRMAdminService.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-router/src/main/java/org/apache/hadoop/yarn/server/router/rmadmin/RouterRMAdminService.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-router/src/main/java/org/apache/hadoop/yarn/server/router/rmadmin/RouterRMAdminService.java
index b8b7ad8..ef30613 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-router/src/main/java/org/apache/hadoop/yarn/server/router/rmadmin/RouterRMAdminService.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-router/src/main/java/org/apache/hadoop/yarn/server/router/rmadmin/RouterRMAdminService.java
@@ -165,13 +165,15 @@ public class RouterRMAdminService extends AbstractService
     return interceptorClassNames;
   }
 
-  private RequestInterceptorChainWrapper getInterceptorChain()
+  @VisibleForTesting
+  protected RequestInterceptorChainWrapper getInterceptorChain()
       throws IOException {
     String user = UserGroupInformation.getCurrentUser().getUserName();
-    if (!userPipelineMap.containsKey(user)) {
-      initializePipeline(user);
+    RequestInterceptorChainWrapper chain = userPipelineMap.get(user);
+    if (chain != null && chain.getRootInterceptor() != null) {
+      return chain;
     }
-    return userPipelineMap.get(user);
+    return initializePipeline(user);
   }
 
   /**
@@ -239,35 +241,32 @@ public class RouterRMAdminService extends AbstractService
    *
    * @param user
    */
-  private void initializePipeline(String user) {
-    RequestInterceptorChainWrapper chainWrapper = null;
+  private RequestInterceptorChainWrapper initializePipeline(String user) {
     synchronized (this.userPipelineMap) {
       if (this.userPipelineMap.containsKey(user)) {
         LOG.info("Request to start an already existing user: {}"
             + " was received, so ignoring.", user);
-        return;
+        return userPipelineMap.get(user);
       }
 
-      chainWrapper = new RequestInterceptorChainWrapper();
-      this.userPipelineMap.put(user, chainWrapper);
-    }
-
-    // We register the pipeline instance in the map first and then initialize it
-    // later because chain initialization can be expensive and we would like to
-    // release the lock as soon as possible to prevent other applications from
-    // blocking when one application's chain is initializing
-    LOG.info("Initializing request processing pipeline for the user: {}", user);
-
-    try {
-      RMAdminRequestInterceptor interceptorChain =
-          this.createRequestInterceptorChain();
-      interceptorChain.init(user);
-      chainWrapper.init(interceptorChain);
-    } catch (Exception e) {
-      synchronized (this.userPipelineMap) {
-        this.userPipelineMap.remove(user);
+      RequestInterceptorChainWrapper chainWrapper =
+          new RequestInterceptorChainWrapper();
+      try {
+        // We should init the pipeline instance after it is created and then
+        // add to the map, to ensure thread safe.
+        LOG.info("Initializing request processing pipeline for user: {}", user);
+
+        RMAdminRequestInterceptor interceptorChain =
+            this.createRequestInterceptorChain();
+        interceptorChain.init(user);
+        chainWrapper.init(interceptorChain);
+      } catch (Exception e) {
+        LOG.error("Init RMAdminRequestInterceptor error for user: " + user, e);
+        throw e;
       }
-      throw e;
+
+      this.userPipelineMap.put(user, chainWrapper);
+      return chainWrapper;
     }
   }
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/0d9804dc/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-router/src/main/java/org/apache/hadoop/yarn/server/router/webapp/RouterWebServices.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-router/src/main/java/org/apache/hadoop/yarn/server/router/webapp/RouterWebServices.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-router/src/main/java/org/apache/hadoop/yarn/server/router/webapp/RouterWebServices.java
index ae57f1c..49de588 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-router/src/main/java/org/apache/hadoop/yarn/server/router/webapp/RouterWebServices.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-router/src/main/java/org/apache/hadoop/yarn/server/router/webapp/RouterWebServices.java
@@ -173,10 +173,11 @@ public class RouterWebServices implements RMWebServiceProtocol {
     } catch (IOException e) {
       LOG.error("Cannot get user: {}", e.getMessage());
     }
-    if (!userPipelineMap.containsKey(user)) {
-      initializePipeline(user);
+    RequestInterceptorChainWrapper chain = userPipelineMap.get(user);
+    if (chain != null && chain.getRootInterceptor() != null) {
+      return chain;
     }
-    return userPipelineMap.get(user);
+    return initializePipeline(user);
   }
 
   /**
@@ -242,35 +243,32 @@ public class RouterWebServices implements RMWebServiceProtocol {
    *
    * @param user
    */
-  private void initializePipeline(String user) {
-    RequestInterceptorChainWrapper chainWrapper = null;
+  private RequestInterceptorChainWrapper initializePipeline(String user) {
     synchronized (this.userPipelineMap) {
       if (this.userPipelineMap.containsKey(user)) {
         LOG.info("Request to start an already existing user: {}"
             + " was received, so ignoring.", user);
-        return;
+        return userPipelineMap.get(user);
       }
 
-      chainWrapper = new RequestInterceptorChainWrapper();
-      this.userPipelineMap.put(user, chainWrapper);
-    }
-
-    // We register the pipeline instance in the map first and then initialize it
-    // later because chain initialization can be expensive and we would like to
-    // release the lock as soon as possible to prevent other applications from
-    // blocking when one application's chain is initializing
-    LOG.info("Initializing request processing pipeline for the user: {}", user);
-
-    try {
-      RESTRequestInterceptor interceptorChain =
-          this.createRequestInterceptorChain();
-      interceptorChain.init(user);
-      chainWrapper.init(interceptorChain);
-    } catch (Exception e) {
-      synchronized (this.userPipelineMap) {
-        this.userPipelineMap.remove(user);
+      RequestInterceptorChainWrapper chainWrapper =
+          new RequestInterceptorChainWrapper();
+      try {
+        // We should init the pipeline instance after it is created and then
+        // add to the map, to ensure thread safe.
+        LOG.info("Initializing request processing pipeline for user: {}", user);
+
+        RESTRequestInterceptor interceptorChain =
+            this.createRequestInterceptorChain();
+        interceptorChain.init(user);
+        chainWrapper.init(interceptorChain);
+      } catch (Exception e) {
+        LOG.error("Init RESTRequestInterceptor error for user: " + user, e);
+        throw e;
       }
-      throw e;
+
+      this.userPipelineMap.put(user, chainWrapper);
+      return chainWrapper;
     }
   }
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/0d9804dc/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-router/src/test/java/org/apache/hadoop/yarn/server/router/clientrm/TestRouterClientRMService.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-router/src/test/java/org/apache/hadoop/yarn/server/router/clientrm/TestRouterClientRMService.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-router/src/test/java/org/apache/hadoop/yarn/server/router/clientrm/TestRouterClientRMService.java
index a9c3729..b03059d 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-router/src/test/java/org/apache/hadoop/yarn/server/router/clientrm/TestRouterClientRMService.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-router/src/test/java/org/apache/hadoop/yarn/server/router/clientrm/TestRouterClientRMService.java
@@ -19,8 +19,10 @@
 package org.apache.hadoop.yarn.server.router.clientrm;
 
 import java.io.IOException;
+import java.security.PrivilegedExceptionAction;
 import java.util.Map;
 
+import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.yarn.api.protocolrecords.GetClusterMetricsResponse;
 import org.apache.hadoop.yarn.api.protocolrecords.GetClusterNodeLabelsResponse;
 import org.apache.hadoop.yarn.api.protocolrecords.GetClusterNodesResponse;
@@ -207,4 +209,62 @@ public class TestRouterClientRMService extends BaseRouterClientRMTest {
     Assert.assertNull("test2 should have been evicted", chain);
   }
 
+  /**
+   * This test validates if the ClientRequestInterceptor chain for the user
+   * can build and init correctly when a multi-client process begins to
+   * request RouterClientRMService for the same user simultaneously.
+   */
+  @Test
+  public void testClientPipelineConcurrent() throws InterruptedException {
+    final String user = "test1";
+
+    /*
+     * ClientTestThread is a thread to simulate a client request to get a
+     * ClientRequestInterceptor for the user.
+     */
+    class ClientTestThread extends Thread {
+      private ClientRequestInterceptor interceptor;
+      @Override public void run() {
+        try {
+          interceptor = pipeline();
+        } catch (IOException | InterruptedException e) {
+          e.printStackTrace();
+        }
+      }
+      private ClientRequestInterceptor pipeline()
+          throws IOException, InterruptedException {
+        return UserGroupInformation.createRemoteUser(user).doAs(
+            new PrivilegedExceptionAction<ClientRequestInterceptor>() {
+              @Override
+              public ClientRequestInterceptor run() throws Exception {
+                RequestInterceptorChainWrapper wrapper =
+                    getRouterClientRMService().getInterceptorChain();
+                ClientRequestInterceptor interceptor =
+                    wrapper.getRootInterceptor();
+                Assert.assertNotNull(interceptor);
+                LOG.info("init client interceptor success for user " + user);
+                return interceptor;
+              }
+            });
+      }
+    }
+
+    /*
+     * We start the first thread. It should not finish initing a chainWrapper
+     * before the other thread starts. In this way, the second thread can
+     * init at the same time of the first one. In the end, we validate that
+     * the 2 threads get the same chainWrapper without going into error.
+     */
+    ClientTestThread client1 = new ClientTestThread();
+    ClientTestThread client2 = new ClientTestThread();
+    client1.start();
+    client2.start();
+    client1.join();
+    client2.join();
+
+    Assert.assertNotNull(client1.interceptor);
+    Assert.assertNotNull(client2.interceptor);
+    Assert.assertTrue(client1.interceptor == client2.interceptor);
+  }
+
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/0d9804dc/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-router/src/test/java/org/apache/hadoop/yarn/server/router/rmadmin/TestRouterRMAdminService.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-router/src/test/java/org/apache/hadoop/yarn/server/router/rmadmin/TestRouterRMAdminService.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-router/src/test/java/org/apache/hadoop/yarn/server/router/rmadmin/TestRouterRMAdminService.java
index 11786e6..07ef73c 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-router/src/test/java/org/apache/hadoop/yarn/server/router/rmadmin/TestRouterRMAdminService.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-router/src/test/java/org/apache/hadoop/yarn/server/router/rmadmin/TestRouterRMAdminService.java
@@ -19,8 +19,10 @@
 package org.apache.hadoop.yarn.server.router.rmadmin;
 
 import java.io.IOException;
+import java.security.PrivilegedExceptionAction;
 import java.util.Map;
 
+import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.yarn.exceptions.YarnException;
 import org.apache.hadoop.yarn.server.api.protocolrecords.AddToClusterNodeLabelsResponse;
 import org.apache.hadoop.yarn.server.api.protocolrecords.CheckForDecommissioningNodesResponse;
@@ -216,4 +218,62 @@ public class TestRouterRMAdminService extends BaseRouterRMAdminTest {
     Assert.assertNull("test2 should have been evicted", chain);
   }
 
+  /**
+   * This test validates if the RMAdminRequestInterceptor chain for the user
+   * can build and init correctly when a multi-client process begins to
+   * request RouterRMAdminService for the same user simultaneously.
+   */
+  @Test
+  public void testRMAdminPipelineConcurrent() throws InterruptedException {
+    final String user = "test1";
+
+    /*
+     * ClientTestThread is a thread to simulate a client request to get a
+     * RMAdminRequestInterceptor for the user.
+     */
+    class ClientTestThread extends Thread {
+      private RMAdminRequestInterceptor interceptor;
+      @Override public void run() {
+        try {
+          interceptor = pipeline();
+        } catch (IOException | InterruptedException e) {
+          e.printStackTrace();
+        }
+      }
+      private RMAdminRequestInterceptor pipeline()
+          throws IOException, InterruptedException {
+        return UserGroupInformation.createRemoteUser(user).doAs(
+            new PrivilegedExceptionAction<RMAdminRequestInterceptor>() {
+              @Override
+              public RMAdminRequestInterceptor run() throws Exception {
+                RequestInterceptorChainWrapper wrapper =
+                    getRouterRMAdminService().getInterceptorChain();
+                RMAdminRequestInterceptor interceptor =
+                    wrapper.getRootInterceptor();
+                Assert.assertNotNull(interceptor);
+                LOG.info("init rm admin interceptor success for user" + user);
+                return interceptor;
+              }
+            });
+      }
+    }
+
+    /*
+     * We start the first thread. It should not finish initing a chainWrapper
+     * before the other thread starts. In this way, the second thread can
+     * init at the same time of the first one. In the end, we validate that
+     * the 2 threads get the same chainWrapper without going into error.
+     */
+    ClientTestThread client1 = new ClientTestThread();
+    ClientTestThread client2 = new ClientTestThread();
+    client1.start();
+    client2.start();
+    client1.join();
+    client2.join();
+
+    Assert.assertNotNull(client1.interceptor);
+    Assert.assertNotNull(client2.interceptor);
+    Assert.assertTrue(client1.interceptor == client2.interceptor);
+  }
+
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/0d9804dc/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-router/src/test/java/org/apache/hadoop/yarn/server/router/webapp/TestRouterWebServices.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-router/src/test/java/org/apache/hadoop/yarn/server/router/webapp/TestRouterWebServices.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-router/src/test/java/org/apache/hadoop/yarn/server/router/webapp/TestRouterWebServices.java
index c96575c..1465243 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-router/src/test/java/org/apache/hadoop/yarn/server/router/webapp/TestRouterWebServices.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-router/src/test/java/org/apache/hadoop/yarn/server/router/webapp/TestRouterWebServices.java
@@ -19,10 +19,12 @@
 package org.apache.hadoop.yarn.server.router.webapp;
 
 import java.io.IOException;
+import java.security.PrivilegedExceptionAction;
 import java.util.Map;
 
 import javax.ws.rs.core.Response;
 
+import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.yarn.exceptions.YarnException;
 import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.ActivitiesInfo;
 import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.AppActivitiesInfo;
@@ -49,12 +51,17 @@ import org.apache.hadoop.yarn.server.webapp.dao.ContainerInfo;
 import org.apache.hadoop.yarn.server.webapp.dao.ContainersInfo;
 import org.junit.Assert;
 import org.junit.Test;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 /**
  * Test class to validate the WebService interceptor model inside the Router.
  */
 public class TestRouterWebServices extends BaseRouterWebServicesTest {
 
+  private static final Logger LOG =
+      LoggerFactory.getLogger(TestRouterWebServices.class);
+
   private String user = "test1";
 
   /**
@@ -266,4 +273,62 @@ public class TestRouterWebServices extends BaseRouterWebServicesTest {
     Assert.assertNull("test2 should have been evicted", chain);
   }
 
+  /**
+   * This test validates if the RESTRequestInterceptor chain for the user
+   * can build and init correctly when a multi-client process begins to
+   * request RouterWebServices for the same user simultaneously.
+   */
+  @Test
+  public void testWebPipelineConcurrent() throws InterruptedException {
+    final String user = "test1";
+
+    /*
+     * ClientTestThread is a thread to simulate a client request to get a
+     * RESTRequestInterceptor for the user.
+     */
+    class ClientTestThread extends Thread {
+      private RESTRequestInterceptor interceptor;
+      @Override public void run() {
+        try {
+          interceptor = pipeline();
+        } catch (IOException | InterruptedException e) {
+          e.printStackTrace();
+        }
+      }
+      private RESTRequestInterceptor pipeline()
+          throws IOException, InterruptedException {
+        return UserGroupInformation.createRemoteUser(user).doAs(
+            new PrivilegedExceptionAction<RESTRequestInterceptor>() {
+              @Override
+              public RESTRequestInterceptor run() throws Exception {
+                RequestInterceptorChainWrapper wrapper =
+                    getInterceptorChain(user);
+                RESTRequestInterceptor interceptor =
+                    wrapper.getRootInterceptor();
+                Assert.assertNotNull(interceptor);
+                LOG.info("init web interceptor success for user" + user);
+                return interceptor;
+              }
+            });
+      }
+    }
+
+    /*
+     * We start the first thread. It should not finish initing a chainWrapper
+     * before the other thread starts. In this way, the second thread can
+     * init at the same time of the first one. In the end, we validate that
+     * the 2 threads get the same chainWrapper without going into error.
+     */
+    ClientTestThread client1 = new ClientTestThread();
+    ClientTestThread client2 = new ClientTestThread();
+    client1.start();
+    client2.start();
+    client1.join();
+    client2.join();
+
+    Assert.assertNotNull(client1.interceptor);
+    Assert.assertNotNull(client2.interceptor);
+    Assert.assertTrue(client1.interceptor == client2.interceptor);
+  }
+
 }


---------------------------------------------------------------------
To unsubscribe, e-mail: common-commits-unsubscribe@hadoop.apache.org
For additional commands, e-mail: common-commits-help@hadoop.apache.org


[28/50] [abbrv] hadoop git commit: HDDS-167. Rename KeySpaceManager to OzoneManager. Contributed by Arpit Agarwal.

Posted by vi...@apache.org.
http://git-wip-us.apache.org/repos/asf/hadoop/blob/061b1685/hadoop-ozone/ozone-manager/src/main/webapps/ozoneManager/ozoneManager.js
----------------------------------------------------------------------
diff --git a/hadoop-ozone/ozone-manager/src/main/webapps/ozoneManager/ozoneManager.js b/hadoop-ozone/ozone-manager/src/main/webapps/ozoneManager/ozoneManager.js
new file mode 100644
index 0000000..ca03554
--- /dev/null
+++ b/hadoop-ozone/ozone-manager/src/main/webapps/ozoneManager/ozoneManager.js
@@ -0,0 +1,110 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+(function () {
+    "use strict";
+
+    var isIgnoredJmxKeys = function (key) {
+        return key == 'name' || key == 'modelerType' || key.match(/tag.*/);
+    };
+
+    angular.module('ozoneManager', ['ozone', 'nvd3']);
+    angular.module('ozoneManager').config(function ($routeProvider) {
+        $routeProvider
+            .when("/metrics/ozoneManager", {
+                template: "<om-metrics></om-metrics>"
+            });
+    });
+    angular.module('ozoneManager').component('omMetrics', {
+        templateUrl: 'om-metrics.html',
+        controller: function ($http) {
+            var ctrl = this;
+
+            ctrl.graphOptions = {
+                chart: {
+                    type: 'pieChart',
+                    height: 500,
+                    x: function (d) {
+                        return d.key;
+                    },
+                    y: function (d) {
+                        return d.value;
+                    },
+                    showLabels: true,
+                    labelType: 'value',
+                    duration: 500,
+                    labelThreshold: 0.01,
+                    valueFormat: function(d) {
+                        return d3.format('d')(d);
+                    },
+                    legend: {
+                        margin: {
+                            top: 5,
+                            right: 35,
+                            bottom: 5,
+                            left: 0
+                        }
+                    }
+                }
+            };
+
+
+            $http.get("jmx?qry=Hadoop:service=OzoneManager,name=OMMetrics")
+                .then(function (result) {
+
+                    var groupedMetrics = {others: [], nums: {}};
+                    var metrics = result.data.beans[0]
+                    for (var key in metrics) {
+                        var numericalStatistic = key.match(/Num([A-Z][a-z]+)(.+?)(Fails)?$/);
+                        if (numericalStatistic) {
+                            var type = numericalStatistic[1];
+                            var name = numericalStatistic[2];
+                            var failed = numericalStatistic[3];
+                            groupedMetrics.nums[type] = groupedMetrics.nums[type] || {
+                                    failures: [],
+                                    all: []
+                                };
+                            if (failed) {
+                                groupedMetrics.nums[type].failures.push({
+                                    key: name,
+                                    value: metrics[key]
+                                })
+                            } else {
+                                if (name == "Ops") {
+                                    groupedMetrics.nums[type].ops = metrics[key]
+                                } else {
+                                    groupedMetrics.nums[type].all.push({
+                                        key: name,
+                                        value: metrics[key]
+                                    })
+                                }
+                            }
+                        } else if (isIgnoredJmxKeys(key)) {
+                            //ignore
+                        } else {
+                            groupedMetrics.others.push({
+                                'key': key,
+                                'value': metrics[key]
+                            });
+                        }
+                    }
+                    ctrl.metrics = groupedMetrics;
+                })
+        }
+    });
+
+})();

http://git-wip-us.apache.org/repos/asf/hadoop/blob/061b1685/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/ksm/TestBucketManagerImpl.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/ksm/TestBucketManagerImpl.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/ksm/TestBucketManagerImpl.java
deleted file mode 100644
index 0b43bf9..0000000
--- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/ksm/TestBucketManagerImpl.java
+++ /dev/null
@@ -1,395 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with this
- * work for additional information regarding copyright ownership.  The ASF
- * licenses this file to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations under
- * the License.
- */
-package org.apache.hadoop.ozone.ksm;
-
-import org.apache.hadoop.fs.StorageType;
-import org.apache.hadoop.hdfs.DFSUtil;
-import org.apache.hadoop.ozone.ksm.helpers.KsmBucketArgs;
-import org.apache.hadoop.ozone.ksm.helpers.KsmBucketInfo;
-import org.apache.hadoop.ozone.OzoneConsts;
-import org.apache.hadoop.ozone.ksm.exceptions.KSMException;
-import org.apache.hadoop.ozone.ksm.exceptions
-    .KSMException.ResultCodes;
-import org.apache.hadoop.ozone.OzoneAcl;
-import org.junit.Assert;
-import org.junit.Rule;
-import org.junit.Test;
-import org.junit.rules.ExpectedException;
-import org.junit.runner.RunWith;
-import org.mockito.Mockito;
-import org.mockito.invocation.InvocationOnMock;
-import org.mockito.runners.MockitoJUnitRunner;
-import org.mockito.stubbing.Answer;
-
-import java.io.IOException;
-import java.util.Map;
-import java.util.HashMap;
-import java.util.Iterator;
-import java.util.List;
-import java.util.LinkedList;
-import java.util.concurrent.locks.ReadWriteLock;
-import java.util.concurrent.locks.ReentrantReadWriteLock;
-
-import static org.mockito.Mockito.any;
-
-/**
- * Tests BucketManagerImpl, mocks KSMMetadataManager for testing.
- */
-@RunWith(MockitoJUnitRunner.class)
-public class TestBucketManagerImpl {
-  @Rule
-  public ExpectedException thrown = ExpectedException.none();
-
-  private KSMMetadataManager getMetadataManagerMock(String... volumesToCreate)
-      throws IOException {
-    KSMMetadataManager metadataManager = Mockito.mock(KSMMetadataManager.class);
-    Map<String, byte[]> metadataDB = new HashMap<>();
-    ReadWriteLock lock = new ReentrantReadWriteLock();
-
-    Mockito.when(metadataManager.writeLock()).thenReturn(lock.writeLock());
-    Mockito.when(metadataManager.readLock()).thenReturn(lock.readLock());
-    Mockito.when(metadataManager.getVolumeKey(any(String.class))).thenAnswer(
-        (InvocationOnMock invocation) ->
-            DFSUtil.string2Bytes(
-                OzoneConsts.KSM_VOLUME_PREFIX + invocation.getArguments()[0]));
-    Mockito.when(metadataManager
-        .getBucketKey(any(String.class), any(String.class))).thenAnswer(
-            (InvocationOnMock invocation) ->
-                DFSUtil.string2Bytes(
-                    OzoneConsts.KSM_VOLUME_PREFIX
-                        + invocation.getArguments()[0]
-                        + OzoneConsts.KSM_BUCKET_PREFIX
-                        + invocation.getArguments()[1]));
-
-    Mockito.doAnswer(
-        new Answer<Boolean>() {
-          @Override
-          public Boolean answer(InvocationOnMock invocation)
-              throws Throwable {
-            String keyRootName =  OzoneConsts.KSM_KEY_PREFIX
-                + invocation.getArguments()[0]
-                + OzoneConsts.KSM_KEY_PREFIX
-                + invocation.getArguments()[1]
-                + OzoneConsts.KSM_KEY_PREFIX;
-            Iterator<String> keyIterator = metadataDB.keySet().iterator();
-            while(keyIterator.hasNext()) {
-              if(keyIterator.next().startsWith(keyRootName)) {
-                return false;
-              }
-            }
-            return true;
-          }
-        }).when(metadataManager).isBucketEmpty(any(String.class),
-        any(String.class));
-
-    Mockito.doAnswer(
-        new Answer<Void>() {
-          @Override
-          public Void answer(InvocationOnMock invocation) throws Throwable {
-            metadataDB.put(DFSUtil.bytes2String(
-                (byte[])invocation.getArguments()[0]),
-                (byte[])invocation.getArguments()[1]);
-            return null;
-          }
-        }).when(metadataManager).put(any(byte[].class), any(byte[].class));
-
-    Mockito.when(metadataManager.get(any(byte[].class))).thenAnswer(
-        (InvocationOnMock invocation) ->
-            metadataDB.get(DFSUtil.bytes2String(
-                (byte[])invocation.getArguments()[0]))
-    );
-    Mockito.doAnswer(
-        new Answer<Void>() {
-          @Override
-          public Void answer(InvocationOnMock invocation) throws Throwable {
-            metadataDB.remove(DFSUtil.bytes2String(
-                (byte[])invocation.getArguments()[0]));
-            return null;
-          }
-        }).when(metadataManager).delete(any(byte[].class));
-
-    for(String volumeName : volumesToCreate) {
-      byte[] dummyVolumeInfo = DFSUtil.string2Bytes(volumeName);
-      metadataDB.put(OzoneConsts.KSM_VOLUME_PREFIX + volumeName,
-                     dummyVolumeInfo);
-    }
-    return metadataManager;
-  }
-
-  @Test
-  public void testCreateBucketWithoutVolume() throws IOException {
-    thrown.expectMessage("Volume doesn't exist");
-    KSMMetadataManager metaMgr = getMetadataManagerMock();
-    try {
-      BucketManager bucketManager = new BucketManagerImpl(metaMgr);
-      KsmBucketInfo bucketInfo = KsmBucketInfo.newBuilder()
-          .setVolumeName("sampleVol")
-          .setBucketName("bucketOne")
-          .build();
-      bucketManager.createBucket(bucketInfo);
-    } catch(KSMException ksmEx) {
-      Assert.assertEquals(ResultCodes.FAILED_VOLUME_NOT_FOUND,
-          ksmEx.getResult());
-      throw ksmEx;
-    }
-  }
-
-  @Test
-  public void testCreateBucket() throws IOException {
-    KSMMetadataManager metaMgr = getMetadataManagerMock("sampleVol");
-    BucketManager bucketManager = new BucketManagerImpl(metaMgr);
-    KsmBucketInfo bucketInfo = KsmBucketInfo.newBuilder()
-        .setVolumeName("sampleVol")
-        .setBucketName("bucketOne")
-        .build();
-    bucketManager.createBucket(bucketInfo);
-    Assert.assertNotNull(bucketManager.getBucketInfo("sampleVol", "bucketOne"));
-  }
-
-  @Test
-  public void testCreateAlreadyExistingBucket() throws IOException {
-    thrown.expectMessage("Bucket already exist");
-    KSMMetadataManager metaMgr = getMetadataManagerMock("sampleVol");
-    try {
-      BucketManager bucketManager = new BucketManagerImpl(metaMgr);
-      KsmBucketInfo bucketInfo = KsmBucketInfo.newBuilder()
-          .setVolumeName("sampleVol")
-          .setBucketName("bucketOne")
-          .build();
-      bucketManager.createBucket(bucketInfo);
-      bucketManager.createBucket(bucketInfo);
-    } catch(KSMException ksmEx) {
-      Assert.assertEquals(ResultCodes.FAILED_BUCKET_ALREADY_EXISTS,
-          ksmEx.getResult());
-      throw ksmEx;
-    }
-  }
-
-  @Test
-  public void testGetBucketInfoForInvalidBucket() throws IOException {
-    thrown.expectMessage("Bucket not found");
-    try {
-      KSMMetadataManager metaMgr = getMetadataManagerMock("sampleVol");
-      BucketManager bucketManager = new BucketManagerImpl(metaMgr);
-      bucketManager.getBucketInfo("sampleVol", "bucketOne");
-    } catch(KSMException ksmEx) {
-      Assert.assertEquals(ResultCodes.FAILED_BUCKET_NOT_FOUND,
-          ksmEx.getResult());
-      throw ksmEx;
-    }
-  }
-
-  @Test
-  public void testGetBucketInfo() throws IOException {
-    KSMMetadataManager metaMgr = getMetadataManagerMock("sampleVol");
-    BucketManager bucketManager = new BucketManagerImpl(metaMgr);
-    KsmBucketInfo bucketInfo = KsmBucketInfo.newBuilder()
-        .setVolumeName("sampleVol")
-        .setBucketName("bucketOne")
-        .setStorageType(StorageType.DISK)
-        .setIsVersionEnabled(false)
-        .build();
-    bucketManager.createBucket(bucketInfo);
-    KsmBucketInfo result = bucketManager.getBucketInfo(
-        "sampleVol", "bucketOne");
-    Assert.assertEquals("sampleVol", result.getVolumeName());
-    Assert.assertEquals("bucketOne", result.getBucketName());
-    Assert.assertEquals(StorageType.DISK,
-        result.getStorageType());
-    Assert.assertEquals(false, result.getIsVersionEnabled());
-  }
-
-  @Test
-  public void testSetBucketPropertyAddACL() throws IOException {
-    KSMMetadataManager metaMgr = getMetadataManagerMock("sampleVol");
-    List<OzoneAcl> acls = new LinkedList<>();
-    OzoneAcl ozoneAcl = new OzoneAcl(OzoneAcl.OzoneACLType.USER,
-        "root", OzoneAcl.OzoneACLRights.READ);
-    acls.add(ozoneAcl);
-    BucketManager bucketManager = new BucketManagerImpl(metaMgr);
-    KsmBucketInfo bucketInfo = KsmBucketInfo.newBuilder()
-        .setVolumeName("sampleVol")
-        .setBucketName("bucketOne")
-        .setAcls(acls)
-        .setStorageType(StorageType.DISK)
-        .setIsVersionEnabled(false)
-        .build();
-    bucketManager.createBucket(bucketInfo);
-    KsmBucketInfo result = bucketManager.getBucketInfo(
-        "sampleVol", "bucketOne");
-    Assert.assertEquals("sampleVol", result.getVolumeName());
-    Assert.assertEquals("bucketOne", result.getBucketName());
-    Assert.assertEquals(1, result.getAcls().size());
-    List<OzoneAcl> addAcls = new LinkedList<>();
-    OzoneAcl newAcl = new OzoneAcl(OzoneAcl.OzoneACLType.USER,
-        "ozone", OzoneAcl.OzoneACLRights.READ);
-    addAcls.add(newAcl);
-    KsmBucketArgs bucketArgs = KsmBucketArgs.newBuilder()
-        .setVolumeName("sampleVol")
-        .setBucketName("bucketOne")
-        .setAddAcls(addAcls)
-        .build();
-    bucketManager.setBucketProperty(bucketArgs);
-    KsmBucketInfo updatedResult = bucketManager.getBucketInfo(
-        "sampleVol", "bucketOne");
-    Assert.assertEquals(2, updatedResult.getAcls().size());
-    Assert.assertTrue(updatedResult.getAcls().contains(newAcl));
-  }
-
-  @Test
-  public void testSetBucketPropertyRemoveACL() throws IOException {
-    KSMMetadataManager metaMgr = getMetadataManagerMock("sampleVol");
-    List<OzoneAcl> acls = new LinkedList<>();
-    OzoneAcl aclOne = new OzoneAcl(OzoneAcl.OzoneACLType.USER,
-        "root", OzoneAcl.OzoneACLRights.READ);
-    OzoneAcl aclTwo = new OzoneAcl(OzoneAcl.OzoneACLType.USER,
-        "ozone", OzoneAcl.OzoneACLRights.READ);
-    acls.add(aclOne);
-    acls.add(aclTwo);
-    BucketManager bucketManager = new BucketManagerImpl(metaMgr);
-    KsmBucketInfo bucketInfo = KsmBucketInfo.newBuilder()
-        .setVolumeName("sampleVol")
-        .setBucketName("bucketOne")
-        .setAcls(acls)
-        .setStorageType(StorageType.DISK)
-        .setIsVersionEnabled(false)
-        .build();
-    bucketManager.createBucket(bucketInfo);
-    KsmBucketInfo result = bucketManager.getBucketInfo(
-        "sampleVol", "bucketOne");
-    Assert.assertEquals(2, result.getAcls().size());
-    List<OzoneAcl> removeAcls = new LinkedList<>();
-    removeAcls.add(aclTwo);
-    KsmBucketArgs bucketArgs = KsmBucketArgs.newBuilder()
-        .setVolumeName("sampleVol")
-        .setBucketName("bucketOne")
-        .setRemoveAcls(removeAcls)
-        .build();
-    bucketManager.setBucketProperty(bucketArgs);
-    KsmBucketInfo updatedResult = bucketManager.getBucketInfo(
-        "sampleVol", "bucketOne");
-    Assert.assertEquals(1, updatedResult.getAcls().size());
-    Assert.assertFalse(updatedResult.getAcls().contains(aclTwo));
-  }
-
-  @Test
-  public void testSetBucketPropertyChangeStorageType() throws IOException {
-    KSMMetadataManager metaMgr = getMetadataManagerMock("sampleVol");
-    BucketManager bucketManager = new BucketManagerImpl(metaMgr);
-    KsmBucketInfo bucketInfo = KsmBucketInfo.newBuilder()
-        .setVolumeName("sampleVol")
-        .setBucketName("bucketOne")
-        .setStorageType(StorageType.DISK)
-        .build();
-    bucketManager.createBucket(bucketInfo);
-    KsmBucketInfo result = bucketManager.getBucketInfo(
-        "sampleVol", "bucketOne");
-    Assert.assertEquals(StorageType.DISK,
-        result.getStorageType());
-    KsmBucketArgs bucketArgs = KsmBucketArgs.newBuilder()
-        .setVolumeName("sampleVol")
-        .setBucketName("bucketOne")
-        .setStorageType(StorageType.SSD)
-        .build();
-    bucketManager.setBucketProperty(bucketArgs);
-    KsmBucketInfo updatedResult = bucketManager.getBucketInfo(
-        "sampleVol", "bucketOne");
-    Assert.assertEquals(StorageType.SSD,
-        updatedResult.getStorageType());
-  }
-
-  @Test
-  public void testSetBucketPropertyChangeVersioning() throws IOException {
-    KSMMetadataManager metaMgr = getMetadataManagerMock("sampleVol");
-    BucketManager bucketManager = new BucketManagerImpl(metaMgr);
-    KsmBucketInfo bucketInfo = KsmBucketInfo.newBuilder()
-        .setVolumeName("sampleVol")
-        .setBucketName("bucketOne")
-        .setIsVersionEnabled(false)
-        .build();
-    bucketManager.createBucket(bucketInfo);
-    KsmBucketInfo result = bucketManager.getBucketInfo(
-        "sampleVol", "bucketOne");
-    Assert.assertFalse(result.getIsVersionEnabled());
-    KsmBucketArgs bucketArgs = KsmBucketArgs.newBuilder()
-        .setVolumeName("sampleVol")
-        .setBucketName("bucketOne")
-        .setIsVersionEnabled(true)
-        .build();
-    bucketManager.setBucketProperty(bucketArgs);
-    KsmBucketInfo updatedResult = bucketManager.getBucketInfo(
-        "sampleVol", "bucketOne");
-    Assert.assertTrue(updatedResult.getIsVersionEnabled());
-  }
-
-  @Test
-  public void testDeleteBucket() throws IOException {
-    thrown.expectMessage("Bucket not found");
-    KSMMetadataManager metaMgr = getMetadataManagerMock("sampleVol");
-    BucketManager bucketManager = new BucketManagerImpl(metaMgr);
-    for(int i = 0; i < 5; i++) {
-      KsmBucketInfo bucketInfo = KsmBucketInfo.newBuilder()
-          .setVolumeName("sampleVol")
-          .setBucketName("bucket_" + i)
-          .build();
-      bucketManager.createBucket(bucketInfo);
-    }
-    for(int i = 0; i < 5; i++) {
-      Assert.assertEquals("bucket_" + i,
-          bucketManager.getBucketInfo(
-              "sampleVol", "bucket_" + i).getBucketName());
-    }
-    try {
-      bucketManager.deleteBucket("sampleVol", "bucket_1");
-      Assert.assertNotNull(bucketManager.getBucketInfo(
-          "sampleVol", "bucket_2"));
-    } catch(IOException ex) {
-      Assert.fail(ex.getMessage());
-    }
-    try {
-      bucketManager.getBucketInfo("sampleVol", "bucket_1");
-    } catch(KSMException ksmEx) {
-      Assert.assertEquals(ResultCodes.FAILED_BUCKET_NOT_FOUND,
-          ksmEx.getResult());
-      throw ksmEx;
-    }
-  }
-
-  @Test
-  public void testDeleteNonEmptyBucket() throws IOException {
-    thrown.expectMessage("Bucket is not empty");
-    KSMMetadataManager metaMgr = getMetadataManagerMock("sampleVol");
-    BucketManager bucketManager = new BucketManagerImpl(metaMgr);
-    KsmBucketInfo bucketInfo = KsmBucketInfo.newBuilder()
-        .setVolumeName("sampleVol")
-        .setBucketName("bucketOne")
-        .build();
-    bucketManager.createBucket(bucketInfo);
-    //Create keys in bucket
-    metaMgr.put(DFSUtil.string2Bytes("/sampleVol/bucketOne/key_one"),
-        DFSUtil.string2Bytes("value_one"));
-    metaMgr.put(DFSUtil.string2Bytes("/sampleVol/bucketOne/key_two"),
-        DFSUtil.string2Bytes("value_two"));
-    try {
-      bucketManager.deleteBucket("sampleVol", "bucketOne");
-    } catch(KSMException ksmEx) {
-      Assert.assertEquals(ResultCodes.FAILED_BUCKET_NOT_EMPTY,
-          ksmEx.getResult());
-      throw ksmEx;
-    }
-  }
-}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/061b1685/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/ksm/TestChunkStreams.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/ksm/TestChunkStreams.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/ksm/TestChunkStreams.java
deleted file mode 100644
index e6158bd..0000000
--- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/ksm/TestChunkStreams.java
+++ /dev/null
@@ -1,234 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with this
- * work for additional information regarding copyright ownership.  The ASF
- * licenses this file to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations under
- * the License.
- */
-package org.apache.hadoop.ozone.ksm;
-
-import org.apache.commons.lang3.RandomStringUtils;
-import org.apache.hadoop.ozone.client.io.ChunkGroupInputStream;
-import org.apache.hadoop.ozone.client.io.ChunkGroupOutputStream;
-import org.apache.hadoop.hdds.scm.storage.ChunkInputStream;
-import org.junit.Rule;
-import org.junit.Test;
-import org.junit.rules.ExpectedException;
-
-import java.io.ByteArrayInputStream;
-import java.io.ByteArrayOutputStream;
-import java.io.OutputStream;
-import java.io.IOException;
-import java.util.ArrayList;
-
-import static org.junit.Assert.assertEquals;
-
-/**
- * This class tests ChunkGroupInputStream and ChunkGroupOutStream.
- */
-public class TestChunkStreams {
-
-  @Rule
-  public ExpectedException exception = ExpectedException.none();
-
-  /**
-   * This test uses ByteArrayOutputStream as the underlying stream to test
-   * the correctness of ChunkGroupOutputStream.
-   *
-   * @throws Exception
-   */
-  @Test
-  public void testWriteGroupOutputStream() throws Exception {
-    try (ChunkGroupOutputStream groupOutputStream =
-             new ChunkGroupOutputStream()) {
-      ArrayList<OutputStream> outputStreams = new ArrayList<>();
-
-      // 5 byte streams, each 100 bytes. write 500 bytes means writing to each
-      // of them with 100 bytes.
-      for (int i = 0; i < 5; i++) {
-        ByteArrayOutputStream out = new ByteArrayOutputStream(100);
-        outputStreams.add(out);
-        groupOutputStream.addStream(out, 100);
-      }
-      assertEquals(0, groupOutputStream.getByteOffset());
-
-      String dataString = RandomStringUtils.randomAscii(500);
-      byte[] data = dataString.getBytes();
-      groupOutputStream.write(data, 0, data.length);
-      assertEquals(500, groupOutputStream.getByteOffset());
-
-      String res = "";
-      int offset = 0;
-      for (OutputStream stream : outputStreams) {
-        String subString = stream.toString();
-        res += subString;
-        assertEquals(dataString.substring(offset, offset + 100), subString);
-        offset += 100;
-      }
-      assertEquals(dataString, res);
-    }
-  }
-
-  @Test
-  public void testErrorWriteGroupOutputStream() throws Exception {
-    try (ChunkGroupOutputStream groupOutputStream =
-             new ChunkGroupOutputStream()) {
-      ArrayList<OutputStream> outputStreams = new ArrayList<>();
-
-      // 5 byte streams, each 100 bytes. write 500 bytes means writing to each
-      // of them with 100 bytes. all 5 streams makes up a ChunkGroupOutputStream
-      // with a total of 500 bytes in size
-      for (int i = 0; i < 5; i++) {
-        ByteArrayOutputStream out = new ByteArrayOutputStream(100);
-        outputStreams.add(out);
-        groupOutputStream.addStream(out, 100);
-      }
-      assertEquals(0, groupOutputStream.getByteOffset());
-
-      // first writes of 100 bytes should succeed
-      groupOutputStream.write(RandomStringUtils.randomAscii(100).getBytes());
-      assertEquals(100, groupOutputStream.getByteOffset());
-
-      // second writes of 500 bytes should fail, as there should be only 400
-      // bytes space left
-      // TODO : if we decide to take the 400 bytes instead in the future,
-      // other add more informative error code rather than exception, need to
-      // change this part.
-      exception.expect(Exception.class);
-      groupOutputStream.write(RandomStringUtils.randomAscii(500).getBytes());
-      assertEquals(100, groupOutputStream.getByteOffset());
-    }
-  }
-
-  @Test
-  public void testReadGroupInputStream() throws Exception {
-    try (ChunkGroupInputStream groupInputStream = new ChunkGroupInputStream()) {
-      ArrayList<ChunkInputStream> inputStreams = new ArrayList<>();
-
-      String dataString = RandomStringUtils.randomAscii(500);
-      byte[] buf = dataString.getBytes();
-      int offset = 0;
-      for (int i = 0; i < 5; i++) {
-        int tempOffset = offset;
-        ChunkInputStream in =
-            new ChunkInputStream(null, null, null, new ArrayList<>(), null) {
-              private ByteArrayInputStream in =
-                  new ByteArrayInputStream(buf, tempOffset, 100);
-
-              @Override
-              public void seek(long pos) throws IOException {
-                throw new UnsupportedOperationException();
-              }
-
-              @Override
-              public long getPos() throws IOException {
-                throw new UnsupportedOperationException();
-              }
-
-              @Override
-              public boolean seekToNewSource(long targetPos)
-                  throws IOException {
-                throw new UnsupportedOperationException();
-              }
-
-              @Override
-              public int read() throws IOException {
-                return in.read();
-              }
-
-              @Override
-              public int read(byte[] b, int off, int len) throws IOException {
-                return in.read(b, off, len);
-              }
-            };
-        inputStreams.add(in);
-        offset += 100;
-        groupInputStream.addStream(in, 100);
-      }
-
-      byte[] resBuf = new byte[500];
-      int len = groupInputStream.read(resBuf, 0, 500);
-
-      assertEquals(500, len);
-      assertEquals(dataString, new String(resBuf));
-    }
-  }
-
-  @Test
-  public void testErrorReadGroupInputStream() throws Exception {
-    try (ChunkGroupInputStream groupInputStream = new ChunkGroupInputStream()) {
-      ArrayList<ChunkInputStream> inputStreams = new ArrayList<>();
-
-      String dataString = RandomStringUtils.randomAscii(500);
-      byte[] buf = dataString.getBytes();
-      int offset = 0;
-      for (int i = 0; i < 5; i++) {
-        int tempOffset = offset;
-        ChunkInputStream in =
-            new ChunkInputStream(null, null, null, new ArrayList<>(), null) {
-              private ByteArrayInputStream in =
-                  new ByteArrayInputStream(buf, tempOffset, 100);
-
-              @Override
-              public void seek(long pos) throws IOException {
-                throw new UnsupportedOperationException();
-              }
-
-              @Override
-              public long getPos() throws IOException {
-                throw new UnsupportedOperationException();
-              }
-
-              @Override
-              public boolean seekToNewSource(long targetPos)
-                  throws IOException {
-                throw new UnsupportedOperationException();
-              }
-
-              @Override
-              public int read() throws IOException {
-                return in.read();
-              }
-
-              @Override
-              public int read(byte[] b, int off, int len) throws IOException {
-                return in.read(b, off, len);
-              }
-            };
-        inputStreams.add(in);
-        offset += 100;
-        groupInputStream.addStream(in, 100);
-      }
-
-      byte[] resBuf = new byte[600];
-      // read 300 bytes first
-      int len = groupInputStream.read(resBuf, 0, 340);
-      assertEquals(3, groupInputStream.getCurrentStreamIndex());
-      assertEquals(60, groupInputStream.getRemainingOfIndex(3));
-      assertEquals(340, len);
-      assertEquals(dataString.substring(0, 340),
-          new String(resBuf).substring(0, 340));
-
-      // read following 300 bytes, but only 200 left
-      len = groupInputStream.read(resBuf, 340, 260);
-      assertEquals(5, groupInputStream.getCurrentStreamIndex());
-      assertEquals(0, groupInputStream.getRemainingOfIndex(4));
-      assertEquals(160, len);
-      assertEquals(dataString, new String(resBuf).substring(0, 500));
-
-      // further read should get EOF
-      len = groupInputStream.read(resBuf, 0, 1);
-      // reached EOF, further read should get -1
-      assertEquals(-1, len);
-    }
-  }
-}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/061b1685/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/ksm/TestKeySpaceManagerHttpServer.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/ksm/TestKeySpaceManagerHttpServer.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/ksm/TestKeySpaceManagerHttpServer.java
deleted file mode 100644
index b263df5..0000000
--- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/ksm/TestKeySpaceManagerHttpServer.java
+++ /dev/null
@@ -1,141 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.ozone.ksm;
-
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.FileUtil;
-import org.apache.hadoop.hdfs.DFSConfigKeys;
-import org.apache.hadoop.hdfs.web.URLConnectionFactory;
-import org.apache.hadoop.http.HttpConfig;
-import org.apache.hadoop.http.HttpConfig.Policy;
-import org.apache.hadoop.net.NetUtils;
-import org.apache.hadoop.hdds.scm.ScmConfigKeys;
-import org.apache.hadoop.security.ssl.KeyStoreTestUtil;
-import org.apache.hadoop.test.GenericTestUtils;
-import org.junit.AfterClass;
-import org.junit.Assert;
-import org.junit.BeforeClass;
-import org.junit.Test;
-import org.junit.runner.RunWith;
-import org.junit.runners.Parameterized;
-import org.junit.runners.Parameterized.Parameters;
-
-import java.io.File;
-import java.net.InetSocketAddress;
-import java.net.URL;
-import java.net.URLConnection;
-import java.util.Arrays;
-import java.util.Collection;
-
-/**
- * Test http server os KSM with various HTTP option.
- */
-@RunWith(value = Parameterized.class)
-public class TestKeySpaceManagerHttpServer {
-  private static final String BASEDIR = GenericTestUtils
-      .getTempPath(TestKeySpaceManagerHttpServer.class.getSimpleName());
-  private static String keystoresDir;
-  private static String sslConfDir;
-  private static Configuration conf;
-  private static URLConnectionFactory connectionFactory;
-
-  @Parameters public static Collection<Object[]> policy() {
-    Object[][] params = new Object[][] {
-        {HttpConfig.Policy.HTTP_ONLY},
-        {HttpConfig.Policy.HTTPS_ONLY},
-        {HttpConfig.Policy.HTTP_AND_HTTPS} };
-    return Arrays.asList(params);
-  }
-
-  private final HttpConfig.Policy policy;
-
-  public TestKeySpaceManagerHttpServer(Policy policy) {
-    super();
-    this.policy = policy;
-  }
-
-  @BeforeClass public static void setUp() throws Exception {
-    File base = new File(BASEDIR);
-    FileUtil.fullyDelete(base);
-    base.mkdirs();
-    conf = new Configuration();
-    keystoresDir = new File(BASEDIR).getAbsolutePath();
-    sslConfDir = KeyStoreTestUtil.getClasspathDir(
-        TestKeySpaceManagerHttpServer.class);
-    KeyStoreTestUtil.setupSSLConfig(keystoresDir, sslConfDir, conf, false);
-    connectionFactory =
-        URLConnectionFactory.newDefaultURLConnectionFactory(conf);
-    conf.set(DFSConfigKeys.DFS_CLIENT_HTTPS_KEYSTORE_RESOURCE_KEY,
-        KeyStoreTestUtil.getClientSSLConfigFileName());
-    conf.set(DFSConfigKeys.DFS_SERVER_HTTPS_KEYSTORE_RESOURCE_KEY,
-        KeyStoreTestUtil.getServerSSLConfigFileName());
-  }
-
-  @AfterClass public static void tearDown() throws Exception {
-    FileUtil.fullyDelete(new File(BASEDIR));
-    KeyStoreTestUtil.cleanupSSLConfig(keystoresDir, sslConfDir);
-  }
-
-  @Test public void testHttpPolicy() throws Exception {
-    conf.set(DFSConfigKeys.DFS_HTTP_POLICY_KEY, policy.name());
-    conf.set(ScmConfigKeys.OZONE_SCM_HTTPS_ADDRESS_KEY, "localhost:0");
-
-    InetSocketAddress addr = InetSocketAddress.createUnresolved("localhost", 0);
-    KeySpaceManagerHttpServer server = null;
-    try {
-      server = new KeySpaceManagerHttpServer(conf, null);
-      server.start();
-
-      Assert.assertTrue(implies(policy.isHttpEnabled(),
-          canAccess("http", server.getHttpAddress())));
-      Assert.assertTrue(
-          implies(!policy.isHttpEnabled(), server.getHttpAddress() == null));
-
-      Assert.assertTrue(implies(policy.isHttpsEnabled(),
-          canAccess("https", server.getHttpsAddress())));
-      Assert.assertTrue(
-          implies(!policy.isHttpsEnabled(), server.getHttpsAddress() == null));
-
-    } finally {
-      if (server != null) {
-        server.stop();
-      }
-    }
-  }
-
-  private static boolean canAccess(String scheme, InetSocketAddress addr) {
-    if (addr == null) {
-      return false;
-    }
-    try {
-      URL url =
-          new URL(scheme + "://" + NetUtils.getHostPortString(addr) + "/jmx");
-      URLConnection conn = connectionFactory.openConnection(url);
-      conn.connect();
-      conn.getContent();
-    } catch (Exception e) {
-      return false;
-    }
-    return true;
-  }
-
-  private static boolean implies(boolean a, boolean b) {
-    return !a || b;
-  }
-}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/061b1685/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/ksm/package-info.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/ksm/package-info.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/ksm/package-info.java
deleted file mode 100644
index 089ff4b..0000000
--- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/ksm/package-info.java
+++ /dev/null
@@ -1,21 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- *  with the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- *  Unless required by applicable law or agreed to in writing, software
- *  distributed under the License is distributed on an "AS IS" BASIS,
- *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- *  See the License for the specific language governing permissions and
- *  limitations under the License.
- */
-package org.apache.hadoop.ozone.ksm;
-/**
- * KSM tests
- */

http://git-wip-us.apache.org/repos/asf/hadoop/blob/061b1685/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestBucketManagerImpl.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestBucketManagerImpl.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestBucketManagerImpl.java
new file mode 100644
index 0000000..1ecac7f
--- /dev/null
+++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestBucketManagerImpl.java
@@ -0,0 +1,394 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with this
+ * work for additional information regarding copyright ownership.  The ASF
+ * licenses this file to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations under
+ * the License.
+ */
+package org.apache.hadoop.ozone.om;
+
+import org.apache.hadoop.fs.StorageType;
+import org.apache.hadoop.hdfs.DFSUtil;
+import org.apache.hadoop.ozone.om.helpers.OmBucketArgs;
+import org.apache.hadoop.ozone.om.helpers.OmBucketInfo;
+import org.apache.hadoop.ozone.OzoneConsts;
+import org.apache.hadoop.ozone.om.exceptions.OMException;
+import org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes;
+import org.apache.hadoop.ozone.OzoneAcl;
+import org.junit.Assert;
+import org.junit.Rule;
+import org.junit.Test;
+import org.junit.rules.ExpectedException;
+import org.junit.runner.RunWith;
+import org.mockito.Mockito;
+import org.mockito.invocation.InvocationOnMock;
+import org.mockito.runners.MockitoJUnitRunner;
+import org.mockito.stubbing.Answer;
+
+import java.io.IOException;
+import java.util.Map;
+import java.util.HashMap;
+import java.util.Iterator;
+import java.util.List;
+import java.util.LinkedList;
+import java.util.concurrent.locks.ReadWriteLock;
+import java.util.concurrent.locks.ReentrantReadWriteLock;
+
+import static org.mockito.Mockito.any;
+
+/**
+ * Tests BucketManagerImpl, mocks OMMetadataManager for testing.
+ */
+@RunWith(MockitoJUnitRunner.class)
+public class TestBucketManagerImpl {
+  @Rule
+  public ExpectedException thrown = ExpectedException.none();
+
+  private OMMetadataManager getMetadataManagerMock(String... volumesToCreate)
+      throws IOException {
+    OMMetadataManager metadataManager = Mockito.mock(OMMetadataManager.class);
+    Map<String, byte[]> metadataDB = new HashMap<>();
+    ReadWriteLock lock = new ReentrantReadWriteLock();
+
+    Mockito.when(metadataManager.writeLock()).thenReturn(lock.writeLock());
+    Mockito.when(metadataManager.readLock()).thenReturn(lock.readLock());
+    Mockito.when(metadataManager.getVolumeKey(any(String.class))).thenAnswer(
+        (InvocationOnMock invocation) ->
+            DFSUtil.string2Bytes(
+                OzoneConsts.OM_VOLUME_PREFIX + invocation.getArguments()[0]));
+    Mockito.when(metadataManager
+        .getBucketKey(any(String.class), any(String.class))).thenAnswer(
+            (InvocationOnMock invocation) ->
+                DFSUtil.string2Bytes(
+                    OzoneConsts.OM_VOLUME_PREFIX
+                        + invocation.getArguments()[0]
+                        + OzoneConsts.OM_BUCKET_PREFIX
+                        + invocation.getArguments()[1]));
+
+    Mockito.doAnswer(
+        new Answer<Boolean>() {
+          @Override
+          public Boolean answer(InvocationOnMock invocation)
+              throws Throwable {
+            String keyRootName =  OzoneConsts.OM_KEY_PREFIX
+                + invocation.getArguments()[0]
+                + OzoneConsts.OM_KEY_PREFIX
+                + invocation.getArguments()[1]
+                + OzoneConsts.OM_KEY_PREFIX;
+            Iterator<String> keyIterator = metadataDB.keySet().iterator();
+            while(keyIterator.hasNext()) {
+              if(keyIterator.next().startsWith(keyRootName)) {
+                return false;
+              }
+            }
+            return true;
+          }
+        }).when(metadataManager).isBucketEmpty(any(String.class),
+        any(String.class));
+
+    Mockito.doAnswer(
+        new Answer<Void>() {
+          @Override
+          public Void answer(InvocationOnMock invocation) throws Throwable {
+            metadataDB.put(DFSUtil.bytes2String(
+                (byte[])invocation.getArguments()[0]),
+                (byte[])invocation.getArguments()[1]);
+            return null;
+          }
+        }).when(metadataManager).put(any(byte[].class), any(byte[].class));
+
+    Mockito.when(metadataManager.get(any(byte[].class))).thenAnswer(
+        (InvocationOnMock invocation) ->
+            metadataDB.get(DFSUtil.bytes2String(
+                (byte[])invocation.getArguments()[0]))
+    );
+    Mockito.doAnswer(
+        new Answer<Void>() {
+          @Override
+          public Void answer(InvocationOnMock invocation) throws Throwable {
+            metadataDB.remove(DFSUtil.bytes2String(
+                (byte[])invocation.getArguments()[0]));
+            return null;
+          }
+        }).when(metadataManager).delete(any(byte[].class));
+
+    for(String volumeName : volumesToCreate) {
+      byte[] dummyVolumeInfo = DFSUtil.string2Bytes(volumeName);
+      metadataDB.put(OzoneConsts.OM_VOLUME_PREFIX + volumeName,
+                     dummyVolumeInfo);
+    }
+    return metadataManager;
+  }
+
+  @Test
+  public void testCreateBucketWithoutVolume() throws IOException {
+    thrown.expectMessage("Volume doesn't exist");
+    OMMetadataManager metaMgr = getMetadataManagerMock();
+    try {
+      BucketManager bucketManager = new BucketManagerImpl(metaMgr);
+      OmBucketInfo bucketInfo = OmBucketInfo.newBuilder()
+          .setVolumeName("sampleVol")
+          .setBucketName("bucketOne")
+          .build();
+      bucketManager.createBucket(bucketInfo);
+    } catch(OMException omEx) {
+      Assert.assertEquals(ResultCodes.FAILED_VOLUME_NOT_FOUND,
+          omEx.getResult());
+      throw omEx;
+    }
+  }
+
+  @Test
+  public void testCreateBucket() throws IOException {
+    OMMetadataManager metaMgr = getMetadataManagerMock("sampleVol");
+    BucketManager bucketManager = new BucketManagerImpl(metaMgr);
+    OmBucketInfo bucketInfo = OmBucketInfo.newBuilder()
+        .setVolumeName("sampleVol")
+        .setBucketName("bucketOne")
+        .build();
+    bucketManager.createBucket(bucketInfo);
+    Assert.assertNotNull(bucketManager.getBucketInfo("sampleVol", "bucketOne"));
+  }
+
+  @Test
+  public void testCreateAlreadyExistingBucket() throws IOException {
+    thrown.expectMessage("Bucket already exist");
+    OMMetadataManager metaMgr = getMetadataManagerMock("sampleVol");
+    try {
+      BucketManager bucketManager = new BucketManagerImpl(metaMgr);
+      OmBucketInfo bucketInfo = OmBucketInfo.newBuilder()
+          .setVolumeName("sampleVol")
+          .setBucketName("bucketOne")
+          .build();
+      bucketManager.createBucket(bucketInfo);
+      bucketManager.createBucket(bucketInfo);
+    } catch(OMException omEx) {
+      Assert.assertEquals(ResultCodes.FAILED_BUCKET_ALREADY_EXISTS,
+          omEx.getResult());
+      throw omEx;
+    }
+  }
+
+  @Test
+  public void testGetBucketInfoForInvalidBucket() throws IOException {
+    thrown.expectMessage("Bucket not found");
+    try {
+      OMMetadataManager metaMgr = getMetadataManagerMock("sampleVol");
+      BucketManager bucketManager = new BucketManagerImpl(metaMgr);
+      bucketManager.getBucketInfo("sampleVol", "bucketOne");
+    } catch(OMException omEx) {
+      Assert.assertEquals(ResultCodes.FAILED_BUCKET_NOT_FOUND,
+          omEx.getResult());
+      throw omEx;
+    }
+  }
+
+  @Test
+  public void testGetBucketInfo() throws IOException {
+    OMMetadataManager metaMgr = getMetadataManagerMock("sampleVol");
+    BucketManager bucketManager = new BucketManagerImpl(metaMgr);
+    OmBucketInfo bucketInfo = OmBucketInfo.newBuilder()
+        .setVolumeName("sampleVol")
+        .setBucketName("bucketOne")
+        .setStorageType(StorageType.DISK)
+        .setIsVersionEnabled(false)
+        .build();
+    bucketManager.createBucket(bucketInfo);
+    OmBucketInfo result = bucketManager.getBucketInfo(
+        "sampleVol", "bucketOne");
+    Assert.assertEquals("sampleVol", result.getVolumeName());
+    Assert.assertEquals("bucketOne", result.getBucketName());
+    Assert.assertEquals(StorageType.DISK,
+        result.getStorageType());
+    Assert.assertEquals(false, result.getIsVersionEnabled());
+  }
+
+  @Test
+  public void testSetBucketPropertyAddACL() throws IOException {
+    OMMetadataManager metaMgr = getMetadataManagerMock("sampleVol");
+    List<OzoneAcl> acls = new LinkedList<>();
+    OzoneAcl ozoneAcl = new OzoneAcl(OzoneAcl.OzoneACLType.USER,
+        "root", OzoneAcl.OzoneACLRights.READ);
+    acls.add(ozoneAcl);
+    BucketManager bucketManager = new BucketManagerImpl(metaMgr);
+    OmBucketInfo bucketInfo = OmBucketInfo.newBuilder()
+        .setVolumeName("sampleVol")
+        .setBucketName("bucketOne")
+        .setAcls(acls)
+        .setStorageType(StorageType.DISK)
+        .setIsVersionEnabled(false)
+        .build();
+    bucketManager.createBucket(bucketInfo);
+    OmBucketInfo result = bucketManager.getBucketInfo(
+        "sampleVol", "bucketOne");
+    Assert.assertEquals("sampleVol", result.getVolumeName());
+    Assert.assertEquals("bucketOne", result.getBucketName());
+    Assert.assertEquals(1, result.getAcls().size());
+    List<OzoneAcl> addAcls = new LinkedList<>();
+    OzoneAcl newAcl = new OzoneAcl(OzoneAcl.OzoneACLType.USER,
+        "ozone", OzoneAcl.OzoneACLRights.READ);
+    addAcls.add(newAcl);
+    OmBucketArgs bucketArgs = OmBucketArgs.newBuilder()
+        .setVolumeName("sampleVol")
+        .setBucketName("bucketOne")
+        .setAddAcls(addAcls)
+        .build();
+    bucketManager.setBucketProperty(bucketArgs);
+    OmBucketInfo updatedResult = bucketManager.getBucketInfo(
+        "sampleVol", "bucketOne");
+    Assert.assertEquals(2, updatedResult.getAcls().size());
+    Assert.assertTrue(updatedResult.getAcls().contains(newAcl));
+  }
+
+  @Test
+  public void testSetBucketPropertyRemoveACL() throws IOException {
+    OMMetadataManager metaMgr = getMetadataManagerMock("sampleVol");
+    List<OzoneAcl> acls = new LinkedList<>();
+    OzoneAcl aclOne = new OzoneAcl(OzoneAcl.OzoneACLType.USER,
+        "root", OzoneAcl.OzoneACLRights.READ);
+    OzoneAcl aclTwo = new OzoneAcl(OzoneAcl.OzoneACLType.USER,
+        "ozone", OzoneAcl.OzoneACLRights.READ);
+    acls.add(aclOne);
+    acls.add(aclTwo);
+    BucketManager bucketManager = new BucketManagerImpl(metaMgr);
+    OmBucketInfo bucketInfo = OmBucketInfo.newBuilder()
+        .setVolumeName("sampleVol")
+        .setBucketName("bucketOne")
+        .setAcls(acls)
+        .setStorageType(StorageType.DISK)
+        .setIsVersionEnabled(false)
+        .build();
+    bucketManager.createBucket(bucketInfo);
+    OmBucketInfo result = bucketManager.getBucketInfo(
+        "sampleVol", "bucketOne");
+    Assert.assertEquals(2, result.getAcls().size());
+    List<OzoneAcl> removeAcls = new LinkedList<>();
+    removeAcls.add(aclTwo);
+    OmBucketArgs bucketArgs = OmBucketArgs.newBuilder()
+        .setVolumeName("sampleVol")
+        .setBucketName("bucketOne")
+        .setRemoveAcls(removeAcls)
+        .build();
+    bucketManager.setBucketProperty(bucketArgs);
+    OmBucketInfo updatedResult = bucketManager.getBucketInfo(
+        "sampleVol", "bucketOne");
+    Assert.assertEquals(1, updatedResult.getAcls().size());
+    Assert.assertFalse(updatedResult.getAcls().contains(aclTwo));
+  }
+
+  @Test
+  public void testSetBucketPropertyChangeStorageType() throws IOException {
+    OMMetadataManager metaMgr = getMetadataManagerMock("sampleVol");
+    BucketManager bucketManager = new BucketManagerImpl(metaMgr);
+    OmBucketInfo bucketInfo = OmBucketInfo.newBuilder()
+        .setVolumeName("sampleVol")
+        .setBucketName("bucketOne")
+        .setStorageType(StorageType.DISK)
+        .build();
+    bucketManager.createBucket(bucketInfo);
+    OmBucketInfo result = bucketManager.getBucketInfo(
+        "sampleVol", "bucketOne");
+    Assert.assertEquals(StorageType.DISK,
+        result.getStorageType());
+    OmBucketArgs bucketArgs = OmBucketArgs.newBuilder()
+        .setVolumeName("sampleVol")
+        .setBucketName("bucketOne")
+        .setStorageType(StorageType.SSD)
+        .build();
+    bucketManager.setBucketProperty(bucketArgs);
+    OmBucketInfo updatedResult = bucketManager.getBucketInfo(
+        "sampleVol", "bucketOne");
+    Assert.assertEquals(StorageType.SSD,
+        updatedResult.getStorageType());
+  }
+
+  @Test
+  public void testSetBucketPropertyChangeVersioning() throws IOException {
+    OMMetadataManager metaMgr = getMetadataManagerMock("sampleVol");
+    BucketManager bucketManager = new BucketManagerImpl(metaMgr);
+    OmBucketInfo bucketInfo = OmBucketInfo.newBuilder()
+        .setVolumeName("sampleVol")
+        .setBucketName("bucketOne")
+        .setIsVersionEnabled(false)
+        .build();
+    bucketManager.createBucket(bucketInfo);
+    OmBucketInfo result = bucketManager.getBucketInfo(
+        "sampleVol", "bucketOne");
+    Assert.assertFalse(result.getIsVersionEnabled());
+    OmBucketArgs bucketArgs = OmBucketArgs.newBuilder()
+        .setVolumeName("sampleVol")
+        .setBucketName("bucketOne")
+        .setIsVersionEnabled(true)
+        .build();
+    bucketManager.setBucketProperty(bucketArgs);
+    OmBucketInfo updatedResult = bucketManager.getBucketInfo(
+        "sampleVol", "bucketOne");
+    Assert.assertTrue(updatedResult.getIsVersionEnabled());
+  }
+
+  @Test
+  public void testDeleteBucket() throws IOException {
+    thrown.expectMessage("Bucket not found");
+    OMMetadataManager metaMgr = getMetadataManagerMock("sampleVol");
+    BucketManager bucketManager = new BucketManagerImpl(metaMgr);
+    for(int i = 0; i < 5; i++) {
+      OmBucketInfo bucketInfo = OmBucketInfo.newBuilder()
+          .setVolumeName("sampleVol")
+          .setBucketName("bucket_" + i)
+          .build();
+      bucketManager.createBucket(bucketInfo);
+    }
+    for(int i = 0; i < 5; i++) {
+      Assert.assertEquals("bucket_" + i,
+          bucketManager.getBucketInfo(
+              "sampleVol", "bucket_" + i).getBucketName());
+    }
+    try {
+      bucketManager.deleteBucket("sampleVol", "bucket_1");
+      Assert.assertNotNull(bucketManager.getBucketInfo(
+          "sampleVol", "bucket_2"));
+    } catch(IOException ex) {
+      Assert.fail(ex.getMessage());
+    }
+    try {
+      bucketManager.getBucketInfo("sampleVol", "bucket_1");
+    } catch(OMException omEx) {
+      Assert.assertEquals(ResultCodes.FAILED_BUCKET_NOT_FOUND,
+          omEx.getResult());
+      throw omEx;
+    }
+  }
+
+  @Test
+  public void testDeleteNonEmptyBucket() throws IOException {
+    thrown.expectMessage("Bucket is not empty");
+    OMMetadataManager metaMgr = getMetadataManagerMock("sampleVol");
+    BucketManager bucketManager = new BucketManagerImpl(metaMgr);
+    OmBucketInfo bucketInfo = OmBucketInfo.newBuilder()
+        .setVolumeName("sampleVol")
+        .setBucketName("bucketOne")
+        .build();
+    bucketManager.createBucket(bucketInfo);
+    //Create keys in bucket
+    metaMgr.put(DFSUtil.string2Bytes("/sampleVol/bucketOne/key_one"),
+        DFSUtil.string2Bytes("value_one"));
+    metaMgr.put(DFSUtil.string2Bytes("/sampleVol/bucketOne/key_two"),
+        DFSUtil.string2Bytes("value_two"));
+    try {
+      bucketManager.deleteBucket("sampleVol", "bucketOne");
+    } catch(OMException omEx) {
+      Assert.assertEquals(ResultCodes.FAILED_BUCKET_NOT_EMPTY,
+          omEx.getResult());
+      throw omEx;
+    }
+  }
+}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/061b1685/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestChunkStreams.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestChunkStreams.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestChunkStreams.java
new file mode 100644
index 0000000..7ce916a
--- /dev/null
+++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestChunkStreams.java
@@ -0,0 +1,234 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with this
+ * work for additional information regarding copyright ownership.  The ASF
+ * licenses this file to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations under
+ * the License.
+ */
+package org.apache.hadoop.ozone.om;
+
+import org.apache.commons.lang3.RandomStringUtils;
+import org.apache.hadoop.ozone.client.io.ChunkGroupInputStream;
+import org.apache.hadoop.ozone.client.io.ChunkGroupOutputStream;
+import org.apache.hadoop.hdds.scm.storage.ChunkInputStream;
+import org.junit.Rule;
+import org.junit.Test;
+import org.junit.rules.ExpectedException;
+
+import java.io.ByteArrayInputStream;
+import java.io.ByteArrayOutputStream;
+import java.io.OutputStream;
+import java.io.IOException;
+import java.util.ArrayList;
+
+import static org.junit.Assert.assertEquals;
+
+/**
+ * This class tests ChunkGroupInputStream and ChunkGroupOutStream.
+ */
+public class TestChunkStreams {
+
+  @Rule
+  public ExpectedException exception = ExpectedException.none();
+
+  /**
+   * This test uses ByteArrayOutputStream as the underlying stream to test
+   * the correctness of ChunkGroupOutputStream.
+   *
+   * @throws Exception
+   */
+  @Test
+  public void testWriteGroupOutputStream() throws Exception {
+    try (ChunkGroupOutputStream groupOutputStream =
+             new ChunkGroupOutputStream()) {
+      ArrayList<OutputStream> outputStreams = new ArrayList<>();
+
+      // 5 byte streams, each 100 bytes. write 500 bytes means writing to each
+      // of them with 100 bytes.
+      for (int i = 0; i < 5; i++) {
+        ByteArrayOutputStream out = new ByteArrayOutputStream(100);
+        outputStreams.add(out);
+        groupOutputStream.addStream(out, 100);
+      }
+      assertEquals(0, groupOutputStream.getByteOffset());
+
+      String dataString = RandomStringUtils.randomAscii(500);
+      byte[] data = dataString.getBytes();
+      groupOutputStream.write(data, 0, data.length);
+      assertEquals(500, groupOutputStream.getByteOffset());
+
+      String res = "";
+      int offset = 0;
+      for (OutputStream stream : outputStreams) {
+        String subString = stream.toString();
+        res += subString;
+        assertEquals(dataString.substring(offset, offset + 100), subString);
+        offset += 100;
+      }
+      assertEquals(dataString, res);
+    }
+  }
+
+  @Test
+  public void testErrorWriteGroupOutputStream() throws Exception {
+    try (ChunkGroupOutputStream groupOutputStream =
+             new ChunkGroupOutputStream()) {
+      ArrayList<OutputStream> outputStreams = new ArrayList<>();
+
+      // 5 byte streams, each 100 bytes. write 500 bytes means writing to each
+      // of them with 100 bytes. all 5 streams makes up a ChunkGroupOutputStream
+      // with a total of 500 bytes in size
+      for (int i = 0; i < 5; i++) {
+        ByteArrayOutputStream out = new ByteArrayOutputStream(100);
+        outputStreams.add(out);
+        groupOutputStream.addStream(out, 100);
+      }
+      assertEquals(0, groupOutputStream.getByteOffset());
+
+      // first writes of 100 bytes should succeed
+      groupOutputStream.write(RandomStringUtils.randomAscii(100).getBytes());
+      assertEquals(100, groupOutputStream.getByteOffset());
+
+      // second writes of 500 bytes should fail, as there should be only 400
+      // bytes space left
+      // TODO : if we decide to take the 400 bytes instead in the future,
+      // other add more informative error code rather than exception, need to
+      // change this part.
+      exception.expect(Exception.class);
+      groupOutputStream.write(RandomStringUtils.randomAscii(500).getBytes());
+      assertEquals(100, groupOutputStream.getByteOffset());
+    }
+  }
+
+  @Test
+  public void testReadGroupInputStream() throws Exception {
+    try (ChunkGroupInputStream groupInputStream = new ChunkGroupInputStream()) {
+      ArrayList<ChunkInputStream> inputStreams = new ArrayList<>();
+
+      String dataString = RandomStringUtils.randomAscii(500);
+      byte[] buf = dataString.getBytes();
+      int offset = 0;
+      for (int i = 0; i < 5; i++) {
+        int tempOffset = offset;
+        ChunkInputStream in =
+            new ChunkInputStream(null, null, null, new ArrayList<>(), null) {
+              private ByteArrayInputStream in =
+                  new ByteArrayInputStream(buf, tempOffset, 100);
+
+              @Override
+              public void seek(long pos) throws IOException {
+                throw new UnsupportedOperationException();
+              }
+
+              @Override
+              public long getPos() throws IOException {
+                throw new UnsupportedOperationException();
+              }
+
+              @Override
+              public boolean seekToNewSource(long targetPos)
+                  throws IOException {
+                throw new UnsupportedOperationException();
+              }
+
+              @Override
+              public int read() throws IOException {
+                return in.read();
+              }
+
+              @Override
+              public int read(byte[] b, int off, int len) throws IOException {
+                return in.read(b, off, len);
+              }
+            };
+        inputStreams.add(in);
+        offset += 100;
+        groupInputStream.addStream(in, 100);
+      }
+
+      byte[] resBuf = new byte[500];
+      int len = groupInputStream.read(resBuf, 0, 500);
+
+      assertEquals(500, len);
+      assertEquals(dataString, new String(resBuf));
+    }
+  }
+
+  @Test
+  public void testErrorReadGroupInputStream() throws Exception {
+    try (ChunkGroupInputStream groupInputStream = new ChunkGroupInputStream()) {
+      ArrayList<ChunkInputStream> inputStreams = new ArrayList<>();
+
+      String dataString = RandomStringUtils.randomAscii(500);
+      byte[] buf = dataString.getBytes();
+      int offset = 0;
+      for (int i = 0; i < 5; i++) {
+        int tempOffset = offset;
+        ChunkInputStream in =
+            new ChunkInputStream(null, null, null, new ArrayList<>(), null) {
+              private ByteArrayInputStream in =
+                  new ByteArrayInputStream(buf, tempOffset, 100);
+
+              @Override
+              public void seek(long pos) throws IOException {
+                throw new UnsupportedOperationException();
+              }
+
+              @Override
+              public long getPos() throws IOException {
+                throw new UnsupportedOperationException();
+              }
+
+              @Override
+              public boolean seekToNewSource(long targetPos)
+                  throws IOException {
+                throw new UnsupportedOperationException();
+              }
+
+              @Override
+              public int read() throws IOException {
+                return in.read();
+              }
+
+              @Override
+              public int read(byte[] b, int off, int len) throws IOException {
+                return in.read(b, off, len);
+              }
+            };
+        inputStreams.add(in);
+        offset += 100;
+        groupInputStream.addStream(in, 100);
+      }
+
+      byte[] resBuf = new byte[600];
+      // read 300 bytes first
+      int len = groupInputStream.read(resBuf, 0, 340);
+      assertEquals(3, groupInputStream.getCurrentStreamIndex());
+      assertEquals(60, groupInputStream.getRemainingOfIndex(3));
+      assertEquals(340, len);
+      assertEquals(dataString.substring(0, 340),
+          new String(resBuf).substring(0, 340));
+
+      // read following 300 bytes, but only 200 left
+      len = groupInputStream.read(resBuf, 340, 260);
+      assertEquals(5, groupInputStream.getCurrentStreamIndex());
+      assertEquals(0, groupInputStream.getRemainingOfIndex(4));
+      assertEquals(160, len);
+      assertEquals(dataString, new String(resBuf).substring(0, 500));
+
+      // further read should get EOF
+      len = groupInputStream.read(resBuf, 0, 1);
+      // reached EOF, further read should get -1
+      assertEquals(-1, len);
+    }
+  }
+}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/061b1685/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestOzoneManagerHttpServer.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestOzoneManagerHttpServer.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestOzoneManagerHttpServer.java
new file mode 100644
index 0000000..3e11a13
--- /dev/null
+++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestOzoneManagerHttpServer.java
@@ -0,0 +1,141 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.ozone.om;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FileUtil;
+import org.apache.hadoop.hdfs.DFSConfigKeys;
+import org.apache.hadoop.hdfs.web.URLConnectionFactory;
+import org.apache.hadoop.http.HttpConfig;
+import org.apache.hadoop.http.HttpConfig.Policy;
+import org.apache.hadoop.net.NetUtils;
+import org.apache.hadoop.hdds.scm.ScmConfigKeys;
+import org.apache.hadoop.security.ssl.KeyStoreTestUtil;
+import org.apache.hadoop.test.GenericTestUtils;
+import org.junit.AfterClass;
+import org.junit.Assert;
+import org.junit.BeforeClass;
+import org.junit.Test;
+import org.junit.runner.RunWith;
+import org.junit.runners.Parameterized;
+import org.junit.runners.Parameterized.Parameters;
+
+import java.io.File;
+import java.net.InetSocketAddress;
+import java.net.URL;
+import java.net.URLConnection;
+import java.util.Arrays;
+import java.util.Collection;
+
+/**
+ * Test http server of OM with various HTTP option.
+ */
+@RunWith(value = Parameterized.class)
+public class TestOzoneManagerHttpServer {
+  private static final String BASEDIR = GenericTestUtils
+      .getTempPath(TestOzoneManagerHttpServer.class.getSimpleName());
+  private static String keystoresDir;
+  private static String sslConfDir;
+  private static Configuration conf;
+  private static URLConnectionFactory connectionFactory;
+
+  @Parameters public static Collection<Object[]> policy() {
+    Object[][] params = new Object[][] {
+        {HttpConfig.Policy.HTTP_ONLY},
+        {HttpConfig.Policy.HTTPS_ONLY},
+        {HttpConfig.Policy.HTTP_AND_HTTPS} };
+    return Arrays.asList(params);
+  }
+
+  private final HttpConfig.Policy policy;
+
+  public TestOzoneManagerHttpServer(Policy policy) {
+    super();
+    this.policy = policy;
+  }
+
+  @BeforeClass public static void setUp() throws Exception {
+    File base = new File(BASEDIR);
+    FileUtil.fullyDelete(base);
+    base.mkdirs();
+    conf = new Configuration();
+    keystoresDir = new File(BASEDIR).getAbsolutePath();
+    sslConfDir = KeyStoreTestUtil.getClasspathDir(
+        TestOzoneManagerHttpServer.class);
+    KeyStoreTestUtil.setupSSLConfig(keystoresDir, sslConfDir, conf, false);
+    connectionFactory =
+        URLConnectionFactory.newDefaultURLConnectionFactory(conf);
+    conf.set(DFSConfigKeys.DFS_CLIENT_HTTPS_KEYSTORE_RESOURCE_KEY,
+        KeyStoreTestUtil.getClientSSLConfigFileName());
+    conf.set(DFSConfigKeys.DFS_SERVER_HTTPS_KEYSTORE_RESOURCE_KEY,
+        KeyStoreTestUtil.getServerSSLConfigFileName());
+  }
+
+  @AfterClass public static void tearDown() throws Exception {
+    FileUtil.fullyDelete(new File(BASEDIR));
+    KeyStoreTestUtil.cleanupSSLConfig(keystoresDir, sslConfDir);
+  }
+
+  @Test public void testHttpPolicy() throws Exception {
+    conf.set(DFSConfigKeys.DFS_HTTP_POLICY_KEY, policy.name());
+    conf.set(ScmConfigKeys.OZONE_SCM_HTTPS_ADDRESS_KEY, "localhost:0");
+
+    InetSocketAddress addr = InetSocketAddress.createUnresolved("localhost", 0);
+    OzoneManagerHttpServer server = null;
+    try {
+      server = new OzoneManagerHttpServer(conf, null);
+      server.start();
+
+      Assert.assertTrue(implies(policy.isHttpEnabled(),
+          canAccess("http", server.getHttpAddress())));
+      Assert.assertTrue(
+          implies(!policy.isHttpEnabled(), server.getHttpAddress() == null));
+
+      Assert.assertTrue(implies(policy.isHttpsEnabled(),
+          canAccess("https", server.getHttpsAddress())));
+      Assert.assertTrue(
+          implies(!policy.isHttpsEnabled(), server.getHttpsAddress() == null));
+
+    } finally {
+      if (server != null) {
+        server.stop();
+      }
+    }
+  }
+
+  private static boolean canAccess(String scheme, InetSocketAddress addr) {
+    if (addr == null) {
+      return false;
+    }
+    try {
+      URL url =
+          new URL(scheme + "://" + NetUtils.getHostPortString(addr) + "/jmx");
+      URLConnection conn = connectionFactory.openConnection(url);
+      conn.connect();
+      conn.getContent();
+    } catch (Exception e) {
+      return false;
+    }
+    return true;
+  }
+
+  private static boolean implies(boolean a, boolean b) {
+    return !a || b;
+  }
+}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/061b1685/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/package-info.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/package-info.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/package-info.java
new file mode 100644
index 0000000..12fcf7c
--- /dev/null
+++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/package-info.java
@@ -0,0 +1,21 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ *  with the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ *  Unless required by applicable law or agreed to in writing, software
+ *  distributed under the License is distributed on an "AS IS" BASIS,
+ *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ *  See the License for the specific language governing permissions and
+ *  limitations under the License.
+ */
+package org.apache.hadoop.ozone.om;
+/**
+ * OM tests
+ */

http://git-wip-us.apache.org/repos/asf/hadoop/blob/061b1685/hadoop-ozone/ozonefs/src/test/java/org/apache/hadoop/fs/ozone/contract/OzoneContract.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/ozonefs/src/test/java/org/apache/hadoop/fs/ozone/contract/OzoneContract.java b/hadoop-ozone/ozonefs/src/test/java/org/apache/hadoop/fs/ozone/contract/OzoneContract.java
index 8417e46..b63e182 100644
--- a/hadoop-ozone/ozonefs/src/test/java/org/apache/hadoop/fs/ozone/contract/OzoneContract.java
+++ b/hadoop-ozone/ozonefs/src/test/java/org/apache/hadoop/fs/ozone/contract/OzoneContract.java
@@ -33,7 +33,7 @@ import org.apache.hadoop.ozone.web.handlers.UserArgs;
 import org.apache.hadoop.ozone.web.handlers.VolumeArgs;
 import org.apache.hadoop.ozone.web.interfaces.StorageHandler;
 import org.apache.hadoop.ozone.web.utils.OzoneUtils;
-import org.apache.hadoop.ozone.ksm.KSMConfigKeys;
+import org.apache.hadoop.ozone.om.OMConfigKeys;
 import org.apache.hadoop.hdds.scm.ScmConfigKeys;
 import org.junit.Assert;
 
@@ -109,7 +109,7 @@ class OzoneContract extends AbstractFSContract {
     String uri = String.format("%s://%s.%s/",
         OzoneConsts.OZONE_URI_SCHEME, bucketName, volumeName);
     getConf().set("fs.defaultFS", uri);
-    copyClusterConfigs(KSMConfigKeys.OZONE_KSM_ADDRESS_KEY);
+    copyClusterConfigs(OMConfigKeys.OZONE_OM_ADDRESS_KEY);
     copyClusterConfigs(ScmConfigKeys.OZONE_SCM_CLIENT_ADDRESS_KEY);
     return FileSystem.get(getConf());
   }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/061b1685/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/scm/cli/SQLCLI.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/scm/cli/SQLCLI.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/scm/cli/SQLCLI.java
index 26776c5..3884edd 100644
--- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/scm/cli/SQLCLI.java
+++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/scm/cli/SQLCLI.java
@@ -32,11 +32,11 @@ import org.apache.hadoop.hdfs.DFSUtil;
 import org.apache.hadoop.hdfs.DFSUtilClient;
 import org.apache.hadoop.hdds.conf.OzoneConfiguration;
 import org.apache.hadoop.hdds.protocol.DatanodeDetails;
-import org.apache.hadoop.ozone.protocol.proto.KeySpaceManagerProtocolProtos.OzoneAclInfo;
-import org.apache.hadoop.ozone.protocol.proto.KeySpaceManagerProtocolProtos.BucketInfo;
-import org.apache.hadoop.ozone.protocol.proto.KeySpaceManagerProtocolProtos.KeyInfo;
-import org.apache.hadoop.ozone.protocol.proto.KeySpaceManagerProtocolProtos.VolumeInfo;
-import org.apache.hadoop.ozone.protocol.proto.KeySpaceManagerProtocolProtos.VolumeList;
+import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OzoneAclInfo;
+import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.BucketInfo;
+import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.KeyInfo;
+import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.VolumeInfo;
+import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.VolumeList;
 import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
 import org.apache.hadoop.hdds.scm.container.common.helpers.ContainerInfo;
 import org.apache.hadoop.util.Tool;
@@ -60,10 +60,10 @@ import java.util.HashSet;
 import java.util.Set;
 
 import static org.apache.hadoop.ozone.OzoneConsts.CONTAINER_DB_SUFFIX;
-import static org.apache.hadoop.ozone.OzoneConsts.KSM_DB_NAME;
-import static org.apache.hadoop.ozone.OzoneConsts.KSM_USER_PREFIX;
-import static org.apache.hadoop.ozone.OzoneConsts.KSM_BUCKET_PREFIX;
-import static org.apache.hadoop.ozone.OzoneConsts.KSM_VOLUME_PREFIX;
+import static org.apache.hadoop.ozone.OzoneConsts.OM_DB_NAME;
+import static org.apache.hadoop.ozone.OzoneConsts.OM_USER_PREFIX;
+import static org.apache.hadoop.ozone.OzoneConsts.OM_BUCKET_PREFIX;
+import static org.apache.hadoop.ozone.OzoneConsts.OM_VOLUME_PREFIX;
 import static org.apache.hadoop.ozone.OzoneConsts.OPEN_CONTAINERS_DB;
 
 /**
@@ -120,7 +120,7 @@ public class SQLCLI  extends Configured implements Tool {
       "INSERT INTO openContainer (containerName, containerUsed) " +
           "VALUES (\"%s\", \"%s\")";
 
-  // for ksm.db
+  // for om.db
   private static final String CREATE_VOLUME_LIST =
       "CREATE TABLE volumeList (" +
           "userName TEXT NOT NULL," +
@@ -278,9 +278,9 @@ public class SQLCLI  extends Configured implements Tool {
     } else if (dbName.toString().equals(OPEN_CONTAINERS_DB)) {
       LOG.info("Converting open container DB");
       convertOpenContainerDB(dbPath, outPath);
-    } else if (dbName.toString().equals(KSM_DB_NAME)) {
-      LOG.info("Converting ksm DB");
-      convertKSMDB(dbPath, outPath);
+    } else if (dbName.toString().equals(OM_DB_NAME)) {
+      LOG.info("Converting om DB");
+      convertOMDB(dbPath, outPath);
     } else {
       LOG.error("Unrecognized db name {}", dbName);
     }
@@ -301,7 +301,7 @@ public class SQLCLI  extends Configured implements Tool {
   }
 
   /**
-   * Convert ksm.db to sqlite db file. With following schema.
+   * Convert om.db to sqlite db file. With following schema.
    * (* for primary key)
    *
    * 1. for key type USER, it contains a username and a list volumes
@@ -341,8 +341,8 @@ public class SQLCLI  extends Configured implements Tool {
    * @param outPath
    * @throws Exception
    */
-  private void convertKSMDB(Path dbPath, Path outPath) throws Exception {
-    LOG.info("Create tables for sql ksm db.");
+  private void convertOMDB(Path dbPath, Path outPath) throws Exception {
+    LOG.info("Create tables for sql om db.");
     File dbFile = dbPath.toFile();
     try (MetadataStore dbStore = MetadataStoreBuilder.newBuilder()
         .setConf(conf).setDbFile(dbFile).build();
@@ -357,7 +357,7 @@ public class SQLCLI  extends Configured implements Tool {
         String keyString = DFSUtilClient.bytes2String(key);
         KeyType type = getKeyType(keyString);
         try {
-          insertKSMDB(conn, type, keyString, value);
+          insertOMDB(conn, type, keyString, value);
         } catch (IOException | SQLException ex) {
           LOG.error("Exception inserting key {} type {}", keyString, type, ex);
         }
@@ -366,8 +366,8 @@ public class SQLCLI  extends Configured implements Tool {
     }
   }
 
-  private void insertKSMDB(Connection conn, KeyType type, String keyName,
-      byte[] value) throws IOException, SQLException {
+  private void insertOMDB(Connection conn, KeyType type, String keyName,
+                          byte[] value) throws IOException, SQLException {
     switch (type) {
     case USER:
       VolumeList volumeList = VolumeList.parseFrom(value);
@@ -412,16 +412,16 @@ public class SQLCLI  extends Configured implements Tool {
       executeSQL(conn, insertKeyInfo);
       break;
     default:
-      throw new IOException("Unknown key from ksm.db");
+      throw new IOException("Unknown key from om.db");
     }
   }
 
   private KeyType getKeyType(String key) {
-    if (key.startsWith(KSM_USER_PREFIX)) {
+    if (key.startsWith(OM_USER_PREFIX)) {
       return KeyType.USER;
-    } else if (key.startsWith(KSM_VOLUME_PREFIX)) {
-      return key.replaceFirst(KSM_VOLUME_PREFIX, "")
-          .contains(KSM_BUCKET_PREFIX) ? KeyType.BUCKET : KeyType.VOLUME;
+    } else if (key.startsWith(OM_VOLUME_PREFIX)) {
+      return key.replaceFirst(OM_VOLUME_PREFIX, "")
+          .contains(OM_BUCKET_PREFIX) ? KeyType.BUCKET : KeyType.VOLUME;
     }else {
       return KeyType.KEY;
     }


---------------------------------------------------------------------
To unsubscribe, e-mail: common-commits-unsubscribe@hadoop.apache.org
For additional commands, e-mail: common-commits-help@hadoop.apache.org


[50/50] [abbrv] hadoop git commit: 13310 v006

Posted by vi...@apache.org.
13310 v006


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/b210ee3e
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/b210ee3e
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/b210ee3e

Branch: refs/heads/HDFS-12090
Commit: b210ee3ec821abf5bb49aaa821c7c2dff9cbd028
Parents: eecb5ba
Author: Virajith Jalaparti <vi...@apache.org>
Authored: Mon Jul 9 11:24:06 2018 -0700
Committer: Virajith Jalaparti <vi...@apache.org>
Committed: Mon Jul 9 11:24:06 2018 -0700

----------------------------------------------------------------------
 .../BlockSyncTaskExecutionFeedback.java         |  67 ++++++
 .../protocol/SyncTaskExecutionOutcome.java      |  25 +++
 .../protocol/SyncTaskExecutionResult.java       |  46 +++++
 .../DatanodeProtocolClientSideTranslatorPB.java |   8 +-
 .../DatanodeProtocolServerSideTranslatorPB.java |   6 +-
 .../apache/hadoop/hdfs/protocolPB/PBHelper.java | 207 ++++++++++++++++++-
 .../server/blockmanagement/DatanodeManager.java |   4 +-
 .../hdfs/server/datanode/BPServiceActor.java    |   9 +-
 .../hdfs/server/namenode/FSNamesystem.java      |   7 +-
 .../hdfs/server/namenode/NameNodeRpcServer.java |   7 +-
 .../hdfs/server/protocol/BlockSyncTask.java     |  83 ++++++++
 .../protocol/BulkSyncTaskExecutionFeedback.java |  36 ++++
 .../hdfs/server/protocol/DatanodeProtocol.java  |  22 +-
 .../hdfs/server/protocol/SyncCommand.java       |  39 ++++
 .../src/main/proto/DatanodeProtocol.proto       |  88 +++++++-
 .../blockmanagement/TestDatanodeManager.java    |   2 +-
 .../TestNameNodePrunesMissingStorages.java      |   2 +-
 .../datanode/InternalDataNodeTestUtils.java     |   3 +-
 .../server/datanode/TestBPOfferService.java     |   5 +-
 .../hdfs/server/datanode/TestBlockRecovery.java |   4 +-
 .../server/datanode/TestDataNodeLifeline.java   |   9 +-
 .../TestDatanodeProtocolRetryPolicy.java        |   4 +-
 .../server/datanode/TestFsDatasetCache.java     |   4 +-
 .../hdfs/server/datanode/TestStorageReport.java |   4 +-
 .../server/namenode/NNThroughputBenchmark.java  |   8 +-
 .../hdfs/server/namenode/NameNodeAdapter.java   |   5 +-
 .../hdfs/server/namenode/TestDeadDatanode.java  |   4 +-
 27 files changed, 661 insertions(+), 47 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/b210ee3e/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/server/protocol/BlockSyncTaskExecutionFeedback.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/server/protocol/BlockSyncTaskExecutionFeedback.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/server/protocol/BlockSyncTaskExecutionFeedback.java
new file mode 100644
index 0000000..2e5393e
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/server/protocol/BlockSyncTaskExecutionFeedback.java
@@ -0,0 +1,67 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs.server.protocol;
+
+import java.util.UUID;
+
+/**
+ * Feedback for a BlockSyncTask.
+ */
+public class BlockSyncTaskExecutionFeedback {
+
+  private UUID syncTaskId;
+  private SyncTaskExecutionOutcome outcome;
+  private SyncTaskExecutionResult result;
+  private String syncMountId;
+
+  public BlockSyncTaskExecutionFeedback(UUID syncTaskId,
+      SyncTaskExecutionOutcome outcome, SyncTaskExecutionResult result,
+      String syncMountId) {
+    this.syncTaskId = syncTaskId;
+    this.outcome = outcome;
+    this.result = result;
+    this.syncMountId = syncMountId;
+  }
+
+  public static BlockSyncTaskExecutionFeedback finishedSuccessfully(
+      UUID syncTaskId, String syncMountId, SyncTaskExecutionResult result) {
+    return new BlockSyncTaskExecutionFeedback(syncTaskId,
+        SyncTaskExecutionOutcome.FINISHED_SUCCESSFULLY, result, syncMountId);
+  }
+
+  public static BlockSyncTaskExecutionFeedback failedWithException(
+      UUID syncTaskId, String syncMountId, Exception e) {
+    return new BlockSyncTaskExecutionFeedback(syncTaskId,
+        SyncTaskExecutionOutcome.EXCEPTION, null, syncMountId);
+  }
+
+  public UUID getSyncTaskId() {
+    return syncTaskId;
+  }
+
+  public SyncTaskExecutionOutcome getOutcome() {
+    return outcome;
+  }
+
+  public SyncTaskExecutionResult getResult() {
+    return result;
+  }
+
+  public String getSyncMountId() {
+    return syncMountId;
+  }
+}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/b210ee3e/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/server/protocol/SyncTaskExecutionOutcome.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/server/protocol/SyncTaskExecutionOutcome.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/server/protocol/SyncTaskExecutionOutcome.java
new file mode 100644
index 0000000..492575b
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/server/protocol/SyncTaskExecutionOutcome.java
@@ -0,0 +1,25 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs.server.protocol;
+
+/**
+ * SyncTaskExecutionOutcome is whether the SyncTask was successful or not.
+ */
+public enum SyncTaskExecutionOutcome {
+  FINISHED_SUCCESSFULLY,
+  EXCEPTION
+}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/b210ee3e/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/server/protocol/SyncTaskExecutionResult.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/server/protocol/SyncTaskExecutionResult.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/server/protocol/SyncTaskExecutionResult.java
new file mode 100644
index 0000000..b623dc5
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/server/protocol/SyncTaskExecutionResult.java
@@ -0,0 +1,46 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs.server.protocol;
+
+import java.nio.ByteBuffer;
+
+/**
+ * Result of a SyncTask.
+ */
+public class SyncTaskExecutionResult {
+
+  /** result is the opaque byte stream result of a task. e.g. PartHandle */
+  private ByteBuffer result;
+  private Long numberOfBytes;
+
+  public SyncTaskExecutionResult(ByteBuffer result, Long numberOfBytes) {
+    this.result = result;
+    this.numberOfBytes = numberOfBytes;
+  }
+
+  public static SyncTaskExecutionResult emptyResult() {
+    return new SyncTaskExecutionResult(ByteBuffer.wrap(new byte[0]), 0L);
+  }
+
+  public ByteBuffer getResult() {
+    return result;
+  }
+
+  public Long getNumberOfBytes() {
+    return numberOfBytes;
+  }
+}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/b210ee3e/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/DatanodeProtocolClientSideTranslatorPB.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/DatanodeProtocolClientSideTranslatorPB.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/DatanodeProtocolClientSideTranslatorPB.java
index 9cc4516..20b314c 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/DatanodeProtocolClientSideTranslatorPB.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/DatanodeProtocolClientSideTranslatorPB.java
@@ -48,6 +48,7 @@ import org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.StorageBlock
 import org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.StorageReceivedDeletedBlocksProto;
 import org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.VersionRequestProto;
 import org.apache.hadoop.hdfs.server.protocol.BlockReportContext;
+import org.apache.hadoop.hdfs.server.protocol.BulkSyncTaskExecutionFeedback;
 import org.apache.hadoop.hdfs.server.protocol.DatanodeCommand;
 import org.apache.hadoop.hdfs.server.protocol.DatanodeProtocol;
 import org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration;
@@ -138,7 +139,8 @@ public class DatanodeProtocolClientSideTranslatorPB implements
       VolumeFailureSummary volumeFailureSummary,
       boolean requestFullBlockReportLease,
       @Nonnull SlowPeerReports slowPeers,
-      @Nonnull SlowDiskReports slowDisks) throws IOException {
+      @Nonnull SlowDiskReports slowDisks,
+      BulkSyncTaskExecutionFeedback feedback) throws IOException {
     HeartbeatRequestProto.Builder builder = HeartbeatRequestProto.newBuilder()
         .setRegistration(PBHelper.convert(registration))
         .setXmitsInProgress(xmitsInProgress).setXceiverCount(xceiverCount)
@@ -161,6 +163,10 @@ public class DatanodeProtocolClientSideTranslatorPB implements
     if (slowDisks.haveSlowDisks()) {
       builder.addAllSlowDisks(PBHelper.convertSlowDiskInfo(slowDisks));
     }
+    if(feedback != null && !feedback.getFeedbacks().isEmpty()) {
+      builder.setBulkSyncTaskExecutionFeedback(PBHelper.convert(feedback));
+    }
+
     HeartbeatResponseProto resp;
     try {
       resp = rpcProxy.sendHeartbeat(NULL_CONTROLLER, builder.build());

http://git-wip-us.apache.org/repos/asf/hadoop/blob/b210ee3e/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/DatanodeProtocolServerSideTranslatorPB.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/DatanodeProtocolServerSideTranslatorPB.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/DatanodeProtocolServerSideTranslatorPB.java
index 5cba284..a51ce85 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/DatanodeProtocolServerSideTranslatorPB.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/DatanodeProtocolServerSideTranslatorPB.java
@@ -109,6 +109,7 @@ public class DatanodeProtocolServerSideTranslatorPB implements
   @Override
   public HeartbeatResponseProto sendHeartbeat(RpcController controller,
       HeartbeatRequestProto request) throws ServiceException {
+
     HeartbeatResponse response;
     try {
       final StorageReport[] report = PBHelperClient.convertStorageReports(
@@ -122,7 +123,10 @@ public class DatanodeProtocolServerSideTranslatorPB implements
           request.getXceiverCount(), request.getFailedVolumes(),
           volumeFailureSummary, request.getRequestFullBlockReportLease(),
           PBHelper.convertSlowPeerInfo(request.getSlowPeersList()),
-          PBHelper.convertSlowDiskInfo(request.getSlowDisksList()));
+          PBHelper.convertSlowDiskInfo(request.getSlowDisksList()),
+          PBHelper.convertBulkSyncTaskExecutionFeedback(
+              request.getBulkSyncTaskExecutionFeedback())
+      );
     } catch (IOException e) {
       throw new ServiceException(e);
     }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/b210ee3e/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelper.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelper.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelper.java
index ac01348..1de286d 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelper.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelper.java
@@ -17,6 +17,9 @@
  */
 package org.apache.hadoop.hdfs.protocolPB;
 
+import java.net.URI;
+import java.net.URISyntaxException;
+import java.nio.ByteBuffer;
 import java.util.ArrayList;
 import java.util.Arrays;
 import java.util.Collection;
@@ -24,9 +27,10 @@ import java.util.Collections;
 import java.util.HashMap;
 import java.util.List;
 import java.util.Map;
+import java.util.UUID;
+import java.util.stream.Collectors;
 
 import com.google.protobuf.ByteString;
-
 import org.apache.hadoop.fs.StorageType;
 import org.apache.hadoop.ha.HAServiceProtocol.HAServiceState;
 import org.apache.hadoop.hdfs.DFSUtilClient;
@@ -43,38 +47,44 @@ import org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockCommand
 import org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockECReconstructionCommandProto;
 import org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockIdCommandProto;
 import org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockRecoveryCommandProto;
+import org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReportContextProto;
+import org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockSyncTaskProto;
+import org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BulkSyncTaskExecutionFeedbackProto;
 import org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeCommandProto;
 import org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto;
 import org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.FinalizeCommandProto;
 import org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.KeyUpdateCommandProto;
 import org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReceivedDeletedBlockInfoProto;
 import org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.RegisterCommandProto;
-import org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos
-    .SlowDiskReportProto;
+import org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.SlowDiskReportProto;
 import org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.SlowPeerReportProto;
+import org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.SyncCommandProto;
+import org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.SyncTaskExecutionFeedbackProto;
+import org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.SyncTaskExecutionOutcomeProto;
+import org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.SyncTaskExecutionResultProto;
+import org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.SyncTaskIdProto;
 import org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.VolumeFailureSummaryProto;
-import org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReportContextProto;
 import org.apache.hadoop.hdfs.protocol.proto.ErasureCodingProtos.BlockECReconstructionInfoProto;
 import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos;
 import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto;
-import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto;
-import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ProvidedStorageLocationProto;
-import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageUuidsProto;
 import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfosProto;
+import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto;
 import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto;
+import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ProvidedStorageLocationProto;
 import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto;
 import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProto;
+import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageUuidsProto;
 import org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.BlockKeyProto;
 import org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.BlockWithLocationsProto;
 import org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.BlocksWithLocationsProto;
 import org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.CheckpointCommandProto;
 import org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.CheckpointSignatureProto;
 import org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.ExportedBlockKeysProto;
+import org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.NNHAStatusHeartbeatProto;
 import org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.NamenodeCommandProto;
 import org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.NamenodeRegistrationProto;
 import org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.NamenodeRegistrationProto.NamenodeRoleProto;
 import org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.NamespaceInfoProto;
-import org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.NNHAStatusHeartbeatProto;
 import org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.RecoveringBlockProto;
 import org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.RemoteEditLogManifestProto;
 import org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.RemoteEditLogProto;
@@ -89,18 +99,23 @@ import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.NodeType;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.ReplicaState;
 import org.apache.hadoop.hdfs.server.common.StorageInfo;
 import org.apache.hadoop.hdfs.server.namenode.CheckpointSignature;
+import org.apache.hadoop.hdfs.server.protocol.BlockSyncTask;
+import org.apache.hadoop.hdfs.server.protocol.BlockSyncTaskExecutionFeedback;
+import org.apache.hadoop.hdfs.server.protocol.SyncTaskExecutionOutcome;
+import org.apache.hadoop.hdfs.server.protocol.SyncTaskExecutionResult;
 import org.apache.hadoop.hdfs.server.protocol.BalancerBandwidthCommand;
 import org.apache.hadoop.hdfs.server.protocol.BlockCommand;
 import org.apache.hadoop.hdfs.server.protocol.BlockECReconstructionCommand;
+import org.apache.hadoop.hdfs.server.protocol.BlockECReconstructionCommand.BlockECReconstructionInfo;
 import org.apache.hadoop.hdfs.server.protocol.BlockIdCommand;
 import org.apache.hadoop.hdfs.server.protocol.BlockRecoveryCommand;
-import org.apache.hadoop.hdfs.server.protocol.BlockECReconstructionCommand.BlockECReconstructionInfo;
 import org.apache.hadoop.hdfs.server.protocol.BlockRecoveryCommand.RecoveringBlock;
 import org.apache.hadoop.hdfs.server.protocol.BlockRecoveryCommand.RecoveringStripedBlock;
 import org.apache.hadoop.hdfs.server.protocol.BlockReportContext;
 import org.apache.hadoop.hdfs.server.protocol.BlocksWithLocations;
 import org.apache.hadoop.hdfs.server.protocol.BlocksWithLocations.BlockWithLocations;
 import org.apache.hadoop.hdfs.server.protocol.BlocksWithLocations.StripedBlockWithLocations;
+import org.apache.hadoop.hdfs.server.protocol.BulkSyncTaskExecutionFeedback;
 import org.apache.hadoop.hdfs.server.protocol.CheckpointCommand;
 import org.apache.hadoop.hdfs.server.protocol.DatanodeCommand;
 import org.apache.hadoop.hdfs.server.protocol.DatanodeProtocol;
@@ -119,7 +134,9 @@ import org.apache.hadoop.hdfs.server.protocol.RemoteEditLog;
 import org.apache.hadoop.hdfs.server.protocol.RemoteEditLogManifest;
 import org.apache.hadoop.hdfs.server.protocol.SlowDiskReports;
 import org.apache.hadoop.hdfs.server.protocol.SlowPeerReports;
+import org.apache.hadoop.hdfs.server.protocol.SyncCommand;
 import org.apache.hadoop.hdfs.server.protocol.VolumeFailureSummary;
+import org.apache.hadoop.ipc.ClientId;
 
 /**
  * Utilities for converting protobuf classes to and from implementation classes
@@ -469,11 +486,52 @@ public class PBHelper {
       return PBHelper.convert(proto.getBlkIdCmd());
     case BlockECReconstructionCommand:
       return PBHelper.convert(proto.getBlkECReconstructionCmd());
+    case SyncCommand:
+      return PBHelper.convert(proto.getSyncCommand());
     default:
       return null;
     }
   }
-  
+
+  private static SyncCommand convert(SyncCommandProto backupCommand) {
+    List<BlockSyncTaskProto> syncTasksProtoList =
+        backupCommand.getSyncTasksList();
+    List<BlockSyncTask> syncTasksList =
+        new ArrayList(syncTasksProtoList.size());
+    for (BlockSyncTaskProto syncTaskProto : syncTasksProtoList) {
+      syncTasksList.add(convertSyncTask(syncTaskProto));
+    }
+
+    return new SyncCommand(DatanodeProtocol.DNA_BACKUP, syncTasksList);
+  }
+
+  private static BlockSyncTask convertSyncTask(
+      BlockSyncTaskProto syncTaskProto) {
+    SyncTaskIdProto syncTaskIdProto = syncTaskProto.getSyncTaskId();
+    UUID syncTaskId = convert(syncTaskIdProto);
+    try {
+      return new BlockSyncTask(syncTaskId,
+          new URI(syncTaskProto.getUri()),
+          PBHelperClient.convertLocatedBlocks(
+              syncTaskProto.getLocatedBlocksList()),
+          syncTaskProto.getPartNumber(),
+          syncTaskProto.getUploadHandle().toByteArray(),
+          syncTaskProto.getOffset(),
+          syncTaskProto.getLength(),
+          syncTaskIdProto.getSyncMountId());
+    } catch (URISyntaxException e) {
+      throw new IllegalArgumentException();
+    }
+  }
+
+  public static UUID convert(SyncTaskIdProto syncTaskIdProto) {
+    byte[] clientId = syncTaskIdProto.getSyncTaskId().toByteArray();
+    long syncTaskIdMsb = ClientId.getMsb(clientId);
+    long syncTaskIdLsb = ClientId.getLsb(clientId);
+    return new UUID(syncTaskIdMsb, syncTaskIdLsb);
+  }
+
+
   public static BalancerBandwidthCommandProto convert(
       BalancerBandwidthCommand bbCmd) {
     return BalancerBandwidthCommandProto.newBuilder()
@@ -603,6 +661,10 @@ public class PBHelper {
           .setBlkECReconstructionCmd(
               convert((BlockECReconstructionCommand) datanodeCommand));
       break;
+    case DatanodeProtocol.DNA_BACKUP:
+      builder.setCmdType(DatanodeCommandProto.Type.SyncCommand)
+          .setSyncCommand(convert((SyncCommand) datanodeCommand));
+      break;
     case DatanodeProtocol.DNA_UNKNOWN: //Not expected
     default:
       builder.setCmdType(DatanodeCommandProto.Type.NullDatanodeCommand);
@@ -1124,4 +1186,129 @@ public class PBHelper {
 
     return new FileRegion(block, providedStorageLocation);
   }
+
+  private static SyncCommandProto convert(SyncCommand syncCommand) {
+    SyncCommandProto.Builder builder = SyncCommandProto.newBuilder();
+
+    List<BlockSyncTaskProto> syncTaskProtos = syncCommand.getSyncTasks()
+        .stream()
+        .map(syncTask -> convert(syncTask))
+        .collect(Collectors.toList());
+
+    builder.addAllSyncTasks(syncTaskProtos);
+
+    return builder.build();
+  }
+
+  private static BlockSyncTaskProto convert(BlockSyncTask blockSyncTask) {
+    BlockSyncTaskProto.Builder builder = BlockSyncTaskProto.newBuilder();
+    builder.addAllLocatedBlocks(
+        PBHelperClient.convertLocatedBlocks2(blockSyncTask.getLocatedBlocks()));
+    builder.setUploadHandle(
+        ByteString.copyFrom(blockSyncTask.getUploadHandle()));
+    builder.setPartNumber(blockSyncTask.getPartNumber());
+    builder.setUri(blockSyncTask.getRemoteURI().toString());
+    builder.setOffset(blockSyncTask.getOffset());
+    builder.setLength(blockSyncTask.getLength());
+
+    return builder.build();
+  }
+
+  public static SyncTaskIdProto convert(UUID syncTaskId, String syncMountId) {
+    SyncTaskIdProto.Builder builder = SyncTaskIdProto.newBuilder();
+    ByteBuffer syncTaskIdBytes = ByteBuffer.wrap(new byte[16]);
+    syncTaskIdBytes.putLong(syncTaskId.getMostSignificantBits());
+    syncTaskIdBytes.putLong(syncTaskId.getLeastSignificantBits());
+    builder.setSyncTaskId(ByteString.copyFrom(syncTaskIdBytes.array()));
+    builder.setSyncMountId(syncMountId);
+    return builder.build();
+  }
+
+
+  public static BulkSyncTaskExecutionFeedbackProto convert(
+      BulkSyncTaskExecutionFeedback bulkFeedback) {
+    return BulkSyncTaskExecutionFeedbackProto.newBuilder()
+        .addAllFeedbacks(bulkFeedback.getFeedbacks().stream()
+            .map(f -> convert(f)).collect(Collectors.toList()))
+        .build();
+  }
+
+  public static SyncTaskExecutionFeedbackProto convert(
+      BlockSyncTaskExecutionFeedback feedback) {
+    SyncTaskExecutionFeedbackProto.Builder builder =
+        SyncTaskExecutionFeedbackProto.newBuilder()
+            .setSyncTaskId(
+                convert(feedback.getSyncTaskId(), feedback.getSyncMountId()))
+            .setOutcome(convert(feedback.getOutcome()));
+    if (feedback.getResult() != null) {
+      builder.setResult(convert(feedback.getResult()));
+    }
+    return builder.build();
+  }
+
+  public static SyncTaskExecutionOutcomeProto convert(
+      SyncTaskExecutionOutcome outcome) {
+    switch (outcome) {
+    case FINISHED_SUCCESSFULLY:
+      return SyncTaskExecutionOutcomeProto.FINISHED_SUCCESSFULLY;
+    case EXCEPTION:
+      return SyncTaskExecutionOutcomeProto.EXCEPTION;
+    default:
+      throw new IllegalArgumentException(
+          "Unknown SyncTaskExecutionOutcome: " + outcome);
+    }
+  }
+
+  public static SyncTaskExecutionResultProto convert(
+      SyncTaskExecutionResult result) {
+    SyncTaskExecutionResultProto.Builder builder =
+        SyncTaskExecutionResultProto.newBuilder();
+    if (result.getResult() != null) {
+      builder.setResult(ByteString.copyFrom(result.getResult()));
+    }
+    if (result.getNumberOfBytes() != null) {
+      builder.setNumberOfBytes(result.getNumberOfBytes());
+    }
+    return builder.build();
+  }
+
+  public static BulkSyncTaskExecutionFeedback convertBulkSyncTaskExecutionFeedback(
+      BulkSyncTaskExecutionFeedbackProto bulkSyncTaskExecutionFeedback) {
+    return new BulkSyncTaskExecutionFeedback(
+        bulkSyncTaskExecutionFeedback.getFeedbacksList().stream()
+            .map(feedback -> convert(feedback)).collect(Collectors.toList()));
+  }
+
+  public static BlockSyncTaskExecutionFeedback convert(
+      SyncTaskExecutionFeedbackProto feedback) {
+    return new BlockSyncTaskExecutionFeedback(convert(feedback.getSyncTaskId()),
+        convert(feedback.getOutcome()),
+        feedback.hasResult() ? convert(feedback.getResult()) : null,
+        feedback.getSyncTaskId().getSyncMountId());
+  }
+
+  public static SyncTaskExecutionOutcome convert(
+      SyncTaskExecutionOutcomeProto outcome) {
+    switch (outcome) {
+    case FINISHED_SUCCESSFULLY:
+      return SyncTaskExecutionOutcome.FINISHED_SUCCESSFULLY;
+    case EXCEPTION:
+      return SyncTaskExecutionOutcome.EXCEPTION;
+    default:
+      throw new IllegalArgumentException(
+          "Unknown SyncTaskExecutionOutcomeProto: " + outcome);
+    }
+  }
+
+  public static SyncTaskExecutionResult convert(
+      SyncTaskExecutionResultProto result) {
+    byte[] bytes = null;
+    if (result.getResult() != null) {
+      bytes = result.getResult().toByteArray();
+    }
+
+    return new SyncTaskExecutionResult(
+        ByteBuffer.wrap(bytes).asReadOnlyBuffer(), result.getNumberOfBytes());
+  }
+
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/b210ee3e/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java
index 9ebc693..bea5fe0 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java
@@ -1631,7 +1631,9 @@ public class DatanodeManager {
       int maxTransfers, int failedVolumes,
       VolumeFailureSummary volumeFailureSummary,
       @Nonnull SlowPeerReports slowPeers,
-      @Nonnull SlowDiskReports slowDisks) throws IOException {
+      @Nonnull SlowDiskReports slowDisks,
+      BulkSyncTaskExecutionFeedback bulkSyncTaskExecutionFeedback)
+      throws IOException {
     final DatanodeDescriptor nodeinfo;
     try {
       nodeinfo = getDatanode(nodeReg);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/b210ee3e/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPServiceActor.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPServiceActor.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPServiceActor.java
index a94d2df..0e8144a 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPServiceActor.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPServiceActor.java
@@ -50,6 +50,7 @@ import org.apache.hadoop.hdfs.protocolPB.DatanodeProtocolClientSideTranslatorPB;
 import org.apache.hadoop.hdfs.server.common.IncorrectVersionException;
 import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
 import org.apache.hadoop.hdfs.server.protocol.BlockReportContext;
+import org.apache.hadoop.hdfs.server.protocol.BulkSyncTaskExecutionFeedback;
 import org.apache.hadoop.hdfs.server.protocol.DatanodeCommand;
 import org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration;
 import org.apache.hadoop.hdfs.server.protocol.DatanodeStorage;
@@ -511,6 +512,11 @@ class BPServiceActor implements Runnable {
         outliersReportDue && dn.getDiskMetrics() != null ?
             SlowDiskReports.create(dn.getDiskMetrics().getDiskOutliersStats()) :
             SlowDiskReports.EMPTY_REPORT;
+
+    // TODO - collect feedback from SyncTasks here.
+    BulkSyncTaskExecutionFeedback bulkSyncTaskExecutionFeedback =
+        new BulkSyncTaskExecutionFeedback(Collections.emptyList());
+
     HeartbeatResponse response = bpNamenode.sendHeartbeat(bpRegistration,
         reports,
         dn.getFSDataset().getCacheCapacity(),
@@ -521,7 +527,8 @@ class BPServiceActor implements Runnable {
         volumeFailureSummary,
         requestBlockReportLease,
         slowPeers,
-        slowDisks);
+        slowDisks,
+        bulkSyncTaskExecutionFeedback);
 
     if (outliersReportDue) {
       // If the report was due and successfully sent, schedule the next one.

http://git-wip-us.apache.org/repos/asf/hadoop/blob/b210ee3e/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
index f94f6d0..b23a528 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
@@ -101,6 +101,7 @@ import org.apache.hadoop.hdfs.protocol.ZoneReencryptionStatus;
 import org.apache.hadoop.hdfs.protocol.SnapshotDiffReportListing;
 import org.apache.hadoop.hdfs.protocol.SnapshotDiffReport;
 import org.apache.hadoop.hdfs.server.namenode.metrics.ReplicatedBlocksMBean;
+import org.apache.hadoop.hdfs.server.protocol.BulkSyncTaskExecutionFeedback;
 import org.apache.hadoop.hdfs.server.protocol.SlowDiskReports;
 import static org.apache.hadoop.util.Time.now;
 import static org.apache.hadoop.util.Time.monotonicNow;
@@ -3856,7 +3857,9 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,
       VolumeFailureSummary volumeFailureSummary,
       boolean requestFullBlockReportLease,
       @Nonnull SlowPeerReports slowPeers,
-      @Nonnull SlowDiskReports slowDisks) throws IOException {
+      @Nonnull SlowDiskReports slowDisks,
+      BulkSyncTaskExecutionFeedback bulkSyncTaskExecutionFeedback)
+      throws IOException {
     readLock();
     try {
       //get datanode commands
@@ -3865,7 +3868,7 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,
       DatanodeCommand[] cmds = blockManager.getDatanodeManager().handleHeartbeat(
           nodeReg, reports, getBlockPoolId(), cacheCapacity, cacheUsed,
           xceiverCount, maxTransfer, failedVolumes, volumeFailureSummary,
-          slowPeers, slowDisks);
+          slowPeers, slowDisks, bulkSyncTaskExecutionFeedback);
       long blockReportLeaseId = 0;
       if (requestFullBlockReportLease) {
         blockReportLeaseId =  blockManager.requestBlockReportLeaseId(nodeReg);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/b210ee3e/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java
index c5b9d5a..413a14a 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java
@@ -157,6 +157,7 @@ import org.apache.hadoop.hdfs.server.namenode.NameNode.OperationCategory;
 import org.apache.hadoop.hdfs.server.namenode.metrics.NameNodeMetrics;
 import org.apache.hadoop.hdfs.server.protocol.BlockReportContext;
 import org.apache.hadoop.hdfs.server.protocol.BlocksWithLocations;
+import org.apache.hadoop.hdfs.server.protocol.BulkSyncTaskExecutionFeedback;
 import org.apache.hadoop.hdfs.server.protocol.DatanodeCommand;
 import org.apache.hadoop.hdfs.server.protocol.DatanodeProtocol;
 import org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration;
@@ -1498,13 +1499,15 @@ public class NameNodeRpcServer implements NamenodeProtocols {
       int failedVolumes, VolumeFailureSummary volumeFailureSummary,
       boolean requestFullBlockReportLease,
       @Nonnull SlowPeerReports slowPeers,
-      @Nonnull SlowDiskReports slowDisks) throws IOException {
+      @Nonnull SlowDiskReports slowDisks,
+      BulkSyncTaskExecutionFeedback bulkSyncTaskExecutionFeedback)
+      throws IOException {
     checkNNStartup();
     verifyRequest(nodeReg);
     return namesystem.handleHeartbeat(nodeReg, report,
         dnCacheCapacity, dnCacheUsed, xceiverCount, xmitsInProgress,
         failedVolumes, volumeFailureSummary, requestFullBlockReportLease,
-        slowPeers, slowDisks);
+        slowPeers, slowDisks, bulkSyncTaskExecutionFeedback);
   }
 
   @Override // DatanodeProtocol

http://git-wip-us.apache.org/repos/asf/hadoop/blob/b210ee3e/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/BlockSyncTask.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/BlockSyncTask.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/BlockSyncTask.java
new file mode 100644
index 0000000..875a409
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/BlockSyncTask.java
@@ -0,0 +1,83 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs.server.protocol;
+import org.apache.hadoop.hdfs.protocol.LocatedBlock;
+import java.net.URI;
+import java.util.List;
+import java.util.UUID;
+
+/**
+ * A BlockSyncTask is an operation that is sent to the datanodes to copy
+ * blocks to an external storage endpoint as a part of an orchestrated
+ * synchronization across multiple datanodes.
+ * BlockSyncTask is intended to be an immutable POJO.
+ */
+public class BlockSyncTask {
+  private final UUID syncTaskId;
+  private final URI remoteURI;
+  private final List<LocatedBlock> locatedBlocks;
+  private String syncMountId;
+  private final int partNumber;
+  private byte[] uploadHandle;
+  private final int offset;
+  private final long length;
+
+  public BlockSyncTask(UUID syncTaskId, URI remoteURI,
+      List<LocatedBlock> locatedBlocks, Integer partNumber, byte[] uploadHandle,
+      int offset, long length, String syncMountId) {
+    this.syncTaskId = syncTaskId;
+    this.remoteURI = remoteURI;
+    this.locatedBlocks = locatedBlocks;
+    this.syncMountId = syncMountId;
+    this.partNumber = partNumber;
+    this.uploadHandle = uploadHandle;
+    this.offset = offset;
+    this.length = length;
+  }
+
+  public int getPartNumber() {
+    return partNumber;
+  }
+
+  public byte[] getUploadHandle() {
+    return uploadHandle;
+  }
+
+  public int getOffset() {
+    return offset;
+  }
+
+  public long getLength() {
+    return length;
+  }
+
+  public UUID getSyncTaskId() {
+    return syncTaskId;
+  }
+
+  public URI getRemoteURI() {
+    return remoteURI;
+  }
+
+  public List<LocatedBlock> getLocatedBlocks() {
+    return locatedBlocks;
+  }
+
+  public String getSyncMountId() {
+    return syncMountId;
+  }
+}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hadoop/blob/b210ee3e/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/BulkSyncTaskExecutionFeedback.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/BulkSyncTaskExecutionFeedback.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/BulkSyncTaskExecutionFeedback.java
new file mode 100644
index 0000000..0d459e8
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/BulkSyncTaskExecutionFeedback.java
@@ -0,0 +1,36 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs.server.protocol;
+
+import java.util.Collection;
+
+/**
+ * Feedback for a collection of {@link BlockSyncTask}s.
+ */
+public class BulkSyncTaskExecutionFeedback {
+
+  private Collection<BlockSyncTaskExecutionFeedback> feedbacks;
+
+  public BulkSyncTaskExecutionFeedback(
+      Collection<BlockSyncTaskExecutionFeedback> feedbacks) {
+    this.feedbacks = feedbacks;
+  }
+
+  public Collection<BlockSyncTaskExecutionFeedback> getFeedbacks() {
+    return feedbacks;
+  }
+}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/b210ee3e/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/DatanodeProtocol.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/DatanodeProtocol.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/DatanodeProtocol.java
index 1f55100..d69dee7 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/DatanodeProtocol.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/DatanodeProtocol.java
@@ -79,6 +79,7 @@ public interface DatanodeProtocol {
   final static int DNA_CACHE = 9;      // cache blocks
   final static int DNA_UNCACHE = 10;   // uncache blocks
   final static int DNA_ERASURE_CODING_RECONSTRUCTION = 11; // erasure coding reconstruction command
+  final static int DNA_BACKUP = 14; // back up data to PROVIDED stores.
 
   /** 
    * Register Datanode.
@@ -101,6 +102,8 @@ public interface DatanodeProtocol {
    * or to copy them to other DataNodes, etc.
    * @param registration datanode registration information
    * @param reports utilization report per storage
+   * @param cacheCapacity
+   * @param cacheUsed
    * @param xmitsInProgress number of transfers from this datanode to others
    * @param xceiverCount number of active transceiver threads
    * @param failedVolumes number of failed volumes
@@ -110,20 +113,19 @@ public interface DatanodeProtocol {
    * @param slowPeers Details of peer DataNodes that were detected as being
    *                  slow to respond to packet writes. Empty report if no
    *                  slow peers were detected by the DataNode.
+   * @param bulkSyncTaskExecutionFeedback Result of the execution of the
+   *                                      sync tasks.
    * @throws IOException on error
    */
   @Idempotent
   public HeartbeatResponse sendHeartbeat(DatanodeRegistration registration,
-                                       StorageReport[] reports,
-                                       long dnCacheCapacity,
-                                       long dnCacheUsed,
-                                       int xmitsInProgress,
-                                       int xceiverCount,
-                                       int failedVolumes,
-                                       VolumeFailureSummary volumeFailureSummary,
-                                       boolean requestFullBlockReportLease,
-                                       @Nonnull SlowPeerReports slowPeers,
-                                       @Nonnull SlowDiskReports slowDisks)
+      StorageReport[] reports, long cacheCapacity, long cacheUsed,
+      int xmitsInProgress, int xceiverCount, int failedVolumes,
+      VolumeFailureSummary volumeFailureSummary,
+      boolean requestFullBlockReportLease,
+      @Nonnull SlowPeerReports slowPeers,
+      @Nonnull SlowDiskReports slowDisks,
+      BulkSyncTaskExecutionFeedback bulkSyncTaskExecutionFeedback)
       throws IOException;
 
   /**

http://git-wip-us.apache.org/repos/asf/hadoop/blob/b210ee3e/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/SyncCommand.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/SyncCommand.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/SyncCommand.java
new file mode 100644
index 0000000..7e2c242
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/SyncCommand.java
@@ -0,0 +1,39 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs.server.protocol;
+
+import java.util.List;
+
+/**
+ * A SyncCommand is an instruction to a DataNode to move the
+ * give file to specified target DataNodes provided storage.
+ */
+public class SyncCommand extends DatanodeCommand {
+
+  private final List<BlockSyncTask> syncTasks;
+
+  public SyncCommand(int action, List<BlockSyncTask> syncTasks) {
+    super(action);
+    this.syncTasks = syncTasks;
+  }
+
+  public List<BlockSyncTask> getSyncTasks() {
+    return syncTasks;
+  }
+
+}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/b210ee3e/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/DatanodeProtocol.proto
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/DatanodeProtocol.proto b/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/DatanodeProtocol.proto
index bf0df5b..5d6ef41 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/DatanodeProtocol.proto
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/DatanodeProtocol.proto
@@ -60,6 +60,7 @@ message DatanodeCommandProto {
     NullDatanodeCommand = 7;
     BlockIdCommand = 8;
     BlockECReconstructionCommand = 9;
+    SyncCommand = 10;
   }
 
   required Type cmdType = 1;    // Type of the command
@@ -74,6 +75,7 @@ message DatanodeCommandProto {
   optional RegisterCommandProto registerCmd = 7;
   optional BlockIdCommandProto blkIdCmd = 8;
   optional BlockECReconstructionCommandProto blkECReconstructionCmd = 9;
+  optional SyncCommandProto syncCommand = 10;
 }
 
 /**
@@ -154,6 +156,89 @@ message BlockECReconstructionCommandProto {
   repeated BlockECReconstructionInfoProto blockECReconstructioninfo = 1;
 }
 
+message SyncTaskIdProto {
+  required bytes syncTaskId = 1;
+  required string syncMountId = 2;
+}
+
+/**
+ * Instruct the datanode to perform a backup command
+ */
+message SyncCommandProto {
+  repeated BlockSyncTaskProto syncTasks = 1;
+}
+
+/**
+ * A block synchronization task as part of an orchestrated synchronization
+ * across potentially multiple datanodes (i.e. multipart put part).
+ */
+message BlockSyncTaskProto {
+  required SyncTaskIdProto syncTaskId = 1;
+
+  required bytes uploadHandle = 2;
+  required int32 partNumber = 3;
+  repeated LocatedBlockProto locatedBlocks = 4;
+  required string uri = 5;
+  required int32 offset = 6;
+  required int64 length = 7;
+}
+
+/**
+ * Block storage movement information
+ */
+message BlockMovingInfoProto {
+  required BlockProto block = 1;
+  required DatanodeInfoProto sourceDnInfo = 2;
+  required DatanodeInfoProto targetDnInfo = 3;
+  required StorageTypeProto sourceStorageType = 4;
+  required StorageTypeProto targetStorageType = 5;
+}
+
+/**
+ * Blocks for which storage movements has been attempted and finished
+ * with either success or failure.
+ */
+message BlocksStorageMoveAttemptFinishedProto {
+  repeated BlockProto blocks = 1;
+}
+
+/**
+ * A collection of feedbacks for a collection of sync tasks.
+ */
+message BulkSyncTaskExecutionFeedbackProto {
+  repeated SyncTaskExecutionFeedbackProto feedbacks = 1;
+}
+
+/**
+ * Feedback for a sync task that has been executed.
+ * syncTaskId - identifier for the task.
+ * outcome - success/error.
+ * operation - the type of operation.
+ * result - if the outcome is successful, the results of the sync task.
+ */
+message SyncTaskExecutionFeedbackProto {
+  required SyncTaskIdProto syncTaskId = 1;
+  required SyncTaskExecutionOutcomeProto outcome = 2;
+  optional SyncTaskExecutionResultProto result = 3;
+}
+
+/**
+ * Success of failure indication of a sync task.
+ */
+enum SyncTaskExecutionOutcomeProto {
+  FINISHED_SUCCESSFULLY = 0;
+  EXCEPTION = 1;
+}
+
+/**
+ * result - the opaque result data from the sync task.
+ * numberOfBytes - the number of bytes copied.
+ */
+message SyncTaskExecutionResultProto {
+  optional bytes result = 1;
+  optional int64 numberOfBytes = 2;
+}
+
 /**
  * registration - Information of the datanode registering with the namenode
  */
@@ -210,6 +295,7 @@ message HeartbeatRequestProto {
   optional bool requestFullBlockReportLease = 9 [ default = false ];
   repeated SlowPeerReportProto slowPeers = 10;
   repeated SlowDiskReportProto slowDisks = 11;
+  optional BulkSyncTaskExecutionFeedbackProto bulkSyncTaskExecutionFeedback = 12;
 }
 
 /**
@@ -275,7 +361,7 @@ message StorageBlockReportProto {
  */
 message BlockReportResponseProto {
   optional DatanodeCommandProto cmd = 1;
-} 
+}
 
 /**
  * registration - datanode registration information

http://git-wip-us.apache.org/repos/asf/hadoop/blob/b210ee3e/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestDatanodeManager.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestDatanodeManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestDatanodeManager.java
index dd6f40a..8ba0396 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestDatanodeManager.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestDatanodeManager.java
@@ -589,7 +589,7 @@ public class TestDatanodeManager {
     Mockito.when(dm.getDatanode(dnReg)).thenReturn(nodeInfo);
     DatanodeCommand[] cmds = dm.handleHeartbeat(
         dnReg, new StorageReport[1], "bp-123", 0, 0, 10, maxTransfers, 0, null,
-        SlowPeerReports.EMPTY_REPORT, SlowDiskReports.EMPTY_REPORT);
+        SlowPeerReports.EMPTY_REPORT, SlowDiskReports.EMPTY_REPORT, null);
 
     long expectedNumCmds = Arrays.stream(
         new int[]{numReplicationTasks, numECTasks})

http://git-wip-us.apache.org/repos/asf/hadoop/blob/b210ee3e/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestNameNodePrunesMissingStorages.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestNameNodePrunesMissingStorages.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestNameNodePrunesMissingStorages.java
index 05b6d30..e81fb1f 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestNameNodePrunesMissingStorages.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestNameNodePrunesMissingStorages.java
@@ -116,7 +116,7 @@ public class TestNameNodePrunesMissingStorages {
       cluster.stopDataNode(0);
       cluster.getNameNodeRpc().sendHeartbeat(dnReg, prunedReports, 0L, 0L, 0, 0,
           0, null, true, SlowPeerReports.EMPTY_REPORT,
-          SlowDiskReports.EMPTY_REPORT);
+          SlowDiskReports.EMPTY_REPORT, null);
 
       // Check that the missing storage was pruned.
       assertThat(dnDescriptor.getStorageInfos().length, is(expectedStoragesAfterTest));

http://git-wip-us.apache.org/repos/asf/hadoop/blob/b210ee3e/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/InternalDataNodeTestUtils.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/InternalDataNodeTestUtils.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/InternalDataNodeTestUtils.java
index b453991..2a6975f 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/InternalDataNodeTestUtils.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/InternalDataNodeTestUtils.java
@@ -167,7 +167,8 @@ public class InternalDataNodeTestUtils {
             Mockito.anyInt(), Mockito.any(VolumeFailureSummary.class),
             Mockito.anyBoolean(),
             Mockito.any(SlowPeerReports.class),
-            Mockito.any(SlowDiskReports.class))).thenReturn(
+            Mockito.any(SlowDiskReports.class),
+            null)).thenReturn(
         new HeartbeatResponse(new DatanodeCommand[0], new NNHAStatusHeartbeat(
             HAServiceState.ACTIVE, 1), null, ThreadLocalRandom.current()
             .nextLong() | 1L));

http://git-wip-us.apache.org/repos/asf/hadoop/blob/b210ee3e/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBPOfferService.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBPOfferService.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBPOfferService.java
index 4863ca1..62b84d9 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBPOfferService.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBPOfferService.java
@@ -18,6 +18,8 @@
 package org.apache.hadoop.hdfs.server.datanode;
 
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_DATA_DIR_KEY;
+
+import org.apache.hadoop.hdfs.server.protocol.BulkSyncTaskExecutionFeedback;
 import org.apache.hadoop.hdfs.server.protocol.SlowDiskReports;
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertNull;
@@ -157,7 +159,8 @@ public class TestBPOfferService {
           Mockito.any(VolumeFailureSummary.class),
           Mockito.anyBoolean(),
           Mockito.any(SlowPeerReports.class),
-          Mockito.any(SlowDiskReports.class));
+          Mockito.any(SlowDiskReports.class),
+          Mockito.any(BulkSyncTaskExecutionFeedback.class));
     mockHaStatuses[nnIdx] = new NNHAStatusHeartbeat(HAServiceState.STANDBY, 0);
     datanodeCommands[nnIdx] = new DatanodeCommand[0];
     return mock;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/b210ee3e/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBlockRecovery.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBlockRecovery.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBlockRecovery.java
index 07fd4ae..09fae14 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBlockRecovery.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBlockRecovery.java
@@ -21,6 +21,7 @@ package org.apache.hadoop.hdfs.server.datanode;
 import org.apache.hadoop.hdfs.AppendTestUtil;
 import org.apache.hadoop.hdfs.DFSClient;
 import org.apache.hadoop.hdfs.server.namenode.NameNode;
+import org.apache.hadoop.hdfs.server.protocol.BulkSyncTaskExecutionFeedback;
 import org.apache.hadoop.hdfs.server.protocol.SlowDiskReports;
 
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_BLOCK_SIZE_KEY;
@@ -232,7 +233,8 @@ public class TestBlockRecovery {
             Mockito.any(VolumeFailureSummary.class),
             Mockito.anyBoolean(),
             Mockito.any(SlowPeerReports.class),
-            Mockito.any(SlowDiskReports.class)))
+            Mockito.any(SlowDiskReports.class),
+            Mockito.any(BulkSyncTaskExecutionFeedback.class)))
         .thenReturn(new HeartbeatResponse(
             new DatanodeCommand[0],
             new NNHAStatusHeartbeat(HAServiceState.ACTIVE, 1),

http://git-wip-us.apache.org/repos/asf/hadoop/blob/b210ee3e/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeLifeline.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeLifeline.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeLifeline.java
index 28427bc..6374540 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeLifeline.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeLifeline.java
@@ -24,13 +24,14 @@ import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_HEARTBEAT_RECHEC
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_LIFELINE_RPC_ADDRESS_KEY;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_STALE_DATANODE_INTERVAL_KEY;
 
+import org.apache.hadoop.hdfs.server.protocol.BulkSyncTaskExecutionFeedback;
 import org.apache.hadoop.hdfs.server.protocol.SlowDiskReports;
 import static org.apache.hadoop.test.MetricsAsserts.getLongCounter;
 import static org.apache.hadoop.test.MetricsAsserts.getMetrics;
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertNotNull;
 import static org.junit.Assert.assertTrue;
-import static org.mockito.Mockito.any;
+import static org.mockito.Matchers.any;
 import static org.mockito.Mockito.anyBoolean;
 import static org.mockito.Mockito.anyInt;
 import static org.mockito.Mockito.anyLong;
@@ -172,7 +173,8 @@ public class TestDataNodeLifeline {
             any(VolumeFailureSummary.class),
             anyBoolean(),
             any(SlowPeerReports.class),
-            any(SlowDiskReports.class));
+            any(SlowDiskReports.class),
+            any(BulkSyncTaskExecutionFeedback.class));
 
     // Intercept lifeline to trigger latch count-down on each call.
     doAnswer(new LatchCountingAnswer<Void>(lifelinesSent))
@@ -237,7 +239,8 @@ public class TestDataNodeLifeline {
             any(VolumeFailureSummary.class),
             anyBoolean(),
             any(SlowPeerReports.class),
-            any(SlowDiskReports.class));
+            any(SlowDiskReports.class),
+            any(BulkSyncTaskExecutionFeedback.class));
 
     // While waiting on the latch for the expected number of heartbeat messages,
     // poll DataNode tracking information.  We expect that the DataNode always

http://git-wip-us.apache.org/repos/asf/hadoop/blob/b210ee3e/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDatanodeProtocolRetryPolicy.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDatanodeProtocolRetryPolicy.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDatanodeProtocolRetryPolicy.java
index bb1d9ef..2d7dea9 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDatanodeProtocolRetryPolicy.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDatanodeProtocolRetryPolicy.java
@@ -18,6 +18,7 @@
 
 package org.apache.hadoop.hdfs.server.datanode;
 
+import org.apache.hadoop.hdfs.server.protocol.BulkSyncTaskExecutionFeedback;
 import org.apache.hadoop.hdfs.server.protocol.SlowDiskReports;
 import static org.mockito.Mockito.mock;
 import static org.mockito.Mockito.when;
@@ -222,7 +223,8 @@ public class TestDatanodeProtocolRetryPolicy {
            Mockito.any(VolumeFailureSummary.class),
            Mockito.anyBoolean(),
            Mockito.any(SlowPeerReports.class),
-           Mockito.any(SlowDiskReports.class));
+           Mockito.any(SlowDiskReports.class),
+           Mockito.any(BulkSyncTaskExecutionFeedback.class));
 
     dn = new DataNode(conf, locations, null, null) {
       @Override

http://git-wip-us.apache.org/repos/asf/hadoop/blob/b210ee3e/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestFsDatasetCache.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestFsDatasetCache.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestFsDatasetCache.java
index 2dbd5b9..2cf0135 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestFsDatasetCache.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestFsDatasetCache.java
@@ -18,6 +18,7 @@
 package org.apache.hadoop.hdfs.server.datanode;
 
 import net.jcip.annotations.NotThreadSafe;
+import org.apache.hadoop.hdfs.server.protocol.BulkSyncTaskExecutionFeedback;
 import org.apache.hadoop.hdfs.server.protocol.SlowDiskReports;
 import static org.apache.hadoop.test.MetricsAsserts.getMetrics;
 import static org.junit.Assert.assertEquals;
@@ -208,7 +209,8 @@ public class TestFsDatasetCache {
           (StorageReport[]) any(), anyLong(), anyLong(),
           anyInt(), anyInt(), anyInt(), (VolumeFailureSummary) any(),
           anyBoolean(), any(SlowPeerReports.class),
-          any(SlowDiskReports.class));
+          any(SlowDiskReports.class),
+          any(BulkSyncTaskExecutionFeedback.class));
     } finally {
       lock.writeLock().unlock();
     }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/b210ee3e/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestStorageReport.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestStorageReport.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestStorageReport.java
index 5f62ddb..0f0bc1d 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestStorageReport.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestStorageReport.java
@@ -29,6 +29,7 @@ import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.hdfs.protocolPB.DatanodeProtocolClientSideTranslatorPB;
 import org.apache.hadoop.hdfs.server.namenode.NameNode;
+import org.apache.hadoop.hdfs.server.protocol.BulkSyncTaskExecutionFeedback;
 import org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration;
 import org.apache.hadoop.hdfs.server.protocol.DatanodeStorage;
 import org.apache.hadoop.hdfs.server.protocol.SlowDiskReports;
@@ -110,7 +111,8 @@ public class TestStorageReport {
         anyLong(), anyLong(), anyInt(), anyInt(), anyInt(),
         Mockito.any(VolumeFailureSummary.class), Mockito.anyBoolean(),
         Mockito.any(SlowPeerReports.class),
-        Mockito.any(SlowDiskReports.class));
+        Mockito.any(SlowDiskReports.class),
+        Mockito.any(BulkSyncTaskExecutionFeedback.class));
 
     StorageReport[] reports = captor.getValue();
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/b210ee3e/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/NNThroughputBenchmark.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/NNThroughputBenchmark.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/NNThroughputBenchmark.java
index 3a3c471..9940174 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/NNThroughputBenchmark.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/NNThroughputBenchmark.java
@@ -956,8 +956,8 @@ public class NNThroughputBenchmark implements Tool {
           DF_CAPACITY, DF_USED, DF_CAPACITY - DF_USED, DF_USED, 0L) };
       DatanodeCommand[] cmds = dataNodeProto.sendHeartbeat(dnRegistration, rep,
           0L, 0L, 0, 0, 0, null, true,
-          SlowPeerReports.EMPTY_REPORT, SlowDiskReports.EMPTY_REPORT)
-          .getCommands();
+          SlowPeerReports.EMPTY_REPORT, SlowDiskReports.EMPTY_REPORT,
+          null).getCommands();
       if(cmds != null) {
         for (DatanodeCommand cmd : cmds ) {
           if(LOG.isDebugEnabled()) {
@@ -1007,8 +1007,8 @@ public class NNThroughputBenchmark implements Tool {
           false, DF_CAPACITY, DF_USED, DF_CAPACITY - DF_USED, DF_USED, 0) };
       DatanodeCommand[] cmds = dataNodeProto.sendHeartbeat(dnRegistration,
           rep, 0L, 0L, 0, 0, 0, null, true,
-          SlowPeerReports.EMPTY_REPORT, SlowDiskReports.EMPTY_REPORT)
-          .getCommands();
+          SlowPeerReports.EMPTY_REPORT, SlowDiskReports.EMPTY_REPORT,
+          null).getCommands();
       if (cmds != null) {
         for (DatanodeCommand cmd : cmds) {
           if (cmd.getAction() == DatanodeProtocol.DNA_TRANSFER) {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/b210ee3e/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/NameNodeAdapter.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/NameNodeAdapter.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/NameNodeAdapter.java
index b85527a..8589f75 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/NameNodeAdapter.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/NameNodeAdapter.java
@@ -17,11 +17,13 @@
  */
 package org.apache.hadoop.hdfs.server.namenode;
 
+import org.apache.hadoop.hdfs.server.protocol.BulkSyncTaskExecutionFeedback;
 import org.apache.hadoop.hdfs.server.protocol.SlowDiskReports;
 import static org.mockito.Mockito.spy;
 
 import java.io.File;
 import java.io.IOException;
+import java.util.ArrayList;
 import java.util.concurrent.locks.ReentrantReadWriteLock;
 
 import org.apache.commons.lang3.reflect.FieldUtils;
@@ -130,7 +132,8 @@ public class NameNodeAdapter {
     return namesystem.handleHeartbeat(nodeReg,
         BlockManagerTestUtil.getStorageReportsForDatanode(dd),
         dd.getCacheCapacity(), dd.getCacheRemaining(), 0, 0, 0, null, true,
-        SlowPeerReports.EMPTY_REPORT, SlowDiskReports.EMPTY_REPORT);
+        SlowPeerReports.EMPTY_REPORT, SlowDiskReports.EMPTY_REPORT,
+        new BulkSyncTaskExecutionFeedback(new ArrayList<>()));
   }
 
   public static boolean setReplication(final FSNamesystem ns,

http://git-wip-us.apache.org/repos/asf/hadoop/blob/b210ee3e/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestDeadDatanode.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestDeadDatanode.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestDeadDatanode.java
index 366f584..9f9a897 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestDeadDatanode.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestDeadDatanode.java
@@ -139,8 +139,8 @@ public class TestDeadDatanode {
         false, 0, 0, 0, 0, 0) };
     DatanodeCommand[] cmd =
         dnp.sendHeartbeat(reg, rep, 0L, 0L, 0, 0, 0, null, true,
-            SlowPeerReports.EMPTY_REPORT, SlowDiskReports.EMPTY_REPORT)
-            .getCommands();
+            SlowPeerReports.EMPTY_REPORT, SlowDiskReports.EMPTY_REPORT,
+            null).getCommands();
     assertEquals(1, cmd.length);
     assertEquals(cmd[0].getAction(), RegisterCommand.REGISTER
         .getAction());


---------------------------------------------------------------------
To unsubscribe, e-mail: common-commits-unsubscribe@hadoop.apache.org
For additional commands, e-mail: common-commits-help@hadoop.apache.org


[02/50] [abbrv] hadoop git commit: YARN-8415. TimelineWebServices.getEntity should throw ForbiddenException instead of 404 when ACL checks fail. Contributed by Suma Shivaprasad.

Posted by vi...@apache.org.
YARN-8415. TimelineWebServices.getEntity should throw ForbiddenException instead of 404 when ACL checks fail. Contributed by Suma Shivaprasad.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/fa9ef15e
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/fa9ef15e
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/fa9ef15e

Branch: refs/heads/HDFS-12090
Commit: fa9ef15ecd6dc30fb260e1c342a2b51505d39b6b
Parents: 53e267f
Author: Sunil G <su...@apache.org>
Authored: Mon Jul 2 15:34:37 2018 -0700
Committer: Sunil G <su...@apache.org>
Committed: Mon Jul 2 15:34:47 2018 -0700

----------------------------------------------------------------------
 .../yarn/server/timeline/RollingLevelDBTimelineStore.java     | 6 ++++++
 .../hadoop/yarn/server/timeline/TimelineDataManager.java      | 7 ++++++-
 .../yarn/server/timeline/webapp/TimelineWebServices.java      | 4 ++++
 .../yarn/server/timeline/webapp/TestTimelineWebServices.java  | 2 +-
 4 files changed, 17 insertions(+), 2 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/fa9ef15e/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/timeline/RollingLevelDBTimelineStore.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/timeline/RollingLevelDBTimelineStore.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/timeline/RollingLevelDBTimelineStore.java
index 36b5ce8..255547b 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/timeline/RollingLevelDBTimelineStore.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/timeline/RollingLevelDBTimelineStore.java
@@ -413,6 +413,9 @@ public class RollingLevelDBTimelineStore extends AbstractService implements
       EnumSet<Field> fields) throws IOException {
     Long revStartTime = getStartTimeLong(entityId, entityType);
     if (revStartTime == null) {
+      if ( LOG.isDebugEnabled()) {
+        LOG.debug("Could not find start time for {} {} ", entityType, entityId);
+      }
       return null;
     }
     byte[] prefix = KeyBuilder.newInstance().add(entityType)
@@ -421,6 +424,9 @@ public class RollingLevelDBTimelineStore extends AbstractService implements
 
     DB db = entitydb.getDBForStartTime(revStartTime);
     if (db == null) {
+      if ( LOG.isDebugEnabled()) {
+        LOG.debug("Could not find db for {} {} ", entityType, entityId);
+      }
       return null;
     }
     try (DBIterator iterator = db.iterator()) {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/fa9ef15e/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/timeline/TimelineDataManager.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/timeline/TimelineDataManager.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/timeline/TimelineDataManager.java
index 56b71fa..c538196 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/timeline/TimelineDataManager.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/timeline/TimelineDataManager.java
@@ -219,7 +219,12 @@ public class TimelineDataManager extends AbstractService {
       // check ACLs
       if (!timelineACLsManager.checkAccess(
           callerUGI, ApplicationAccessType.VIEW_APP, entity)) {
-        entity = null;
+        final String user = callerUGI != null ? callerUGI.getShortUserName():
+            null;
+        throw new YarnException(
+            user + " is not allowed to get the timeline entity "
+            + "{ id: " + entity.getEntityId() + ", type: "
+            + entity.getEntityType() + " }.");
       }
     }
     return entity;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/fa9ef15e/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/timeline/webapp/TimelineWebServices.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/timeline/webapp/TimelineWebServices.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/timeline/webapp/TimelineWebServices.java
index be8e3c5..9423e7f 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/timeline/webapp/TimelineWebServices.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/timeline/webapp/TimelineWebServices.java
@@ -162,6 +162,10 @@ public class TimelineWebServices {
           parseStr(entityId),
           parseFieldsStr(fields, ","),
           getUser(req));
+    } catch (YarnException e) {
+      // The user doesn't have the access to override the existing domain.
+      LOG.info(e.getMessage(), e);
+      throw new ForbiddenException(e);
     } catch (IllegalArgumentException e) {
       throw new BadRequestException(e);
     } catch (Exception e) {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/fa9ef15e/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/test/java/org/apache/hadoop/yarn/server/timeline/webapp/TestTimelineWebServices.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/test/java/org/apache/hadoop/yarn/server/timeline/webapp/TestTimelineWebServices.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/test/java/org/apache/hadoop/yarn/server/timeline/webapp/TestTimelineWebServices.java
index ca78cbc..b6d2967 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/test/java/org/apache/hadoop/yarn/server/timeline/webapp/TestTimelineWebServices.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/test/java/org/apache/hadoop/yarn/server/timeline/webapp/TestTimelineWebServices.java
@@ -709,7 +709,7 @@ public class TestTimelineWebServices extends JerseyTestBase {
           .get(ClientResponse.class);
       assertEquals(MediaType.APPLICATION_JSON + "; " + JettyUtils.UTF_8,
           response.getType().toString());
-      assertResponseStatusCode(Status.NOT_FOUND, response.getStatusInfo());
+      assertResponseStatusCode(Status.FORBIDDEN, response.getStatusInfo());
     } finally {
       timelineACLsManager.setAdminACLsManager(oldAdminACLsManager);
     }


---------------------------------------------------------------------
To unsubscribe, e-mail: common-commits-unsubscribe@hadoop.apache.org
For additional commands, e-mail: common-commits-help@hadoop.apache.org


[07/50] [abbrv] hadoop git commit: YARN-8459. Improve Capacity Scheduler logs to debug invalid states. Contributed by Wangda Tan.

Posted by vi...@apache.org.
YARN-8459. Improve Capacity Scheduler logs to debug invalid states. Contributed by Wangda Tan.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/51654a39
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/51654a39
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/51654a39

Branch: refs/heads/HDFS-12090
Commit: 51654a3962bcd0482c0e1dd32765e9533e3f9158
Parents: 344f324
Author: Sunil G <su...@apache.org>
Authored: Tue Jul 3 09:59:20 2018 -0700
Committer: Sunil G <su...@apache.org>
Committed: Tue Jul 3 09:59:20 2018 -0700

----------------------------------------------------------------------
 .../scheduler/capacity/CapacityScheduler.java   | 39 +++++++++++++-------
 .../scheduler/capacity/ParentQueue.java         | 36 ++++++++++++------
 .../allocator/AbstractContainerAllocator.java   | 13 ++++---
 .../scheduler/common/fica/FiCaSchedulerApp.java |  5 +++
 4 files changed, 63 insertions(+), 30 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/51654a39/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacityScheduler.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacityScheduler.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacityScheduler.java
index b59636a..37f56de 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacityScheduler.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacityScheduler.java
@@ -1234,8 +1234,10 @@ public class CapacityScheduler extends
       updateDemandForQueue.getOrderingPolicy().demandUpdated(application);
     }
 
-    LOG.info("Allocation for application " + applicationAttemptId + " : " +
-        allocation + " with cluster resource : " + getClusterResource());
+    if (LOG.isDebugEnabled()) {
+      LOG.info("Allocation for application " + applicationAttemptId + " : "
+          + allocation + " with cluster resource : " + getClusterResource());
+    }
     return allocation;
   }
 
@@ -1479,10 +1481,17 @@ public class CapacityScheduler extends
   private CSAssignment allocateContainerOnSingleNode(
       CandidateNodeSet<FiCaSchedulerNode> candidates, FiCaSchedulerNode node,
       boolean withNodeHeartbeat) {
+    if (LOG.isDebugEnabled()) {
+      LOG.debug(
+          "Trying to schedule on node: " + node.getNodeName() + ", available: "
+              + node.getUnallocatedResource());
+    }
+
     // Backward compatible way to make sure previous behavior which allocation
     // driven by node heartbeat works.
     if (getNode(node.getNodeID()) != node) {
-      LOG.error("Trying to schedule on a removed node, please double check.");
+      LOG.error("Trying to schedule on a removed node, please double check, "
+          + "nodeId=" + node.getNodeID());
       return null;
     }
 
@@ -1496,14 +1505,19 @@ public class CapacityScheduler extends
       FiCaSchedulerApp reservedApplication = getCurrentAttemptForContainer(
           reservedContainer.getContainerId());
       if (reservedApplication == null) {
-        LOG.error("Trying to schedule for a finished app, please double check.");
+        LOG.error(
+            "Trying to schedule for a finished app, please double check. nodeId="
+                + node.getNodeID() + " container=" + reservedContainer
+                .getContainerId());
         return null;
       }
 
       // Try to fulfill the reservation
-      LOG.info(
-          "Trying to fulfill reservation for application " + reservedApplication
-              .getApplicationId() + " on node: " + node.getNodeID());
+      if (LOG.isDebugEnabled()) {
+        LOG.debug("Trying to fulfill reservation for application "
+            + reservedApplication.getApplicationId() + " on node: " + node
+            .getNodeID());
+      }
 
       LeafQueue queue = ((LeafQueue) reservedApplication.getQueue());
       assignment = queue.assignContainers(getClusterResource(), candidates,
@@ -1567,12 +1581,6 @@ public class CapacityScheduler extends
       return null;
     }
 
-    if (LOG.isDebugEnabled()) {
-      LOG.debug(
-          "Trying to schedule on node: " + node.getNodeName() + ", available: "
-              + node.getUnallocatedResource());
-    }
-
     return allocateOrReserveNewContainers(candidates, withNodeHeartbeat);
   }
 
@@ -2888,6 +2896,11 @@ public class CapacityScheduler extends
           LOG.info("Failed to accept allocation proposal");
         }
 
+        if (LOG.isDebugEnabled()) {
+          LOG.debug("Allocation proposal accepted=" + isSuccess + ", proposal="
+              + request);
+        }
+
         // Update unconfirmed allocated resource.
         if (updateUnconfirmedAllocatedResource) {
           app.decUnconfirmedRes(request.getTotalAllocatedResource());

http://git-wip-us.apache.org/repos/asf/hadoop/blob/51654a39/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/ParentQueue.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/ParentQueue.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/ParentQueue.java
index bb4823e..2363b88 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/ParentQueue.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/ParentQueue.java
@@ -90,6 +90,8 @@ public class ParentQueue extends AbstractCSQueue {
 
   private QueueOrderingPolicy queueOrderingPolicy;
 
+  private long lastSkipQueueDebugLoggingTimestamp = -1;
+
   public ParentQueue(CapacitySchedulerContext cs,
       String queueName, CSQueue parent, CSQueue old) throws IOException {
     super(cs, queueName, parent, old);
@@ -539,9 +541,14 @@ public class ParentQueue extends AbstractCSQueue {
     if (schedulingMode == SchedulingMode.RESPECT_PARTITION_EXCLUSIVITY
         && !accessibleToPartition(candidates.getPartition())) {
       if (LOG.isDebugEnabled()) {
-        LOG.debug("Skip this queue=" + getQueuePath()
-            + ", because it is not able to access partition=" + candidates
-            .getPartition());
+        long now = System.currentTimeMillis();
+        // Do logging every 1 sec to avoid excessive logging.
+        if (now - this.lastSkipQueueDebugLoggingTimestamp > 1000) {
+          LOG.debug("Skip this queue=" + getQueuePath()
+              + ", because it is not able to access partition=" + candidates
+              .getPartition());
+          this.lastSkipQueueDebugLoggingTimestamp = now;
+        }
       }
 
       ActivitiesLogger.QUEUE.recordQueueActivity(activitiesManager, node,
@@ -561,10 +568,15 @@ public class ParentQueue extends AbstractCSQueue {
     if (!super.hasPendingResourceRequest(candidates.getPartition(),
         clusterResource, schedulingMode)) {
       if (LOG.isDebugEnabled()) {
-        LOG.debug("Skip this queue=" + getQueuePath()
-            + ", because it doesn't need more resource, schedulingMode="
-            + schedulingMode.name() + " node-partition=" + candidates
-            .getPartition());
+        long now = System.currentTimeMillis();
+        // Do logging every 1 sec to avoid excessive logging.
+        if (now - this.lastSkipQueueDebugLoggingTimestamp > 1000) {
+          LOG.debug("Skip this queue=" + getQueuePath()
+              + ", because it doesn't need more resource, schedulingMode="
+              + schedulingMode.name() + " node-partition=" + candidates
+              .getPartition());
+          this.lastSkipQueueDebugLoggingTimestamp = now;
+        }
       }
 
       ActivitiesLogger.QUEUE.recordQueueActivity(activitiesManager, node,
@@ -666,12 +678,12 @@ public class ParentQueue extends AbstractCSQueue {
         assignment.setIncreasedAllocation(
             assignedToChild.isIncreasedAllocation());
 
-        LOG.info("assignedContainer" + " queue=" + getQueueName()
-            + " usedCapacity=" + getUsedCapacity() + " absoluteUsedCapacity="
-            + getAbsoluteUsedCapacity() + " used=" + queueUsage.getUsed()
-            + " cluster=" + clusterResource);
-
         if (LOG.isDebugEnabled()) {
+          LOG.debug("assignedContainer reserved=" + isReserved + " queue="
+              + getQueueName() + " usedCapacity=" + getUsedCapacity()
+              + " absoluteUsedCapacity=" + getAbsoluteUsedCapacity() + " used="
+              + queueUsage.getUsed() + " cluster=" + clusterResource);
+
           LOG.debug(
               "ParentQ=" + getQueueName() + " assignedSoFarInThisIteration="
                   + assignment.getResource() + " usedCapacity="

http://git-wip-us.apache.org/repos/asf/hadoop/blob/51654a39/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/allocator/AbstractContainerAllocator.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/allocator/AbstractContainerAllocator.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/allocator/AbstractContainerAllocator.java
index 36665d6..bfc129f 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/allocator/AbstractContainerAllocator.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/allocator/AbstractContainerAllocator.java
@@ -93,11 +93,14 @@ public abstract class AbstractContainerAllocator {
       assignment.setType(result.getContainerNodeType());
 
       if (result.getAllocationState() == AllocationState.RESERVED) {
-        // This is a reserved container
-        LOG.info("Reserved container " + " application="
-            + application.getApplicationId() + " resource=" + allocatedResource
-            + " queue=" + appInfo.getQueueName()
-            + " cluster=" + clusterResource);
+        if (LOG.isDebugEnabled()) {
+          // This is a reserved container
+          // Since re-reservation could happen again and again for already
+          // reserved containers. only do this in debug log.
+          LOG.debug("Reserved container " + " application=" + application
+              .getApplicationId() + " resource=" + allocatedResource + " queue="
+              + appInfo.getQueueName() + " cluster=" + clusterResource);
+        }
         assignment.getAssignmentInformation().addReservationDetails(
             updatedContainer, application.getCSLeafQueue().getQueuePath());
         assignment.getAssignmentInformation().incrReservations();

http://git-wip-us.apache.org/repos/asf/hadoop/blob/51654a39/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/common/fica/FiCaSchedulerApp.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/common/fica/FiCaSchedulerApp.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/common/fica/FiCaSchedulerApp.java
index 3ec8191..3b1b82c 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/common/fica/FiCaSchedulerApp.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/common/fica/FiCaSchedulerApp.java
@@ -607,6 +607,11 @@ public class FiCaSchedulerApp extends SchedulerApplicationAttempt {
               schedulerContainer.getRmContainer(),
               schedulerContainer.getRmContainer().getContainer(),
               reReservation);
+
+          LOG.info("Reserved container=" + rmContainer.getContainerId()
+              + ", on node=" + schedulerContainer.getSchedulerNode()
+              + " with resource=" + rmContainer
+              .getAllocatedOrReservedResource());
         }
       }
     } finally {


---------------------------------------------------------------------
To unsubscribe, e-mail: common-commits-unsubscribe@hadoop.apache.org
For additional commands, e-mail: common-commits-help@hadoop.apache.org


[39/50] [abbrv] hadoop git commit: HDDS-167. Rename KeySpaceManager to OzoneManager. Contributed by Arpit Agarwal.

Posted by vi...@apache.org.
http://git-wip-us.apache.org/repos/asf/hadoop/blob/061b1685/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/ksm/helpers/KsmVolumeArgs.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/ksm/helpers/KsmVolumeArgs.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/ksm/helpers/KsmVolumeArgs.java
deleted file mode 100644
index 6b42c27..0000000
--- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/ksm/helpers/KsmVolumeArgs.java
+++ /dev/null
@@ -1,223 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.ozone.ksm.helpers;
-
-import com.google.common.base.Preconditions;
-import org.apache.hadoop.ozone.protocol.proto
-    .KeySpaceManagerProtocolProtos.OzoneAclInfo;
-import org.apache.hadoop.ozone.protocol.proto
-    .KeySpaceManagerProtocolProtos.VolumeInfo;
-import org.apache.hadoop.hdds.protocol.proto.HddsProtos.KeyValue;
-
-import java.io.IOException;
-import java.util.HashMap;
-import java.util.LinkedList;
-import java.util.List;
-import java.util.Map;
-import java.util.stream.Collectors;
-
-
-/**
- * A class that encapsulates the KsmVolumeArgs Args.
- */
-public final class KsmVolumeArgs {
-  private final String adminName;
-  private final String ownerName;
-  private final String volume;
-  private final long creationTime;
-  private final long quotaInBytes;
-  private final Map<String, String> keyValueMap;
-  private final KsmOzoneAclMap aclMap;
-
-  /**
-   * Private constructor, constructed via builder.
-   * @param adminName  - Administrator's name.
-   * @param ownerName  - Volume owner's name
-   * @param volume - volume name
-   * @param quotaInBytes - Volume Quota in bytes.
-   * @param keyValueMap - keyValue map.
-   * @param aclMap - User to access rights map.
-   * @param creationTime - Volume creation time.
-   */
-  private KsmVolumeArgs(String adminName, String ownerName, String volume,
-                        long quotaInBytes, Map<String, String> keyValueMap,
-                        KsmOzoneAclMap aclMap, long creationTime) {
-    this.adminName = adminName;
-    this.ownerName = ownerName;
-    this.volume = volume;
-    this.quotaInBytes = quotaInBytes;
-    this.keyValueMap = keyValueMap;
-    this.aclMap = aclMap;
-    this.creationTime = creationTime;
-  }
-
-  /**
-   * Returns the Admin Name.
-   * @return String.
-   */
-  public String getAdminName() {
-    return adminName;
-  }
-
-  /**
-   * Returns the owner Name.
-   * @return String
-   */
-  public String getOwnerName() {
-    return ownerName;
-  }
-
-  /**
-   * Returns the volume Name.
-   * @return String
-   */
-  public String getVolume() {
-    return volume;
-  }
-
-  /**
-   * Returns creation time.
-   * @return long
-   */
-  public long getCreationTime() {
-    return creationTime;
-  }
-
-  /**
-   * Returns Quota in Bytes.
-   * @return long, Quota in bytes.
-   */
-  public long getQuotaInBytes() {
-    return quotaInBytes;
-  }
-
-  public Map<String, String> getKeyValueMap() {
-    return keyValueMap;
-  }
-
-  public KsmOzoneAclMap getAclMap() {
-    return aclMap;
-  }
-  /**
-   * Returns new builder class that builds a KsmVolumeArgs.
-   *
-   * @return Builder
-   */
-  public static Builder newBuilder() {
-    return new Builder();
-  }
-
-  /**
-   * Builder for KsmVolumeArgs.
-   */
-  public static class Builder {
-    private String adminName;
-    private String ownerName;
-    private String volume;
-    private long creationTime;
-    private long quotaInBytes;
-    private Map<String, String> keyValueMap;
-    private KsmOzoneAclMap aclMap;
-
-    /**
-     * Constructs a builder.
-     */
-    Builder() {
-      keyValueMap = new HashMap<>();
-      aclMap = new KsmOzoneAclMap();
-    }
-
-    public Builder setAdminName(String admin) {
-      this.adminName = admin;
-      return this;
-    }
-
-    public Builder setOwnerName(String owner) {
-      this.ownerName = owner;
-      return this;
-    }
-
-    public Builder setVolume(String volumeName) {
-      this.volume = volumeName;
-      return this;
-    }
-
-    public Builder setCreationTime(long createdOn) {
-      this.creationTime = createdOn;
-      return this;
-    }
-
-    public Builder setQuotaInBytes(long quota) {
-      this.quotaInBytes = quota;
-      return this;
-    }
-
-    public Builder addMetadata(String key, String value) {
-      keyValueMap.put(key, value); // overwrite if present.
-      return this;
-    }
-
-    public Builder addOzoneAcls(OzoneAclInfo acl) throws IOException {
-      aclMap.addAcl(acl);
-      return this;
-    }
-
-    /**
-     * Constructs a CreateVolumeArgument.
-     * @return CreateVolumeArgs.
-     */
-    public KsmVolumeArgs build() {
-      Preconditions.checkNotNull(adminName);
-      Preconditions.checkNotNull(ownerName);
-      Preconditions.checkNotNull(volume);
-      return new KsmVolumeArgs(adminName, ownerName, volume, quotaInBytes,
-          keyValueMap, aclMap, creationTime);
-    }
-  }
-
-  public VolumeInfo getProtobuf() {
-    List<KeyValue> metadataList = new LinkedList<>();
-    for (Map.Entry<String, String> entry : keyValueMap.entrySet()) {
-      metadataList.add(KeyValue.newBuilder().setKey(entry.getKey()).
-          setValue(entry.getValue()).build());
-    }
-    List<OzoneAclInfo> aclList = aclMap.ozoneAclGetProtobuf();
-
-    return VolumeInfo.newBuilder()
-        .setAdminName(adminName)
-        .setOwnerName(ownerName)
-        .setVolume(volume)
-        .setQuotaInBytes(quotaInBytes)
-        .addAllMetadata(metadataList)
-        .addAllVolumeAcls(aclList)
-        .setCreationTime(creationTime)
-        .build();
-  }
-
-  public static KsmVolumeArgs getFromProtobuf(VolumeInfo volInfo) {
-    Map<String, String> kvMap = volInfo.getMetadataList().stream()
-        .collect(Collectors.toMap(KeyValue::getKey,
-            KeyValue::getValue));
-    KsmOzoneAclMap aclMap =
-        KsmOzoneAclMap.ozoneAclGetFromProtobuf(volInfo.getVolumeAclsList());
-
-    return new KsmVolumeArgs(volInfo.getAdminName(), volInfo.getOwnerName(),
-        volInfo.getVolume(), volInfo.getQuotaInBytes(), kvMap, aclMap,
-        volInfo.getCreationTime());
-  }
-}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/061b1685/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/ksm/helpers/OpenKeySession.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/ksm/helpers/OpenKeySession.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/ksm/helpers/OpenKeySession.java
deleted file mode 100644
index c19c04b..0000000
--- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/ksm/helpers/OpenKeySession.java
+++ /dev/null
@@ -1,50 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- *  with the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- *  Unless required by applicable law or agreed to in writing, software
- *  distributed under the License is distributed on an "AS IS" BASIS,
- *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- *  See the License for the specific language governing permissions and
- *  limitations under the License.
- */
-package org.apache.hadoop.ozone.ksm.helpers;
-
-/**
- * This class represents a open key "session". A session here means a key is
- * opened by a specific client, the client sends the handler to server, such
- * that servers can recognize this client, and thus know how to close the key.
- */
-public class OpenKeySession {
-  private final int id;
-  private final KsmKeyInfo keyInfo;
-  // the version of the key when it is being opened in this session.
-  // a block that has a create version equals to open version means it will
-  // be committed only when this open session is closed.
-  private long openVersion;
-
-  public OpenKeySession(int id, KsmKeyInfo info, long version) {
-    this.id = id;
-    this.keyInfo = info;
-    this.openVersion = version;
-  }
-
-  public long getOpenVersion() {
-    return this.openVersion;
-  }
-
-  public KsmKeyInfo getKeyInfo() {
-    return keyInfo;
-  }
-
-  public int getId() {
-    return id;
-  }
-}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/061b1685/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/ksm/helpers/ServiceInfo.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/ksm/helpers/ServiceInfo.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/ksm/helpers/ServiceInfo.java
deleted file mode 100644
index e07232d..0000000
--- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/ksm/helpers/ServiceInfo.java
+++ /dev/null
@@ -1,237 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.ozone.ksm.helpers;
-
-
-import com.fasterxml.jackson.annotation.JsonIgnore;
-import com.fasterxml.jackson.databind.ObjectMapper;
-import com.fasterxml.jackson.databind.ObjectReader;
-import com.fasterxml.jackson.databind.ObjectWriter;
-import com.google.common.base.Preconditions;
-import org.apache.hadoop.ozone.client.rest.response.BucketInfo;
-import org.apache.hadoop.ozone.protocol.proto.KeySpaceManagerProtocolProtos;
-import org.apache.hadoop.ozone.protocol.proto.KeySpaceManagerProtocolProtos
-    .ServicePort;
-import org.apache.hadoop.hdds.protocol.proto.HddsProtos.NodeType;
-
-import java.io.IOException;
-import java.util.ArrayList;
-import java.util.HashMap;
-import java.util.List;
-import java.util.Map;
-import java.util.stream.Collectors;
-
-/**
- * ServiceInfo holds the config details of Ozone services.
- */
-public final class ServiceInfo {
-
-  private static final ObjectReader READER =
-      new ObjectMapper().readerFor(ServiceInfo.class);
-  private static final ObjectWriter WRITER =
-      new ObjectMapper().writerWithDefaultPrettyPrinter();
-
-  /**
-   * Type of node/service.
-   */
-  private NodeType nodeType;
-  /**
-   * Hostname of the node in which the service is running.
-   */
-  private String hostname;
-
-  /**
-   * List of ports the service listens to.
-   */
-  private Map<ServicePort.Type, Integer> ports;
-
-  /**
-   * Default constructor for JSON deserialization.
-   */
-  public ServiceInfo() {}
-
-  /**
-   * Constructs the ServiceInfo for the {@code nodeType}.
-   * @param nodeType type of node/service
-   * @param hostname hostname of the service
-   * @param portList list of ports the service listens to
-   */
-  private ServiceInfo(
-      NodeType nodeType, String hostname, List<ServicePort> portList) {
-    Preconditions.checkNotNull(nodeType);
-    Preconditions.checkNotNull(hostname);
-    this.nodeType = nodeType;
-    this.hostname = hostname;
-    this.ports = new HashMap<>();
-    for (ServicePort port : portList) {
-      ports.put(port.getType(), port.getValue());
-    }
-  }
-
-  /**
-   * Returns the type of node/service.
-   * @return node type
-   */
-  public NodeType getNodeType() {
-    return nodeType;
-  }
-
-  /**
-   * Returns the hostname of the service.
-   * @return hostname
-   */
-  public String getHostname() {
-    return hostname;
-  }
-
-  /**
-   * Returns ServicePort.Type to port mappings.
-   * @return ports
-   */
-  public Map<ServicePort.Type, Integer> getPorts() {
-    return ports;
-  }
-
-  /**
-   * Returns the port for given type, null if the service doesn't support
-   * the type.
-   *
-   * @param type the type of port.
-   *             ex: RPC, HTTP, HTTPS, etc..
-   */
-  @JsonIgnore
-  public int getPort(ServicePort.Type type) {
-    return ports.get(type);
-  }
-
-  /**
-   * Converts {@link ServiceInfo} to KeySpaceManagerProtocolProtos.ServiceInfo.
-   *
-   * @return KeySpaceManagerProtocolProtos.ServiceInfo
-   */
-  @JsonIgnore
-  public KeySpaceManagerProtocolProtos.ServiceInfo getProtobuf() {
-    KeySpaceManagerProtocolProtos.ServiceInfo.Builder builder =
-        KeySpaceManagerProtocolProtos.ServiceInfo.newBuilder();
-    builder.setNodeType(nodeType)
-        .setHostname(hostname)
-        .addAllServicePorts(
-            ports.entrySet().stream()
-                .map(
-                    entry ->
-                        ServicePort.newBuilder()
-                            .setType(entry.getKey())
-                            .setValue(entry.getValue()).build())
-                .collect(Collectors.toList()));
-    return builder.build();
-  }
-
-  /**
-   * Converts KeySpaceManagerProtocolProtos.ServiceInfo to {@link ServiceInfo}.
-   *
-   * @return {@link ServiceInfo}
-   */
-  @JsonIgnore
-  public static ServiceInfo getFromProtobuf(
-      KeySpaceManagerProtocolProtos.ServiceInfo serviceInfo) {
-    return new ServiceInfo(serviceInfo.getNodeType(),
-        serviceInfo.getHostname(),
-        serviceInfo.getServicePortsList());
-  }
-
-  /**
-   * Returns a JSON string of this object.
-   *
-   * @return String - json string
-   * @throws IOException
-   */
-  public String toJsonString() throws IOException {
-    return WRITER.writeValueAsString(this);
-  }
-
-  /**
-   * Parse a JSON string into ServiceInfo Object.
-   *
-   * @param jsonString Json String
-   * @return BucketInfo
-   * @throws IOException
-   */
-  public static BucketInfo parse(String jsonString) throws IOException {
-    return READER.readValue(jsonString);
-  }
-
-  /**
-   * Creates a new builder to build {@link ServiceInfo}.
-   * @return {@link ServiceInfo.Builder}
-   */
-  public static Builder newBuilder() {
-    return new Builder();
-  }
-
-  /**
-   * Builder used to build/construct {@link ServiceInfo}.
-   */
-  public static class Builder {
-
-    private NodeType node;
-    private String host;
-    private List<ServicePort> portList = new ArrayList<>();
-
-
-    /**
-     * Sets the node/service type.
-     * @param nodeType type of node
-     * @return the builder
-     */
-    public Builder setNodeType(NodeType nodeType) {
-      node = nodeType;
-      return this;
-    }
-
-    /**
-     * Sets the hostname of the service.
-     * @param hostname service hostname
-     * @return the builder
-     */
-    public Builder setHostname(String hostname) {
-      host = hostname;
-      return this;
-    }
-
-    /**
-     * Adds the service port to the service port list.
-     * @param servicePort RPC port
-     * @return the builder
-     */
-    public Builder addServicePort(ServicePort servicePort) {
-      portList.add(servicePort);
-      return this;
-    }
-
-
-    /**
-     * Builds and returns {@link ServiceInfo} with the set values.
-     * @return {@link ServiceInfo}
-     */
-    public ServiceInfo build() {
-      return new ServiceInfo(node, host, portList);
-    }
-  }
-
-}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/061b1685/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/ksm/helpers/VolumeArgs.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/ksm/helpers/VolumeArgs.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/ksm/helpers/VolumeArgs.java
deleted file mode 100644
index 1a3d486..0000000
--- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/ksm/helpers/VolumeArgs.java
+++ /dev/null
@@ -1,140 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.ozone.ksm.helpers;
-
-import com.google.common.base.Preconditions;
-
-import java.util.HashMap;
-import java.util.Map;
-
-/**
- * A class that encapsulates the createVolume Args.
- */
-public final class VolumeArgs {
-  private final String adminName;
-  private final String ownerName;
-  private final String volume;
-  private final long quotaInBytes;
-  private final Map<String, String> extendedAttributes;
-
-  /**
-   * Private constructor, constructed via builder.
-   *
-   * @param adminName - Administrator name.
-   * @param ownerName - Volume owner's name
-   * @param volume - volume name
-   * @param quotaInBytes - Volume Quota in bytes.
-   * @param keyValueMap - keyValue map.
-   */
-  private VolumeArgs(String adminName, String ownerName, String volume,
-      long quotaInBytes, Map<String, String> keyValueMap) {
-    this.adminName = adminName;
-    this.ownerName = ownerName;
-    this.volume = volume;
-    this.quotaInBytes = quotaInBytes;
-    this.extendedAttributes = keyValueMap;
-  }
-
-  /**
-   * Returns the Admin Name.
-   *
-   * @return String.
-   */
-  public String getAdminName() {
-    return adminName;
-  }
-
-  /**
-   * Returns the owner Name.
-   *
-   * @return String
-   */
-  public String getOwnerName() {
-    return ownerName;
-  }
-
-  /**
-   * Returns the volume Name.
-   *
-   * @return String
-   */
-  public String getVolume() {
-    return volume;
-  }
-
-  /**
-   * Returns Quota in Bytes.
-   *
-   * @return long, Quota in bytes.
-   */
-  public long getQuotaInBytes() {
-    return quotaInBytes;
-  }
-
-  public Map<String, String> getExtendedAttributes() {
-    return extendedAttributes;
-  }
-
-  static class Builder {
-    private String adminName;
-    private String ownerName;
-    private String volume;
-    private long quotaInBytes;
-    private Map<String, String> extendedAttributes;
-
-    /**
-     * Constructs a builder.
-     */
-    Builder() {
-      extendedAttributes = new HashMap<>();
-    }
-
-    public void setAdminName(String adminName) {
-      this.adminName = adminName;
-    }
-
-    public void setOwnerName(String ownerName) {
-      this.ownerName = ownerName;
-    }
-
-    public void setVolume(String volume) {
-      this.volume = volume;
-    }
-
-    public void setQuotaInBytes(long quotaInBytes) {
-      this.quotaInBytes = quotaInBytes;
-    }
-
-    public void addMetadata(String key, String value) {
-      extendedAttributes.put(key, value); // overwrite if present.
-    }
-
-    /**
-     * Constructs a CreateVolumeArgument.
-     *
-     * @return CreateVolumeArgs.
-     */
-    public VolumeArgs build() {
-      Preconditions.checkNotNull(adminName);
-      Preconditions.checkNotNull(ownerName);
-      Preconditions.checkNotNull(volume);
-      return new VolumeArgs(adminName, ownerName, volume, quotaInBytes,
-          extendedAttributes);
-    }
-  }
-}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/061b1685/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/ksm/helpers/package-info.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/ksm/helpers/package-info.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/ksm/helpers/package-info.java
deleted file mode 100644
index ce627a5..0000000
--- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/ksm/helpers/package-info.java
+++ /dev/null
@@ -1,18 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.ozone.ksm.helpers;
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hadoop/blob/061b1685/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/ksm/package-info.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/ksm/package-info.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/ksm/package-info.java
deleted file mode 100644
index 7698ee1..0000000
--- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/ksm/package-info.java
+++ /dev/null
@@ -1,21 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.ozone.ksm;
-/**
- This package contains client side protocol library to communicate with KSM.
- */
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hadoop/blob/061b1685/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/ksm/protocol/KeySpaceManagerProtocol.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/ksm/protocol/KeySpaceManagerProtocol.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/ksm/protocol/KeySpaceManagerProtocol.java
deleted file mode 100644
index 54862d3..0000000
--- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/ksm/protocol/KeySpaceManagerProtocol.java
+++ /dev/null
@@ -1,252 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.ozone.ksm.protocol;
-
-import org.apache.hadoop.ozone.ksm.helpers.KsmBucketArgs;
-import org.apache.hadoop.ozone.ksm.helpers.KsmBucketInfo;
-import org.apache.hadoop.ozone.ksm.helpers.KsmKeyArgs;
-import org.apache.hadoop.ozone.ksm.helpers.KsmKeyInfo;
-import org.apache.hadoop.ozone.ksm.helpers.KsmKeyLocationInfo;
-import org.apache.hadoop.ozone.ksm.helpers.KsmVolumeArgs;
-import org.apache.hadoop.ozone.ksm.helpers.OpenKeySession;
-import org.apache.hadoop.ozone.ksm.helpers.ServiceInfo;
-import org.apache.hadoop.ozone.protocol.proto
-    .KeySpaceManagerProtocolProtos.OzoneAclInfo;
-import java.io.IOException;
-import java.util.List;
-
-/**
- * Protocol to talk to KSM.
- */
-public interface KeySpaceManagerProtocol {
-
-  /**
-   * Creates a volume.
-   * @param args - Arguments to create Volume.
-   * @throws IOException
-   */
-  void createVolume(KsmVolumeArgs args) throws IOException;
-
-  /**
-   * Changes the owner of a volume.
-   * @param volume  - Name of the volume.
-   * @param owner - Name of the owner.
-   * @throws IOException
-   */
-  void setOwner(String volume, String owner) throws IOException;
-
-  /**
-   * Changes the Quota on a volume.
-   * @param volume - Name of the volume.
-   * @param quota - Quota in bytes.
-   * @throws IOException
-   */
-  void setQuota(String volume, long quota) throws IOException;
-
-  /**
-   * Checks if the specified user can access this volume.
-   * @param volume - volume
-   * @param userAcl - user acls which needs to be checked for access
-   * @return true if the user has required access for the volume,
-   *         false otherwise
-   * @throws IOException
-   */
-  boolean checkVolumeAccess(String volume, OzoneAclInfo userAcl)
-      throws IOException;
-
-  /**
-   * Gets the volume information.
-   * @param volume - Volume name.
-   * @return VolumeArgs or exception is thrown.
-   * @throws IOException
-   */
-  KsmVolumeArgs getVolumeInfo(String volume) throws IOException;
-
-  /**
-   * Deletes an existing empty volume.
-   * @param volume - Name of the volume.
-   * @throws IOException
-   */
-  void deleteVolume(String volume) throws IOException;
-
-  /**
-   * Lists volume owned by a specific user.
-   * @param userName - user name
-   * @param prefix  - Filter prefix -- Return only entries that match this.
-   * @param prevKey - Previous key -- List starts from the next from the prevkey
-   * @param maxKeys - Max number of keys to return.
-   * @return List of Volumes.
-   * @throws IOException
-   */
-  List<KsmVolumeArgs> listVolumeByUser(String userName, String prefix, String
-      prevKey, int maxKeys) throws IOException;
-
-  /**
-   * Lists volume all volumes in the cluster.
-   * @param prefix  - Filter prefix -- Return only entries that match this.
-   * @param prevKey - Previous key -- List starts from the next from the prevkey
-   * @param maxKeys - Max number of keys to return.
-   * @return List of Volumes.
-   * @throws IOException
-   */
-  List<KsmVolumeArgs> listAllVolumes(String prefix, String
-      prevKey, int maxKeys) throws IOException;
-
-  /**
-   * Creates a bucket.
-   * @param bucketInfo - BucketInfo to create Bucket.
-   * @throws IOException
-   */
-  void createBucket(KsmBucketInfo bucketInfo) throws IOException;
-
-  /**
-   * Gets the bucket information.
-   * @param volumeName - Volume name.
-   * @param bucketName - Bucket name.
-   * @return KsmBucketInfo or exception is thrown.
-   * @throws IOException
-   */
-  KsmBucketInfo getBucketInfo(String volumeName, String bucketName)
-      throws IOException;
-
-  /**
-   * Sets bucket property from args.
-   * @param args - BucketArgs.
-   * @throws IOException
-   */
-  void setBucketProperty(KsmBucketArgs args) throws IOException;
-
-  /**
-   * Open the given key and return an open key session.
-   *
-   * @param args the args of the key.
-   * @return OpenKeySession instance that client uses to talk to container.
-   * @throws IOException
-   */
-  OpenKeySession openKey(KsmKeyArgs args) throws IOException;
-
-  /**
-   * Commit a key. This will make the change from the client visible. The client
-   * is identified by the clientID.
-   *
-   * @param args the key to commit
-   * @param clientID the client identification
-   * @throws IOException
-   */
-  void commitKey(KsmKeyArgs args, int clientID) throws IOException;
-
-  /**
-   * Allocate a new block, it is assumed that the client is having an open key
-   * session going on. This block will be appended to this open key session.
-   *
-   * @param args the key to append
-   * @param clientID the client identification
-   * @return an allocated block
-   * @throws IOException
-   */
-  KsmKeyLocationInfo allocateBlock(KsmKeyArgs args, int clientID)
-      throws IOException;
-
-  /**
-   * Look up for the container of an existing key.
-   *
-   * @param args the args of the key.
-   * @return KsmKeyInfo instance that client uses to talk to container.
-   * @throws IOException
-   */
-  KsmKeyInfo lookupKey(KsmKeyArgs args) throws IOException;
-
-  /**
-   * Rename an existing key within a bucket
-   * @param args the args of the key.
-   * @param toKeyName New name to be used for the Key
-   */
-  void renameKey(KsmKeyArgs args, String toKeyName) throws IOException;
-
-  /**
-   * Deletes an existing key.
-   *
-   * @param args the args of the key.
-   * @throws IOException
-   */
-  void deleteKey(KsmKeyArgs args) throws IOException;
-
-  /**
-   * Deletes an existing empty bucket from volume.
-   * @param volume - Name of the volume.
-   * @param bucket - Name of the bucket.
-   * @throws IOException
-   */
-  void deleteBucket(String volume, String bucket) throws IOException;
-
-  /**
-   * Returns a list of buckets represented by {@link KsmBucketInfo}
-   * in the given volume. Argument volumeName is required, others
-   * are optional.
-   *
-   * @param volumeName
-   *   the name of the volume.
-   * @param startBucketName
-   *   the start bucket name, only the buckets whose name is
-   *   after this value will be included in the result.
-   * @param bucketPrefix
-   *   bucket name prefix, only the buckets whose name has
-   *   this prefix will be included in the result.
-   * @param maxNumOfBuckets
-   *   the maximum number of buckets to return. It ensures
-   *   the size of the result will not exceed this limit.
-   * @return a list of buckets.
-   * @throws IOException
-   */
-  List<KsmBucketInfo> listBuckets(String volumeName,
-      String startBucketName, String bucketPrefix, int maxNumOfBuckets)
-      throws IOException;
-
-  /**
-   * Returns a list of keys represented by {@link KsmKeyInfo}
-   * in the given bucket. Argument volumeName, bucketName is required,
-   * others are optional.
-   *
-   * @param volumeName
-   *   the name of the volume.
-   * @param bucketName
-   *   the name of the bucket.
-   * @param startKeyName
-   *   the start key name, only the keys whose name is
-   *   after this value will be included in the result.
-   * @param keyPrefix
-   *   key name prefix, only the keys whose name has
-   *   this prefix will be included in the result.
-   * @param maxKeys
-   *   the maximum number of keys to return. It ensures
-   *   the size of the result will not exceed this limit.
-   * @return a list of keys.
-   * @throws IOException
-   */
-  List<KsmKeyInfo> listKeys(String volumeName,
-      String bucketName, String startKeyName, String keyPrefix, int maxKeys)
-      throws IOException;
-
-  /**
-   * Returns list of Ozone services with its configuration details.
-   *
-   * @return list of Ozone services
-   * @throws IOException
-   */
-  List<ServiceInfo> getServiceList() throws IOException;
-}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/061b1685/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/ksm/protocol/package-info.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/ksm/protocol/package-info.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/ksm/protocol/package-info.java
deleted file mode 100644
index f77e5fd..0000000
--- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/ksm/protocol/package-info.java
+++ /dev/null
@@ -1,19 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.ozone.ksm.protocol;
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hadoop/blob/061b1685/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/ksm/protocolPB/KeySpaceManagerProtocolClientSideTranslatorPB.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/ksm/protocolPB/KeySpaceManagerProtocolClientSideTranslatorPB.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/ksm/protocolPB/KeySpaceManagerProtocolClientSideTranslatorPB.java
deleted file mode 100644
index 0f38169..0000000
--- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/ksm/protocolPB/KeySpaceManagerProtocolClientSideTranslatorPB.java
+++ /dev/null
@@ -1,769 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.ozone.ksm.protocolPB;
-
-import com.google.common.base.Strings;
-import com.google.common.collect.Lists;
-import com.google.protobuf.RpcController;
-import com.google.protobuf.ServiceException;
-import org.apache.hadoop.classification.InterfaceAudience;
-import org.apache.hadoop.ipc.ProtobufHelper;
-import org.apache.hadoop.ipc.ProtocolTranslator;
-import org.apache.hadoop.ozone.ksm.helpers.KsmBucketArgs;
-import org.apache.hadoop.ozone.ksm.helpers.KsmBucketInfo;
-import org.apache.hadoop.ozone.ksm.helpers.KsmKeyArgs;
-import org.apache.hadoop.ozone.ksm.helpers.KsmKeyInfo;
-import org.apache.hadoop.ozone.ksm.helpers.KsmKeyLocationInfo;
-import org.apache.hadoop.ozone.ksm.helpers.KsmVolumeArgs;
-import org.apache.hadoop.ozone.ksm.helpers.OpenKeySession;
-import org.apache.hadoop.ozone.ksm.helpers.ServiceInfo;
-import org.apache.hadoop.ozone.ksm.protocol.KeySpaceManagerProtocol;
-import org.apache.hadoop.ozone.protocol.proto
-    .KeySpaceManagerProtocolProtos.AllocateBlockRequest;
-import org.apache.hadoop.ozone.protocol.proto
-    .KeySpaceManagerProtocolProtos.AllocateBlockResponse;
-import org.apache.hadoop.ozone.protocol.proto
-    .KeySpaceManagerProtocolProtos.CommitKeyRequest;
-import org.apache.hadoop.ozone.protocol.proto
-    .KeySpaceManagerProtocolProtos.CommitKeyResponse;
-import org.apache.hadoop.ozone.protocol.proto
-    .KeySpaceManagerProtocolProtos.BucketArgs;
-import org.apache.hadoop.ozone.protocol.proto
-    .KeySpaceManagerProtocolProtos.BucketInfo;
-import org.apache.hadoop.ozone.protocol.proto
-    .KeySpaceManagerProtocolProtos.CreateBucketRequest;
-import org.apache.hadoop.ozone.protocol.proto
-    .KeySpaceManagerProtocolProtos.CreateBucketResponse;
-import org.apache.hadoop.ozone.protocol.proto
-    .KeySpaceManagerProtocolProtos.InfoBucketRequest;
-import org.apache.hadoop.ozone.protocol.proto
-    .KeySpaceManagerProtocolProtos.InfoBucketResponse;
-import org.apache.hadoop.ozone.protocol.proto
-    .KeySpaceManagerProtocolProtos.SetBucketPropertyRequest;
-import org.apache.hadoop.ozone.protocol.proto
-    .KeySpaceManagerProtocolProtos.SetBucketPropertyResponse;
-import org.apache.hadoop.ozone.protocol.proto
-    .KeySpaceManagerProtocolProtos.DeleteBucketRequest;
-import org.apache.hadoop.ozone.protocol.proto
-    .KeySpaceManagerProtocolProtos.DeleteBucketResponse;
-import org.apache.hadoop.ozone.protocol.proto
-    .KeySpaceManagerProtocolProtos.CreateVolumeRequest;
-import org.apache.hadoop.ozone.protocol.proto
-    .KeySpaceManagerProtocolProtos.CreateVolumeResponse;
-import org.apache.hadoop.ozone.protocol.proto
-    .KeySpaceManagerProtocolProtos.LocateKeyRequest;
-import org.apache.hadoop.ozone.protocol.proto
-    .KeySpaceManagerProtocolProtos.LocateKeyResponse;
-import org.apache.hadoop.ozone.protocol.proto
-    .KeySpaceManagerProtocolProtos.RenameKeyRequest;
-import org.apache.hadoop.ozone.protocol.proto
-    .KeySpaceManagerProtocolProtos.RenameKeyResponse;
-import org.apache.hadoop.ozone.protocol.proto
-    .KeySpaceManagerProtocolProtos.KeyArgs;
-import org.apache.hadoop.ozone.protocol.proto
-    .KeySpaceManagerProtocolProtos.SetVolumePropertyRequest;
-import org.apache.hadoop.ozone.protocol.proto
-    .KeySpaceManagerProtocolProtos.SetVolumePropertyResponse;
-import org.apache.hadoop.ozone.protocol.proto
-    .KeySpaceManagerProtocolProtos.DeleteVolumeRequest;
-import org.apache.hadoop.ozone.protocol.proto
-    .KeySpaceManagerProtocolProtos.DeleteVolumeResponse;
-import org.apache.hadoop.ozone.protocol.proto
-    .KeySpaceManagerProtocolProtos.InfoVolumeRequest;
-import org.apache.hadoop.ozone.protocol.proto
-    .KeySpaceManagerProtocolProtos.InfoVolumeResponse;
-import org.apache.hadoop.ozone.protocol.proto
-    .KeySpaceManagerProtocolProtos.CheckVolumeAccessRequest;
-import org.apache.hadoop.ozone.protocol.proto
-    .KeySpaceManagerProtocolProtos.CheckVolumeAccessResponse;
-import org.apache.hadoop.ozone.protocol.proto
-    .KeySpaceManagerProtocolProtos.ListBucketsRequest;
-import org.apache.hadoop.ozone.protocol.proto
-    .KeySpaceManagerProtocolProtos.ListBucketsResponse;
-import org.apache.hadoop.ozone.protocol.proto
-    .KeySpaceManagerProtocolProtos.ListKeysRequest;
-import org.apache.hadoop.ozone.protocol.proto
-    .KeySpaceManagerProtocolProtos.ListKeysResponse;
-import org.apache.hadoop.ozone.protocol.proto
-    .KeySpaceManagerProtocolProtos.VolumeInfo;
-import org.apache.hadoop.ozone.protocol.proto
-    .KeySpaceManagerProtocolProtos.Status;
-import org.apache.hadoop.ozone.protocol.proto
-    .KeySpaceManagerProtocolProtos.OzoneAclInfo;
-import org.apache.hadoop.ozone.protocol.proto
-    .KeySpaceManagerProtocolProtos.ListVolumeRequest;
-import org.apache.hadoop.ozone.protocol.proto
-    .KeySpaceManagerProtocolProtos.ListVolumeResponse;
-import org.apache.hadoop.ozone.protocol.proto
-    .KeySpaceManagerProtocolProtos.ServiceListRequest;
-import org.apache.hadoop.ozone.protocol.proto
-    .KeySpaceManagerProtocolProtos.ServiceListResponse;
-
-import java.io.Closeable;
-import java.io.IOException;
-import java.util.List;
-import java.util.ArrayList;
-import java.util.stream.Collectors;
-
-/**
- *  The client side implementation of KeySpaceManagerProtocol.
- */
-
-@InterfaceAudience.Private
-public final class KeySpaceManagerProtocolClientSideTranslatorPB
-    implements KeySpaceManagerProtocol, ProtocolTranslator, Closeable {
-
-  /**
-   * RpcController is not used and hence is set to null.
-   */
-  private static final RpcController NULL_RPC_CONTROLLER = null;
-
-  private final KeySpaceManagerProtocolPB rpcProxy;
-
-  /**
-   * Constructor for KeySpaceManger Client.
-   * @param rpcProxy
-   */
-  public KeySpaceManagerProtocolClientSideTranslatorPB(
-      KeySpaceManagerProtocolPB rpcProxy) {
-    this.rpcProxy = rpcProxy;
-  }
-
-  /**
-   * Closes this stream and releases any system resources associated
-   * with it. If the stream is already closed then invoking this
-   * method has no effect.
-   * <p>
-   * <p> As noted in {@link AutoCloseable#close()}, cases where the
-   * close may fail require careful attention. It is strongly advised
-   * to relinquish the underlying resources and to internally
-   * <em>mark</em> the {@code Closeable} as closed, prior to throwing
-   * the {@code IOException}.
-   *
-   * @throws IOException if an I/O error occurs
-   */
-  @Override
-  public void close() throws IOException {
-
-  }
-
-  /**
-   * Creates a volume.
-   *
-   * @param args - Arguments to create Volume.
-   * @throws IOException
-   */
-  @Override
-  public void createVolume(KsmVolumeArgs args) throws IOException {
-    CreateVolumeRequest.Builder req =
-        CreateVolumeRequest.newBuilder();
-    VolumeInfo volumeInfo = args.getProtobuf();
-    req.setVolumeInfo(volumeInfo);
-
-    final CreateVolumeResponse resp;
-    try {
-      resp = rpcProxy.createVolume(NULL_RPC_CONTROLLER,
-          req.build());
-    } catch (ServiceException e) {
-      throw ProtobufHelper.getRemoteException(e);
-    }
-
-    if (resp.getStatus() != Status.OK) {
-      throw new
-          IOException("Volume creation failed, error:" + resp.getStatus());
-    }
-  }
-
-  /**
-   * Changes the owner of a volume.
-   *
-   * @param volume - Name of the volume.
-   * @param owner - Name of the owner.
-   * @throws IOException
-   */
-  @Override
-  public void setOwner(String volume, String owner) throws IOException {
-    SetVolumePropertyRequest.Builder req =
-        SetVolumePropertyRequest.newBuilder();
-    req.setVolumeName(volume).setOwnerName(owner);
-    final SetVolumePropertyResponse resp;
-    try {
-      resp = rpcProxy.setVolumeProperty(NULL_RPC_CONTROLLER, req.build());
-    } catch (ServiceException e) {
-      throw ProtobufHelper.getRemoteException(e);
-    }
-    if (resp.getStatus() != Status.OK) {
-      throw new
-          IOException("Volume owner change failed, error:" + resp.getStatus());
-    }
-  }
-
-  /**
-   * Changes the Quota on a volume.
-   *
-   * @param volume - Name of the volume.
-   * @param quota - Quota in bytes.
-   * @throws IOException
-   */
-  @Override
-  public void setQuota(String volume, long quota) throws IOException {
-    SetVolumePropertyRequest.Builder req =
-        SetVolumePropertyRequest.newBuilder();
-    req.setVolumeName(volume).setQuotaInBytes(quota);
-    final SetVolumePropertyResponse resp;
-    try {
-      resp = rpcProxy.setVolumeProperty(NULL_RPC_CONTROLLER, req.build());
-    } catch (ServiceException e) {
-      throw ProtobufHelper.getRemoteException(e);
-    }
-    if (resp.getStatus() != Status.OK) {
-      throw new
-          IOException("Volume quota change failed, error:" + resp.getStatus());
-    }
-  }
-
-  /**
-   * Checks if the specified user can access this volume.
-   *
-   * @param volume - volume
-   * @param userAcl - user acls which needs to be checked for access
-   * @return true if the user has required access for the volume,
-   *         false otherwise
-   * @throws IOException
-   */
-  @Override
-  public boolean checkVolumeAccess(String volume, OzoneAclInfo userAcl) throws
-      IOException {
-    CheckVolumeAccessRequest.Builder req =
-        CheckVolumeAccessRequest.newBuilder();
-    req.setVolumeName(volume).setUserAcl(userAcl);
-    final CheckVolumeAccessResponse resp;
-    try {
-      resp = rpcProxy.checkVolumeAccess(NULL_RPC_CONTROLLER, req.build());
-    } catch (ServiceException e) {
-      throw ProtobufHelper.getRemoteException(e);
-    }
-
-    if (resp.getStatus() == Status.ACCESS_DENIED) {
-      return false;
-    } else if (resp.getStatus() == Status.OK) {
-      return true;
-    } else {
-      throw new
-          IOException("Check Volume Access failed, error:" + resp.getStatus());
-    }
-  }
-
-  /**
-   * Gets the volume information.
-   *
-   * @param volume - Volume name.
-   * @return KsmVolumeArgs or exception is thrown.
-   * @throws IOException
-   */
-  @Override
-  public KsmVolumeArgs getVolumeInfo(String volume) throws IOException {
-    InfoVolumeRequest.Builder req = InfoVolumeRequest.newBuilder();
-    req.setVolumeName(volume);
-    final InfoVolumeResponse resp;
-    try {
-      resp = rpcProxy.infoVolume(NULL_RPC_CONTROLLER, req.build());
-    } catch (ServiceException e) {
-      throw ProtobufHelper.getRemoteException(e);
-    }
-    if (resp.getStatus() != Status.OK) {
-      throw new
-          IOException("Info Volume failed, error:" + resp.getStatus());
-    }
-    return KsmVolumeArgs.getFromProtobuf(resp.getVolumeInfo());
-  }
-
-  /**
-   * Deletes an existing empty volume.
-   *
-   * @param volume - Name of the volume.
-   * @throws IOException
-   */
-  @Override
-  public void deleteVolume(String volume) throws IOException {
-    DeleteVolumeRequest.Builder req = DeleteVolumeRequest.newBuilder();
-    req.setVolumeName(volume);
-    final DeleteVolumeResponse resp;
-    try {
-      resp = rpcProxy.deleteVolume(NULL_RPC_CONTROLLER, req.build());
-    } catch (ServiceException e) {
-      throw ProtobufHelper.getRemoteException(e);
-    }
-    if (resp.getStatus() != Status.OK) {
-      throw new
-          IOException("Delete Volume failed, error:" + resp.getStatus());
-    }
-  }
-
-  /**
-   * Lists volume owned by a specific user.
-   *
-   * @param userName - user name
-   * @param prefix - Filter prefix -- Return only entries that match this.
-   * @param prevKey - Previous key -- List starts from the next from the
-   * prevkey
-   * @param maxKeys - Max number of keys to return.
-   * @return List of Volumes.
-   * @throws IOException
-   */
-  @Override
-  public List<KsmVolumeArgs> listVolumeByUser(String userName, String prefix,
-                                              String prevKey, int maxKeys)
-      throws IOException {
-    ListVolumeRequest.Builder builder = ListVolumeRequest.newBuilder();
-    if (!Strings.isNullOrEmpty(prefix)) {
-      builder.setPrefix(prefix);
-    }
-    if (!Strings.isNullOrEmpty(prevKey)) {
-      builder.setPrevKey(prevKey);
-    }
-    builder.setMaxKeys(maxKeys);
-    builder.setUserName(userName);
-    builder.setScope(ListVolumeRequest.Scope.VOLUMES_BY_USER);
-    return listVolume(builder.build());
-  }
-
-  /**
-   * Lists volume all volumes in the cluster.
-   *
-   * @param prefix - Filter prefix -- Return only entries that match this.
-   * @param prevKey - Previous key -- List starts from the next from the
-   * prevkey
-   * @param maxKeys - Max number of keys to return.
-   * @return List of Volumes.
-   * @throws IOException
-   */
-  @Override
-  public List<KsmVolumeArgs> listAllVolumes(String prefix, String prevKey,
-      int maxKeys) throws IOException {
-    ListVolumeRequest.Builder builder = ListVolumeRequest.newBuilder();
-    if (!Strings.isNullOrEmpty(prefix)) {
-      builder.setPrefix(prefix);
-    }
-    if (!Strings.isNullOrEmpty(prevKey)) {
-      builder.setPrevKey(prevKey);
-    }
-    builder.setMaxKeys(maxKeys);
-    builder.setScope(ListVolumeRequest.Scope.VOLUMES_BY_CLUSTER);
-    return listVolume(builder.build());
-  }
-
-  private List<KsmVolumeArgs> listVolume(ListVolumeRequest request)
-      throws IOException {
-    final ListVolumeResponse resp;
-    try {
-      resp = rpcProxy.listVolumes(NULL_RPC_CONTROLLER, request);
-    } catch (ServiceException e) {
-      throw ProtobufHelper.getRemoteException(e);
-    }
-
-    if (resp.getStatus() != Status.OK) {
-      throw new IOException("List volume failed, error: "
-          + resp.getStatus());
-    }
-
-    List<KsmVolumeArgs> result = Lists.newArrayList();
-    for (VolumeInfo volInfo : resp.getVolumeInfoList()) {
-      KsmVolumeArgs volArgs = KsmVolumeArgs.getFromProtobuf(volInfo);
-      result.add(volArgs);
-    }
-
-    return resp.getVolumeInfoList().stream()
-        .map(item -> KsmVolumeArgs.getFromProtobuf(item))
-        .collect(Collectors.toList());
-  }
-
-  /**
-   * Creates a bucket.
-   *
-   * @param bucketInfo - BucketInfo to create bucket.
-   * @throws IOException
-   */
-  @Override
-  public void createBucket(KsmBucketInfo bucketInfo) throws IOException {
-    CreateBucketRequest.Builder req =
-        CreateBucketRequest.newBuilder();
-    BucketInfo bucketInfoProtobuf = bucketInfo.getProtobuf();
-    req.setBucketInfo(bucketInfoProtobuf);
-
-    final CreateBucketResponse resp;
-    try {
-      resp = rpcProxy.createBucket(NULL_RPC_CONTROLLER,
-          req.build());
-    } catch (ServiceException e) {
-      throw ProtobufHelper.getRemoteException(e);
-    }
-    if (resp.getStatus() != Status.OK) {
-      throw new IOException("Bucket creation failed, error: "
-          + resp.getStatus());
-    }
-  }
-
-  /**
-   * Gets the bucket information.
-   *
-   * @param volume - Volume name.
-   * @param bucket - Bucket name.
-   * @return KsmBucketInfo or exception is thrown.
-   * @throws IOException
-   */
-  @Override
-  public KsmBucketInfo getBucketInfo(String volume, String bucket)
-      throws IOException {
-    InfoBucketRequest.Builder req =
-        InfoBucketRequest.newBuilder();
-    req.setVolumeName(volume);
-    req.setBucketName(bucket);
-
-    final InfoBucketResponse resp;
-    try {
-      resp = rpcProxy.infoBucket(NULL_RPC_CONTROLLER,
-          req.build());
-    } catch (ServiceException e) {
-      throw ProtobufHelper.getRemoteException(e);
-    }
-    if (resp.getStatus() == Status.OK) {
-      return KsmBucketInfo.getFromProtobuf(resp.getBucketInfo());
-    } else {
-      throw new IOException("Info Bucket failed, error: "
-          + resp.getStatus());
-    }
-  }
-
-  /**
-   * Sets bucket property from args.
-   * @param args - BucketArgs.
-   * @throws IOException
-   */
-  @Override
-  public void setBucketProperty(KsmBucketArgs args)
-      throws IOException {
-    SetBucketPropertyRequest.Builder req =
-        SetBucketPropertyRequest.newBuilder();
-    BucketArgs bucketArgs = args.getProtobuf();
-    req.setBucketArgs(bucketArgs);
-    final SetBucketPropertyResponse resp;
-    try {
-      resp = rpcProxy.setBucketProperty(NULL_RPC_CONTROLLER,
-          req.build());
-    } catch (ServiceException e) {
-      throw ProtobufHelper.getRemoteException(e);
-    }
-    if (resp.getStatus() != Status.OK) {
-      throw new IOException("Setting bucket property failed, error: "
-          + resp.getStatus());
-    }
-  }
-
-  /**
-   * List buckets in a volume.
-   *
-   * @param volumeName
-   * @param startKey
-   * @param prefix
-   * @param count
-   * @return
-   * @throws IOException
-   */
-  @Override
-  public List<KsmBucketInfo> listBuckets(String volumeName,
-      String startKey, String prefix, int count) throws IOException {
-    List<KsmBucketInfo> buckets = new ArrayList<>();
-    ListBucketsRequest.Builder reqBuilder = ListBucketsRequest.newBuilder();
-    reqBuilder.setVolumeName(volumeName);
-    reqBuilder.setCount(count);
-    if (startKey != null) {
-      reqBuilder.setStartKey(startKey);
-    }
-    if (prefix != null) {
-      reqBuilder.setPrefix(prefix);
-    }
-    ListBucketsRequest request = reqBuilder.build();
-    final ListBucketsResponse resp;
-    try {
-      resp = rpcProxy.listBuckets(NULL_RPC_CONTROLLER, request);
-    } catch (ServiceException e) {
-      throw ProtobufHelper.getRemoteException(e);
-    }
-
-    if (resp.getStatus() == Status.OK) {
-      buckets.addAll(
-          resp.getBucketInfoList().stream()
-              .map(KsmBucketInfo::getFromProtobuf)
-              .collect(Collectors.toList()));
-      return buckets;
-    } else {
-      throw new IOException("List Buckets failed, error: "
-          + resp.getStatus());
-    }
-  }
-
-  /**
-   * Create a new open session of the key, then use the returned meta info to
-   * talk to data node to actually write the key.
-   * @param args the args for the key to be allocated
-   * @return a handler to the key, returned client
-   * @throws IOException
-   */
-  @Override
-  public OpenKeySession openKey(KsmKeyArgs args) throws IOException {
-    LocateKeyRequest.Builder req = LocateKeyRequest.newBuilder();
-    KeyArgs.Builder keyArgs = KeyArgs.newBuilder()
-        .setVolumeName(args.getVolumeName())
-        .setBucketName(args.getBucketName())
-        .setFactor(args.getFactor())
-        .setType(args.getType())
-        .setKeyName(args.getKeyName());
-    if (args.getDataSize() > 0) {
-      keyArgs.setDataSize(args.getDataSize());
-    }
-    req.setKeyArgs(keyArgs.build());
-
-    final LocateKeyResponse resp;
-    try {
-      resp = rpcProxy.createKey(NULL_RPC_CONTROLLER, req.build());
-    } catch (ServiceException e) {
-      throw ProtobufHelper.getRemoteException(e);
-    }
-    if (resp.getStatus() != Status.OK) {
-      throw new IOException("Create key failed, error:" + resp.getStatus());
-    }
-    return new OpenKeySession(resp.getID(),
-        KsmKeyInfo.getFromProtobuf(resp.getKeyInfo()), resp.getOpenVersion());
-  }
-
-  @Override
-  public KsmKeyLocationInfo allocateBlock(KsmKeyArgs args, int clientID)
-      throws IOException {
-    AllocateBlockRequest.Builder req = AllocateBlockRequest.newBuilder();
-    KeyArgs keyArgs = KeyArgs.newBuilder()
-        .setVolumeName(args.getVolumeName())
-        .setBucketName(args.getBucketName())
-        .setKeyName(args.getKeyName())
-        .setDataSize(args.getDataSize()).build();
-    req.setKeyArgs(keyArgs);
-    req.setClientID(clientID);
-
-    final AllocateBlockResponse resp;
-    try {
-      resp = rpcProxy.allocateBlock(NULL_RPC_CONTROLLER, req.build());
-    } catch (ServiceException e) {
-      throw ProtobufHelper.getRemoteException(e);
-    }
-    if (resp.getStatus() != Status.OK) {
-      throw new IOException("Allocate block failed, error:" +
-          resp.getStatus());
-    }
-    return KsmKeyLocationInfo.getFromProtobuf(resp.getKeyLocation());
-  }
-
-  @Override
-  public void commitKey(KsmKeyArgs args, int clientID)
-      throws IOException {
-    CommitKeyRequest.Builder req = CommitKeyRequest.newBuilder();
-    KeyArgs keyArgs = KeyArgs.newBuilder()
-        .setVolumeName(args.getVolumeName())
-        .setBucketName(args.getBucketName())
-        .setKeyName(args.getKeyName())
-        .setDataSize(args.getDataSize()).build();
-    req.setKeyArgs(keyArgs);
-    req.setClientID(clientID);
-
-    final CommitKeyResponse resp;
-    try {
-      resp = rpcProxy.commitKey(NULL_RPC_CONTROLLER, req.build());
-    } catch (ServiceException e) {
-      throw ProtobufHelper.getRemoteException(e);
-    }
-    if (resp.getStatus() != Status.OK) {
-      throw new IOException("Commit key failed, error:" +
-          resp.getStatus());
-    }
-  }
-
-
-  @Override
-  public KsmKeyInfo lookupKey(KsmKeyArgs args) throws IOException {
-    LocateKeyRequest.Builder req = LocateKeyRequest.newBuilder();
-    KeyArgs keyArgs = KeyArgs.newBuilder()
-        .setVolumeName(args.getVolumeName())
-        .setBucketName(args.getBucketName())
-        .setKeyName(args.getKeyName())
-        .setDataSize(args.getDataSize()).build();
-    req.setKeyArgs(keyArgs);
-
-    final LocateKeyResponse resp;
-    try {
-      resp = rpcProxy.lookupKey(NULL_RPC_CONTROLLER, req.build());
-    } catch (ServiceException e) {
-      throw ProtobufHelper.getRemoteException(e);
-    }
-    if (resp.getStatus() != Status.OK) {
-      throw new IOException("Lookup key failed, error:" +
-          resp.getStatus());
-    }
-    return KsmKeyInfo.getFromProtobuf(resp.getKeyInfo());
-  }
-
-  @Override
-  public void renameKey(KsmKeyArgs args, String toKeyName) throws IOException {
-    RenameKeyRequest.Builder req = RenameKeyRequest.newBuilder();
-    KeyArgs keyArgs = KeyArgs.newBuilder()
-        .setVolumeName(args.getVolumeName())
-        .setBucketName(args.getBucketName())
-        .setKeyName(args.getKeyName())
-        .setDataSize(args.getDataSize()).build();
-    req.setKeyArgs(keyArgs);
-    req.setToKeyName(toKeyName);
-
-    final RenameKeyResponse resp;
-    try {
-      resp = rpcProxy.renameKey(NULL_RPC_CONTROLLER, req.build());
-    } catch (ServiceException e) {
-      throw ProtobufHelper.getRemoteException(e);
-    }
-    if (resp.getStatus() != Status.OK) {
-      throw new IOException("Rename key failed, error:" +
-          resp.getStatus());
-    }
-  }
-
-  /**
-   * Deletes an existing key.
-   *
-   * @param args the args of the key.
-   * @throws IOException
-   */
-  @Override
-  public void deleteKey(KsmKeyArgs args) throws IOException {
-    LocateKeyRequest.Builder req = LocateKeyRequest.newBuilder();
-    KeyArgs keyArgs = KeyArgs.newBuilder()
-        .setVolumeName(args.getVolumeName())
-        .setBucketName(args.getBucketName())
-        .setKeyName(args.getKeyName()).build();
-    req.setKeyArgs(keyArgs);
-
-    final LocateKeyResponse resp;
-    try {
-      resp = rpcProxy.deleteKey(NULL_RPC_CONTROLLER, req.build());
-    } catch (ServiceException e) {
-      throw ProtobufHelper.getRemoteException(e);
-    }
-    if (resp.getStatus() != Status.OK) {
-      throw new IOException("Delete key failed, error:" +
-          resp.getStatus());
-    }
-  }
-
-  /**
-   * Deletes an existing empty bucket from volume.
-   * @param volume - Name of the volume.
-   * @param bucket - Name of the bucket.
-   * @throws IOException
-   */
-  public void deleteBucket(String volume, String bucket) throws IOException {
-    DeleteBucketRequest.Builder req = DeleteBucketRequest.newBuilder();
-    req.setVolumeName(volume);
-    req.setBucketName(bucket);
-    final DeleteBucketResponse resp;
-    try {
-      resp = rpcProxy.deleteBucket(NULL_RPC_CONTROLLER, req.build());
-    } catch (ServiceException e) {
-      throw ProtobufHelper.getRemoteException(e);
-    }
-    if (resp.getStatus() != Status.OK) {
-      throw new
-          IOException("Delete Bucket failed, error:" + resp.getStatus());
-    }
-  }
-
-  /**
-   * List keys in a bucket.
-   */
-  @Override
-  public List<KsmKeyInfo> listKeys(String volumeName, String bucketName,
-      String startKey, String prefix, int maxKeys) throws IOException {
-    List<KsmKeyInfo> keys = new ArrayList<>();
-    ListKeysRequest.Builder reqBuilder = ListKeysRequest.newBuilder();
-    reqBuilder.setVolumeName(volumeName);
-    reqBuilder.setBucketName(bucketName);
-    reqBuilder.setCount(maxKeys);
-
-    if (startKey != null) {
-      reqBuilder.setStartKey(startKey);
-    }
-
-    if (prefix != null) {
-      reqBuilder.setPrefix(prefix);
-    }
-
-    ListKeysRequest request = reqBuilder.build();
-    final ListKeysResponse resp;
-    try {
-      resp = rpcProxy.listKeys(NULL_RPC_CONTROLLER, request);
-    } catch (ServiceException e) {
-      throw ProtobufHelper.getRemoteException(e);
-    }
-
-    if (resp.getStatus() == Status.OK) {
-      keys.addAll(
-          resp.getKeyInfoList().stream()
-              .map(KsmKeyInfo::getFromProtobuf)
-              .collect(Collectors.toList()));
-      return keys;
-    } else {
-      throw new IOException("List Keys failed, error: "
-          + resp.getStatus());
-    }
-  }
-
-  @Override
-  public List<ServiceInfo> getServiceList() throws IOException {
-    ServiceListRequest request = ServiceListRequest.newBuilder().build();
-    final ServiceListResponse resp;
-    try {
-      resp = rpcProxy.getServiceList(NULL_RPC_CONTROLLER, request);
-    } catch (ServiceException e) {
-      throw ProtobufHelper.getRemoteException(e);
-    }
-
-    if (resp.getStatus() == Status.OK) {
-      return resp.getServiceInfoList().stream()
-              .map(ServiceInfo::getFromProtobuf)
-              .collect(Collectors.toList());
-    } else {
-      throw new IOException("Getting service list failed, error: "
-          + resp.getStatus());
-    }
-  }
-
-  /**
-   * Return the proxy object underlying this protocol translator.
-   *
-   * @return the proxy object underlying this protocol translator.
-   */
-  @Override
-  public Object getUnderlyingProxyObject() {
-    return null;
-  }
-}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/061b1685/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/ksm/protocolPB/KeySpaceManagerProtocolPB.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/ksm/protocolPB/KeySpaceManagerProtocolPB.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/ksm/protocolPB/KeySpaceManagerProtocolPB.java
deleted file mode 100644
index 8acca8a..0000000
--- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/ksm/protocolPB/KeySpaceManagerProtocolPB.java
+++ /dev/null
@@ -1,34 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.ozone.ksm.protocolPB;
-
-import org.apache.hadoop.classification.InterfaceAudience;
-import org.apache.hadoop.ipc.ProtocolInfo;
-import org.apache.hadoop.ozone.protocol.proto
-    .KeySpaceManagerProtocolProtos.KeySpaceManagerService;
-
-/**
- * Protocol used to communicate with KSM.
- */
-@ProtocolInfo(protocolName =
-    "org.apache.hadoop.ozone.protocol.KeySpaceManagerProtocol",
-    protocolVersion = 1)
-@InterfaceAudience.Private
-public interface KeySpaceManagerProtocolPB
-    extends KeySpaceManagerService.BlockingInterface {
-}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/061b1685/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/ksm/protocolPB/package-info.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/ksm/protocolPB/package-info.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/ksm/protocolPB/package-info.java
deleted file mode 100644
index 67f9f7b..0000000
--- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/ksm/protocolPB/package-info.java
+++ /dev/null
@@ -1,19 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.ozone.ksm.protocolPB;
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hadoop/blob/061b1685/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/OMConfigKeys.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/OMConfigKeys.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/OMConfigKeys.java
new file mode 100644
index 0000000..b9ca296
--- /dev/null
+++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/OMConfigKeys.java
@@ -0,0 +1,81 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with this
+ * work for additional information regarding copyright ownership.  The ASF
+ * licenses this file to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations under
+ * the License.
+ */
+
+package org.apache.hadoop.ozone.om;
+
+import org.apache.hadoop.ozone.OzoneAcl;
+/**
+ * Ozone Manager Constants.
+ */
+public final class OMConfigKeys {
+  /**
+   * Never constructed.
+   */
+  private OMConfigKeys() {
+  }
+
+
+  public static final String OZONE_OM_HANDLER_COUNT_KEY =
+      "ozone.om.handler.count.key";
+  public static final int OZONE_OM_HANDLER_COUNT_DEFAULT = 20;
+
+  public static final String OZONE_OM_ADDRESS_KEY =
+      "ozone.om.address";
+  public static final String OZONE_OM_BIND_HOST_DEFAULT =
+      "0.0.0.0";
+  public static final int OZONE_OM_PORT_DEFAULT = 9862;
+
+  public static final String OZONE_OM_HTTP_ENABLED_KEY =
+      "ozone.om.http.enabled";
+  public static final String OZONE_OM_HTTP_BIND_HOST_KEY =
+      "ozone.om.http-bind-host";
+  public static final String OZONE_OM_HTTPS_BIND_HOST_KEY =
+      "ozone.om.https-bind-host";
+  public static final String OZONE_OM_HTTP_ADDRESS_KEY =
+      "ozone.om.http-address";
+  public static final String OZONE_OM_HTTPS_ADDRESS_KEY =
+      "ozone.om.https-address";
+  public static final String OZONE_OM_KEYTAB_FILE =
+      "ozone.om.keytab.file";
+  public static final String OZONE_OM_HTTP_BIND_HOST_DEFAULT = "0.0.0.0";
+  public static final int OZONE_OM_HTTP_BIND_PORT_DEFAULT = 9874;
+  public static final int OZONE_OM_HTTPS_BIND_PORT_DEFAULT = 9875;
+
+  // LevelDB cache file uses an off-heap cache in LevelDB of 128 MB.
+  public static final String OZONE_OM_DB_CACHE_SIZE_MB =
+      "ozone.om.db.cache.size.mb";
+  public static final int OZONE_OM_DB_CACHE_SIZE_DEFAULT = 128;
+
+  public static final String OZONE_OM_USER_MAX_VOLUME =
+      "ozone.om.user.max.volume";
+  public static final int OZONE_OM_USER_MAX_VOLUME_DEFAULT = 1024;
+
+  // OM Default user/group permissions
+  public static final String OZONE_OM_USER_RIGHTS =
+      "ozone.om.user.rights";
+  public static final OzoneAcl.OzoneACLRights OZONE_OM_USER_RIGHTS_DEFAULT =
+      OzoneAcl.OzoneACLRights.READ_WRITE;
+
+  public static final String OZONE_OM_GROUP_RIGHTS =
+      "ozone.om.group.rights";
+  public static final OzoneAcl.OzoneACLRights OZONE_OM_GROUP_RIGHTS_DEFAULT =
+      OzoneAcl.OzoneACLRights.READ_WRITE;
+
+  public static final String OZONE_KEY_DELETING_LIMIT_PER_TASK =
+      "ozone.key.deleting.limit.per.task";
+  public static final int OZONE_KEY_DELETING_LIMIT_PER_TASK_DEFAULT = 1000;
+}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/061b1685/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmBucketArgs.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmBucketArgs.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmBucketArgs.java
new file mode 100644
index 0000000..6aabfef
--- /dev/null
+++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmBucketArgs.java
@@ -0,0 +1,233 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.ozone.om.helpers;
+
+import java.util.List;
+import java.util.stream.Collectors;
+
+import com.google.common.base.Preconditions;
+import org.apache.hadoop.fs.StorageType;
+import org.apache.hadoop.hdfs.protocolPB.PBHelperClient;
+import org.apache.hadoop.ozone.OzoneAcl;
+import org.apache.hadoop.ozone.protocol.proto
+    .OzoneManagerProtocolProtos.BucketArgs;
+import org.apache.hadoop.ozone.protocolPB.OMPBHelper;
+
+/**
+ * A class that encapsulates Bucket Arguments.
+ */
+public final class OmBucketArgs {
+  /**
+   * Name of the volume in which the bucket belongs to.
+   */
+  private final String volumeName;
+  /**
+   * Name of the bucket.
+   */
+  private final String bucketName;
+  /**
+   * ACL's that are to be added for the bucket.
+   */
+  private List<OzoneAcl> addAcls;
+  /**
+   * ACL's that are to be removed from the bucket.
+   */
+  private List<OzoneAcl> removeAcls;
+  /**
+   * Bucket Version flag.
+   */
+  private Boolean isVersionEnabled;
+  /**
+   * Type of storage to be used for this bucket.
+   * [RAM_DISK, SSD, DISK, ARCHIVE]
+   */
+  private StorageType storageType;
+
+  /**
+   * Private constructor, constructed via builder.
+   * @param volumeName - Volume name.
+   * @param bucketName - Bucket name.
+   * @param addAcls - ACL's to be added.
+   * @param removeAcls - ACL's to be removed.
+   * @param isVersionEnabled - Bucket version flag.
+   * @param storageType - Storage type to be used.
+   */
+  private OmBucketArgs(String volumeName, String bucketName,
+                       List<OzoneAcl> addAcls, List<OzoneAcl> removeAcls,
+                       Boolean isVersionEnabled, StorageType storageType) {
+    this.volumeName = volumeName;
+    this.bucketName = bucketName;
+    this.addAcls = addAcls;
+    this.removeAcls = removeAcls;
+    this.isVersionEnabled = isVersionEnabled;
+    this.storageType = storageType;
+  }
+
+  /**
+   * Returns the Volume Name.
+   * @return String.
+   */
+  public String getVolumeName() {
+    return volumeName;
+  }
+
+  /**
+   * Returns the Bucket Name.
+   * @return String
+   */
+  public String getBucketName() {
+    return bucketName;
+  }
+
+  /**
+   * Returns the ACL's that are to be added.
+   * @return List<OzoneAclInfo>
+   */
+  public List<OzoneAcl> getAddAcls() {
+    return addAcls;
+  }
+
+  /**
+   * Returns the ACL's that are to be removed.
+   * @return List<OzoneAclInfo>
+   */
+  public List<OzoneAcl> getRemoveAcls() {
+    return removeAcls;
+  }
+
+  /**
+   * Returns true if bucket version is enabled, else false.
+   * @return isVersionEnabled
+   */
+  public Boolean getIsVersionEnabled() {
+    return isVersionEnabled;
+  }
+
+  /**
+   * Returns the type of storage to be used.
+   * @return StorageType
+   */
+  public StorageType getStorageType() {
+    return storageType;
+  }
+
+  /**
+   * Returns new builder class that builds a OmBucketArgs.
+   *
+   * @return Builder
+   */
+  public static Builder newBuilder() {
+    return new Builder();
+  }
+
+  /**
+   * Builder for OmBucketArgs.
+   */
+  public static class Builder {
+    private String volumeName;
+    private String bucketName;
+    private List<OzoneAcl> addAcls;
+    private List<OzoneAcl> removeAcls;
+    private Boolean isVersionEnabled;
+    private StorageType storageType;
+
+    public Builder setVolumeName(String volume) {
+      this.volumeName = volume;
+      return this;
+    }
+
+    public Builder setBucketName(String bucket) {
+      this.bucketName = bucket;
+      return this;
+    }
+
+    public Builder setAddAcls(List<OzoneAcl> acls) {
+      this.addAcls = acls;
+      return this;
+    }
+
+    public Builder setRemoveAcls(List<OzoneAcl> acls) {
+      this.removeAcls = acls;
+      return this;
+    }
+
+    public Builder setIsVersionEnabled(Boolean versionFlag) {
+      this.isVersionEnabled = versionFlag;
+      return this;
+    }
+
+    public Builder setStorageType(StorageType storage) {
+      this.storageType = storage;
+      return this;
+    }
+
+    /**
+     * Constructs the OmBucketArgs.
+     * @return instance of OmBucketArgs.
+     */
+    public OmBucketArgs build() {
+      Preconditions.checkNotNull(volumeName);
+      Preconditions.checkNotNull(bucketName);
+      return new OmBucketArgs(volumeName, bucketName, addAcls,
+          removeAcls, isVersionEnabled, storageType);
+    }
+  }
+
+  /**
+   * Creates BucketArgs protobuf from OmBucketArgs.
+   */
+  public BucketArgs getProtobuf() {
+    BucketArgs.Builder builder = BucketArgs.newBuilder();
+    builder.setVolumeName(volumeName)
+        .setBucketName(bucketName);
+    if(addAcls != null && !addAcls.isEmpty()) {
+      builder.addAllAddAcls(addAcls.stream().map(
+          OMPBHelper::convertOzoneAcl).collect(Collectors.toList()));
+    }
+    if(removeAcls != null && !removeAcls.isEmpty()) {
+      builder.addAllRemoveAcls(removeAcls.stream().map(
+          OMPBHelper::convertOzoneAcl).collect(Collectors.toList()));
+    }
+    if(isVersionEnabled != null) {
+      builder.setIsVersionEnabled(isVersionEnabled);
+    }
+    if(storageType != null) {
+      builder.setStorageType(
+          PBHelperClient.convertStorageType(storageType));
+    }
+    return builder.build();
+  }
+
+  /**
+   * Parses BucketInfo protobuf and creates OmBucketArgs.
+   * @param bucketArgs
+   * @return instance of OmBucketArgs
+   */
+  public static OmBucketArgs getFromProtobuf(BucketArgs bucketArgs) {
+    return new OmBucketArgs(bucketArgs.getVolumeName(),
+        bucketArgs.getBucketName(),
+        bucketArgs.getAddAclsList().stream().map(
+            OMPBHelper::convertOzoneAcl).collect(Collectors.toList()),
+        bucketArgs.getRemoveAclsList().stream().map(
+            OMPBHelper::convertOzoneAcl).collect(Collectors.toList()),
+        bucketArgs.hasIsVersionEnabled() ?
+            bucketArgs.getIsVersionEnabled() : null,
+        bucketArgs.hasStorageType() ? PBHelperClient.convertStorageType(
+            bucketArgs.getStorageType()) : null);
+  }
+}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/061b1685/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmBucketInfo.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmBucketInfo.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmBucketInfo.java
new file mode 100644
index 0000000..bf5abdd
--- /dev/null
+++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmBucketInfo.java
@@ -0,0 +1,235 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.ozone.om.helpers;
+
+import com.google.common.base.Preconditions;
+import org.apache.hadoop.fs.StorageType;
+import org.apache.hadoop.hdfs.protocolPB.PBHelperClient;
+import org.apache.hadoop.ozone.OzoneAcl;
+import org.apache.hadoop.ozone.protocol.proto
+    .OzoneManagerProtocolProtos.BucketInfo;
+import org.apache.hadoop.ozone.protocolPB.OMPBHelper;
+
+import java.util.LinkedList;
+import java.util.List;
+import java.util.stream.Collectors;
+
+/**
+ * A class that encapsulates Bucket Info.
+ */
+public final class OmBucketInfo {
+  /**
+   * Name of the volume in which the bucket belongs to.
+   */
+  private final String volumeName;
+  /**
+   * Name of the bucket.
+   */
+  private final String bucketName;
+  /**
+   * ACL Information.
+   */
+  private List<OzoneAcl> acls;
+  /**
+   * Bucket Version flag.
+   */
+  private Boolean isVersionEnabled;
+  /**
+   * Type of storage to be used for this bucket.
+   * [RAM_DISK, SSD, DISK, ARCHIVE]
+   */
+  private StorageType storageType;
+  /**
+   * Creation time of bucket.
+   */
+  private final long creationTime;
+
+  /**
+   * Private constructor, constructed via builder.
+   * @param volumeName - Volume name.
+   * @param bucketName - Bucket name.
+   * @param acls - list of ACLs.
+   * @param isVersionEnabled - Bucket version flag.
+   * @param storageType - Storage type to be used.
+   * @param creationTime - Bucket creation time.
+   */
+  private OmBucketInfo(String volumeName, String bucketName,
+                       List<OzoneAcl> acls, boolean isVersionEnabled,
+                       StorageType storageType, long creationTime) {
+    this.volumeName = volumeName;
+    this.bucketName = bucketName;
+    this.acls = acls;
+    this.isVersionEnabled = isVersionEnabled;
+    this.storageType = storageType;
+    this.creationTime = creationTime;
+  }
+
+  /**
+   * Returns the Volume Name.
+   * @return String.
+   */
+  public String getVolumeName() {
+    return volumeName;
+  }
+
+  /**
+   * Returns the Bucket Name.
+   * @return String
+   */
+  public String getBucketName() {
+    return bucketName;
+  }
+
+  /**
+   * Returns the ACL's associated with this bucket.
+   * @return List<OzoneAcl>
+   */
+  public List<OzoneAcl> getAcls() {
+    return acls;
+  }
+
+  /**
+   * Returns true if bucket version is enabled, else false.
+   * @return isVersionEnabled
+   */
+  public boolean getIsVersionEnabled() {
+    return isVersionEnabled;
+  }
+
+  /**
+   * Returns the type of storage to be used.
+   * @return StorageType
+   */
+  public StorageType getStorageType() {
+    return storageType;
+  }
+
+  /**
+   * Returns creation time.
+   *
+   * @return long
+   */
+  public long getCreationTime() {
+    return creationTime;
+  }
+
+  /**
+   * Returns new builder class that builds a OmBucketInfo.
+   *
+   * @return Builder
+   */
+  public static Builder newBuilder() {
+    return new Builder();
+  }
+
+  /**
+   * Builder for OmBucketInfo.
+   */
+  public static class Builder {
+    private String volumeName;
+    private String bucketName;
+    private List<OzoneAcl> acls;
+    private Boolean isVersionEnabled;
+    private StorageType storageType;
+    private long creationTime;
+
+    Builder() {
+      //Default values
+      this.acls = new LinkedList<>();
+      this.isVersionEnabled = false;
+      this.storageType = StorageType.DISK;
+    }
+
+    public Builder setVolumeName(String volume) {
+      this.volumeName = volume;
+      return this;
+    }
+
+    public Builder setBucketName(String bucket) {
+      this.bucketName = bucket;
+      return this;
+    }
+
+    public Builder setAcls(List<OzoneAcl> listOfAcls) {
+      this.acls = listOfAcls;
+      return this;
+    }
+
+    public Builder setIsVersionEnabled(Boolean versionFlag) {
+      this.isVersionEnabled = versionFlag;
+      return this;
+    }
+
+    public Builder setStorageType(StorageType storage) {
+      this.storageType = storage;
+      return this;
+    }
+
+    public Builder setCreationTime(long createdOn) {
+      this.creationTime = createdOn;
+      return this;
+    }
+
+    /**
+     * Constructs the OmBucketInfo.
+     * @return instance of OmBucketInfo.
+     */
+    public OmBucketInfo build() {
+      Preconditions.checkNotNull(volumeName);
+      Preconditions.checkNotNull(bucketName);
+      Preconditions.checkNotNull(acls);
+      Preconditions.checkNotNull(isVersionEnabled);
+      Preconditions.checkNotNull(storageType);
+
+      return new OmBucketInfo(volumeName, bucketName, acls,
+          isVersionEnabled, storageType, creationTime);
+    }
+  }
+
+  /**
+   * Creates BucketInfo protobuf from OmBucketInfo.
+   */
+  public BucketInfo getProtobuf() {
+    return BucketInfo.newBuilder()
+        .setVolumeName(volumeName)
+        .setBucketName(bucketName)
+        .addAllAcls(acls.stream().map(
+            OMPBHelper::convertOzoneAcl).collect(Collectors.toList()))
+        .setIsVersionEnabled(isVersionEnabled)
+        .setStorageType(PBHelperClient.convertStorageType(
+            storageType))
+        .setCreationTime(creationTime)
+        .build();
+  }
+
+  /**
+   * Parses BucketInfo protobuf and creates OmBucketInfo.
+   * @param bucketInfo
+   * @return instance of OmBucketInfo
+   */
+  public static OmBucketInfo getFromProtobuf(BucketInfo bucketInfo) {
+    return new OmBucketInfo(
+        bucketInfo.getVolumeName(),
+        bucketInfo.getBucketName(),
+        bucketInfo.getAclsList().stream().map(
+            OMPBHelper::convertOzoneAcl).collect(Collectors.toList()),
+        bucketInfo.getIsVersionEnabled(),
+        PBHelperClient.convertStorageType(
+            bucketInfo.getStorageType()), bucketInfo.getCreationTime());
+  }
+}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/061b1685/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmKeyArgs.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmKeyArgs.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmKeyArgs.java
new file mode 100644
index 0000000..1f8ed5f
--- /dev/null
+++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmKeyArgs.java
@@ -0,0 +1,119 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.ozone.om.helpers;
+import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationType;
+import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor;
+
+/**
+ * Args for key. Client use this to specify key's attributes on  key creation
+ * (putKey()).
+ */
+public final class OmKeyArgs {
+  private final String volumeName;
+  private final String bucketName;
+  private final String keyName;
+  private long dataSize;
+  private final ReplicationType type;
+  private final ReplicationFactor factor;
+
+  private OmKeyArgs(String volumeName, String bucketName, String keyName,
+                    long dataSize, ReplicationType type, ReplicationFactor factor) {
+    this.volumeName = volumeName;
+    this.bucketName = bucketName;
+    this.keyName = keyName;
+    this.dataSize = dataSize;
+    this.type = type;
+    this.factor = factor;
+  }
+
+  public ReplicationType getType() {
+    return type;
+  }
+
+  public ReplicationFactor getFactor() {
+    return factor;
+  }
+
+  public String getVolumeName() {
+    return volumeName;
+  }
+
+  public String getBucketName() {
+    return bucketName;
+  }
+
+  public String getKeyName() {
+    return keyName;
+  }
+
+  public long getDataSize() {
+    return dataSize;
+  }
+
+  public void setDataSize(long size) {
+    dataSize = size;
+  }
+
+  /**
+   * Builder class of OmKeyArgs.
+   */
+  public static class Builder {
+    private String volumeName;
+    private String bucketName;
+    private String keyName;
+    private long dataSize;
+    private ReplicationType type;
+    private ReplicationFactor factor;
+
+
+    public Builder setVolumeName(String volume) {
+      this.volumeName = volume;
+      return this;
+    }
+
+    public Builder setBucketName(String bucket) {
+      this.bucketName = bucket;
+      return this;
+    }
+
+    public Builder setKeyName(String key) {
+      this.keyName = key;
+      return this;
+    }
+
+    public Builder setDataSize(long size) {
+      this.dataSize = size;
+      return this;
+    }
+
+    public Builder setType(ReplicationType replicationType) {
+      this.type = replicationType;
+      return this;
+    }
+
+    public Builder setFactor(ReplicationFactor replicationFactor) {
+      this.factor = replicationFactor;
+      return this;
+    }
+
+    public OmKeyArgs build() {
+      return new OmKeyArgs(volumeName, bucketName, keyName, dataSize,
+          type, factor);
+    }
+  }
+}


---------------------------------------------------------------------
To unsubscribe, e-mail: common-commits-unsubscribe@hadoop.apache.org
For additional commands, e-mail: common-commits-help@hadoop.apache.org


[23/50] [abbrv] hadoop git commit: YARN-7556. Fair scheduler configuration should allow resource types in the minResources and maxResources properties. (Daniel Templeton and Szilard Nemeth via Haibo Chen)

Posted by vi...@apache.org.
YARN-7556. Fair scheduler configuration should allow resource types in the minResources and maxResources properties. (Daniel Templeton and Szilard Nemeth via Haibo Chen)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/9edc74f6
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/9edc74f6
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/9edc74f6

Branch: refs/heads/HDFS-12090
Commit: 9edc74f64a31450af3c55c0dadf352862e4b359d
Parents: 39ad989
Author: Haibo Chen <ha...@apache.org>
Authored: Thu Jul 5 10:42:39 2018 -0700
Committer: Sunil G <su...@apache.org>
Committed: Fri Jul 6 11:03:48 2018 -0700

----------------------------------------------------------------------
 .../dev-support/findbugs-exclude.xml            |  17 +-
 .../hadoop/yarn/api/records/Resource.java       |  13 ++
 .../api/records/impl/LightWeightResource.java   |  23 ++-
 .../scheduler/fair/ConfigurableResource.java    |  69 +++++++-
 .../fair/FairSchedulerConfiguration.java        | 174 ++++++++++++++++---
 .../allocation/AllocationFileQueueParser.java   |   2 +-
 .../fair/TestFairSchedulerConfiguration.java    | 151 ++++++++++++----
 .../src/site/markdown/FairScheduler.md          |   6 +-
 8 files changed, 385 insertions(+), 70 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/9edc74f6/hadoop-yarn-project/hadoop-yarn/dev-support/findbugs-exclude.xml
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/dev-support/findbugs-exclude.xml b/hadoop-yarn-project/hadoop-yarn/dev-support/findbugs-exclude.xml
index 5841361..5cc81e5 100644
--- a/hadoop-yarn-project/hadoop-yarn/dev-support/findbugs-exclude.xml
+++ b/hadoop-yarn-project/hadoop-yarn/dev-support/findbugs-exclude.xml
@@ -67,11 +67,6 @@
   </Match>
   <Match>
     <Class name="org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.RMAppAttemptMetrics" />
-    <Method name="getLocalityStatistics" />
-    <Bug pattern="EI_EXPOSE_REP" />
-  </Match>
-  <Match>
-    <Class name="org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.RMAppAttemptMetrics" />
     <Method name="incNumAllocatedContainers"/>
     <Bug pattern="VO_VOLATILE_INCREMENT" />
   </Match>
@@ -118,6 +113,18 @@
     <Bug pattern="BC_UNCONFIRMED_CAST" />
   </Match>
 
+  <!-- Ignore exposed internal representations -->
+  <Match>
+    <Class name="org.apache.hadoop.yarn.api.records.Resource" />
+    <Method name="getResources" />
+    <Bug pattern="EI_EXPOSE_REP" />
+  </Match>
+  <Match>
+    <Class name="org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.RMAppAttemptMetrics" />
+    <Method name="getLocalityStatistics" />
+    <Bug pattern="EI_EXPOSE_REP" />
+  </Match>
+
   <!-- Object cast is based on the event type -->
   <Match>
     <Class name="org.apache.hadoop.yarn.server.nodemanager.timelineservice.NMTimelinePublisher" />

http://git-wip-us.apache.org/repos/asf/hadoop/blob/9edc74f6/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/Resource.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/Resource.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/Resource.java
index 71a6b54..173d4c9 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/Resource.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/Resource.java
@@ -30,6 +30,7 @@ import org.apache.hadoop.classification.InterfaceAudience.Public;
 import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.classification.InterfaceStability.Evolving;
 import org.apache.hadoop.classification.InterfaceStability.Stable;
+import org.apache.hadoop.classification.InterfaceStability.Unstable;
 import org.apache.hadoop.yarn.api.ApplicationMasterProtocol;
 import org.apache.hadoop.yarn.api.protocolrecords.ResourceTypes;
 import org.apache.hadoop.yarn.api.records.impl.LightWeightResource;
@@ -75,6 +76,18 @@ public abstract class Resource implements Comparable<Resource> {
   @Private
   public static final int VCORES_INDEX = 1;
 
+  /**
+   * Return a new {@link Resource} instance with all resource values
+   * initialized to {@code value}.
+   * @param value the value to use for all resources
+   * @return a new {@link Resource} instance
+   */
+  @Private
+  @Unstable
+  public static Resource newInstance(long value) {
+    return new LightWeightResource(value);
+  }
+
   @Public
   @Stable
   public static Resource newInstance(int memory, int vCores) {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/9edc74f6/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/impl/LightWeightResource.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/impl/LightWeightResource.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/impl/LightWeightResource.java
index a6e6432..77f77f3 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/impl/LightWeightResource.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/impl/LightWeightResource.java
@@ -18,9 +18,8 @@
 
 package org.apache.hadoop.yarn.api.records.impl;
 
-import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceAudience.Private;
 import org.apache.hadoop.classification.InterfaceStability.Unstable;
-import org.apache.hadoop.yarn.api.protocolrecords.ResourceTypes;
 import org.apache.hadoop.yarn.api.records.Resource;
 import org.apache.hadoop.yarn.api.records.ResourceInformation;
 import org.apache.hadoop.yarn.util.resource.ResourceUtils;
@@ -58,13 +57,29 @@ import static org.apache.hadoop.yarn.api.records.ResourceInformation.*;
  *
  * @see Resource
  */
-@InterfaceAudience.Private
+@Private
 @Unstable
 public class LightWeightResource extends Resource {
 
   private ResourceInformation memoryResInfo;
   private ResourceInformation vcoresResInfo;
 
+  /**
+   * Create a new {@link LightWeightResource} instance with all resource values
+   * initialized to {@code value}.
+   * @param value the value to use for all resources
+   */
+  public LightWeightResource(long value) {
+    ResourceInformation[] types = ResourceUtils.getResourceTypesArray();
+    initResourceInformations(value, value, types.length);
+
+    for (int i = 2; i < types.length; i++) {
+      resources[i] = new ResourceInformation();
+      ResourceInformation.copy(types[i], resources[i]);
+      resources[i].setValue(value);
+    }
+  }
+
   public LightWeightResource(long memory, int vcores) {
     int numberOfKnownResourceTypes = ResourceUtils
         .getNumberOfKnownResourceTypes();
@@ -91,7 +106,7 @@ public class LightWeightResource extends Resource {
     }
   }
 
-  private void initResourceInformations(long memory, int vcores,
+  private void initResourceInformations(long memory, long vcores,
       int numberOfKnownResourceTypes) {
     this.memoryResInfo = newDefaultInformation(MEMORY_URI, MEMORY_MB.getUnits(),
         memory);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/9edc74f6/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/ConfigurableResource.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/ConfigurableResource.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/ConfigurableResource.java
index ecdd011..0c3b0dd 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/ConfigurableResource.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/ConfigurableResource.java
@@ -18,9 +18,13 @@
 
 package org.apache.hadoop.yarn.server.resourcemanager.scheduler.fair;
 
+import java.util.Arrays;
 import org.apache.hadoop.classification.InterfaceAudience.Private;
 import org.apache.hadoop.classification.InterfaceStability.Unstable;
 import org.apache.hadoop.yarn.api.records.Resource;
+import org.apache.hadoop.yarn.api.records.ResourceInformation;
+import org.apache.hadoop.yarn.exceptions.ResourceNotFoundException;
+import org.apache.hadoop.yarn.util.resource.ResourceUtils;
 
 /**
  * A {@code ConfigurableResource} object represents an entity that is used to
@@ -33,29 +37,53 @@ public class ConfigurableResource {
   private final Resource resource;
   private final double[] percentages;
 
-  public ConfigurableResource(double[] percentages) {
+  ConfigurableResource() {
+    this(getOneHundredPercentArray());
+  }
+
+  ConfigurableResource(double[] percentages) {
     this.percentages = percentages.clone();
     this.resource = null;
   }
 
+  ConfigurableResource(long value) {
+    this(Resource.newInstance(value));
+  }
+
   public ConfigurableResource(Resource resource) {
     this.percentages = null;
     this.resource = resource;
   }
 
+  private static double[] getOneHundredPercentArray() {
+    double[] resourcePercentages =
+        new double[ResourceUtils.getNumberOfKnownResourceTypes()];
+    Arrays.fill(resourcePercentages, 1.0);
+
+    return resourcePercentages;
+  }
+
   /**
    * Get resource by multiplying the cluster resource and the percentage of
    * each resource respectively. Return the absolute resource if either
    * {@code percentages} or {@code clusterResource} is null.
    *
    * @param clusterResource the cluster resource
-   * @return resource
+   * @return resource the resulting resource
    */
   public Resource getResource(Resource clusterResource) {
     if (percentages != null && clusterResource != null) {
       long memory = (long) (clusterResource.getMemorySize() * percentages[0]);
       int vcore = (int) (clusterResource.getVirtualCores() * percentages[1]);
-      return Resource.newInstance(memory, vcore);
+      Resource res = Resource.newInstance(memory, vcore);
+      ResourceInformation[] clusterInfo = clusterResource.getResources();
+
+      for (int i = 2; i < clusterInfo.length; i++) {
+        res.setResourceValue(i,
+            (long)(clusterInfo[i].getValue() * percentages[i]));
+      }
+
+      return res;
     } else {
       return resource;
     }
@@ -69,4 +97,39 @@ public class ConfigurableResource {
   public Resource getResource() {
     return resource;
   }
+
+  /**
+   * Set the value of the wrapped resource if this object isn't setup to use
+   * percentages. If this object is set to use percentages, this method has
+   * no effect.
+   *
+   * @param name the name of the resource
+   * @param value the value
+   */
+  void setValue(String name, long value) {
+    if (resource != null) {
+      resource.setResourceValue(name, value);
+    }
+  }
+
+  /**
+   * Set the percentage of the resource if this object is setup to use
+   * percentages. If this object is set to use percentages, this method has
+   * no effect.
+   *
+   * @param name the name of the resource
+   * @param value the percentage
+   */
+  void setPercentage(String name, double value) {
+    if (percentages != null) {
+      Integer index = ResourceUtils.getResourceTypeIndex().get(name);
+
+      if (index != null) {
+        percentages[index] = value;
+      } else {
+        throw new ResourceNotFoundException("The requested resource, \""
+            + name + "\", could not be found.");
+      }
+    }
+  }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/9edc74f6/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FairSchedulerConfiguration.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FairSchedulerConfiguration.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FairSchedulerConfiguration.java
index b50e4bb..8c4932b 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FairSchedulerConfiguration.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FairSchedulerConfiguration.java
@@ -33,6 +33,7 @@ import org.apache.hadoop.util.StringUtils;
 import org.apache.hadoop.yarn.api.records.Resource;
 import org.apache.hadoop.yarn.api.records.ResourceInformation;
 import org.apache.hadoop.yarn.conf.YarnConfiguration;
+import org.apache.hadoop.yarn.exceptions.ResourceNotFoundException;
 import org.apache.hadoop.yarn.server.utils.BuilderUtils;
 import org.apache.hadoop.yarn.util.UnitsConversionUtil;
 import org.apache.hadoop.yarn.util.resource.ResourceUtils;
@@ -213,6 +214,9 @@ public class FairSchedulerConfiguration extends Configuration {
           CONF_PREFIX + "reservable-nodes";
   public static final float RESERVABLE_NODES_DEFAULT = 0.05f;
 
+  private static final String INVALID_RESOURCE_DEFINITION_PREFIX =
+          "Error reading resource config--invalid resource definition: ";
+
   public FairSchedulerConfiguration() {
     super();
   }
@@ -407,54 +411,167 @@ public class FairSchedulerConfiguration extends Configuration {
   }
 
   /**
-   * Parses a resource config value of a form like "1024", "1024 mb",
-   * or "1024 mb, 3 vcores". If no units are given, megabytes are assumed.
-   * 
-   * @throws AllocationConfigurationException
+   * Parses a resource config value in one of three forms:
+   * <ol>
+   * <li>Percentage: &quot;50%&quot; or &quot;40% memory, 60% cpu&quot;</li>
+   * <li>New style resources: &quot;vcores=10, memory-mb=1024&quot;
+   * or &quot;vcores=60%, memory-mb=40%&quot;</li>
+   * <li>Old style resources: &quot;1024 mb, 10 vcores&quot;</li>
+   * </ol>
+   * In new style resources, any resource that is not specified will be
+   * set to {@link Long#MAX_VALUE} or 100%, as appropriate. Also, in the new
+   * style resources, units are not allowed. Units are assumed from the resource
+   * manager's settings for the resources when the value isn't a percentage.
+   *
+   * @param value the resource definition to parse
+   * @return a {@link ConfigurableResource} that represents the parsed value
+   * @throws AllocationConfigurationException if the raw value is not a valid
+   * resource definition
    */
-  public static ConfigurableResource parseResourceConfigValue(String val)
+  public static ConfigurableResource parseResourceConfigValue(String value)
       throws AllocationConfigurationException {
+    return parseResourceConfigValue(value, Long.MAX_VALUE);
+  }
+
+  /**
+   * Parses a resource config value in one of three forms:
+   * <ol>
+   * <li>Percentage: &quot;50%&quot; or &quot;40% memory, 60% cpu&quot;</li>
+   * <li>New style resources: &quot;vcores=10, memory-mb=1024&quot;
+   * or &quot;vcores=60%, memory-mb=40%&quot;</li>
+   * <li>Old style resources: &quot;1024 mb, 10 vcores&quot;</li>
+   * </ol>
+   * In new style resources, any resource that is not specified will be
+   * set to {@code missing} or 0%, as appropriate. Also, in the new style
+   * resources, units are not allowed. Units are assumed from the resource
+   * manager's settings for the resources when the value isn't a percentage.
+   *
+   * The {@code missing} parameter is only used in the case of new style
+   * resources without percentages. With new style resources with percentages,
+   * any missing resources will be assumed to be 100% because percentages are
+   * only used with maximum resource limits.
+   *
+   * @param value the resource definition to parse
+   * @param missing the value to use for any unspecified resources
+   * @return a {@link ConfigurableResource} that represents the parsed value
+   * @throws AllocationConfigurationException if the raw value is not a valid
+   * resource definition
+   */
+  public static ConfigurableResource parseResourceConfigValue(String value,
+      long missing) throws AllocationConfigurationException {
     ConfigurableResource configurableResource;
+
+    if (value.trim().isEmpty()) {
+      throw new AllocationConfigurationException("Error reading resource "
+          + "config--the resource string is empty.");
+    }
+
     try {
-      val = StringUtils.toLowerCase(val);
-      if (val.contains("%")) {
-        configurableResource = new ConfigurableResource(
-            getResourcePercentage(val));
+      if (value.contains("=")) {
+        configurableResource = parseNewStyleResource(value, missing);
+      } else if (value.contains("%")) {
+        configurableResource = parseOldStyleResourceAsPercentage(value);
       } else {
-        int memory = findResource(val, "mb");
-        int vcores = findResource(val, "vcores");
-        configurableResource = new ConfigurableResource(
-            BuilderUtils.newResource(memory, vcores));
+        configurableResource = parseOldStyleResource(value);
       }
-    } catch (AllocationConfigurationException ex) {
-      throw ex;
-    } catch (Exception ex) {
+    } catch (RuntimeException ex) {
       throw new AllocationConfigurationException(
           "Error reading resource config", ex);
     }
+
+    return configurableResource;
+  }
+
+  private static ConfigurableResource parseNewStyleResource(String value,
+          long missing) throws AllocationConfigurationException {
+
+    final ConfigurableResource configurableResource;
+    boolean asPercent = value.contains("%");
+    if (asPercent) {
+      configurableResource = new ConfigurableResource();
+    } else {
+      configurableResource = new ConfigurableResource(missing);
+    }
+
+    String[] resources = value.split(",");
+    for (String resource : resources) {
+      String[] parts = resource.split("=");
+
+      if (parts.length != 2) {
+        throw createConfigException(value,
+                        "Every resource must be of the form: name=value.");
+      }
+
+      String resourceName = parts[0].trim();
+      String resourceValue = parts[1].trim();
+      try {
+        if (asPercent) {
+          configurableResource.setPercentage(resourceName,
+              findPercentage(resourceValue, ""));
+        } else {
+          configurableResource.setValue(resourceName,
+              Long.parseLong(resourceValue));
+        }
+      } catch (ResourceNotFoundException ex) {
+        throw createConfigException(value, "The "
+            + "resource name, \"" + resourceName + "\" was not "
+            + "recognized. Please check the value of "
+            + YarnConfiguration.RESOURCE_TYPES + " in the Resource "
+            + "Manager's configuration files.", ex);
+      } catch (NumberFormatException ex) {
+        // This only comes from Long.parseLong()
+        throw createConfigException(value, "The "
+            + "resource values must all be integers. \"" + resourceValue
+            + "\" is not an integer.", ex);
+      } catch (AllocationConfigurationException ex) {
+        // This only comes from findPercentage()
+        throw createConfigException(value, "The "
+            + "resource values must all be percentages. \""
+            + resourceValue + "\" is either not a number or does not "
+            + "include the '%' symbol.", ex);
+      }
+    }
     return configurableResource;
   }
 
+  private static ConfigurableResource parseOldStyleResourceAsPercentage(
+          String value) throws AllocationConfigurationException {
+    return new ConfigurableResource(
+            getResourcePercentage(StringUtils.toLowerCase(value)));
+  }
+
+  private static ConfigurableResource parseOldStyleResource(String value)
+          throws AllocationConfigurationException {
+    final String lCaseValue = StringUtils.toLowerCase(value);
+    int memory = findResource(lCaseValue, "mb");
+    int vcores = findResource(lCaseValue, "vcores");
+
+    return new ConfigurableResource(
+            BuilderUtils.newResource(memory, vcores));
+  }
+
   private static double[] getResourcePercentage(
       String val) throws AllocationConfigurationException {
     int numberOfKnownResourceTypes = ResourceUtils
         .getNumberOfKnownResourceTypes();
     double[] resourcePercentage = new double[numberOfKnownResourceTypes];
     String[] strings = val.split(",");
+
     if (strings.length == 1) {
       double percentage = findPercentage(strings[0], "");
       for (int i = 0; i < numberOfKnownResourceTypes; i++) {
-        resourcePercentage[i] = percentage/100;
+        resourcePercentage[i] = percentage;
       }
     } else {
-      resourcePercentage[0] = findPercentage(val, "memory")/100;
-      resourcePercentage[1] = findPercentage(val, "cpu")/100;
+      resourcePercentage[0] = findPercentage(val, "memory");
+      resourcePercentage[1] = findPercentage(val, "cpu");
     }
+
     return resourcePercentage;
   }
 
   private static double findPercentage(String val, String units)
-    throws AllocationConfigurationException {
+      throws AllocationConfigurationException {
     final Pattern pattern =
         Pattern.compile("((\\d+)(\\.\\d*)?)\\s*%\\s*" + units);
     Matcher matcher = pattern.matcher(val);
@@ -467,7 +584,22 @@ public class FairSchedulerConfiguration extends Configuration {
             units);
       }
     }
-    return Double.parseDouble(matcher.group(1));
+    return Double.parseDouble(matcher.group(1)) / 100.0;
+  }
+
+  private static AllocationConfigurationException createConfigException(
+          String value, String message) {
+    return createConfigException(value, message, null);
+  }
+
+  private static AllocationConfigurationException createConfigException(
+      String value, String message, Throwable t) {
+    String msg = INVALID_RESOURCE_DEFINITION_PREFIX + value + ". " + message;
+    if (t != null) {
+      return new AllocationConfigurationException(msg, t);
+    } else {
+      return new AllocationConfigurationException(msg);
+    }
   }
 
   public long getUpdateInterval() {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/9edc74f6/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/allocation/AllocationFileQueueParser.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/allocation/AllocationFileQueueParser.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/allocation/AllocationFileQueueParser.java
index d5a436e..441c34a 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/allocation/AllocationFileQueueParser.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/allocation/AllocationFileQueueParser.java
@@ -134,7 +134,7 @@ public class AllocationFileQueueParser {
       if (MIN_RESOURCES.equals(field.getTagName())) {
         String text = getTrimmedTextData(field);
         ConfigurableResource val =
-            FairSchedulerConfiguration.parseResourceConfigValue(text);
+            FairSchedulerConfiguration.parseResourceConfigValue(text, 0L);
         builder.minQueueResources(queueName, val.getResource());
       } else if (MAX_RESOURCES.equals(field.getTagName())) {
         String text = getTrimmedTextData(field);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/9edc74f6/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestFairSchedulerConfiguration.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestFairSchedulerConfiguration.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestFairSchedulerConfiguration.java
index 481645b..76a5af5 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestFairSchedulerConfiguration.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestFairSchedulerConfiguration.java
@@ -102,60 +102,145 @@ public class TestFairSchedulerConfiguration {
 
   @Test
   public void testParseResourceConfigValue() throws Exception {
-    assertEquals(BuilderUtils.newResource(1024, 2),
-        parseResourceConfigValue("2 vcores, 1024 mb").getResource());
-    assertEquals(BuilderUtils.newResource(1024, 2),
-        parseResourceConfigValue("1024 mb, 2 vcores").getResource());
-    assertEquals(BuilderUtils.newResource(1024, 2),
-        parseResourceConfigValue("2vcores,1024mb").getResource());
-    assertEquals(BuilderUtils.newResource(1024, 2),
-        parseResourceConfigValue("1024mb,2vcores").getResource());
-    assertEquals(BuilderUtils.newResource(1024, 2),
-        parseResourceConfigValue("1024   mb, 2    vcores").getResource());
-    assertEquals(BuilderUtils.newResource(1024, 2),
-        parseResourceConfigValue("1024 Mb, 2 vCores").getResource());
-    assertEquals(BuilderUtils.newResource(1024, 2),
-        parseResourceConfigValue("  1024 mb, 2 vcores  ").getResource());
-    assertEquals(BuilderUtils.newResource(1024, 2),
-        parseResourceConfigValue("  1024.3 mb, 2.35 vcores  ").getResource());
-    assertEquals(BuilderUtils.newResource(1024, 2),
-        parseResourceConfigValue("  1024. mb, 2. vcores  ").getResource());
-
-    Resource clusterResource = BuilderUtils.newResource(2048, 4);
-    assertEquals(BuilderUtils.newResource(1024, 2),
+    Resource expected = BuilderUtils.newResource(5 * 1024, 2);
+    Resource clusterResource = BuilderUtils.newResource(10 * 1024, 4);
+
+    assertEquals(expected,
+        parseResourceConfigValue("2 vcores, 5120 mb").getResource());
+    assertEquals(expected,
+        parseResourceConfigValue("5120 mb, 2 vcores").getResource());
+    assertEquals(expected,
+        parseResourceConfigValue("2vcores,5120mb").getResource());
+    assertEquals(expected,
+        parseResourceConfigValue("5120mb,2vcores").getResource());
+    assertEquals(expected,
+        parseResourceConfigValue("5120mb   mb, 2    vcores").getResource());
+    assertEquals(expected,
+        parseResourceConfigValue("5120 Mb, 2 vCores").getResource());
+    assertEquals(expected,
+        parseResourceConfigValue("  5120 mb, 2 vcores  ").getResource());
+    assertEquals(expected,
+        parseResourceConfigValue("  5120.3 mb, 2.35 vcores  ").getResource());
+    assertEquals(expected,
+        parseResourceConfigValue("  5120. mb, 2. vcores  ").getResource());
+
+    assertEquals(expected,
         parseResourceConfigValue("50% memory, 50% cpu").
             getResource(clusterResource));
-    assertEquals(BuilderUtils.newResource(1024, 2),
+    assertEquals(expected,
         parseResourceConfigValue("50% Memory, 50% CpU").
             getResource(clusterResource));
-    assertEquals(BuilderUtils.newResource(1024, 2),
-        parseResourceConfigValue("50%").getResource(clusterResource));
-    assertEquals(BuilderUtils.newResource(1024, 4),
+    assertEquals(BuilderUtils.newResource(5 * 1024, 4),
         parseResourceConfigValue("50% memory, 100% cpu").
         getResource(clusterResource));
-    assertEquals(BuilderUtils.newResource(1024, 4),
+    assertEquals(BuilderUtils.newResource(5 * 1024, 4),
         parseResourceConfigValue(" 100% cpu, 50% memory").
         getResource(clusterResource));
-    assertEquals(BuilderUtils.newResource(1024, 0),
+    assertEquals(BuilderUtils.newResource(5 * 1024, 0),
         parseResourceConfigValue("50% memory, 0% cpu").
             getResource(clusterResource));
-    assertEquals(BuilderUtils.newResource(1024, 2),
+    assertEquals(expected,
         parseResourceConfigValue("50 % memory, 50 % cpu").
             getResource(clusterResource));
-    assertEquals(BuilderUtils.newResource(1024, 2),
+    assertEquals(expected,
         parseResourceConfigValue("50%memory,50%cpu").
             getResource(clusterResource));
-    assertEquals(BuilderUtils.newResource(1024, 2),
+    assertEquals(expected,
         parseResourceConfigValue("  50  %  memory,  50  %  cpu  ").
             getResource(clusterResource));
-    assertEquals(BuilderUtils.newResource(1024, 2),
+    assertEquals(expected,
         parseResourceConfigValue("50.% memory, 50.% cpu").
             getResource(clusterResource));
-
-    clusterResource =  BuilderUtils.newResource(1024 * 10, 4);
     assertEquals(BuilderUtils.newResource((int)(1024 * 10 * 0.109), 2),
         parseResourceConfigValue("10.9% memory, 50.6% cpu").
             getResource(clusterResource));
+    assertEquals(expected,
+        parseResourceConfigValue("50%").getResource(clusterResource));
+
+    Configuration conf = new Configuration();
+
+    conf.set(YarnConfiguration.RESOURCE_TYPES, "test1");
+    ResourceUtils.resetResourceTypes(conf);
+
+    clusterResource = BuilderUtils.newResource(10 * 1024, 4);
+    expected = BuilderUtils.newResource(5 * 1024, 2);
+    expected.setResourceValue("test1", Long.MAX_VALUE);
+
+    assertEquals(expected,
+        parseResourceConfigValue("vcores=2, memory-mb=5120").getResource());
+    assertEquals(expected,
+        parseResourceConfigValue("memory-mb=5120, vcores=2").getResource());
+    assertEquals(expected,
+        parseResourceConfigValue("vcores=2,memory-mb=5120").getResource());
+    assertEquals(expected, parseResourceConfigValue(" vcores = 2 , "
+            + "memory-mb = 5120 ").getResource());
+
+    expected.setResourceValue("test1", 0L);
+
+    assertEquals(expected,
+        parseResourceConfigValue("vcores=2, memory-mb=5120", 0L).getResource());
+    assertEquals(expected,
+        parseResourceConfigValue("memory-mb=5120, vcores=2", 0L).getResource());
+    assertEquals(expected,
+        parseResourceConfigValue("vcores=2,memory-mb=5120", 0L).getResource());
+    assertEquals(expected,
+        parseResourceConfigValue(" vcores = 2 , memory-mb = 5120 ",
+            0L).getResource());
+
+    clusterResource.setResourceValue("test1", 8L);
+    expected.setResourceValue("test1", 4L);
+
+    assertEquals(expected,
+        parseResourceConfigValue("50%").getResource(clusterResource));
+    assertEquals(expected,
+        parseResourceConfigValue("vcores=2, memory-mb=5120, "
+            + "test1=4").getResource());
+    assertEquals(expected,
+        parseResourceConfigValue("test1=4, vcores=2, "
+            + "memory-mb=5120").getResource());
+    assertEquals(expected,
+        parseResourceConfigValue("memory-mb=5120, test1=4, "
+            + "vcores=2").getResource());
+    assertEquals(expected,
+        parseResourceConfigValue("vcores=2,memory-mb=5120,"
+            + "test1=4").getResource());
+    assertEquals(expected,
+        parseResourceConfigValue(" vcores = 2 , memory-mb = 5120 , "
+            + "test1 = 4 ").getResource());
+
+    expected = BuilderUtils.newResource(4 * 1024, 3);
+    expected.setResourceValue("test1", 8L);
+
+    assertEquals(expected,
+        parseResourceConfigValue("vcores=75%, "
+            + "memory-mb=40%").getResource(clusterResource));
+    assertEquals(expected,
+        parseResourceConfigValue("memory-mb=40%, "
+            + "vcores=75%").getResource(clusterResource));
+    assertEquals(expected,
+        parseResourceConfigValue("vcores=75%,"
+            + "memory-mb=40%").getResource(clusterResource));
+    assertEquals(expected,
+        parseResourceConfigValue(" vcores = 75 % , "
+            + "memory-mb = 40 % ").getResource(clusterResource));
+
+    expected.setResourceValue("test1", 4L);
+
+    assertEquals(expected,
+        parseResourceConfigValue("vcores=75%, memory-mb=40%, "
+            + "test1=50%").getResource(clusterResource));
+    assertEquals(expected,
+        parseResourceConfigValue("test1=50%, vcores=75%, "
+            + "memory-mb=40%").getResource(clusterResource));
+    assertEquals(expected,
+        parseResourceConfigValue("memory-mb=40%, test1=50%, "
+            + "vcores=75%").getResource(clusterResource));
+    assertEquals(expected,
+        parseResourceConfigValue("vcores=75%,memory-mb=40%,"
+            + "test1=50%").getResource(clusterResource));
+    assertEquals(expected,
+        parseResourceConfigValue(" vcores = 75 % , memory-mb = 40 % , "
+            + "test1 = 50 % ").getResource(clusterResource));
   }
   
   @Test(expected = AllocationConfigurationException.class)

http://git-wip-us.apache.org/repos/asf/hadoop/blob/9edc74f6/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/FairScheduler.md
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/FairScheduler.md b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/FairScheduler.md
index 269f5b4..b5bcbf5 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/FairScheduler.md
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/FairScheduler.md
@@ -86,11 +86,11 @@ The allocation file must be in XML format. The format contains five types of ele
 
 * **Queue elements**: which represent queues. Queue elements can take an optional attribute 'type', which when set to 'parent' makes it a parent queue. This is useful when we want to create a parent queue without configuring any leaf queues. Each queue element may contain the following properties:
 
-    * **minResources**: minimum resources the queue is entitled to, in the form "X mb, Y vcores". For the single-resource fairness policy, the vcores value is ignored. If a queue's minimum share is not satisfied, it will be offered available resources before any other queue under the same parent. Under the single-resource fairness policy, a queue is considered unsatisfied if its memory usage is below its minimum memory share. Under dominant resource fairness, a queue is considered unsatisfied if its usage for its dominant resource with respect to the cluster capacity is below its minimum share for that resource. If multiple queues are unsatisfied in this situation, resources go to the queue with the smallest ratio between relevant resource usage and minimum. Note that it is possible that a queue that is below its minimum may not immediately get up to its minimum when it submits an application, because already-running jobs may be using those resources.
+    * **minResources**: minimum resources the queue is entitled to, in the form of "X mb, Y vcores" or "vcores=X, memory-mb=Y". The latter form is required when specifying resources other than memory and CPU. For the single-resource fairness policy, the vcores value is ignored. If a queue's minimum share is not satisfied, it will be offered available resources before any other queue under the same parent. Under the single-resource fairness policy, a queue is considered unsatisfied if its memory usage is below its minimum memory share. Under dominant resource fairness, a queue is considered unsatisfied if its usage for its dominant resource with respect to the cluster capacity is below its minimum share for that resource. If multiple queues are unsatisfied in this situation, resources go to the queue with the smallest ratio between relevant resource usage and its minimum. Note that it is possible for a queue that is below its minimum to not immediately get up to its minimum when an a
 pplication is submitted to the queue, because already-running jobs may be using those resources.
 
-    * **maxResources**: maximum resources a queue is allocated, expressed either in absolute values (X mb, Y vcores) or as a percentage of the cluster resources (X% memory, Y% cpu). A queue will not be assigned a container that would put its aggregate usage over this limit.
+    * **maxResources**: maximum resources a queue will allocated, expressed in the form of "X%", "X% cpu, Y% memory", "X mb, Y vcores", or "vcores=X, memory-mb=Y". The last form is required when specifying resources other than memory and CPU. In the last form, X and Y can either be a percentage or an integer resource value without units. In the latter case the units will be inferred from the default units configured for that resource. A queue will not be assigned a container that would put its aggregate usage over this limit.
 
-    * **maxChildResources**: maximum resources an ad hoc child queue is allocated, expressed either in absolute values (X mb, Y vcores) or as a percentage of the cluster resources (X% memory, Y% cpu). An ad hoc child queue will not be assigned a container that would put its aggregate usage over this limit.
+    * **maxChildResources**: maximum resources an ad hoc child queue will allocated, expressed in the form of "X%", "X% cpu, Y% memory", "X mb, Y vcores", or "vcores=X, memory-mb=Y". The last form is required when specifying resources other than memory and CPU. In the last form, X and Y can either be a percentage or an integer resource value without units. In the latter case the units will be inferred from the default units configured for that resource. An ad hoc child queue will not be assigned a container that would put its aggregate usage over this limit.
 
     * **maxRunningApps**: limit the number of apps from the queue to run at once
 


---------------------------------------------------------------------
To unsubscribe, e-mail: common-commits-unsubscribe@hadoop.apache.org
For additional commands, e-mail: common-commits-help@hadoop.apache.org


[35/50] [abbrv] hadoop git commit: HDDS-167. Rename KeySpaceManager to OzoneManager. Contributed by Arpit Agarwal.

Posted by vi...@apache.org.
http://git-wip-us.apache.org/repos/asf/hadoop/blob/061b1685/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/ksm/TestKsmBlockVersioning.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/ksm/TestKsmBlockVersioning.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/ksm/TestKsmBlockVersioning.java
deleted file mode 100644
index 15c3fd3..0000000
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/ksm/TestKsmBlockVersioning.java
+++ /dev/null
@@ -1,253 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with this
- * work for additional information regarding copyright ownership.  The ASF
- * licenses this file to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations under
- * the License.
- */
-package org.apache.hadoop.ozone.ksm;
-
-import org.apache.commons.lang3.RandomStringUtils;
-import org.apache.hadoop.hdds.conf.OzoneConfiguration;
-import org.apache.hadoop.fs.StorageType;
-import org.apache.hadoop.hdfs.DFSUtil;
-import org.apache.hadoop.hdfs.server.datanode.ObjectStoreHandler;
-import org.apache.hadoop.ozone.MiniOzoneCluster;
-import org.apache.hadoop.ozone.OzoneConfigKeys;
-import org.apache.hadoop.ozone.OzoneConsts;
-import org.apache.hadoop.ozone.ksm.helpers.KsmKeyArgs;
-import org.apache.hadoop.ozone.ksm.helpers.KsmKeyInfo;
-import org.apache.hadoop.ozone.ksm.helpers.KsmKeyLocationInfo;
-import org.apache.hadoop.ozone.ksm.helpers.KsmKeyLocationInfoGroup;
-import org.apache.hadoop.ozone.ksm.helpers.OpenKeySession;
-import org.apache.hadoop.ozone.web.handlers.BucketArgs;
-import org.apache.hadoop.ozone.web.handlers.KeyArgs;
-import org.apache.hadoop.ozone.web.handlers.UserArgs;
-import org.apache.hadoop.ozone.web.handlers.VolumeArgs;
-import org.apache.hadoop.ozone.web.interfaces.StorageHandler;
-import org.apache.hadoop.ozone.web.utils.OzoneUtils;
-import org.junit.AfterClass;
-import org.junit.BeforeClass;
-import org.junit.Rule;
-import org.junit.Test;
-import org.junit.rules.ExpectedException;
-
-import java.io.IOException;
-import java.io.InputStream;
-import java.io.OutputStream;
-import java.util.LinkedList;
-import java.util.List;
-
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertTrue;
-
-/**
- * This class tests the versioning of blocks from KSM side.
- */
-public class TestKsmBlockVersioning {
-  private static MiniOzoneCluster cluster = null;
-  private static UserArgs userArgs;
-  private static OzoneConfiguration conf;
-  private static KeySpaceManager keySpaceManager;
-  private static StorageHandler storageHandler;
-
-  @Rule
-  public ExpectedException exception = ExpectedException.none();
-
-  /**
-   * Create a MiniDFSCluster for testing.
-   * <p>
-   * Ozone is made active by setting OZONE_ENABLED = true and
-   * OZONE_HANDLER_TYPE_KEY = "distributed"
-   *
-   * @throws IOException
-   */
-  @BeforeClass
-  public static void init() throws Exception {
-    conf = new OzoneConfiguration();
-    conf.set(OzoneConfigKeys.OZONE_HANDLER_TYPE_KEY,
-        OzoneConsts.OZONE_HANDLER_DISTRIBUTED);
-    cluster = MiniOzoneCluster.newBuilder(conf).build();
-    cluster.waitForClusterToBeReady();
-    storageHandler = new ObjectStoreHandler(conf).getStorageHandler();
-    userArgs = new UserArgs(null, OzoneUtils.getRequestID(),
-        null, null, null, null);
-    keySpaceManager = cluster.getKeySpaceManager();
-  }
-
-  /**
-   * Shutdown MiniDFSCluster.
-   */
-  @AfterClass
-  public static void shutdown() {
-    if (cluster != null) {
-      cluster.shutdown();
-    }
-  }
-
-  @Test
-  public void testAllocateCommit() throws Exception {
-    String userName = "user" + RandomStringUtils.randomNumeric(5);
-    String adminName = "admin" + RandomStringUtils.randomNumeric(5);
-    String volumeName = "volume" + RandomStringUtils.randomNumeric(5);
-    String bucketName = "bucket" + RandomStringUtils.randomNumeric(5);
-    String keyName = "key" + RandomStringUtils.randomNumeric(5);
-
-    VolumeArgs createVolumeArgs = new VolumeArgs(volumeName, userArgs);
-    createVolumeArgs.setUserName(userName);
-    createVolumeArgs.setAdminName(adminName);
-    storageHandler.createVolume(createVolumeArgs);
-
-    BucketArgs bucketArgs = new BucketArgs(bucketName, createVolumeArgs);
-    bucketArgs.setAddAcls(new LinkedList<>());
-    bucketArgs.setRemoveAcls(new LinkedList<>());
-    bucketArgs.setStorageType(StorageType.DISK);
-    storageHandler.createBucket(bucketArgs);
-
-    KsmKeyArgs keyArgs = new KsmKeyArgs.Builder()
-        .setVolumeName(volumeName)
-        .setBucketName(bucketName)
-        .setKeyName(keyName)
-        .setDataSize(1000)
-        .build();
-
-    // 1st update, version 0
-    OpenKeySession openKey = keySpaceManager.openKey(keyArgs);
-    keySpaceManager.commitKey(keyArgs, openKey.getId());
-
-    KsmKeyInfo keyInfo = keySpaceManager.lookupKey(keyArgs);
-    KsmKeyLocationInfoGroup highestVersion =
-        checkVersions(keyInfo.getKeyLocationVersions());
-    assertEquals(0, highestVersion.getVersion());
-    assertEquals(1, highestVersion.getLocationList().size());
-
-    // 2nd update, version 1
-    openKey = keySpaceManager.openKey(keyArgs);
-    //KsmKeyLocationInfo locationInfo =
-    //    keySpaceManager.allocateBlock(keyArgs, openKey.getId());
-    keySpaceManager.commitKey(keyArgs, openKey.getId());
-
-    keyInfo = keySpaceManager.lookupKey(keyArgs);
-    highestVersion = checkVersions(keyInfo.getKeyLocationVersions());
-    assertEquals(1, highestVersion.getVersion());
-    assertEquals(2, highestVersion.getLocationList().size());
-
-    // 3rd update, version 2
-    openKey = keySpaceManager.openKey(keyArgs);
-    // this block will be appended to the latest version of version 2.
-    keySpaceManager.allocateBlock(keyArgs, openKey.getId());
-    keySpaceManager.commitKey(keyArgs, openKey.getId());
-
-    keyInfo = keySpaceManager.lookupKey(keyArgs);
-    highestVersion = checkVersions(keyInfo.getKeyLocationVersions());
-    assertEquals(2, highestVersion.getVersion());
-    assertEquals(4, highestVersion.getLocationList().size());
-  }
-
-  private KsmKeyLocationInfoGroup checkVersions(
-      List<KsmKeyLocationInfoGroup> versions) {
-    KsmKeyLocationInfoGroup currentVersion = null;
-    for (KsmKeyLocationInfoGroup version : versions) {
-      if (currentVersion != null) {
-        assertEquals(currentVersion.getVersion() + 1, version.getVersion());
-        for (KsmKeyLocationInfo info : currentVersion.getLocationList()) {
-          boolean found = false;
-          // all the blocks from the previous version must present in the next
-          // version
-          for (KsmKeyLocationInfo info2 : version.getLocationList()) {
-            if (info.getLocalID() == info2.getLocalID()) {
-              found = true;
-              break;
-            }
-          }
-          assertTrue(found);
-        }
-      }
-      currentVersion = version;
-    }
-    return currentVersion;
-  }
-
-  @Test
-  public void testReadLatestVersion() throws Exception {
-
-    String userName = "user" + RandomStringUtils.randomNumeric(5);
-    String adminName = "admin" + RandomStringUtils.randomNumeric(5);
-    String volumeName = "volume" + RandomStringUtils.randomNumeric(5);
-    String bucketName = "bucket" + RandomStringUtils.randomNumeric(5);
-    String keyName = "key" + RandomStringUtils.randomNumeric(5);
-
-    VolumeArgs createVolumeArgs = new VolumeArgs(volumeName, userArgs);
-    createVolumeArgs.setUserName(userName);
-    createVolumeArgs.setAdminName(adminName);
-    storageHandler.createVolume(createVolumeArgs);
-
-    BucketArgs bucketArgs = new BucketArgs(bucketName, createVolumeArgs);
-    bucketArgs.setAddAcls(new LinkedList<>());
-    bucketArgs.setRemoveAcls(new LinkedList<>());
-    bucketArgs.setStorageType(StorageType.DISK);
-    storageHandler.createBucket(bucketArgs);
-
-    KsmKeyArgs ksmKeyArgs = new KsmKeyArgs.Builder()
-        .setVolumeName(volumeName)
-        .setBucketName(bucketName)
-        .setKeyName(keyName)
-        .setDataSize(1000)
-        .build();
-
-    String dataString = RandomStringUtils.randomAlphabetic(100);
-    KeyArgs keyArgs = new KeyArgs(volumeName, bucketName, keyName, userArgs);
-    // this write will create 1st version with one block
-    try (OutputStream stream = storageHandler.newKeyWriter(keyArgs)) {
-      stream.write(dataString.getBytes());
-    }
-    byte[] data = new byte[dataString.length()];
-    try (InputStream in = storageHandler.newKeyReader(keyArgs)) {
-      in.read(data);
-    }
-    KsmKeyInfo keyInfo = keySpaceManager.lookupKey(ksmKeyArgs);
-    assertEquals(dataString, DFSUtil.bytes2String(data));
-    assertEquals(0, keyInfo.getLatestVersionLocations().getVersion());
-    assertEquals(1,
-        keyInfo.getLatestVersionLocations().getLocationList().size());
-
-    // this write will create 2nd version, 2nd version will contain block from
-    // version 1, and add a new block
-    dataString = RandomStringUtils.randomAlphabetic(10);
-    data = new byte[dataString.length()];
-    try (OutputStream stream = storageHandler.newKeyWriter(keyArgs)) {
-      stream.write(dataString.getBytes());
-    }
-    try (InputStream in = storageHandler.newKeyReader(keyArgs)) {
-      in.read(data);
-    }
-    keyInfo = keySpaceManager.lookupKey(ksmKeyArgs);
-    assertEquals(dataString, DFSUtil.bytes2String(data));
-    assertEquals(1, keyInfo.getLatestVersionLocations().getVersion());
-    assertEquals(2,
-        keyInfo.getLatestVersionLocations().getLocationList().size());
-
-    dataString = RandomStringUtils.randomAlphabetic(200);
-    data = new byte[dataString.length()];
-    try (OutputStream stream = storageHandler.newKeyWriter(keyArgs)) {
-      stream.write(dataString.getBytes());
-    }
-    try (InputStream in = storageHandler.newKeyReader(keyArgs)) {
-      in.read(data);
-    }
-    keyInfo = keySpaceManager.lookupKey(ksmKeyArgs);
-    assertEquals(dataString, DFSUtil.bytes2String(data));
-    assertEquals(2, keyInfo.getLatestVersionLocations().getVersion());
-    assertEquals(3,
-        keyInfo.getLatestVersionLocations().getLocationList().size());
-  }
-}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hadoop/blob/061b1685/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/ksm/TestMultipleContainerReadWrite.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/ksm/TestMultipleContainerReadWrite.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/ksm/TestMultipleContainerReadWrite.java
deleted file mode 100644
index 1cb6e82..0000000
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/ksm/TestMultipleContainerReadWrite.java
+++ /dev/null
@@ -1,215 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with this
- * work for additional information regarding copyright ownership.  The ASF
- * licenses this file to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations under
- * the License.
- */
-package org.apache.hadoop.ozone.ksm;
-
-import org.apache.commons.lang3.RandomStringUtils;
-import org.apache.hadoop.fs.StorageType;
-import org.apache.hadoop.hdfs.server.datanode.ObjectStoreHandler;
-import org.apache.hadoop.metrics2.MetricsRecordBuilder;
-import org.apache.hadoop.ozone.MiniOzoneCluster;
-import org.apache.hadoop.ozone.OzoneConfigKeys;
-import org.apache.hadoop.hdds.conf.OzoneConfiguration;
-import org.apache.hadoop.ozone.OzoneConsts;
-import org.apache.hadoop.ozone.web.handlers.BucketArgs;
-import org.apache.hadoop.ozone.web.handlers.KeyArgs;
-import org.apache.hadoop.ozone.web.handlers.UserArgs;
-import org.apache.hadoop.ozone.web.handlers.VolumeArgs;
-import org.apache.hadoop.ozone.web.interfaces.StorageHandler;
-import org.apache.hadoop.ozone.web.utils.OzoneUtils;
-import org.apache.hadoop.hdds.scm.ScmConfigKeys;
-import org.junit.AfterClass;
-import org.junit.BeforeClass;
-import org.junit.Ignore;
-import org.junit.Rule;
-import org.junit.Test;
-import org.junit.rules.ExpectedException;
-
-import java.io.IOException;
-import java.io.InputStream;
-import java.io.OutputStream;
-import java.util.LinkedList;
-
-import static org.apache.hadoop.test.MetricsAsserts.assertCounter;
-import static org.apache.hadoop.test.MetricsAsserts.getMetrics;
-import static org.junit.Assert.assertEquals;
-
-/**
- * Test key write/read where a key can span multiple containers.
- */
-public class TestMultipleContainerReadWrite {
-  private static MiniOzoneCluster cluster = null;
-  private static StorageHandler storageHandler;
-  private static UserArgs userArgs;
-  private static OzoneConfiguration conf;
-
-  @Rule
-  public ExpectedException exception = ExpectedException.none();
-
-  /**
-   * Create a MiniDFSCluster for testing.
-   * <p>
-   * Ozone is made active by setting OZONE_ENABLED = true and
-   * OZONE_HANDLER_TYPE_KEY = "distributed"
-   *
-   * @throws IOException
-   */
-  @BeforeClass
-  public static void init() throws Exception {
-    conf = new OzoneConfiguration();
-    // set to as small as 100 bytes per block.
-    conf.setLong(OzoneConfigKeys.OZONE_SCM_BLOCK_SIZE_IN_MB, 1);
-    conf.setInt(ScmConfigKeys.OZONE_SCM_CONTAINER_PROVISION_BATCH_SIZE, 5);
-    conf.set(OzoneConfigKeys.OZONE_HANDLER_TYPE_KEY,
-        OzoneConsts.OZONE_HANDLER_DISTRIBUTED);
-    cluster = MiniOzoneCluster.newBuilder(conf).build();
-    cluster.waitForClusterToBeReady();
-    storageHandler = new ObjectStoreHandler(conf).getStorageHandler();
-    userArgs = new UserArgs(null, OzoneUtils.getRequestID(),
-        null, null, null, null);
-  }
-
-  /**
-   * Shutdown MiniDFSCluster.
-   */
-  @AfterClass
-  public static void shutdown() {
-    if (cluster != null) {
-      cluster.shutdown();
-    }
-  }
-
-  @Test
-  public void testWriteRead() throws Exception {
-    String userName = "user" + RandomStringUtils.randomNumeric(5);
-    String adminName = "admin" + RandomStringUtils.randomNumeric(5);
-    String volumeName = "volume" + RandomStringUtils.randomNumeric(5);
-    String bucketName = "bucket" + RandomStringUtils.randomNumeric(5);
-    String keyName = "key" + RandomStringUtils.randomNumeric(5);
-
-    VolumeArgs createVolumeArgs = new VolumeArgs(volumeName, userArgs);
-    createVolumeArgs.setUserName(userName);
-    createVolumeArgs.setAdminName(adminName);
-    storageHandler.createVolume(createVolumeArgs);
-
-    BucketArgs bucketArgs = new BucketArgs(bucketName, createVolumeArgs);
-    bucketArgs.setAddAcls(new LinkedList<>());
-    bucketArgs.setRemoveAcls(new LinkedList<>());
-    bucketArgs.setStorageType(StorageType.DISK);
-    storageHandler.createBucket(bucketArgs);
-
-    String dataString = RandomStringUtils.randomAscii(3 * (int)OzoneConsts.MB);
-    KeyArgs keyArgs = new KeyArgs(volumeName, bucketName, keyName, userArgs);
-    keyArgs.setSize(3 * (int)OzoneConsts.MB);
-
-    try (OutputStream outputStream = storageHandler.newKeyWriter(keyArgs)) {
-      outputStream.write(dataString.getBytes());
-    }
-
-    byte[] data = new byte[dataString.length()];
-    try (InputStream inputStream = storageHandler.newKeyReader(keyArgs)) {
-      inputStream.read(data, 0, data.length);
-    }
-    assertEquals(dataString, new String(data));
-    // checking whether container meta data has the chunk file persisted.
-    MetricsRecordBuilder containerMetrics = getMetrics(
-        "StorageContainerMetrics");
-    assertCounter("numWriteChunk", 3L, containerMetrics);
-    assertCounter("numReadChunk", 3L, containerMetrics);
-  }
-
-  // Disable this test, because this tests assumes writing beyond a specific
-  // size is not allowed. Which is not true for now. Keeping this test in case
-  // we add this restrict in the future.
-  @Ignore
-  @Test
-  public void testErrorWrite() throws Exception {
-    String userName = "user" + RandomStringUtils.randomNumeric(5);
-    String adminName = "admin" + RandomStringUtils.randomNumeric(5);
-    String volumeName = "volume" + RandomStringUtils.randomNumeric(5);
-    String bucketName = "bucket" + RandomStringUtils.randomNumeric(5);
-    String keyName = "key" + RandomStringUtils.randomNumeric(5);
-
-    VolumeArgs createVolumeArgs = new VolumeArgs(volumeName, userArgs);
-    createVolumeArgs.setUserName(userName);
-    createVolumeArgs.setAdminName(adminName);
-    storageHandler.createVolume(createVolumeArgs);
-
-    BucketArgs bucketArgs = new BucketArgs(bucketName, createVolumeArgs);
-    bucketArgs.setAddAcls(new LinkedList<>());
-    bucketArgs.setRemoveAcls(new LinkedList<>());
-    bucketArgs.setStorageType(StorageType.DISK);
-    storageHandler.createBucket(bucketArgs);
-
-    String dataString1 = RandomStringUtils.randomAscii(100);
-    String dataString2 = RandomStringUtils.randomAscii(500);
-    KeyArgs keyArgs = new KeyArgs(volumeName, bucketName, keyName, userArgs);
-    keyArgs.setSize(500);
-
-    try (OutputStream outputStream = storageHandler.newKeyWriter(keyArgs)) {
-      // first write will write succeed
-      outputStream.write(dataString1.getBytes());
-      // second write
-      exception.expect(IOException.class);
-      exception.expectMessage(
-          "Can not write 500 bytes with only 400 byte space");
-      outputStream.write(dataString2.getBytes());
-    }
-  }
-
-  @Test
-  public void testPartialRead() throws Exception {
-    String userName = "user" + RandomStringUtils.randomNumeric(5);
-    String adminName = "admin" + RandomStringUtils.randomNumeric(5);
-    String volumeName = "volume" + RandomStringUtils.randomNumeric(5);
-    String bucketName = "bucket" + RandomStringUtils.randomNumeric(5);
-    String keyName = "key" + RandomStringUtils.randomNumeric(5);
-
-    VolumeArgs createVolumeArgs = new VolumeArgs(volumeName, userArgs);
-    createVolumeArgs.setUserName(userName);
-    createVolumeArgs.setAdminName(adminName);
-    storageHandler.createVolume(createVolumeArgs);
-
-    BucketArgs bucketArgs = new BucketArgs(bucketName, createVolumeArgs);
-    bucketArgs.setAddAcls(new LinkedList<>());
-    bucketArgs.setRemoveAcls(new LinkedList<>());
-    bucketArgs.setStorageType(StorageType.DISK);
-    storageHandler.createBucket(bucketArgs);
-
-    String dataString = RandomStringUtils.randomAscii(500);
-    KeyArgs keyArgs = new KeyArgs(volumeName, bucketName, keyName, userArgs);
-    keyArgs.setSize(500);
-
-    try (OutputStream outputStream = storageHandler.newKeyWriter(keyArgs)) {
-      outputStream.write(dataString.getBytes());
-    }
-
-    byte[] data = new byte[600];
-    try (InputStream inputStream = storageHandler.newKeyReader(keyArgs)) {
-      int readLen = inputStream.read(data, 0, 340);
-      assertEquals(340, readLen);
-      assertEquals(dataString.substring(0, 340),
-          new String(data).substring(0, 340));
-
-      readLen = inputStream.read(data, 340, 260);
-      assertEquals(160, readLen);
-      assertEquals(dataString, new String(data).substring(0, 500));
-
-      readLen = inputStream.read(data, 500, 1);
-      assertEquals(-1, readLen);
-    }
-  }
-}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/061b1685/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestContainerReportWithKeys.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestContainerReportWithKeys.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestContainerReportWithKeys.java
new file mode 100644
index 0000000..5481506
--- /dev/null
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestContainerReportWithKeys.java
@@ -0,0 +1,143 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with this
+ * work for additional information regarding copyright ownership.  The ASF
+ * licenses this file to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations under
+ * the License.
+ */
+package org.apache.hadoop.ozone.om;
+
+import org.apache.commons.lang3.RandomStringUtils;
+
+import org.apache.hadoop.hdds.client.ReplicationFactor;
+import org.apache.hadoop.hdds.client.ReplicationType;
+import org.apache.hadoop.hdds.conf.OzoneConfiguration;
+import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
+import org.apache.hadoop.hdds.scm.container.common.helpers.ContainerInfo;
+import org.apache.hadoop.ozone.MiniOzoneCluster;
+import org.apache.hadoop.ozone.OzoneConfigKeys;
+import org.apache.hadoop.ozone.OzoneConsts;
+import org.apache.hadoop.ozone.client.*;
+import org.apache.hadoop.ozone.client.io.OzoneOutputStream;
+import org.apache.hadoop.ozone.container.common.helpers.ContainerData;
+import org.apache.hadoop.ozone.container.common.interfaces.ContainerManager;
+import org.apache.hadoop.ozone.om.helpers.OmKeyArgs;
+import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfo;
+import org.apache.hadoop.hdds.scm.server.StorageContainerManager;
+import org.apache.hadoop.hdds.scm.container.common.helpers.StorageContainerException;
+import org.junit.AfterClass;
+import org.junit.BeforeClass;
+import org.junit.Rule;
+import org.junit.Test;
+import org.junit.rules.ExpectedException;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.io.IOException;
+
+/**
+ * This class tests container report with DN container state info.
+ */
+public class TestContainerReportWithKeys {
+  private static final Logger LOG = LoggerFactory.getLogger(
+      TestContainerReportWithKeys.class);
+  private static MiniOzoneCluster cluster = null;
+  private static OzoneConfiguration conf;
+  private static StorageContainerManager scm;
+
+  @Rule
+  public ExpectedException exception = ExpectedException.none();
+
+  /**
+   * Create a MiniDFSCluster for testing.
+   * <p>
+   * Ozone is made active by setting OZONE_ENABLED = true and
+   * OZONE_HANDLER_TYPE_KEY = "distributed"
+   *
+   * @throws IOException
+   */
+  @BeforeClass
+  public static void init() throws Exception {
+    conf = new OzoneConfiguration();
+    conf.set(OzoneConfigKeys.OZONE_HANDLER_TYPE_KEY,
+        OzoneConsts.OZONE_HANDLER_DISTRIBUTED);
+    cluster = MiniOzoneCluster.newBuilder(conf).build();
+    cluster.waitForClusterToBeReady();
+    scm = cluster.getStorageContainerManager();
+  }
+
+  /**
+   * Shutdown MiniDFSCluster.
+   */
+  @AfterClass
+  public static void shutdown() {
+    if (cluster != null) {
+      cluster.shutdown();
+    }
+  }
+
+  @Test
+  public void testContainerReportKeyWrite() throws Exception {
+    final String volumeName = "volume" + RandomStringUtils.randomNumeric(5);
+    final String bucketName = "bucket" + RandomStringUtils.randomNumeric(5);
+    final String keyName = "key" + RandomStringUtils.randomNumeric(5);
+    final int keySize = 100;
+
+    OzoneClient client = OzoneClientFactory.getClient(conf);
+    ObjectStore objectStore = client.getObjectStore();
+    objectStore.createVolume(volumeName);
+    objectStore.getVolume(volumeName).createBucket(bucketName);
+    OzoneOutputStream key =
+        objectStore.getVolume(volumeName).getBucket(bucketName)
+            .createKey(keyName, keySize, ReplicationType.STAND_ALONE,
+                ReplicationFactor.ONE);
+    String dataString = RandomStringUtils.randomAlphabetic(keySize);
+    key.write(dataString.getBytes());
+    key.close();
+
+    OmKeyArgs keyArgs = new OmKeyArgs.Builder()
+        .setVolumeName(volumeName)
+        .setBucketName(bucketName)
+        .setKeyName(keyName)
+        .setType(HddsProtos.ReplicationType.STAND_ALONE)
+        .setFactor(HddsProtos.ReplicationFactor.ONE).setDataSize(keySize)
+        .build();
+
+
+    OmKeyLocationInfo keyInfo =
+        cluster.getOzoneManager().lookupKey(keyArgs).getKeyLocationVersions()
+            .get(0).getBlocksLatestVersionOnly().get(0);
+
+    ContainerData cd = getContainerData(keyInfo.getContainerID());
+
+    LOG.info("DN Container Data:  keyCount: {} used: {} ",
+        cd.getKeyCount(), cd.getBytesUsed());
+
+    ContainerInfo cinfo = scm.getContainerInfo(keyInfo.getContainerID());
+
+    LOG.info("SCM Container Info keyCount: {} usedBytes: {}",
+        cinfo.getNumberOfKeys(), cinfo.getUsedBytes());
+  }
+
+
+  private static ContainerData getContainerData(long containerID) {
+    ContainerData containerData;
+    try {
+      ContainerManager containerManager = cluster.getHddsDatanodes().get(0)
+          .getDatanodeStateMachine().getContainer().getContainerManager();
+      containerData = containerManager.readContainer(containerID);
+    } catch (StorageContainerException e) {
+      throw new AssertionError(e);
+    }
+    return containerData;
+  }
+}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hadoop/blob/061b1685/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestMultipleContainerReadWrite.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestMultipleContainerReadWrite.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestMultipleContainerReadWrite.java
new file mode 100644
index 0000000..1389cba
--- /dev/null
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestMultipleContainerReadWrite.java
@@ -0,0 +1,215 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with this
+ * work for additional information regarding copyright ownership.  The ASF
+ * licenses this file to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations under
+ * the License.
+ */
+package org.apache.hadoop.ozone.om;
+
+import org.apache.commons.lang3.RandomStringUtils;
+import org.apache.hadoop.fs.StorageType;
+import org.apache.hadoop.hdfs.server.datanode.ObjectStoreHandler;
+import org.apache.hadoop.metrics2.MetricsRecordBuilder;
+import org.apache.hadoop.ozone.MiniOzoneCluster;
+import org.apache.hadoop.ozone.OzoneConfigKeys;
+import org.apache.hadoop.hdds.conf.OzoneConfiguration;
+import org.apache.hadoop.ozone.OzoneConsts;
+import org.apache.hadoop.ozone.web.handlers.BucketArgs;
+import org.apache.hadoop.ozone.web.handlers.KeyArgs;
+import org.apache.hadoop.ozone.web.handlers.UserArgs;
+import org.apache.hadoop.ozone.web.handlers.VolumeArgs;
+import org.apache.hadoop.ozone.web.interfaces.StorageHandler;
+import org.apache.hadoop.ozone.web.utils.OzoneUtils;
+import org.apache.hadoop.hdds.scm.ScmConfigKeys;
+import org.junit.AfterClass;
+import org.junit.BeforeClass;
+import org.junit.Ignore;
+import org.junit.Rule;
+import org.junit.Test;
+import org.junit.rules.ExpectedException;
+
+import java.io.IOException;
+import java.io.InputStream;
+import java.io.OutputStream;
+import java.util.LinkedList;
+
+import static org.apache.hadoop.test.MetricsAsserts.assertCounter;
+import static org.apache.hadoop.test.MetricsAsserts.getMetrics;
+import static org.junit.Assert.assertEquals;
+
+/**
+ * Test key write/read where a key can span multiple containers.
+ */
+public class TestMultipleContainerReadWrite {
+  private static MiniOzoneCluster cluster = null;
+  private static StorageHandler storageHandler;
+  private static UserArgs userArgs;
+  private static OzoneConfiguration conf;
+
+  @Rule
+  public ExpectedException exception = ExpectedException.none();
+
+  /**
+   * Create a MiniDFSCluster for testing.
+   * <p>
+   * Ozone is made active by setting OZONE_ENABLED = true and
+   * OZONE_HANDLER_TYPE_KEY = "distributed"
+   *
+   * @throws IOException
+   */
+  @BeforeClass
+  public static void init() throws Exception {
+    conf = new OzoneConfiguration();
+    // set to as small as 100 bytes per block.
+    conf.setLong(OzoneConfigKeys.OZONE_SCM_BLOCK_SIZE_IN_MB, 1);
+    conf.setInt(ScmConfigKeys.OZONE_SCM_CONTAINER_PROVISION_BATCH_SIZE, 5);
+    conf.set(OzoneConfigKeys.OZONE_HANDLER_TYPE_KEY,
+        OzoneConsts.OZONE_HANDLER_DISTRIBUTED);
+    cluster = MiniOzoneCluster.newBuilder(conf).build();
+    cluster.waitForClusterToBeReady();
+    storageHandler = new ObjectStoreHandler(conf).getStorageHandler();
+    userArgs = new UserArgs(null, OzoneUtils.getRequestID(),
+        null, null, null, null);
+  }
+
+  /**
+   * Shutdown MiniDFSCluster.
+   */
+  @AfterClass
+  public static void shutdown() {
+    if (cluster != null) {
+      cluster.shutdown();
+    }
+  }
+
+  @Test
+  public void testWriteRead() throws Exception {
+    String userName = "user" + RandomStringUtils.randomNumeric(5);
+    String adminName = "admin" + RandomStringUtils.randomNumeric(5);
+    String volumeName = "volume" + RandomStringUtils.randomNumeric(5);
+    String bucketName = "bucket" + RandomStringUtils.randomNumeric(5);
+    String keyName = "key" + RandomStringUtils.randomNumeric(5);
+
+    VolumeArgs createVolumeArgs = new VolumeArgs(volumeName, userArgs);
+    createVolumeArgs.setUserName(userName);
+    createVolumeArgs.setAdminName(adminName);
+    storageHandler.createVolume(createVolumeArgs);
+
+    BucketArgs bucketArgs = new BucketArgs(bucketName, createVolumeArgs);
+    bucketArgs.setAddAcls(new LinkedList<>());
+    bucketArgs.setRemoveAcls(new LinkedList<>());
+    bucketArgs.setStorageType(StorageType.DISK);
+    storageHandler.createBucket(bucketArgs);
+
+    String dataString = RandomStringUtils.randomAscii(3 * (int)OzoneConsts.MB);
+    KeyArgs keyArgs = new KeyArgs(volumeName, bucketName, keyName, userArgs);
+    keyArgs.setSize(3 * (int)OzoneConsts.MB);
+
+    try (OutputStream outputStream = storageHandler.newKeyWriter(keyArgs)) {
+      outputStream.write(dataString.getBytes());
+    }
+
+    byte[] data = new byte[dataString.length()];
+    try (InputStream inputStream = storageHandler.newKeyReader(keyArgs)) {
+      inputStream.read(data, 0, data.length);
+    }
+    assertEquals(dataString, new String(data));
+    // checking whether container meta data has the chunk file persisted.
+    MetricsRecordBuilder containerMetrics = getMetrics(
+        "StorageContainerMetrics");
+    assertCounter("numWriteChunk", 3L, containerMetrics);
+    assertCounter("numReadChunk", 3L, containerMetrics);
+  }
+
+  // Disable this test, because this tests assumes writing beyond a specific
+  // size is not allowed. Which is not true for now. Keeping this test in case
+  // we add this restrict in the future.
+  @Ignore
+  @Test
+  public void testErrorWrite() throws Exception {
+    String userName = "user" + RandomStringUtils.randomNumeric(5);
+    String adminName = "admin" + RandomStringUtils.randomNumeric(5);
+    String volumeName = "volume" + RandomStringUtils.randomNumeric(5);
+    String bucketName = "bucket" + RandomStringUtils.randomNumeric(5);
+    String keyName = "key" + RandomStringUtils.randomNumeric(5);
+
+    VolumeArgs createVolumeArgs = new VolumeArgs(volumeName, userArgs);
+    createVolumeArgs.setUserName(userName);
+    createVolumeArgs.setAdminName(adminName);
+    storageHandler.createVolume(createVolumeArgs);
+
+    BucketArgs bucketArgs = new BucketArgs(bucketName, createVolumeArgs);
+    bucketArgs.setAddAcls(new LinkedList<>());
+    bucketArgs.setRemoveAcls(new LinkedList<>());
+    bucketArgs.setStorageType(StorageType.DISK);
+    storageHandler.createBucket(bucketArgs);
+
+    String dataString1 = RandomStringUtils.randomAscii(100);
+    String dataString2 = RandomStringUtils.randomAscii(500);
+    KeyArgs keyArgs = new KeyArgs(volumeName, bucketName, keyName, userArgs);
+    keyArgs.setSize(500);
+
+    try (OutputStream outputStream = storageHandler.newKeyWriter(keyArgs)) {
+      // first write will write succeed
+      outputStream.write(dataString1.getBytes());
+      // second write
+      exception.expect(IOException.class);
+      exception.expectMessage(
+          "Can not write 500 bytes with only 400 byte space");
+      outputStream.write(dataString2.getBytes());
+    }
+  }
+
+  @Test
+  public void testPartialRead() throws Exception {
+    String userName = "user" + RandomStringUtils.randomNumeric(5);
+    String adminName = "admin" + RandomStringUtils.randomNumeric(5);
+    String volumeName = "volume" + RandomStringUtils.randomNumeric(5);
+    String bucketName = "bucket" + RandomStringUtils.randomNumeric(5);
+    String keyName = "key" + RandomStringUtils.randomNumeric(5);
+
+    VolumeArgs createVolumeArgs = new VolumeArgs(volumeName, userArgs);
+    createVolumeArgs.setUserName(userName);
+    createVolumeArgs.setAdminName(adminName);
+    storageHandler.createVolume(createVolumeArgs);
+
+    BucketArgs bucketArgs = new BucketArgs(bucketName, createVolumeArgs);
+    bucketArgs.setAddAcls(new LinkedList<>());
+    bucketArgs.setRemoveAcls(new LinkedList<>());
+    bucketArgs.setStorageType(StorageType.DISK);
+    storageHandler.createBucket(bucketArgs);
+
+    String dataString = RandomStringUtils.randomAscii(500);
+    KeyArgs keyArgs = new KeyArgs(volumeName, bucketName, keyName, userArgs);
+    keyArgs.setSize(500);
+
+    try (OutputStream outputStream = storageHandler.newKeyWriter(keyArgs)) {
+      outputStream.write(dataString.getBytes());
+    }
+
+    byte[] data = new byte[600];
+    try (InputStream inputStream = storageHandler.newKeyReader(keyArgs)) {
+      int readLen = inputStream.read(data, 0, 340);
+      assertEquals(340, readLen);
+      assertEquals(dataString.substring(0, 340),
+          new String(data).substring(0, 340));
+
+      readLen = inputStream.read(data, 340, 260);
+      assertEquals(160, readLen);
+      assertEquals(dataString, new String(data).substring(0, 500));
+
+      readLen = inputStream.read(data, 500, 1);
+      assertEquals(-1, readLen);
+    }
+  }
+}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/061b1685/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOmBlockVersioning.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOmBlockVersioning.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOmBlockVersioning.java
new file mode 100644
index 0000000..15122b9
--- /dev/null
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOmBlockVersioning.java
@@ -0,0 +1,253 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with this
+ * work for additional information regarding copyright ownership.  The ASF
+ * licenses this file to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations under
+ * the License.
+ */
+package org.apache.hadoop.ozone.om;
+
+import org.apache.commons.lang3.RandomStringUtils;
+import org.apache.hadoop.hdds.conf.OzoneConfiguration;
+import org.apache.hadoop.fs.StorageType;
+import org.apache.hadoop.hdfs.DFSUtil;
+import org.apache.hadoop.hdfs.server.datanode.ObjectStoreHandler;
+import org.apache.hadoop.ozone.MiniOzoneCluster;
+import org.apache.hadoop.ozone.OzoneConfigKeys;
+import org.apache.hadoop.ozone.OzoneConsts;
+import org.apache.hadoop.ozone.om.helpers.OmKeyArgs;
+import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;
+import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfo;
+import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfoGroup;
+import org.apache.hadoop.ozone.om.helpers.OpenKeySession;
+import org.apache.hadoop.ozone.web.handlers.BucketArgs;
+import org.apache.hadoop.ozone.web.handlers.KeyArgs;
+import org.apache.hadoop.ozone.web.handlers.UserArgs;
+import org.apache.hadoop.ozone.web.handlers.VolumeArgs;
+import org.apache.hadoop.ozone.web.interfaces.StorageHandler;
+import org.apache.hadoop.ozone.web.utils.OzoneUtils;
+import org.junit.AfterClass;
+import org.junit.BeforeClass;
+import org.junit.Rule;
+import org.junit.Test;
+import org.junit.rules.ExpectedException;
+
+import java.io.IOException;
+import java.io.InputStream;
+import java.io.OutputStream;
+import java.util.LinkedList;
+import java.util.List;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertTrue;
+
+/**
+ * This class tests the versioning of blocks from OM side.
+ */
+public class TestOmBlockVersioning {
+  private static MiniOzoneCluster cluster = null;
+  private static UserArgs userArgs;
+  private static OzoneConfiguration conf;
+  private static OzoneManager ozoneManager;
+  private static StorageHandler storageHandler;
+
+  @Rule
+  public ExpectedException exception = ExpectedException.none();
+
+  /**
+   * Create a MiniDFSCluster for testing.
+   * <p>
+   * Ozone is made active by setting OZONE_ENABLED = true and
+   * OZONE_HANDLER_TYPE_KEY = "distributed"
+   *
+   * @throws IOException
+   */
+  @BeforeClass
+  public static void init() throws Exception {
+    conf = new OzoneConfiguration();
+    conf.set(OzoneConfigKeys.OZONE_HANDLER_TYPE_KEY,
+        OzoneConsts.OZONE_HANDLER_DISTRIBUTED);
+    cluster = MiniOzoneCluster.newBuilder(conf).build();
+    cluster.waitForClusterToBeReady();
+    storageHandler = new ObjectStoreHandler(conf).getStorageHandler();
+    userArgs = new UserArgs(null, OzoneUtils.getRequestID(),
+        null, null, null, null);
+    ozoneManager = cluster.getOzoneManager();
+  }
+
+  /**
+   * Shutdown MiniDFSCluster.
+   */
+  @AfterClass
+  public static void shutdown() {
+    if (cluster != null) {
+      cluster.shutdown();
+    }
+  }
+
+  @Test
+  public void testAllocateCommit() throws Exception {
+    String userName = "user" + RandomStringUtils.randomNumeric(5);
+    String adminName = "admin" + RandomStringUtils.randomNumeric(5);
+    String volumeName = "volume" + RandomStringUtils.randomNumeric(5);
+    String bucketName = "bucket" + RandomStringUtils.randomNumeric(5);
+    String keyName = "key" + RandomStringUtils.randomNumeric(5);
+
+    VolumeArgs createVolumeArgs = new VolumeArgs(volumeName, userArgs);
+    createVolumeArgs.setUserName(userName);
+    createVolumeArgs.setAdminName(adminName);
+    storageHandler.createVolume(createVolumeArgs);
+
+    BucketArgs bucketArgs = new BucketArgs(bucketName, createVolumeArgs);
+    bucketArgs.setAddAcls(new LinkedList<>());
+    bucketArgs.setRemoveAcls(new LinkedList<>());
+    bucketArgs.setStorageType(StorageType.DISK);
+    storageHandler.createBucket(bucketArgs);
+
+    OmKeyArgs keyArgs = new OmKeyArgs.Builder()
+        .setVolumeName(volumeName)
+        .setBucketName(bucketName)
+        .setKeyName(keyName)
+        .setDataSize(1000)
+        .build();
+
+    // 1st update, version 0
+    OpenKeySession openKey = ozoneManager.openKey(keyArgs);
+    ozoneManager.commitKey(keyArgs, openKey.getId());
+
+    OmKeyInfo keyInfo = ozoneManager.lookupKey(keyArgs);
+    OmKeyLocationInfoGroup highestVersion =
+        checkVersions(keyInfo.getKeyLocationVersions());
+    assertEquals(0, highestVersion.getVersion());
+    assertEquals(1, highestVersion.getLocationList().size());
+
+    // 2nd update, version 1
+    openKey = ozoneManager.openKey(keyArgs);
+    //OmKeyLocationInfo locationInfo =
+    //    ozoneManager.allocateBlock(keyArgs, openKey.getId());
+    ozoneManager.commitKey(keyArgs, openKey.getId());
+
+    keyInfo = ozoneManager.lookupKey(keyArgs);
+    highestVersion = checkVersions(keyInfo.getKeyLocationVersions());
+    assertEquals(1, highestVersion.getVersion());
+    assertEquals(2, highestVersion.getLocationList().size());
+
+    // 3rd update, version 2
+    openKey = ozoneManager.openKey(keyArgs);
+    // this block will be appended to the latest version of version 2.
+    ozoneManager.allocateBlock(keyArgs, openKey.getId());
+    ozoneManager.commitKey(keyArgs, openKey.getId());
+
+    keyInfo = ozoneManager.lookupKey(keyArgs);
+    highestVersion = checkVersions(keyInfo.getKeyLocationVersions());
+    assertEquals(2, highestVersion.getVersion());
+    assertEquals(4, highestVersion.getLocationList().size());
+  }
+
+  private OmKeyLocationInfoGroup checkVersions(
+      List<OmKeyLocationInfoGroup> versions) {
+    OmKeyLocationInfoGroup currentVersion = null;
+    for (OmKeyLocationInfoGroup version : versions) {
+      if (currentVersion != null) {
+        assertEquals(currentVersion.getVersion() + 1, version.getVersion());
+        for (OmKeyLocationInfo info : currentVersion.getLocationList()) {
+          boolean found = false;
+          // all the blocks from the previous version must present in the next
+          // version
+          for (OmKeyLocationInfo info2 : version.getLocationList()) {
+            if (info.getLocalID() == info2.getLocalID()) {
+              found = true;
+              break;
+            }
+          }
+          assertTrue(found);
+        }
+      }
+      currentVersion = version;
+    }
+    return currentVersion;
+  }
+
+  @Test
+  public void testReadLatestVersion() throws Exception {
+
+    String userName = "user" + RandomStringUtils.randomNumeric(5);
+    String adminName = "admin" + RandomStringUtils.randomNumeric(5);
+    String volumeName = "volume" + RandomStringUtils.randomNumeric(5);
+    String bucketName = "bucket" + RandomStringUtils.randomNumeric(5);
+    String keyName = "key" + RandomStringUtils.randomNumeric(5);
+
+    VolumeArgs createVolumeArgs = new VolumeArgs(volumeName, userArgs);
+    createVolumeArgs.setUserName(userName);
+    createVolumeArgs.setAdminName(adminName);
+    storageHandler.createVolume(createVolumeArgs);
+
+    BucketArgs bucketArgs = new BucketArgs(bucketName, createVolumeArgs);
+    bucketArgs.setAddAcls(new LinkedList<>());
+    bucketArgs.setRemoveAcls(new LinkedList<>());
+    bucketArgs.setStorageType(StorageType.DISK);
+    storageHandler.createBucket(bucketArgs);
+
+    OmKeyArgs omKeyArgs = new OmKeyArgs.Builder()
+        .setVolumeName(volumeName)
+        .setBucketName(bucketName)
+        .setKeyName(keyName)
+        .setDataSize(1000)
+        .build();
+
+    String dataString = RandomStringUtils.randomAlphabetic(100);
+    KeyArgs keyArgs = new KeyArgs(volumeName, bucketName, keyName, userArgs);
+    // this write will create 1st version with one block
+    try (OutputStream stream = storageHandler.newKeyWriter(keyArgs)) {
+      stream.write(dataString.getBytes());
+    }
+    byte[] data = new byte[dataString.length()];
+    try (InputStream in = storageHandler.newKeyReader(keyArgs)) {
+      in.read(data);
+    }
+    OmKeyInfo keyInfo = ozoneManager.lookupKey(omKeyArgs);
+    assertEquals(dataString, DFSUtil.bytes2String(data));
+    assertEquals(0, keyInfo.getLatestVersionLocations().getVersion());
+    assertEquals(1,
+        keyInfo.getLatestVersionLocations().getLocationList().size());
+
+    // this write will create 2nd version, 2nd version will contain block from
+    // version 1, and add a new block
+    dataString = RandomStringUtils.randomAlphabetic(10);
+    data = new byte[dataString.length()];
+    try (OutputStream stream = storageHandler.newKeyWriter(keyArgs)) {
+      stream.write(dataString.getBytes());
+    }
+    try (InputStream in = storageHandler.newKeyReader(keyArgs)) {
+      in.read(data);
+    }
+    keyInfo = ozoneManager.lookupKey(omKeyArgs);
+    assertEquals(dataString, DFSUtil.bytes2String(data));
+    assertEquals(1, keyInfo.getLatestVersionLocations().getVersion());
+    assertEquals(2,
+        keyInfo.getLatestVersionLocations().getLocationList().size());
+
+    dataString = RandomStringUtils.randomAlphabetic(200);
+    data = new byte[dataString.length()];
+    try (OutputStream stream = storageHandler.newKeyWriter(keyArgs)) {
+      stream.write(dataString.getBytes());
+    }
+    try (InputStream in = storageHandler.newKeyReader(keyArgs)) {
+      in.read(data);
+    }
+    keyInfo = ozoneManager.lookupKey(omKeyArgs);
+    assertEquals(dataString, DFSUtil.bytes2String(data));
+    assertEquals(2, keyInfo.getLatestVersionLocations().getVersion());
+    assertEquals(3,
+        keyInfo.getLatestVersionLocations().getLocationList().size());
+  }
+}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hadoop/blob/061b1685/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOmMetrics.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOmMetrics.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOmMetrics.java
new file mode 100644
index 0000000..8d0f4b21
--- /dev/null
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOmMetrics.java
@@ -0,0 +1,313 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with this
+ * work for additional information regarding copyright ownership.  The ASF
+ * licenses this file to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations under
+ * the License.
+ */
+package org.apache.hadoop.ozone.om;
+
+import static org.apache.hadoop.test.MetricsAsserts.assertCounter;
+import static org.apache.hadoop.test.MetricsAsserts.getMetrics;
+
+import java.io.IOException;
+
+import org.apache.hadoop.metrics2.MetricsRecordBuilder;
+import org.apache.hadoop.ozone.MiniOzoneCluster;
+import org.apache.hadoop.ozone.OzoneConfigKeys;
+import org.apache.hadoop.hdds.conf.OzoneConfiguration;
+import org.apache.hadoop.ozone.OzoneConsts;
+import org.junit.After;
+import org.junit.Before;
+import org.junit.Test;
+import org.mockito.Mockito;
+
+/**
+ * Test for OM metrics.
+ */
+@SuppressWarnings("deprecation")
+public class TestOmMetrics {
+  private MiniOzoneCluster cluster;
+  private OzoneManager ozoneManager;
+
+  /**
+   * The exception used for testing failure metrics.
+   */
+  private IOException exception = new IOException();
+
+  /**
+   * Create a MiniDFSCluster for testing.
+   *
+   * @throws IOException
+   */
+  @Before
+  public void setup() throws Exception {
+    OzoneConfiguration conf = new OzoneConfiguration();
+    conf.set(OzoneConfigKeys.OZONE_HANDLER_TYPE_KEY,
+        OzoneConsts.OZONE_HANDLER_DISTRIBUTED);
+    cluster = MiniOzoneCluster.newBuilder(conf).build();
+    cluster.waitForClusterToBeReady();
+    ozoneManager = cluster.getOzoneManager();
+  }
+
+  /**
+   * Shutdown MiniDFSCluster.
+   */
+  @After
+  public void shutdown() {
+    if (cluster != null) {
+      cluster.shutdown();
+    }
+  }
+
+  @Test
+  public void testVolumeOps() throws IOException {
+    VolumeManager volumeManager =
+        (VolumeManager) org.apache.hadoop.test.Whitebox
+            .getInternalState(ozoneManager, "volumeManager");
+    VolumeManager mockVm = Mockito.spy(volumeManager);
+
+    Mockito.doNothing().when(mockVm).createVolume(null);
+    Mockito.doNothing().when(mockVm).deleteVolume(null);
+    Mockito.doReturn(null).when(mockVm).getVolumeInfo(null);
+    Mockito.doReturn(true).when(mockVm).checkVolumeAccess(null, null);
+    Mockito.doNothing().when(mockVm).setOwner(null, null);
+    Mockito.doReturn(null).when(mockVm).listVolumes(null, null, null, 0);
+
+    org.apache.hadoop.test.Whitebox.setInternalState(
+        ozoneManager, "volumeManager", mockVm);
+    doVolumeOps();
+
+    MetricsRecordBuilder omMetrics = getMetrics("OMMetrics");
+    assertCounter("NumVolumeOps", 6L, omMetrics);
+    assertCounter("NumVolumeCreates", 1L, omMetrics);
+    assertCounter("NumVolumeUpdates", 1L, omMetrics);
+    assertCounter("NumVolumeInfos", 1L, omMetrics);
+    assertCounter("NumVolumeCheckAccesses", 1L, omMetrics);
+    assertCounter("NumVolumeDeletes", 1L, omMetrics);
+    assertCounter("NumVolumeLists", 1L, omMetrics);
+
+    // inject exception to test for Failure Metrics
+    Mockito.doThrow(exception).when(mockVm).createVolume(null);
+    Mockito.doThrow(exception).when(mockVm).deleteVolume(null);
+    Mockito.doThrow(exception).when(mockVm).getVolumeInfo(null);
+    Mockito.doThrow(exception).when(mockVm).checkVolumeAccess(null, null);
+    Mockito.doThrow(exception).when(mockVm).setOwner(null, null);
+    Mockito.doThrow(exception).when(mockVm).listVolumes(null, null, null, 0);
+
+    org.apache.hadoop.test.Whitebox.setInternalState(ozoneManager, "volumeManager", mockVm);
+    doVolumeOps();
+
+    omMetrics = getMetrics("OMMetrics");
+    assertCounter("NumVolumeOps", 12L, omMetrics);
+    assertCounter("NumVolumeCreates", 2L, omMetrics);
+    assertCounter("NumVolumeUpdates", 2L, omMetrics);
+    assertCounter("NumVolumeInfos", 2L, omMetrics);
+    assertCounter("NumVolumeCheckAccesses", 2L, omMetrics);
+    assertCounter("NumVolumeDeletes", 2L, omMetrics);
+    assertCounter("NumVolumeLists", 2L, omMetrics);
+
+    assertCounter("NumVolumeCreateFails", 1L, omMetrics);
+    assertCounter("NumVolumeUpdateFails", 1L, omMetrics);
+    assertCounter("NumVolumeInfoFails", 1L, omMetrics);
+    assertCounter("NumVolumeCheckAccessFails", 1L, omMetrics);
+    assertCounter("NumVolumeDeleteFails", 1L, omMetrics);
+    assertCounter("NumVolumeListFails", 1L, omMetrics);
+  }
+
+  @Test
+  public void testBucketOps() throws IOException {
+    BucketManager bucketManager =
+        (BucketManager) org.apache.hadoop.test.Whitebox
+            .getInternalState(ozoneManager, "bucketManager");
+    BucketManager mockBm = Mockito.spy(bucketManager);
+
+    Mockito.doNothing().when(mockBm).createBucket(null);
+    Mockito.doNothing().when(mockBm).deleteBucket(null, null);
+    Mockito.doReturn(null).when(mockBm).getBucketInfo(null, null);
+    Mockito.doNothing().when(mockBm).setBucketProperty(null);
+    Mockito.doReturn(null).when(mockBm).listBuckets(null, null, null, 0);
+
+    org.apache.hadoop.test.Whitebox.setInternalState(
+        ozoneManager, "bucketManager", mockBm);
+    doBucketOps();
+
+    MetricsRecordBuilder omMetrics = getMetrics("OMMetrics");
+    assertCounter("NumBucketOps", 5L, omMetrics);
+    assertCounter("NumBucketCreates", 1L, omMetrics);
+    assertCounter("NumBucketUpdates", 1L, omMetrics);
+    assertCounter("NumBucketInfos", 1L, omMetrics);
+    assertCounter("NumBucketDeletes", 1L, omMetrics);
+    assertCounter("NumBucketLists", 1L, omMetrics);
+
+    // inject exception to test for Failure Metrics
+    Mockito.doThrow(exception).when(mockBm).createBucket(null);
+    Mockito.doThrow(exception).when(mockBm).deleteBucket(null, null);
+    Mockito.doThrow(exception).when(mockBm).getBucketInfo(null, null);
+    Mockito.doThrow(exception).when(mockBm).setBucketProperty(null);
+    Mockito.doThrow(exception).when(mockBm).listBuckets(null, null, null, 0);
+
+    org.apache.hadoop.test.Whitebox.setInternalState(
+        ozoneManager, "bucketManager", mockBm);
+    doBucketOps();
+
+    omMetrics = getMetrics("OMMetrics");
+    assertCounter("NumBucketOps", 10L, omMetrics);
+    assertCounter("NumBucketCreates", 2L, omMetrics);
+    assertCounter("NumBucketUpdates", 2L, omMetrics);
+    assertCounter("NumBucketInfos", 2L, omMetrics);
+    assertCounter("NumBucketDeletes", 2L, omMetrics);
+    assertCounter("NumBucketLists", 2L, omMetrics);
+
+    assertCounter("NumBucketCreateFails", 1L, omMetrics);
+    assertCounter("NumBucketUpdateFails", 1L, omMetrics);
+    assertCounter("NumBucketInfoFails", 1L, omMetrics);
+    assertCounter("NumBucketDeleteFails", 1L, omMetrics);
+    assertCounter("NumBucketListFails", 1L, omMetrics);
+  }
+
+  @Test
+  public void testKeyOps() throws IOException {
+    KeyManager bucketManager = (KeyManager) org.apache.hadoop.test.Whitebox
+        .getInternalState(ozoneManager, "keyManager");
+    KeyManager mockKm = Mockito.spy(bucketManager);
+
+    Mockito.doReturn(null).when(mockKm).openKey(null);
+    Mockito.doNothing().when(mockKm).deleteKey(null);
+    Mockito.doReturn(null).when(mockKm).lookupKey(null);
+    Mockito.doReturn(null).when(mockKm).listKeys(null, null, null, null, 0);
+
+    org.apache.hadoop.test.Whitebox.setInternalState(
+        ozoneManager, "keyManager", mockKm);
+    doKeyOps();
+
+    MetricsRecordBuilder omMetrics = getMetrics("OMMetrics");
+    assertCounter("NumKeyOps", 4L, omMetrics);
+    assertCounter("NumKeyAllocate", 1L, omMetrics);
+    assertCounter("NumKeyLookup", 1L, omMetrics);
+    assertCounter("NumKeyDeletes", 1L, omMetrics);
+    assertCounter("NumKeyLists", 1L, omMetrics);
+
+    // inject exception to test for Failure Metrics
+    Mockito.doThrow(exception).when(mockKm).openKey(null);
+    Mockito.doThrow(exception).when(mockKm).deleteKey(null);
+    Mockito.doThrow(exception).when(mockKm).lookupKey(null);
+    Mockito.doThrow(exception).when(mockKm).listKeys(
+        null, null, null, null, 0);
+
+    org.apache.hadoop.test.Whitebox.setInternalState(
+        ozoneManager, "keyManager", mockKm);
+    doKeyOps();
+
+    omMetrics = getMetrics("OMMetrics");
+    assertCounter("NumKeyOps", 8L, omMetrics);
+    assertCounter("NumKeyAllocate", 2L, omMetrics);
+    assertCounter("NumKeyLookup", 2L, omMetrics);
+    assertCounter("NumKeyDeletes", 2L, omMetrics);
+    assertCounter("NumKeyLists", 2L, omMetrics);
+
+    assertCounter("NumKeyAllocateFails", 1L, omMetrics);
+    assertCounter("NumKeyLookupFails", 1L, omMetrics);
+    assertCounter("NumKeyDeleteFails", 1L, omMetrics);
+    assertCounter("NumKeyListFails", 1L, omMetrics);
+  }
+
+  /**
+   * Test volume operations with ignoring thrown exception.
+   */
+  private void doVolumeOps() {
+    try {
+      ozoneManager.createVolume(null);
+    } catch (IOException ignored) {
+    }
+
+    try {
+      ozoneManager.deleteVolume(null);
+    } catch (IOException ignored) {
+    }
+
+    try {
+      ozoneManager.getVolumeInfo(null);
+    } catch (IOException ignored) {
+    }
+
+    try {
+      ozoneManager.checkVolumeAccess(null, null);
+    } catch (IOException ignored) {
+    }
+
+    try {
+      ozoneManager.setOwner(null, null);
+    } catch (IOException ignored) {
+    }
+
+    try {
+      ozoneManager.listAllVolumes(null, null, 0);
+    } catch (IOException ignored) {
+    }
+  }
+
+  /**
+   * Test bucket operations with ignoring thrown exception.
+   */
+  private void doBucketOps() {
+    try {
+      ozoneManager.createBucket(null);
+    } catch (IOException ignored) {
+    }
+
+    try {
+      ozoneManager.deleteBucket(null, null);
+    } catch (IOException ignored) {
+    }
+
+    try {
+      ozoneManager.getBucketInfo(null, null);
+    } catch (IOException ignored) {
+    }
+
+    try {
+      ozoneManager.setBucketProperty(null);
+    } catch (IOException ignored) {
+    }
+
+    try {
+      ozoneManager.listBuckets(null, null, null, 0);
+    } catch (IOException ignored) {
+    }
+  }
+
+  /**
+   * Test key operations with ignoring thrown exception.
+   */
+  private void doKeyOps() {
+    try {
+      ozoneManager.openKey(null);
+    } catch (IOException ignored) {
+    }
+
+    try {
+      ozoneManager.deleteKey(null);
+    } catch (IOException ignored) {
+    }
+
+    try {
+      ozoneManager.lookupKey(null);
+    } catch (IOException ignored) {
+    }
+
+    try {
+      ozoneManager.listKeys(null, null, null, null, 0);
+    } catch (IOException ignored) {
+    }
+  }
+}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/061b1685/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOmSQLCli.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOmSQLCli.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOmSQLCli.java
new file mode 100644
index 0000000..005a012
--- /dev/null
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOmSQLCli.java
@@ -0,0 +1,284 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with this
+ * work for additional information regarding copyright ownership.  The ASF
+ * licenses this file to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations under
+ * the License.
+ */
+package org.apache.hadoop.ozone.om;
+
+import org.apache.hadoop.hdfs.server.datanode.ObjectStoreHandler;
+import org.apache.hadoop.ozone.MiniOzoneCluster;
+import org.apache.hadoop.ozone.OzoneConfigKeys;
+import org.apache.hadoop.hdds.conf.OzoneConfiguration;
+import org.apache.hadoop.ozone.OzoneConsts;
+import org.apache.hadoop.ozone.scm.cli.SQLCLI;
+import org.apache.hadoop.ozone.web.handlers.BucketArgs;
+import org.apache.hadoop.ozone.web.handlers.KeyArgs;
+import org.apache.hadoop.ozone.web.handlers.UserArgs;
+import org.apache.hadoop.ozone.web.handlers.VolumeArgs;
+import org.apache.hadoop.ozone.web.interfaces.StorageHandler;
+import org.apache.hadoop.ozone.web.utils.OzoneUtils;
+import org.apache.hadoop.test.GenericTestUtils;
+import org.junit.After;
+import org.junit.Before;
+import org.junit.Test;
+import org.junit.runner.RunWith;
+import org.junit.runners.Parameterized;
+
+import java.io.IOException;
+import java.io.OutputStream;
+import java.nio.file.Files;
+import java.nio.file.Paths;
+import java.sql.Connection;
+import java.sql.DriverManager;
+import java.sql.ResultSet;
+import java.sql.SQLException;
+import java.sql.Statement;
+import java.util.Arrays;
+import java.util.Collection;
+import java.util.HashMap;
+import java.util.LinkedList;
+import java.util.List;
+import java.util.UUID;
+
+import static org.apache.hadoop.ozone.OzoneConsts.OM_DB_NAME;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertNotNull;
+import static org.junit.Assert.assertTrue;
+
+/**
+ * This class tests the CLI that transforms om.db into SQLite DB files.
+ */
+@RunWith(Parameterized.class)
+public class TestOmSQLCli {
+  private MiniOzoneCluster cluster = null;
+  private StorageHandler storageHandler;
+  private UserArgs userArgs;
+  private OzoneConfiguration conf;
+  private SQLCLI cli;
+
+  private String userName = "userTest";
+  private String adminName = "adminTest";
+  private String volumeName0 = "volumeTest0";
+  private String volumeName1 = "volumeTest1";
+  private String bucketName0 = "bucketTest0";
+  private String bucketName1 = "bucketTest1";
+  private String bucketName2 = "bucketTest2";
+  private String keyName0 = "key0";
+  private String keyName1 = "key1";
+  private String keyName2 = "key2";
+  private String keyName3 = "key3";
+
+  @Parameterized.Parameters
+  public static Collection<Object[]> data() {
+    return Arrays.asList(new Object[][] {
+        {OzoneConfigKeys.OZONE_METADATA_STORE_IMPL_LEVELDB},
+        {OzoneConfigKeys.OZONE_METADATA_STORE_IMPL_ROCKSDB}
+    });
+  }
+
+  private String metaStoreType;
+
+  public TestOmSQLCli(String type) {
+    metaStoreType = type;
+  }
+
+  /**
+   * Create a MiniDFSCluster for testing.
+   * <p>
+   * Ozone is made active by setting OZONE_ENABLED = true and
+   * OZONE_HANDLER_TYPE_KEY = "distributed"
+   *
+   * @throws IOException
+   */
+  @Before
+  public void setup() throws Exception {
+    conf = new OzoneConfiguration();
+    conf.set(OzoneConfigKeys.OZONE_HANDLER_TYPE_KEY,
+        OzoneConsts.OZONE_HANDLER_DISTRIBUTED);
+    cluster = MiniOzoneCluster.newBuilder(conf).build();
+    cluster.waitForClusterToBeReady();
+    storageHandler = new ObjectStoreHandler(conf).getStorageHandler();
+    userArgs = new UserArgs(null, OzoneUtils.getRequestID(),
+        null, null, null, null);
+    cluster.waitForClusterToBeReady();
+
+    VolumeArgs createVolumeArgs0 = new VolumeArgs(volumeName0, userArgs);
+    createVolumeArgs0.setUserName(userName);
+    createVolumeArgs0.setAdminName(adminName);
+    storageHandler.createVolume(createVolumeArgs0);
+    VolumeArgs createVolumeArgs1 = new VolumeArgs(volumeName1, userArgs);
+    createVolumeArgs1.setUserName(userName);
+    createVolumeArgs1.setAdminName(adminName);
+    storageHandler.createVolume(createVolumeArgs1);
+
+    BucketArgs bucketArgs0 = new BucketArgs(volumeName0, bucketName0, userArgs);
+    storageHandler.createBucket(bucketArgs0);
+    BucketArgs bucketArgs1 = new BucketArgs(volumeName1, bucketName1, userArgs);
+    storageHandler.createBucket(bucketArgs1);
+    BucketArgs bucketArgs2 = new BucketArgs(volumeName0, bucketName2, userArgs);
+    storageHandler.createBucket(bucketArgs2);
+
+    KeyArgs keyArgs0 =
+        new KeyArgs(volumeName0, bucketName0, keyName0, userArgs);
+    keyArgs0.setSize(100);
+    KeyArgs keyArgs1 =
+        new KeyArgs(volumeName1, bucketName1, keyName1, userArgs);
+    keyArgs1.setSize(200);
+    KeyArgs keyArgs2 =
+        new KeyArgs(volumeName0, bucketName2, keyName2, userArgs);
+    keyArgs2.setSize(300);
+    KeyArgs keyArgs3 =
+        new KeyArgs(volumeName0, bucketName2, keyName3, userArgs);
+    keyArgs3.setSize(400);
+
+    OutputStream stream = storageHandler.newKeyWriter(keyArgs0);
+    stream.close();
+    stream = storageHandler.newKeyWriter(keyArgs1);
+    stream.close();
+    stream = storageHandler.newKeyWriter(keyArgs2);
+    stream.close();
+    stream = storageHandler.newKeyWriter(keyArgs3);
+    stream.close();
+
+    cluster.getOzoneManager().stop();
+    cluster.getStorageContainerManager().stop();
+    conf.set(OzoneConfigKeys.OZONE_METADATA_STORE_IMPL, metaStoreType);
+    cli = new SQLCLI(conf);
+  }
+
+  @After
+  public void shutdown() {
+    if (cluster != null) {
+      cluster.shutdown();
+    }
+  }
+
+  @Test
+  public void testOmDB() throws Exception {
+    String dbOutPath =  GenericTestUtils.getTempPath(
+        UUID.randomUUID() + "/out_sql.db");
+
+    String dbRootPath = conf.get(OzoneConfigKeys.OZONE_METADATA_DIRS);
+    String dbPath = dbRootPath + "/" + OM_DB_NAME;
+    String[] args = {"-p", dbPath, "-o", dbOutPath};
+
+    cli.run(args);
+
+    Connection conn = connectDB(dbOutPath);
+    String sql = "SELECT * FROM volumeList";
+    ResultSet rs = executeQuery(conn, sql);
+    List<String> expectedValues =
+        new LinkedList<>(Arrays.asList(volumeName0, volumeName1));
+    while (rs.next()) {
+      String userNameRs = rs.getString("userName");
+      String volumeNameRs = rs.getString("volumeName");
+      assertEquals(userName,  userNameRs.substring(1));
+      assertTrue(expectedValues.remove(volumeNameRs));
+    }
+    assertEquals(0, expectedValues.size());
+
+    sql = "SELECT * FROM volumeInfo";
+    rs = executeQuery(conn, sql);
+    expectedValues =
+        new LinkedList<>(Arrays.asList(volumeName0, volumeName1));
+    while (rs.next()) {
+      String adName = rs.getString("adminName");
+      String ownerName = rs.getString("ownerName");
+      String volumeName = rs.getString("volumeName");
+      assertEquals(adminName, adName);
+      assertEquals(userName, ownerName);
+      assertTrue(expectedValues.remove(volumeName));
+    }
+    assertEquals(0, expectedValues.size());
+
+    sql = "SELECT * FROM aclInfo";
+    rs = executeQuery(conn, sql);
+    expectedValues =
+        new LinkedList<>(Arrays.asList(volumeName0, volumeName1));
+    while (rs.next()) {
+      String adName = rs.getString("adminName");
+      String ownerName = rs.getString("ownerName");
+      String volumeName = rs.getString("volumeName");
+      String type = rs.getString("type");
+      String uName = rs.getString("userName");
+      String rights = rs.getString("rights");
+      assertEquals(adminName, adName);
+      assertEquals(userName, ownerName);
+      assertEquals("USER", type);
+      assertEquals(userName, uName);
+      assertEquals("READ_WRITE", rights);
+      assertTrue(expectedValues.remove(volumeName));
+    }
+    assertEquals(0, expectedValues.size());
+
+    sql = "SELECT * FROM bucketInfo";
+    rs = executeQuery(conn, sql);
+    HashMap<String, String> expectedMap = new HashMap<>();
+    expectedMap.put(bucketName0, volumeName0);
+    expectedMap.put(bucketName2, volumeName0);
+    expectedMap.put(bucketName1, volumeName1);
+    while (rs.next()) {
+      String volumeName = rs.getString("volumeName");
+      String bucketName = rs.getString("bucketName");
+      boolean versionEnabled = rs.getBoolean("versionEnabled");
+      String storegeType = rs.getString("storageType");
+      assertEquals(volumeName, expectedMap.remove(bucketName));
+      assertFalse(versionEnabled);
+      assertEquals("DISK", storegeType);
+    }
+    assertEquals(0, expectedMap.size());
+
+    sql = "SELECT * FROM keyInfo";
+    rs = executeQuery(conn, sql);
+    HashMap<String, List<String>> expectedMap2 = new HashMap<>();
+    // no data written, data size will be 0
+    expectedMap2.put(keyName0,
+        Arrays.asList(volumeName0, bucketName0, "0"));
+    expectedMap2.put(keyName1,
+        Arrays.asList(volumeName1, bucketName1, "0"));
+    expectedMap2.put(keyName2,
+        Arrays.asList(volumeName0, bucketName2, "0"));
+    expectedMap2.put(keyName3,
+        Arrays.asList(volumeName0, bucketName2, "0"));
+    while (rs.next()) {
+      String volumeName = rs.getString("volumeName");
+      String bucketName = rs.getString("bucketName");
+      String keyName = rs.getString("keyName");
+      int dataSize = rs.getInt("dataSize");
+      List<String> vals = expectedMap2.remove(keyName);
+      assertNotNull(vals);
+      assertEquals(vals.get(0), volumeName);
+      assertEquals(vals.get(1), bucketName);
+      assertEquals(vals.get(2), Integer.toString(dataSize));
+    }
+    assertEquals(0, expectedMap2.size());
+
+    conn.close();
+    Files.delete(Paths.get(dbOutPath));
+  }
+
+  private ResultSet executeQuery(Connection conn, String sql)
+      throws SQLException {
+    Statement stmt = conn.createStatement();
+    return stmt.executeQuery(sql);
+  }
+
+  private Connection connectDB(String dbPath) throws Exception {
+    Class.forName("org.sqlite.JDBC");
+    String connectPath =
+        String.format("jdbc:sqlite:%s", dbPath);
+    return DriverManager.getConnection(connectPath);
+  }
+}


---------------------------------------------------------------------
To unsubscribe, e-mail: common-commits-unsubscribe@hadoop.apache.org
For additional commands, e-mail: common-commits-help@hadoop.apache.org


[31/50] [abbrv] hadoop git commit: HDDS-167. Rename KeySpaceManager to OzoneManager. Contributed by Arpit Agarwal.

Posted by vi...@apache.org.
http://git-wip-us.apache.org/repos/asf/hadoop/blob/061b1685/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/ksm/VolumeManagerImpl.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/ksm/VolumeManagerImpl.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/ksm/VolumeManagerImpl.java
deleted file mode 100644
index cc2f78a..0000000
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/ksm/VolumeManagerImpl.java
+++ /dev/null
@@ -1,391 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with this
- * work for additional information regarding copyright ownership.  The ASF
- * licenses this file to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations under
- * the License.
- */
-package org.apache.hadoop.ozone.ksm;
-
-import com.google.common.base.Preconditions;
-import org.apache.hadoop.ozone.ksm.helpers.KsmVolumeArgs;
-import org.apache.hadoop.hdds.conf.OzoneConfiguration;
-import org.apache.hadoop.ozone.ksm.exceptions.KSMException;
-import org.apache.hadoop.ozone.protocol.proto
-    .KeySpaceManagerProtocolProtos.OzoneAclInfo;
-import org.apache.hadoop.ozone.protocol.proto
-    .KeySpaceManagerProtocolProtos.VolumeList;
-import org.apache.hadoop.ozone.protocol.proto
-    .KeySpaceManagerProtocolProtos.VolumeInfo;
-import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
-import org.apache.hadoop.util.Time;
-import org.apache.hadoop.utils.BatchOperation;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import java.io.IOException;
-import java.util.LinkedList;
-import java.util.List;
-import java.util.Map;
-
-import static org.apache.hadoop.ozone.ksm.KSMConfigKeys
-    .OZONE_KSM_USER_MAX_VOLUME_DEFAULT;
-import static org.apache.hadoop.ozone.ksm.KSMConfigKeys
-    .OZONE_KSM_USER_MAX_VOLUME;
-import static org.apache.hadoop.ozone.ksm.exceptions
-    .KSMException.ResultCodes;
-
-/**
- * KSM volume management code.
- */
-public class VolumeManagerImpl implements VolumeManager {
-  private static final Logger LOG =
-      LoggerFactory.getLogger(VolumeManagerImpl.class);
-
-  private final KSMMetadataManager metadataManager;
-  private final int maxUserVolumeCount;
-
-  /**
-   * Constructor.
-   * @param conf - Ozone configuration.
-   * @throws IOException
-   */
-  public VolumeManagerImpl(KSMMetadataManager metadataManager,
-      OzoneConfiguration conf) throws IOException {
-    this.metadataManager = metadataManager;
-    this.maxUserVolumeCount = conf.getInt(OZONE_KSM_USER_MAX_VOLUME,
-        OZONE_KSM_USER_MAX_VOLUME_DEFAULT);
-  }
-
-  // Helpers to add and delete volume from user list
-  private void addVolumeToOwnerList(String volume, String owner,
-      BatchOperation batchOperation) throws IOException {
-    // Get the volume list
-    byte[] dbUserKey = metadataManager.getUserKey(owner);
-    byte[] volumeList  = metadataManager.get(dbUserKey);
-    List<String> prevVolList = new LinkedList<>();
-    if (volumeList != null) {
-      VolumeList vlist = VolumeList.parseFrom(volumeList);
-      prevVolList.addAll(vlist.getVolumeNamesList());
-    }
-
-    // Check the volume count
-    if (prevVolList.size() >= maxUserVolumeCount) {
-      LOG.debug("Too many volumes for user:{}", owner);
-      throw new KSMException(ResultCodes.FAILED_TOO_MANY_USER_VOLUMES);
-    }
-
-    // Add the new volume to the list
-    prevVolList.add(volume);
-    VolumeList newVolList = VolumeList.newBuilder()
-        .addAllVolumeNames(prevVolList).build();
-    batchOperation.put(dbUserKey, newVolList.toByteArray());
-  }
-
-  private void delVolumeFromOwnerList(String volume, String owner,
-                                      BatchOperation batchOperation)
-      throws IOException {
-    // Get the volume list
-    byte[] dbUserKey = metadataManager.getUserKey(owner);
-    byte[] volumeList  = metadataManager.get(dbUserKey);
-    List<String> prevVolList = new LinkedList<>();
-    if (volumeList != null) {
-      VolumeList vlist = VolumeList.parseFrom(volumeList);
-      prevVolList.addAll(vlist.getVolumeNamesList());
-    } else {
-      LOG.debug("volume:{} not found for user:{}");
-      throw new KSMException(ResultCodes.FAILED_USER_NOT_FOUND);
-    }
-
-    // Remove the volume from the list
-    prevVolList.remove(volume);
-    if (prevVolList.size() == 0) {
-      batchOperation.delete(dbUserKey);
-    } else {
-      VolumeList newVolList = VolumeList.newBuilder()
-          .addAllVolumeNames(prevVolList).build();
-      batchOperation.put(dbUserKey, newVolList.toByteArray());
-    }
-  }
-
-  /**
-   * Creates a volume.
-   * @param args - KsmVolumeArgs.
-   */
-  @Override
-  public void createVolume(KsmVolumeArgs args) throws IOException {
-    Preconditions.checkNotNull(args);
-    metadataManager.writeLock().lock();
-    try {
-      byte[] dbVolumeKey = metadataManager.getVolumeKey(args.getVolume());
-      byte[] volumeInfo = metadataManager.get(dbVolumeKey);
-
-      // Check of the volume already exists
-      if (volumeInfo != null) {
-        LOG.debug("volume:{} already exists", args.getVolume());
-        throw new KSMException(ResultCodes.FAILED_VOLUME_ALREADY_EXISTS);
-      }
-
-      BatchOperation batch = new BatchOperation();
-      // Write the vol info
-      List<HddsProtos.KeyValue> metadataList = new LinkedList<>();
-      for (Map.Entry<String, String> entry : args.getKeyValueMap().entrySet()) {
-        metadataList.add(HddsProtos.KeyValue.newBuilder()
-            .setKey(entry.getKey()).setValue(entry.getValue()).build());
-      }
-      List<OzoneAclInfo> aclList = args.getAclMap().ozoneAclGetProtobuf();
-
-      VolumeInfo newVolumeInfo = VolumeInfo.newBuilder()
-          .setAdminName(args.getAdminName())
-          .setOwnerName(args.getOwnerName())
-          .setVolume(args.getVolume())
-          .setQuotaInBytes(args.getQuotaInBytes())
-          .addAllMetadata(metadataList)
-          .addAllVolumeAcls(aclList)
-          .setCreationTime(Time.now())
-          .build();
-      batch.put(dbVolumeKey, newVolumeInfo.toByteArray());
-
-      // Add volume to user list
-      addVolumeToOwnerList(args.getVolume(), args.getOwnerName(), batch);
-      metadataManager.writeBatch(batch);
-      LOG.debug("created volume:{} user:{}", args.getVolume(),
-          args.getOwnerName());
-    } catch (IOException ex) {
-      if (!(ex instanceof KSMException)) {
-        LOG.error("Volume creation failed for user:{} volume:{}",
-            args.getOwnerName(), args.getVolume(), ex);
-      }
-      throw ex;
-    } finally {
-      metadataManager.writeLock().unlock();
-    }
-  }
-
-  /**
-   * Changes the owner of a volume.
-   *
-   * @param volume - Name of the volume.
-   * @param owner - Name of the owner.
-   * @throws IOException
-   */
-  @Override
-  public void setOwner(String volume, String owner) throws IOException {
-    Preconditions.checkNotNull(volume);
-    Preconditions.checkNotNull(owner);
-    metadataManager.writeLock().lock();
-    try {
-      byte[] dbVolumeKey = metadataManager.getVolumeKey(volume);
-      byte[] volInfo = metadataManager.get(dbVolumeKey);
-      if (volInfo == null) {
-        LOG.debug("Changing volume ownership failed for user:{} volume:{}",
-            owner, volume);
-        throw  new KSMException(ResultCodes.FAILED_VOLUME_NOT_FOUND);
-      }
-
-      VolumeInfo volumeInfo = VolumeInfo.parseFrom(volInfo);
-      KsmVolumeArgs volumeArgs = KsmVolumeArgs.getFromProtobuf(volumeInfo);
-      Preconditions.checkState(volume.equals(volumeInfo.getVolume()));
-
-      BatchOperation batch = new BatchOperation();
-      delVolumeFromOwnerList(volume, volumeArgs.getOwnerName(), batch);
-      addVolumeToOwnerList(volume, owner, batch);
-
-      KsmVolumeArgs newVolumeArgs =
-          KsmVolumeArgs.newBuilder().setVolume(volumeArgs.getVolume())
-              .setAdminName(volumeArgs.getAdminName())
-              .setOwnerName(owner)
-              .setQuotaInBytes(volumeArgs.getQuotaInBytes())
-              .setCreationTime(volumeArgs.getCreationTime())
-              .build();
-
-      VolumeInfo newVolumeInfo = newVolumeArgs.getProtobuf();
-      batch.put(dbVolumeKey, newVolumeInfo.toByteArray());
-
-      metadataManager.writeBatch(batch);
-    } catch (IOException ex) {
-      if (!(ex instanceof KSMException)) {
-        LOG.error("Changing volume ownership failed for user:{} volume:{}",
-            owner, volume, ex);
-      }
-      throw ex;
-    } finally {
-      metadataManager.writeLock().unlock();
-    }
-  }
-
-  /**
-   * Changes the Quota on a volume.
-   *
-   * @param volume - Name of the volume.
-   * @param quota - Quota in bytes.
-   * @throws IOException
-   */
-  public void setQuota(String volume, long quota) throws IOException {
-    Preconditions.checkNotNull(volume);
-    metadataManager.writeLock().lock();
-    try {
-      byte[] dbVolumeKey = metadataManager.getVolumeKey(volume);
-      byte[] volInfo = metadataManager.get(dbVolumeKey);
-      if (volInfo == null) {
-        LOG.debug("volume:{} does not exist", volume);
-        throw new KSMException(ResultCodes.FAILED_VOLUME_NOT_FOUND);
-      }
-
-      VolumeInfo volumeInfo = VolumeInfo.parseFrom(volInfo);
-      KsmVolumeArgs volumeArgs = KsmVolumeArgs.getFromProtobuf(volumeInfo);
-      Preconditions.checkState(volume.equals(volumeInfo.getVolume()));
-
-      KsmVolumeArgs newVolumeArgs =
-          KsmVolumeArgs.newBuilder()
-              .setVolume(volumeArgs.getVolume())
-              .setAdminName(volumeArgs.getAdminName())
-              .setOwnerName(volumeArgs.getOwnerName())
-              .setQuotaInBytes(quota)
-              .setCreationTime(volumeArgs.getCreationTime()).build();
-
-      VolumeInfo newVolumeInfo = newVolumeArgs.getProtobuf();
-      metadataManager.put(dbVolumeKey, newVolumeInfo.toByteArray());
-    } catch (IOException ex) {
-      if (!(ex instanceof KSMException)) {
-        LOG.error("Changing volume quota failed for volume:{} quota:{}", volume,
-            quota, ex);
-      }
-      throw ex;
-    } finally {
-      metadataManager.writeLock().unlock();
-    }
-  }
-
-  /**
-   * Gets the volume information.
-   * @param volume - Volume name.
-   * @return VolumeArgs or exception is thrown.
-   * @throws IOException
-   */
-  public KsmVolumeArgs getVolumeInfo(String volume) throws IOException {
-    Preconditions.checkNotNull(volume);
-    metadataManager.readLock().lock();
-    try {
-      byte[] dbVolumeKey = metadataManager.getVolumeKey(volume);
-      byte[] volInfo = metadataManager.get(dbVolumeKey);
-      if (volInfo == null) {
-        LOG.debug("volume:{} does not exist", volume);
-        throw new KSMException(ResultCodes.FAILED_VOLUME_NOT_FOUND);
-      }
-
-      VolumeInfo volumeInfo = VolumeInfo.parseFrom(volInfo);
-      KsmVolumeArgs volumeArgs = KsmVolumeArgs.getFromProtobuf(volumeInfo);
-      Preconditions.checkState(volume.equals(volumeInfo.getVolume()));
-      return volumeArgs;
-    } catch (IOException ex) {
-      if (!(ex instanceof KSMException)) {
-        LOG.warn("Info volume failed for volume:{}", volume, ex);
-      }
-      throw ex;
-    } finally {
-      metadataManager.readLock().unlock();
-    }
-  }
-
-  /**
-   * Deletes an existing empty volume.
-   *
-   * @param volume - Name of the volume.
-   * @throws IOException
-   */
-  @Override
-  public void deleteVolume(String volume) throws IOException {
-    Preconditions.checkNotNull(volume);
-    metadataManager.writeLock().lock();
-    try {
-      BatchOperation batch = new BatchOperation();
-      byte[] dbVolumeKey = metadataManager.getVolumeKey(volume);
-      byte[] volInfo = metadataManager.get(dbVolumeKey);
-      if (volInfo == null) {
-        LOG.debug("volume:{} does not exist", volume);
-        throw new KSMException(ResultCodes.FAILED_VOLUME_NOT_FOUND);
-      }
-
-      if (!metadataManager.isVolumeEmpty(volume)) {
-        LOG.debug("volume:{} is not empty", volume);
-        throw new KSMException(ResultCodes.FAILED_VOLUME_NOT_EMPTY);
-      }
-
-      VolumeInfo volumeInfo = VolumeInfo.parseFrom(volInfo);
-      Preconditions.checkState(volume.equals(volumeInfo.getVolume()));
-      // delete the volume from the owner list
-      // as well as delete the volume entry
-      delVolumeFromOwnerList(volume, volumeInfo.getOwnerName(), batch);
-      batch.delete(dbVolumeKey);
-      metadataManager.writeBatch(batch);
-    } catch (IOException ex) {
-      if (!(ex instanceof KSMException)) {
-        LOG.error("Delete volume failed for volume:{}", volume, ex);
-      }
-      throw ex;
-    } finally {
-      metadataManager.writeLock().unlock();
-    }
-  }
-
-  /**
-   * Checks if the specified user with a role can access this volume.
-   *
-   * @param volume - volume
-   * @param userAcl - user acl which needs to be checked for access
-   * @return true if the user has access for the volume, false otherwise
-   * @throws IOException
-   */
-  public boolean checkVolumeAccess(String volume, OzoneAclInfo userAcl)
-      throws IOException {
-    Preconditions.checkNotNull(volume);
-    Preconditions.checkNotNull(userAcl);
-    metadataManager.readLock().lock();
-    try {
-      byte[] dbVolumeKey = metadataManager.getVolumeKey(volume);
-      byte[] volInfo = metadataManager.get(dbVolumeKey);
-      if (volInfo == null) {
-        LOG.debug("volume:{} does not exist", volume);
-        throw  new KSMException(ResultCodes.FAILED_VOLUME_NOT_FOUND);
-      }
-
-      VolumeInfo volumeInfo = VolumeInfo.parseFrom(volInfo);
-      KsmVolumeArgs volumeArgs = KsmVolumeArgs.getFromProtobuf(volumeInfo);
-      Preconditions.checkState(volume.equals(volumeInfo.getVolume()));
-      return volumeArgs.getAclMap().hasAccess(userAcl);
-    } catch (IOException ex) {
-      if (!(ex instanceof KSMException)) {
-        LOG.error("Check volume access failed for volume:{} user:{} rights:{}",
-            volume, userAcl.getName(), userAcl.getRights(), ex);
-      }
-      throw ex;
-    } finally {
-      metadataManager.readLock().unlock();
-    }
-  }
-
-  /**
-   * {@inheritDoc}
-   */
-  @Override
-  public List<KsmVolumeArgs> listVolumes(String userName,
-      String prefix, String startKey, int maxKeys) throws IOException {
-    metadataManager.readLock().lock();
-    try {
-      return metadataManager.listVolumes(
-          userName, prefix, startKey, maxKeys);
-    } finally {
-      metadataManager.readLock().unlock();
-    }
-  }
-}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/061b1685/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/ksm/exceptions/KSMException.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/ksm/exceptions/KSMException.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/ksm/exceptions/KSMException.java
deleted file mode 100644
index b902eab..0000000
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/ksm/exceptions/KSMException.java
+++ /dev/null
@@ -1,118 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.ozone.ksm.exceptions;
-
-import java.io.IOException;
-
-/**
- * Exception thrown by KSM.
- */
-public class KSMException extends IOException {
-  private final KSMException.ResultCodes result;
-
-  /**
-   * Constructs an {@code IOException} with {@code null}
-   * as its error detail message.
-   */
-  public KSMException(KSMException.ResultCodes result) {
-    this.result = result;
-  }
-
-  /**
-   * Constructs an {@code IOException} with the specified detail message.
-   *
-   * @param message The detail message (which is saved for later retrieval by
-   * the
-   * {@link #getMessage()} method)
-   */
-  public KSMException(String message, KSMException.ResultCodes result) {
-    super(message);
-    this.result = result;
-  }
-
-  /**
-   * Constructs an {@code IOException} with the specified detail message
-   * and cause.
-   * <p>
-   * <p> Note that the detail message associated with {@code cause} is
-   * <i>not</i> automatically incorporated into this exception's detail
-   * message.
-   *
-   * @param message The detail message (which is saved for later retrieval by
-   * the
-   * {@link #getMessage()} method)
-   * @param cause The cause (which is saved for later retrieval by the {@link
-   * #getCause()} method).  (A null value is permitted, and indicates that the
-   * cause is nonexistent or unknown.)
-   * @since 1.6
-   */
-  public KSMException(String message, Throwable cause,
-                      KSMException.ResultCodes result) {
-    super(message, cause);
-    this.result = result;
-  }
-
-  /**
-   * Constructs an {@code IOException} with the specified cause and a
-   * detail message of {@code (cause==null ? null : cause.toString())}
-   * (which typically contains the class and detail message of {@code cause}).
-   * This constructor is useful for IO exceptions that are little more
-   * than wrappers for other throwables.
-   *
-   * @param cause The cause (which is saved for later retrieval by the {@link
-   * #getCause()} method).  (A null value is permitted, and indicates that the
-   * cause is nonexistent or unknown.)
-   * @since 1.6
-   */
-  public KSMException(Throwable cause, KSMException.ResultCodes result) {
-    super(cause);
-    this.result = result;
-  }
-
-  /**
-   * Returns resultCode.
-   * @return ResultCode
-   */
-  public KSMException.ResultCodes getResult() {
-    return result;
-  }
-
-  /**
-   * Error codes to make it easy to decode these exceptions.
-   */
-  public enum ResultCodes {
-    FAILED_TOO_MANY_USER_VOLUMES,
-    FAILED_VOLUME_ALREADY_EXISTS,
-    FAILED_VOLUME_NOT_FOUND,
-    FAILED_VOLUME_NOT_EMPTY,
-    FAILED_USER_NOT_FOUND,
-    FAILED_BUCKET_ALREADY_EXISTS,
-    FAILED_BUCKET_NOT_FOUND,
-    FAILED_BUCKET_NOT_EMPTY,
-    FAILED_KEY_ALREADY_EXISTS,
-    FAILED_KEY_NOT_FOUND,
-    FAILED_KEY_ALLOCATION,
-    FAILED_KEY_DELETION,
-    FAILED_KEY_RENAME,
-    FAILED_INVALID_KEY_NAME,
-    FAILED_METADATA_ERROR,
-    FAILED_INTERNAL_ERROR,
-    KSM_NOT_INITIALIZED,
-    SCM_VERSION_MISMATCH_ERROR
-  }
-}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/061b1685/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/ksm/exceptions/package-info.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/ksm/exceptions/package-info.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/ksm/exceptions/package-info.java
deleted file mode 100644
index 09fd87f..0000000
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/ksm/exceptions/package-info.java
+++ /dev/null
@@ -1,19 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with this
- * work for additional information regarding copyright ownership.  The ASF
- * licenses this file to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations under
- * the License.
- */
-
-package org.apache.hadoop.ozone.ksm.exceptions;
-// Exception thrown by KSM.

http://git-wip-us.apache.org/repos/asf/hadoop/blob/061b1685/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/ksm/package-info.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/ksm/package-info.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/ksm/package-info.java
deleted file mode 100644
index 09d9f32..0000000
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/ksm/package-info.java
+++ /dev/null
@@ -1,21 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with this
- * work for additional information regarding copyright ownership.  The ASF
- * licenses this file to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations under
- * the License.
- */
-
-package org.apache.hadoop.ozone.ksm;
-/*
- This package contains the keyspace manager classes.
- */
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hadoop/blob/061b1685/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/BucketManager.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/BucketManager.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/BucketManager.java
new file mode 100644
index 0000000..ddb2b0e
--- /dev/null
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/BucketManager.java
@@ -0,0 +1,79 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with this
+ * work for additional information regarding copyright ownership.  The ASF
+ * licenses this file to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations under
+ * the License.
+ */
+package org.apache.hadoop.ozone.om;
+
+import org.apache.hadoop.ozone.om.helpers.OmBucketArgs;
+import org.apache.hadoop.ozone.om.helpers.OmBucketInfo;
+
+import java.io.IOException;
+import java.util.List;
+
+/**
+ * BucketManager handles all the bucket level operations.
+ */
+public interface BucketManager {
+  /**
+   * Creates a bucket.
+   * @param bucketInfo - OmBucketInfo for creating bucket.
+   */
+  void createBucket(OmBucketInfo bucketInfo) throws IOException;
+  /**
+   * Returns Bucket Information.
+   * @param volumeName - Name of the Volume.
+   * @param bucketName - Name of the Bucket.
+   */
+  OmBucketInfo getBucketInfo(String volumeName, String bucketName)
+      throws IOException;
+
+  /**
+   * Sets bucket property from args.
+   * @param args - BucketArgs.
+   * @throws IOException
+   */
+  void setBucketProperty(OmBucketArgs args) throws IOException;
+
+  /**
+   * Deletes an existing empty bucket from volume.
+   * @param volumeName - Name of the volume.
+   * @param bucketName - Name of the bucket.
+   * @throws IOException
+   */
+  void deleteBucket(String volumeName, String bucketName) throws IOException;
+
+  /**
+   * Returns a list of buckets represented by {@link OmBucketInfo}
+   * in the given volume.
+   *
+   * @param volumeName
+   *   Required parameter volume name determines buckets in which volume
+   *   to return.
+   * @param startBucket
+   *   Optional start bucket name parameter indicating where to start
+   *   the bucket listing from, this key is excluded from the result.
+   * @param bucketPrefix
+   *   Optional start key parameter, restricting the response to buckets
+   *   that begin with the specified name.
+   * @param maxNumOfBuckets
+   *   The maximum number of buckets to return. It ensures
+   *   the size of the result will not exceed this limit.
+   * @return a list of buckets.
+   * @throws IOException
+   */
+  List<OmBucketInfo> listBuckets(String volumeName,
+                                 String startBucket, String bucketPrefix, int maxNumOfBuckets)
+      throws IOException;
+}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/061b1685/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/BucketManagerImpl.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/BucketManagerImpl.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/BucketManagerImpl.java
new file mode 100644
index 0000000..4bbce81
--- /dev/null
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/BucketManagerImpl.java
@@ -0,0 +1,315 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with this
+ * work for additional information regarding copyright ownership.  The ASF
+ * licenses this file to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations under
+ * the License.
+ */
+package org.apache.hadoop.ozone.om;
+
+import com.google.common.base.Preconditions;
+import org.apache.hadoop.fs.StorageType;
+import org.apache.hadoop.ozone.om.helpers.OmBucketArgs;
+import org.apache.hadoop.ozone.om.helpers.OmBucketInfo;
+import org.apache.hadoop.ozone.om.exceptions.OMException;
+import org.apache.hadoop.ozone.protocol.proto
+    .OzoneManagerProtocolProtos.BucketInfo;
+import org.apache.hadoop.ozone.OzoneAcl;
+import org.apache.hadoop.util.Time;
+import org.iq80.leveldb.DBException;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.io.IOException;
+import java.util.List;
+
+/**
+ * OM bucket manager.
+ */
+public class BucketManagerImpl implements BucketManager {
+  private static final Logger LOG =
+      LoggerFactory.getLogger(BucketManagerImpl.class);
+
+  /**
+   * OMMetadataManager is used for accessing OM MetadataDB and ReadWriteLock.
+   */
+  private final OMMetadataManager metadataManager;
+
+  /**
+   * Constructs BucketManager.
+   * @param metadataManager
+   */
+  public BucketManagerImpl(OMMetadataManager metadataManager){
+    this.metadataManager = metadataManager;
+  }
+
+  /**
+   * MetadataDB is maintained in MetadataManager and shared between
+   * BucketManager and VolumeManager. (and also by KeyManager)
+   *
+   * BucketManager uses MetadataDB to store bucket level information.
+   *
+   * Keys used in BucketManager for storing data into MetadataDB
+   * for BucketInfo:
+   * {volume/bucket} -> bucketInfo
+   *
+   * Work flow of create bucket:
+   *
+   * -> Check if the Volume exists in metadataDB, if not throw
+   * VolumeNotFoundException.
+   * -> Else check if the Bucket exists in metadataDB, if so throw
+   * BucketExistException
+   * -> Else update MetadataDB with VolumeInfo.
+   */
+
+  /**
+   * Creates a bucket.
+   * @param bucketInfo - OmBucketInfo.
+   */
+  @Override
+  public void createBucket(OmBucketInfo bucketInfo) throws IOException {
+    Preconditions.checkNotNull(bucketInfo);
+    metadataManager.writeLock().lock();
+    String volumeName = bucketInfo.getVolumeName();
+    String bucketName = bucketInfo.getBucketName();
+    try {
+      byte[] volumeKey = metadataManager.getVolumeKey(volumeName);
+      byte[] bucketKey = metadataManager.getBucketKey(volumeName, bucketName);
+
+      //Check if the volume exists
+      if (metadataManager.get(volumeKey) == null) {
+        LOG.debug("volume: {} not found ", volumeName);
+        throw new OMException("Volume doesn't exist",
+            OMException.ResultCodes.FAILED_VOLUME_NOT_FOUND);
+      }
+      //Check if bucket already exists
+      if (metadataManager.get(bucketKey) != null) {
+        LOG.debug("bucket: {} already exists ", bucketName);
+        throw new OMException("Bucket already exist",
+            OMException.ResultCodes.FAILED_BUCKET_ALREADY_EXISTS);
+      }
+
+      OmBucketInfo omBucketInfo = OmBucketInfo.newBuilder()
+          .setVolumeName(bucketInfo.getVolumeName())
+          .setBucketName(bucketInfo.getBucketName())
+          .setAcls(bucketInfo.getAcls())
+          .setStorageType(bucketInfo.getStorageType())
+          .setIsVersionEnabled(bucketInfo.getIsVersionEnabled())
+          .setCreationTime(Time.now())
+          .build();
+      metadataManager.put(bucketKey, omBucketInfo.getProtobuf().toByteArray());
+
+      LOG.debug("created bucket: {} in volume: {}", bucketName, volumeName);
+    } catch (IOException | DBException ex) {
+      if (!(ex instanceof OMException)) {
+        LOG.error("Bucket creation failed for bucket:{} in volume:{}",
+            bucketName, volumeName, ex);
+      }
+      throw ex;
+    } finally {
+      metadataManager.writeLock().unlock();
+    }
+  }
+
+  /**
+   * Returns Bucket Information.
+   *
+   * @param volumeName - Name of the Volume.
+   * @param bucketName - Name of the Bucket.
+   */
+  @Override
+  public OmBucketInfo getBucketInfo(String volumeName, String bucketName)
+      throws IOException {
+    Preconditions.checkNotNull(volumeName);
+    Preconditions.checkNotNull(bucketName);
+    metadataManager.readLock().lock();
+    try {
+      byte[] bucketKey = metadataManager.getBucketKey(volumeName, bucketName);
+      byte[] value = metadataManager.get(bucketKey);
+      if (value == null) {
+        LOG.debug("bucket: {} not found in volume: {}.", bucketName,
+            volumeName);
+        throw new OMException("Bucket not found",
+            OMException.ResultCodes.FAILED_BUCKET_NOT_FOUND);
+      }
+      return OmBucketInfo.getFromProtobuf(BucketInfo.parseFrom(value));
+    } catch (IOException | DBException ex) {
+      if (!(ex instanceof OMException)) {
+        LOG.error("Exception while getting bucket info for bucket: {}",
+            bucketName, ex);
+      }
+      throw ex;
+    } finally {
+      metadataManager.readLock().unlock();
+    }
+  }
+
+  /**
+   * Sets bucket property from args.
+   * @param args - BucketArgs.
+   * @throws IOException
+   */
+  @Override
+  public void setBucketProperty(OmBucketArgs args) throws IOException {
+    Preconditions.checkNotNull(args);
+    metadataManager.writeLock().lock();
+    String volumeName = args.getVolumeName();
+    String bucketName = args.getBucketName();
+    try {
+      byte[] bucketKey = metadataManager.getBucketKey(volumeName, bucketName);
+      //Check if volume exists
+      if(metadataManager.get(metadataManager.getVolumeKey(volumeName)) ==
+          null) {
+        LOG.debug("volume: {} not found ", volumeName);
+        throw new OMException("Volume doesn't exist",
+            OMException.ResultCodes.FAILED_VOLUME_NOT_FOUND);
+      }
+      byte[] value = metadataManager.get(bucketKey);
+      //Check if bucket exist
+      if(value == null) {
+        LOG.debug("bucket: {} not found ", bucketName);
+        throw new OMException("Bucket doesn't exist",
+            OMException.ResultCodes.FAILED_BUCKET_NOT_FOUND);
+      }
+      OmBucketInfo oldBucketInfo = OmBucketInfo.getFromProtobuf(
+          BucketInfo.parseFrom(value));
+      OmBucketInfo.Builder bucketInfoBuilder = OmBucketInfo.newBuilder();
+      bucketInfoBuilder.setVolumeName(oldBucketInfo.getVolumeName())
+          .setBucketName(oldBucketInfo.getBucketName());
+
+      //Check ACLs to update
+      if(args.getAddAcls() != null || args.getRemoveAcls() != null) {
+        bucketInfoBuilder.setAcls(getUpdatedAclList(oldBucketInfo.getAcls(),
+            args.getRemoveAcls(), args.getAddAcls()));
+        LOG.debug("Updating ACLs for bucket: {} in volume: {}",
+            bucketName, volumeName);
+      } else {
+        bucketInfoBuilder.setAcls(oldBucketInfo.getAcls());
+      }
+
+      //Check StorageType to update
+      StorageType storageType = args.getStorageType();
+      if (storageType != null) {
+        bucketInfoBuilder.setStorageType(storageType);
+        LOG.debug("Updating bucket storage type for bucket: {} in volume: {}",
+            bucketName, volumeName);
+      } else {
+        bucketInfoBuilder.setStorageType(oldBucketInfo.getStorageType());
+      }
+
+      //Check Versioning to update
+      Boolean versioning = args.getIsVersionEnabled();
+      if (versioning != null) {
+        bucketInfoBuilder.setIsVersionEnabled(versioning);
+        LOG.debug("Updating bucket versioning for bucket: {} in volume: {}",
+            bucketName, volumeName);
+      } else {
+        bucketInfoBuilder
+            .setIsVersionEnabled(oldBucketInfo.getIsVersionEnabled());
+      }
+      bucketInfoBuilder.setCreationTime(oldBucketInfo.getCreationTime());
+
+      metadataManager.put(bucketKey,
+          bucketInfoBuilder.build().getProtobuf().toByteArray());
+    } catch (IOException | DBException ex) {
+      if (!(ex instanceof OMException)) {
+        LOG.error("Setting bucket property failed for bucket:{} in volume:{}",
+            bucketName, volumeName, ex);
+      }
+      throw ex;
+    } finally {
+      metadataManager.writeLock().unlock();
+    }
+  }
+
+  /**
+   * Updates the existing ACL list with remove and add ACLs that are passed.
+   * Remove is done before Add.
+   *
+   * @param existingAcls - old ACL list.
+   * @param removeAcls - ACLs to be removed.
+   * @param addAcls - ACLs to be added.
+   * @return updated ACL list.
+   */
+  private List<OzoneAcl> getUpdatedAclList(List<OzoneAcl> existingAcls,
+      List<OzoneAcl> removeAcls, List<OzoneAcl> addAcls) {
+    if(removeAcls != null && !removeAcls.isEmpty()) {
+      existingAcls.removeAll(removeAcls);
+    }
+    if(addAcls != null && !addAcls.isEmpty()) {
+      addAcls.stream().filter(acl -> !existingAcls.contains(acl)).forEach(
+          existingAcls::add);
+    }
+    return existingAcls;
+  }
+
+  /**
+   * Deletes an existing empty bucket from volume.
+   * @param volumeName - Name of the volume.
+   * @param bucketName - Name of the bucket.
+   * @throws IOException
+   */
+  public void deleteBucket(String volumeName, String bucketName)
+      throws IOException {
+    Preconditions.checkNotNull(volumeName);
+    Preconditions.checkNotNull(bucketName);
+    metadataManager.writeLock().lock();
+    try {
+      byte[] bucketKey = metadataManager.getBucketKey(volumeName, bucketName);
+      //Check if volume exists
+      if (metadataManager.get(metadataManager.getVolumeKey(volumeName))
+          == null) {
+        LOG.debug("volume: {} not found ", volumeName);
+        throw new OMException("Volume doesn't exist",
+            OMException.ResultCodes.FAILED_VOLUME_NOT_FOUND);
+      }
+      //Check if bucket exist
+      if (metadataManager.get(bucketKey) == null) {
+        LOG.debug("bucket: {} not found ", bucketName);
+        throw new OMException("Bucket doesn't exist",
+            OMException.ResultCodes.FAILED_BUCKET_NOT_FOUND);
+      }
+      //Check if bucket is empty
+      if (!metadataManager.isBucketEmpty(volumeName, bucketName)) {
+        LOG.debug("bucket: {} is not empty ", bucketName);
+        throw new OMException("Bucket is not empty",
+            OMException.ResultCodes.FAILED_BUCKET_NOT_EMPTY);
+      }
+      metadataManager.delete(bucketKey);
+    } catch (IOException ex) {
+      if (!(ex instanceof OMException)) {
+        LOG.error("Delete bucket failed for bucket:{} in volume:{}", bucketName,
+            volumeName, ex);
+      }
+      throw ex;
+    } finally {
+      metadataManager.writeLock().unlock();
+    }
+  }
+
+  /**
+   * {@inheritDoc}
+   */
+  @Override
+  public List<OmBucketInfo> listBuckets(String volumeName,
+                                        String startBucket, String bucketPrefix, int maxNumOfBuckets)
+      throws IOException {
+    Preconditions.checkNotNull(volumeName);
+    metadataManager.readLock().lock();
+    try {
+      return metadataManager.listBuckets(
+          volumeName, startBucket, bucketPrefix, maxNumOfBuckets);
+    } finally {
+      metadataManager.readLock().unlock();
+    }
+  }
+}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/061b1685/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/KeyDeletingService.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/KeyDeletingService.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/KeyDeletingService.java
new file mode 100644
index 0000000..ee23fe0
--- /dev/null
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/KeyDeletingService.java
@@ -0,0 +1,142 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.ozone.om;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.ozone.common.DeleteBlockGroupResult;
+import org.apache.hadoop.ozone.common.BlockGroup;
+import org.apache.hadoop.hdds.scm.protocol.ScmBlockLocationProtocol;
+import org.apache.hadoop.util.StringUtils;
+import org.apache.hadoop.util.Time;
+import org.apache.hadoop.utils.BackgroundService;
+import org.apache.hadoop.utils.BackgroundTask;
+import org.apache.hadoop.utils.BackgroundTaskQueue;
+import org.apache.hadoop.utils.BackgroundTaskResult;
+import org.apache.hadoop.utils.BackgroundTaskResult.EmptyTaskResult;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.io.IOException;
+import java.util.List;
+import java.util.concurrent.TimeUnit;
+
+import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_KEY_DELETING_LIMIT_PER_TASK;
+import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_KEY_DELETING_LIMIT_PER_TASK_DEFAULT;
+
+/**
+ * This is the background service to delete keys.
+ * Scan the metadata of om periodically to get
+ * the keys with prefix "#deleting" and ask scm to
+ * delete metadata accordingly, if scm returns
+ * success for keys, then clean up those keys.
+ */
+public class KeyDeletingService extends BackgroundService {
+
+  private static final Logger LOG =
+      LoggerFactory.getLogger(KeyDeletingService.class);
+
+  // The thread pool size for key deleting service.
+  private final static int KEY_DELETING_CORE_POOL_SIZE = 2;
+
+  private final ScmBlockLocationProtocol scmClient;
+  private final KeyManager manager;
+  private final int keyLimitPerTask;
+
+  public KeyDeletingService(ScmBlockLocationProtocol scmClient,
+      KeyManager manager, long serviceInterval,
+      long serviceTimeout, Configuration conf) {
+    super("KeyDeletingService", serviceInterval, TimeUnit.MILLISECONDS,
+        KEY_DELETING_CORE_POOL_SIZE, serviceTimeout);
+    this.scmClient = scmClient;
+    this.manager = manager;
+    this.keyLimitPerTask = conf.getInt(OZONE_KEY_DELETING_LIMIT_PER_TASK,
+        OZONE_KEY_DELETING_LIMIT_PER_TASK_DEFAULT);
+  }
+
+  @Override
+  public BackgroundTaskQueue getTasks() {
+    BackgroundTaskQueue queue = new BackgroundTaskQueue();
+    queue.add(new KeyDeletingTask());
+    return queue;
+  }
+
+  /**
+   * A key deleting task scans OM DB and looking for a certain number
+   * of pending-deletion keys, sends these keys along with their associated
+   * blocks to SCM for deletion. Once SCM confirms keys are deleted (once
+   * SCM persisted the blocks info in its deletedBlockLog), it removes
+   * these keys from the DB.
+   */
+  private class KeyDeletingTask implements
+      BackgroundTask<BackgroundTaskResult> {
+
+    @Override
+    public int getPriority() {
+      return 0;
+    }
+
+    @Override
+    public BackgroundTaskResult call() throws Exception {
+      try {
+        long startTime = Time.monotonicNow();
+        List<BlockGroup> keyBlocksList = manager
+            .getPendingDeletionKeys(keyLimitPerTask);
+        if (keyBlocksList.size() > 0) {
+          LOG.info("Found {} to-delete keys in OM", keyBlocksList.size());
+          List<DeleteBlockGroupResult> results =
+              scmClient.deleteKeyBlocks(keyBlocksList);
+          for (DeleteBlockGroupResult result : results) {
+            if (result.isSuccess()) {
+              try {
+                // Purge key from OM DB.
+                manager.deletePendingDeletionKey(result.getObjectKey());
+                LOG.debug("Key {} deleted from OM DB", result.getObjectKey());
+              } catch (IOException e) {
+                // if a pending deletion key is failed to delete,
+                // print a warning here and retain it in this state,
+                // so that it can be attempt to delete next time.
+                LOG.warn("Failed to delete pending-deletion key {}",
+                    result.getObjectKey(), e);
+              }
+            } else {
+              // Key deletion failed, retry in next interval.
+              LOG.warn("Key {} deletion failed because some of the blocks"
+                  + " were failed to delete, failed blocks: {}",
+                  result.getObjectKey(),
+                  StringUtils.join(",", result.getFailedBlocks()));
+            }
+          }
+
+          if (!results.isEmpty()) {
+            LOG.info("Number of key deleted from OM DB: {},"
+                + " task elapsed time: {}ms",
+                results.size(), Time.monotonicNow() - startTime);
+          }
+
+          return results::size;
+        } else {
+          LOG.debug("No pending deletion key found in OM");
+        }
+      } catch (IOException e) {
+        LOG.error("Unable to get pending deletion keys, retry in"
+            + " next interval", e);
+      }
+      return EmptyTaskResult.newResult();
+    }
+  }
+}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/061b1685/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/KeyManager.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/KeyManager.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/KeyManager.java
new file mode 100644
index 0000000..226c07d
--- /dev/null
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/KeyManager.java
@@ -0,0 +1,175 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with this
+ * work for additional information regarding copyright ownership.  The ASF
+ * licenses this file to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations under
+ * the License.
+ */
+package org.apache.hadoop.ozone.om;
+
+import org.apache.hadoop.ozone.common.BlockGroup;
+import org.apache.hadoop.ozone.om.helpers.OmKeyArgs;
+import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;
+import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfo;
+import org.apache.hadoop.ozone.om.helpers.OpenKeySession;
+
+import java.io.IOException;
+import java.util.List;
+
+/**
+ * Handles key level commands.
+ */
+public interface KeyManager {
+
+  /**
+   * Start key manager.
+   */
+  void start();
+
+  /**
+   * Stop key manager.
+   */
+  void stop() throws IOException;
+
+  /**
+   * After calling commit, the key will be made visible. There can be multiple
+   * open key writes in parallel (identified by client id). The most recently
+   * committed one will be the one visible.
+   *
+   * @param args the key to commit.
+   * @param clientID the client that is committing.
+   * @throws IOException
+   */
+  void commitKey(OmKeyArgs args, int clientID) throws IOException;
+
+  /**
+   * A client calls this on an open key, to request to allocate a new block,
+   * and appended to the tail of current block list of the open client.
+   *
+   * @param args the key to append
+   * @param clientID the client requesting block.
+   * @return the reference to the new block.
+   * @throws IOException
+   */
+  OmKeyLocationInfo allocateBlock(OmKeyArgs args, int clientID)
+      throws IOException;
+  /**
+   * Given the args of a key to put, write an open key entry to meta data.
+   *
+   * In case that the container creation or key write failed on
+   * DistributedStorageHandler, this key's metadata will still stay in OM.
+   * TODO garbage collect the open keys that never get closed
+   *
+   * @param args the args of the key provided by client.
+   * @return a OpenKeySession instance client uses to talk to container.
+   * @throws Exception
+   */
+  OpenKeySession openKey(OmKeyArgs args) throws IOException;
+
+  /**
+   * Look up an existing key. Return the info of the key to client side, which
+   * DistributedStorageHandler will use to access the data on datanode.
+   *
+   * @param args the args of the key provided by client.
+   * @return a OmKeyInfo instance client uses to talk to container.
+   * @throws IOException
+   */
+  OmKeyInfo lookupKey(OmKeyArgs args) throws IOException;
+
+  /**
+   * Renames an existing key within a bucket.
+   *
+   * @param args the args of the key provided by client.
+   * @param toKeyName New name to be used for the key
+   * @throws IOException if specified key doesn't exist or
+   * some other I/O errors while renaming the key.
+   */
+  void renameKey(OmKeyArgs args, String toKeyName) throws IOException;
+
+  /**
+   * Deletes an object by an object key. The key will be immediately removed
+   * from OM namespace and become invisible to clients. The object data
+   * will be removed in async manner that might retain for some time.
+   *
+   * @param args the args of the key provided by client.
+   * @throws IOException if specified key doesn't exist or
+   * some other I/O errors while deleting an object.
+   */
+  void deleteKey(OmKeyArgs args) throws IOException;
+
+  /**
+   * Returns a list of keys represented by {@link OmKeyInfo}
+   * in the given bucket.
+   *
+   * @param volumeName
+   *   the name of the volume.
+   * @param bucketName
+   *   the name of the bucket.
+   * @param startKey
+   *   the start key name, only the keys whose name is
+   *   after this value will be included in the result.
+   *   This key is excluded from the result.
+   * @param keyPrefix
+   *   key name prefix, only the keys whose name has
+   *   this prefix will be included in the result.
+   * @param maxKeys
+   *   the maximum number of keys to return. It ensures
+   *   the size of the result will not exceed this limit.
+   * @return a list of keys.
+   * @throws IOException
+   */
+  List<OmKeyInfo> listKeys(String volumeName,
+                           String bucketName, String startKey, String keyPrefix, int maxKeys)
+      throws IOException;
+
+  /**
+   * Returns a list of pending deletion key info that ups to the given count.
+   * Each entry is a {@link BlockGroup}, which contains the info about the
+   * key name and all its associated block IDs. A pending deletion key is
+   * stored with #deleting# prefix in OM DB.
+   *
+   * @param count max number of keys to return.
+   * @return a list of {@link BlockGroup} representing keys and blocks.
+   * @throws IOException
+   */
+  List<BlockGroup> getPendingDeletionKeys(int count) throws IOException;
+
+  /**
+   * Deletes a pending deletion key by its name. This is often called when
+   * key can be safely deleted from this layer. Once called, all footprints
+   * of the key will be purged from OM DB.
+   *
+   * @param objectKeyName object key name with #deleting# prefix.
+   * @throws IOException if specified key doesn't exist or other I/O errors.
+   */
+  void deletePendingDeletionKey(String objectKeyName) throws IOException;
+
+  /**
+   * Returns a list of all still open key info. Which contains the info about
+   * the key name and all its associated block IDs. A pending open key has
+   * prefix #open# in OM DB.
+   *
+   * @return a list of {@link BlockGroup} representing keys and blocks.
+   * @throws IOException
+   */
+  List<BlockGroup> getExpiredOpenKeys() throws IOException;
+
+  /**
+   * Deletes a expired open key by its name. Called when a hanging key has been
+   * lingering for too long. Once called, the open key entries gets removed
+   * from OM mdata data.
+   *
+   * @param objectKeyName object key name with #open# prefix.
+   * @throws IOException if specified key doesn't exist or other I/O errors.
+   */
+  void deleteExpiredOpenKey(String objectKeyName) throws IOException;
+}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/061b1685/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/KeyManagerImpl.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/KeyManagerImpl.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/KeyManagerImpl.java
new file mode 100644
index 0000000..ba92a29
--- /dev/null
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/KeyManagerImpl.java
@@ -0,0 +1,566 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with this
+ * work for additional information regarding copyright ownership.  The ASF
+ * licenses this file to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations under
+ * the License.
+ */
+package org.apache.hadoop.ozone.om;
+
+import com.google.common.annotations.VisibleForTesting;
+import com.google.common.base.Preconditions;
+import org.apache.hadoop.ozone.OzoneConsts;
+import org.apache.hadoop.hdfs.DFSUtil;
+import org.apache.hadoop.ozone.om.helpers.OmKeyArgs;
+import org.apache.hadoop.ozone.common.BlockGroup;
+import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;
+import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfo;
+import org.apache.hadoop.hdds.conf.OzoneConfiguration;
+import org.apache.hadoop.ozone.om.exceptions.OMException;
+import org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes;
+import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfoGroup;
+import org.apache.hadoop.ozone.om.helpers.OpenKeySession;
+import org.apache.hadoop.ozone.protocol.proto
+    .OzoneManagerProtocolProtos.KeyInfo;
+import org.apache.hadoop.hdds.scm.container.common.helpers.AllocatedBlock;
+import org.apache.hadoop.hdds.scm.protocol.ScmBlockLocationProtocol;
+import org.apache.hadoop.util.Time;
+import org.apache.hadoop.utils.BackgroundService;
+import org.apache.hadoop.utils.BatchOperation;
+import org.iq80.leveldb.DBException;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.Collections;
+import java.util.List;
+import java.util.Random;
+import java.util.concurrent.TimeUnit;
+
+import static org.apache.hadoop.ozone
+    .OzoneConfigKeys.DFS_CONTAINER_RATIS_ENABLED_DEFAULT;
+import static org.apache.hadoop.ozone
+    .OzoneConfigKeys.DFS_CONTAINER_RATIS_ENABLED_KEY;
+import static org.apache.hadoop.ozone
+    .OzoneConfigKeys.OZONE_BLOCK_DELETING_SERVICE_INTERVAL;
+import static org.apache.hadoop.ozone
+    .OzoneConfigKeys.OZONE_BLOCK_DELETING_SERVICE_INTERVAL_DEFAULT;
+import static org.apache.hadoop.ozone
+    .OzoneConfigKeys.OZONE_BLOCK_DELETING_SERVICE_TIMEOUT;
+import static org.apache.hadoop.ozone
+    .OzoneConfigKeys.OZONE_BLOCK_DELETING_SERVICE_TIMEOUT_DEFAULT;
+import static org.apache.hadoop.ozone
+    .OzoneConfigKeys.OZONE_KEY_PREALLOCATION_MAXSIZE;
+import static org.apache.hadoop.ozone
+    .OzoneConfigKeys.OZONE_KEY_PREALLOCATION_MAXSIZE_DEFAULT;
+import static org.apache.hadoop.ozone.OzoneConfigKeys
+    .OZONE_OPEN_KEY_CLEANUP_SERVICE_INTERVAL_SECONDS;
+import static org.apache.hadoop.ozone.OzoneConfigKeys
+    .OZONE_OPEN_KEY_CLEANUP_SERVICE_INTERVAL_SECONDS_DEFAULT;
+import static org.apache.hadoop.ozone
+    .OzoneConfigKeys.OZONE_SCM_BLOCK_SIZE_DEFAULT;
+import static org.apache.hadoop.ozone
+    .OzoneConfigKeys.OZONE_SCM_BLOCK_SIZE_IN_MB;
+import org.apache.hadoop.hdds.protocol
+    .proto.HddsProtos.ReplicationType;
+import org.apache.hadoop.hdds.protocol
+    .proto.HddsProtos.ReplicationFactor;
+
+
+/**
+ * Implementation of keyManager.
+ */
+public class KeyManagerImpl implements KeyManager {
+  private static final Logger LOG =
+      LoggerFactory.getLogger(KeyManagerImpl.class);
+
+  /**
+   * A SCM block client, used to talk to SCM to allocate block during putKey.
+   */
+  private final ScmBlockLocationProtocol scmBlockClient;
+  private final OMMetadataManager metadataManager;
+  private final long scmBlockSize;
+  private final boolean useRatis;
+  private final BackgroundService keyDeletingService;
+  private final BackgroundService openKeyCleanupService;
+
+  private final long preallocateMax;
+  private final Random random;
+  private final String omId;
+
+  public KeyManagerImpl(ScmBlockLocationProtocol scmBlockClient,
+                        OMMetadataManager metadataManager, OzoneConfiguration conf,
+                        String omId) {
+    this.scmBlockClient = scmBlockClient;
+    this.metadataManager = metadataManager;
+    this.scmBlockSize = conf.getLong(OZONE_SCM_BLOCK_SIZE_IN_MB,
+        OZONE_SCM_BLOCK_SIZE_DEFAULT) * OzoneConsts.MB;
+    this.useRatis = conf.getBoolean(DFS_CONTAINER_RATIS_ENABLED_KEY,
+        DFS_CONTAINER_RATIS_ENABLED_DEFAULT);
+    long  blockDeleteInterval = conf.getTimeDuration(
+        OZONE_BLOCK_DELETING_SERVICE_INTERVAL,
+        OZONE_BLOCK_DELETING_SERVICE_INTERVAL_DEFAULT,
+        TimeUnit.MILLISECONDS);
+    long serviceTimeout = conf.getTimeDuration(
+        OZONE_BLOCK_DELETING_SERVICE_TIMEOUT,
+        OZONE_BLOCK_DELETING_SERVICE_TIMEOUT_DEFAULT,
+        TimeUnit.MILLISECONDS);
+    this.preallocateMax = conf.getLong(
+        OZONE_KEY_PREALLOCATION_MAXSIZE,
+        OZONE_KEY_PREALLOCATION_MAXSIZE_DEFAULT);
+    keyDeletingService = new KeyDeletingService(
+        scmBlockClient, this, blockDeleteInterval, serviceTimeout, conf);
+    int openkeyCheckInterval = conf.getInt(
+        OZONE_OPEN_KEY_CLEANUP_SERVICE_INTERVAL_SECONDS,
+        OZONE_OPEN_KEY_CLEANUP_SERVICE_INTERVAL_SECONDS_DEFAULT);
+    openKeyCleanupService = new OpenKeyCleanupService(
+        scmBlockClient, this, openkeyCheckInterval, serviceTimeout);
+    random = new Random();
+    this.omId = omId;
+  }
+
+  @VisibleForTesting
+  public BackgroundService getOpenKeyCleanupService() {
+    return openKeyCleanupService;
+  }
+
+  @Override
+  public void start() {
+    keyDeletingService.start();
+    openKeyCleanupService.start();
+  }
+
+  @Override
+  public void stop() throws IOException {
+    keyDeletingService.shutdown();
+    openKeyCleanupService.shutdown();
+  }
+
+  private void validateBucket(String volumeName, String bucketName)
+      throws IOException {
+    byte[] volumeKey = metadataManager.getVolumeKey(volumeName);
+    byte[] bucketKey = metadataManager.getBucketKey(volumeName, bucketName);
+
+    //Check if the volume exists
+    if(metadataManager.get(volumeKey) == null) {
+      LOG.error("volume not found: {}", volumeName);
+      throw new OMException("Volume not found",
+          OMException.ResultCodes.FAILED_VOLUME_NOT_FOUND);
+    }
+    //Check if bucket already exists
+    if(metadataManager.get(bucketKey) == null) {
+      LOG.error("bucket not found: {}/{} ", volumeName, bucketName);
+      throw new OMException("Bucket not found",
+          OMException.ResultCodes.FAILED_BUCKET_NOT_FOUND);
+    }
+  }
+
+  @Override
+  public OmKeyLocationInfo allocateBlock(OmKeyArgs args, int clientID)
+      throws IOException {
+    Preconditions.checkNotNull(args);
+    metadataManager.writeLock().lock();
+    String volumeName = args.getVolumeName();
+    String bucketName = args.getBucketName();
+    String keyName = args.getKeyName();
+
+    try {
+      validateBucket(volumeName, bucketName);
+      String objectKey = metadataManager.getKeyWithDBPrefix(
+          volumeName, bucketName, keyName);
+      byte[] openKey = metadataManager.getOpenKeyNameBytes(objectKey, clientID);
+      byte[] keyData = metadataManager.get(openKey);
+      if (keyData == null) {
+        LOG.error("Allocate block for a key not in open status in meta store " +
+            objectKey + " with ID " + clientID);
+        throw new OMException("Open Key not found",
+            OMException.ResultCodes.FAILED_KEY_NOT_FOUND);
+      }
+      OmKeyInfo keyInfo =
+          OmKeyInfo.getFromProtobuf(KeyInfo.parseFrom(keyData));
+      AllocatedBlock allocatedBlock =
+          scmBlockClient.allocateBlock(scmBlockSize, keyInfo.getType(),
+              keyInfo.getFactor(), omId);
+      OmKeyLocationInfo info = new OmKeyLocationInfo.Builder()
+          .setBlockID(allocatedBlock.getBlockID())
+          .setShouldCreateContainer(allocatedBlock.getCreateContainer())
+          .setLength(scmBlockSize)
+          .setOffset(0)
+          .build();
+      // current version not committed, so new blocks coming now are added to
+      // the same version
+      keyInfo.appendNewBlocks(Collections.singletonList(info));
+      keyInfo.updateModifcationTime();
+      metadataManager.put(openKey, keyInfo.getProtobuf().toByteArray());
+      return info;
+    } finally {
+      metadataManager.writeLock().unlock();
+    }
+  }
+
+  @Override
+  public OpenKeySession openKey(OmKeyArgs args) throws IOException {
+    Preconditions.checkNotNull(args);
+    metadataManager.writeLock().lock();
+    String volumeName = args.getVolumeName();
+    String bucketName = args.getBucketName();
+    String keyName = args.getKeyName();
+    ReplicationFactor factor = args.getFactor();
+    ReplicationType type = args.getType();
+
+    // If user does not specify a replication strategy or
+    // replication factor, OM will use defaults.
+    if(factor == null) {
+      factor = useRatis ? ReplicationFactor.THREE: ReplicationFactor.ONE;
+    }
+
+    if(type == null) {
+      type = useRatis ? ReplicationType.RATIS : ReplicationType.STAND_ALONE;
+    }
+
+    try {
+      validateBucket(volumeName, bucketName);
+      long requestedSize = Math.min(preallocateMax, args.getDataSize());
+      List<OmKeyLocationInfo> locations = new ArrayList<>();
+      String objectKey = metadataManager.getKeyWithDBPrefix(
+          volumeName, bucketName, keyName);
+      // requested size is not required but more like a optimization:
+      // SCM looks at the requested, if it 0, no block will be allocated at
+      // the point, if client needs more blocks, client can always call
+      // allocateBlock. But if requested size is not 0, OM will preallocate
+      // some blocks and piggyback to client, to save RPC calls.
+      while (requestedSize > 0) {
+        long allocateSize = Math.min(scmBlockSize, requestedSize);
+        AllocatedBlock allocatedBlock =
+            scmBlockClient.allocateBlock(allocateSize, type, factor, omId);
+        OmKeyLocationInfo subKeyInfo = new OmKeyLocationInfo.Builder()
+            .setBlockID(allocatedBlock.getBlockID())
+            .setShouldCreateContainer(allocatedBlock.getCreateContainer())
+            .setLength(allocateSize)
+            .setOffset(0)
+            .build();
+        locations.add(subKeyInfo);
+        requestedSize -= allocateSize;
+      }
+      // NOTE size of a key is not a hard limit on anything, it is a value that
+      // client should expect, in terms of current size of key. If client sets a
+      // value, then this value is used, otherwise, we allocate a single block
+      // which is the current size, if read by the client.
+      long size = args.getDataSize() >= 0 ? args.getDataSize() : scmBlockSize;
+      byte[] keyKey = metadataManager.getDBKeyBytes(
+          volumeName, bucketName, keyName);
+      byte[] value = metadataManager.get(keyKey);
+      OmKeyInfo keyInfo;
+      long openVersion;
+      if (value != null) {
+        // the key already exist, the new blocks will be added as new version
+        keyInfo = OmKeyInfo.getFromProtobuf(KeyInfo.parseFrom(value));
+        // when locations.size = 0, the new version will have identical blocks
+        // as its previous version
+        openVersion = keyInfo.addNewVersion(locations);
+        keyInfo.setDataSize(size + keyInfo.getDataSize());
+      } else {
+        // the key does not exist, create a new object, the new blocks are the
+        // version 0
+        long currentTime = Time.now();
+        keyInfo = new OmKeyInfo.Builder()
+            .setVolumeName(args.getVolumeName())
+            .setBucketName(args.getBucketName())
+            .setKeyName(args.getKeyName())
+            .setOmKeyLocationInfos(Collections.singletonList(
+                new OmKeyLocationInfoGroup(0, locations)))
+            .setCreationTime(currentTime)
+            .setModificationTime(currentTime)
+            .setDataSize(size)
+            .setReplicationType(type)
+            .setReplicationFactor(factor)
+            .build();
+        openVersion = 0;
+      }
+      // Generate a random ID which is not already in meta db.
+      int id = -1;
+      // in general this should finish in a couple times at most. putting some
+      // arbitrary large number here to avoid dead loop.
+      for (int j = 0; j < 10000; j++) {
+        id = random.nextInt();
+        byte[] openKey = metadataManager.getOpenKeyNameBytes(objectKey, id);
+        if (metadataManager.get(openKey) == null) {
+          metadataManager.put(openKey, keyInfo.getProtobuf().toByteArray());
+          break;
+        }
+      }
+      if (id == -1) {
+        throw new IOException("Failed to find a usable id for " + objectKey);
+      }
+      LOG.debug("Key {} allocated in volume {} bucket {}",
+          keyName, volumeName, bucketName);
+      return new OpenKeySession(id, keyInfo, openVersion);
+    } catch (OMException e) {
+      throw e;
+    } catch (IOException ex) {
+      if (!(ex instanceof OMException)) {
+        LOG.error("Key open failed for volume:{} bucket:{} key:{}",
+            volumeName, bucketName, keyName, ex);
+      }
+      throw new OMException(ex.getMessage(),
+          OMException.ResultCodes.FAILED_KEY_ALLOCATION);
+    } finally {
+      metadataManager.writeLock().unlock();
+    }
+  }
+
+  @Override
+  public void commitKey(OmKeyArgs args, int clientID) throws IOException {
+    Preconditions.checkNotNull(args);
+    metadataManager.writeLock().lock();
+    String volumeName = args.getVolumeName();
+    String bucketName = args.getBucketName();
+    String keyName = args.getKeyName();
+    try {
+      validateBucket(volumeName, bucketName);
+      String objectKey = metadataManager.getKeyWithDBPrefix(
+          volumeName, bucketName, keyName);
+      byte[] objectKeyBytes = metadataManager.getDBKeyBytes(volumeName,
+          bucketName, keyName);
+      byte[] openKey = metadataManager.getOpenKeyNameBytes(objectKey, clientID);
+      byte[] openKeyData = metadataManager.get(openKey);
+      if (openKeyData == null) {
+        throw new OMException("Commit a key without corresponding entry " +
+            DFSUtil.bytes2String(openKey), ResultCodes.FAILED_KEY_NOT_FOUND);
+      }
+      OmKeyInfo keyInfo =
+          OmKeyInfo.getFromProtobuf(KeyInfo.parseFrom(openKeyData));
+      keyInfo.setDataSize(args.getDataSize());
+      keyInfo.setModificationTime(Time.now());
+      BatchOperation batch = new BatchOperation();
+      batch.delete(openKey);
+      batch.put(objectKeyBytes, keyInfo.getProtobuf().toByteArray());
+      metadataManager.writeBatch(batch);
+    } catch (OMException e) {
+      throw e;
+    } catch (IOException ex) {
+      LOG.error("Key commit failed for volume:{} bucket:{} key:{}",
+          volumeName, bucketName, keyName, ex);
+      throw new OMException(ex.getMessage(),
+          OMException.ResultCodes.FAILED_KEY_ALLOCATION);
+    } finally {
+      metadataManager.writeLock().unlock();
+    }
+  }
+
+  @Override
+  public OmKeyInfo lookupKey(OmKeyArgs args) throws IOException {
+    Preconditions.checkNotNull(args);
+    metadataManager.writeLock().lock();
+    String volumeName = args.getVolumeName();
+    String bucketName = args.getBucketName();
+    String keyName = args.getKeyName();
+    try {
+      byte[] keyKey = metadataManager.getDBKeyBytes(
+          volumeName, bucketName, keyName);
+      byte[] value = metadataManager.get(keyKey);
+      if (value == null) {
+        LOG.debug("volume:{} bucket:{} Key:{} not found",
+            volumeName, bucketName, keyName);
+        throw new OMException("Key not found",
+            OMException.ResultCodes.FAILED_KEY_NOT_FOUND);
+      }
+      return OmKeyInfo.getFromProtobuf(KeyInfo.parseFrom(value));
+    } catch (DBException ex) {
+      LOG.error("Get key failed for volume:{} bucket:{} key:{}",
+          volumeName, bucketName, keyName, ex);
+      throw new OMException(ex.getMessage(),
+          OMException.ResultCodes.FAILED_KEY_NOT_FOUND);
+    } finally {
+      metadataManager.writeLock().unlock();
+    }
+  }
+
+  @Override
+  public void renameKey(OmKeyArgs args, String toKeyName) throws IOException {
+    Preconditions.checkNotNull(args);
+    Preconditions.checkNotNull(toKeyName);
+    String volumeName = args.getVolumeName();
+    String bucketName = args.getBucketName();
+    String fromKeyName = args.getKeyName();
+    if (toKeyName.length() == 0 || fromKeyName.length() == 0) {
+      LOG.error("Rename key failed for volume:{} bucket:{} fromKey:{} toKey:{}.",
+          volumeName, bucketName, fromKeyName, toKeyName);
+      throw new OMException("Key name is empty",
+          ResultCodes.FAILED_INVALID_KEY_NAME);
+    }
+
+    metadataManager.writeLock().lock();
+    try {
+      // fromKeyName should exist
+      byte[] fromKey = metadataManager.getDBKeyBytes(
+          volumeName, bucketName, fromKeyName);
+      byte[] fromKeyValue = metadataManager.get(fromKey);
+      if (fromKeyValue == null) {
+        // TODO: Add support for renaming open key
+        LOG.error(
+            "Rename key failed for volume:{} bucket:{} fromKey:{} toKey:{}. "
+                + "Key: {} not found.", volumeName, bucketName, fromKeyName,
+            toKeyName, fromKeyName);
+        throw new OMException("Key not found",
+            OMException.ResultCodes.FAILED_KEY_NOT_FOUND);
+      }
+
+      // toKeyName should not exist
+      byte[] toKey =
+          metadataManager.getDBKeyBytes(volumeName, bucketName, toKeyName);
+      byte[] toKeyValue = metadataManager.get(toKey);
+      if (toKeyValue != null) {
+        LOG.error(
+            "Rename key failed for volume:{} bucket:{} fromKey:{} toKey:{}. "
+                + "Key: {} already exists.", volumeName, bucketName,
+            fromKeyName, toKeyName, toKeyName);
+        throw new OMException("Key not found",
+            OMException.ResultCodes.FAILED_KEY_ALREADY_EXISTS);
+      }
+
+      if (fromKeyName.equals(toKeyName)) {
+        return;
+      }
+
+      OmKeyInfo newKeyInfo =
+          OmKeyInfo.getFromProtobuf(KeyInfo.parseFrom(fromKeyValue));
+      newKeyInfo.setKeyName(toKeyName);
+      newKeyInfo.updateModifcationTime();
+      BatchOperation batch = new BatchOperation();
+      batch.delete(fromKey);
+      batch.put(toKey, newKeyInfo.getProtobuf().toByteArray());
+      metadataManager.writeBatch(batch);
+    } catch (DBException ex) {
+      LOG.error("Rename key failed for volume:{} bucket:{} fromKey:{} toKey:{}.",
+          volumeName, bucketName, fromKeyName, toKeyName, ex);
+      throw new OMException(ex.getMessage(),
+          ResultCodes.FAILED_KEY_RENAME);
+    } finally {
+      metadataManager.writeLock().unlock();
+    }
+  }
+
+  @Override
+  public void deleteKey(OmKeyArgs args) throws IOException {
+    Preconditions.checkNotNull(args);
+    metadataManager.writeLock().lock();
+    String volumeName = args.getVolumeName();
+    String bucketName = args.getBucketName();
+    String keyName = args.getKeyName();
+    try {
+      byte[] objectKey = metadataManager.getDBKeyBytes(
+          volumeName, bucketName, keyName);
+      byte[] objectValue = metadataManager.get(objectKey);
+      if (objectValue == null) {
+        throw new OMException("Key not found",
+            OMException.ResultCodes.FAILED_KEY_NOT_FOUND);
+      }
+      byte[] deletingKey = metadataManager.getDeletedKeyName(objectKey);
+      BatchOperation batch = new BatchOperation();
+      batch.put(deletingKey, objectValue);
+      batch.delete(objectKey);
+      metadataManager.writeBatch(batch);
+    } catch (DBException ex) {
+      LOG.error(String.format("Delete key failed for volume:%s "
+          + "bucket:%s key:%s", volumeName, bucketName, keyName), ex);
+      throw new OMException(ex.getMessage(), ex,
+          ResultCodes.FAILED_KEY_DELETION);
+    } finally {
+      metadataManager.writeLock().unlock();
+    }
+  }
+
+  @Override
+  public List<OmKeyInfo> listKeys(String volumeName, String bucketName,
+                                  String startKey, String keyPrefix, int maxKeys) throws IOException {
+    Preconditions.checkNotNull(volumeName);
+    Preconditions.checkNotNull(bucketName);
+
+    metadataManager.readLock().lock();
+    try {
+      return metadataManager.listKeys(volumeName, bucketName,
+          startKey, keyPrefix, maxKeys);
+    } finally {
+      metadataManager.readLock().unlock();
+    }
+  }
+
+  @Override
+  public List<BlockGroup> getPendingDeletionKeys(final int count)
+      throws IOException {
+    metadataManager.readLock().lock();
+    try {
+      return metadataManager.getPendingDeletionKeys(count);
+    } finally {
+      metadataManager.readLock().unlock();
+    }
+  }
+
+  @Override
+  public void deletePendingDeletionKey(String objectKeyName)
+      throws IOException{
+    Preconditions.checkNotNull(objectKeyName);
+    if (!objectKeyName.startsWith(OzoneConsts.DELETING_KEY_PREFIX)) {
+      throw new IllegalArgumentException("Invalid key name,"
+          + " the name should be the key name with deleting prefix");
+    }
+
+    // Simply removes the entry from OM DB.
+    metadataManager.writeLock().lock();
+    try {
+      byte[] pendingDelKey = DFSUtil.string2Bytes(objectKeyName);
+      byte[] delKeyValue = metadataManager.get(pendingDelKey);
+      if (delKeyValue == null) {
+        throw new IOException("Failed to delete key " + objectKeyName
+            + " because it is not found in DB");
+      }
+      metadataManager.delete(pendingDelKey);
+    } finally {
+      metadataManager.writeLock().unlock();
+    }
+  }
+
+  @Override
+  public List<BlockGroup> getExpiredOpenKeys() throws IOException {
+    metadataManager.readLock().lock();
+    try {
+      return metadataManager.getExpiredOpenKeys();
+    } finally {
+      metadataManager.readLock().unlock();
+    }
+  }
+
+  @Override
+  public void deleteExpiredOpenKey(String objectKeyName) throws IOException {
+    Preconditions.checkNotNull(objectKeyName);
+    if (!objectKeyName.startsWith(OzoneConsts.OPEN_KEY_PREFIX)) {
+      throw new IllegalArgumentException("Invalid key name,"
+          + " the name should be the key name with open key prefix");
+    }
+
+    // Simply removes the entry from OM DB.
+    metadataManager.writeLock().lock();
+    try {
+      byte[] openKey = DFSUtil.string2Bytes(objectKeyName);
+      byte[] delKeyValue = metadataManager.get(openKey);
+      if (delKeyValue == null) {
+        throw new IOException("Failed to delete key " + objectKeyName
+            + " because it is not found in DB");
+      }
+      metadataManager.delete(openKey);
+    } finally {
+      metadataManager.writeLock().unlock();
+    }
+  }
+}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/061b1685/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OMMXBean.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OMMXBean.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OMMXBean.java
new file mode 100644
index 0000000..3ab9f47
--- /dev/null
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OMMXBean.java
@@ -0,0 +1,31 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.ozone.om;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.hdds.server.ServiceRuntimeInfo;
+
+/**
+ * This is the JMX management interface for OM information.
+ */
+@InterfaceAudience.Private
+public interface OMMXBean extends ServiceRuntimeInfo {
+
+  String getRpcPort();
+}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/061b1685/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OMMetadataManager.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OMMetadataManager.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OMMetadataManager.java
new file mode 100644
index 0000000..f2e78e6
--- /dev/null
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OMMetadataManager.java
@@ -0,0 +1,253 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with this
+ * work for additional information regarding copyright ownership.  The ASF
+ * licenses this file to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations under
+ * the License.
+ */
+package org.apache.hadoop.ozone.om;
+
+import com.google.common.annotations.VisibleForTesting;
+import org.apache.hadoop.ozone.om.helpers.OmBucketInfo;
+import org.apache.hadoop.ozone.common.BlockGroup;
+import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;
+import org.apache.hadoop.ozone.om.helpers.OmVolumeArgs;
+import org.apache.hadoop.utils.BatchOperation;
+import org.apache.hadoop.utils.MetadataStore;
+
+import java.io.IOException;
+import java.util.List;
+import java.util.concurrent.locks.Lock;
+
+/**
+ * OM metadata manager interface.
+ */
+public interface OMMetadataManager {
+  /**
+   * Start metadata manager.
+   */
+  void start();
+
+  /**
+   * Stop metadata manager.
+   */
+  void stop() throws IOException;
+
+  /**
+   * Get metadata store.
+   * @return metadata store.
+   */
+  @VisibleForTesting
+  MetadataStore getStore();
+
+  /**
+   * Returns the read lock used on Metadata DB.
+   * @return readLock
+   */
+  Lock readLock();
+
+  /**
+   * Returns the write lock used on Metadata DB.
+   * @return writeLock
+   */
+  Lock writeLock();
+
+  /**
+   * Returns the value associated with this key.
+   * @param key - key
+   * @return value
+   */
+  byte[] get(byte[] key) throws IOException;
+
+  /**
+   * Puts a Key into Metadata DB.
+   * @param key   - key
+   * @param value - value
+   */
+  void put(byte[] key, byte[] value) throws IOException;
+
+  /**
+   * Deletes a Key from Metadata DB.
+   * @param key   - key
+   */
+  void delete(byte[] key) throws IOException;
+
+  /**
+   * Atomic write a batch of operations.
+   * @param batch
+   * @throws IOException
+   */
+  void writeBatch(BatchOperation batch) throws IOException;
+
+  /**
+   * Given a volume return the corresponding DB key.
+   * @param volume - Volume name
+   */
+  byte[] getVolumeKey(String volume);
+
+  /**
+   * Given a user return the corresponding DB key.
+   * @param user - User name
+   */
+  byte[] getUserKey(String user);
+
+  /**
+   * Given a volume and bucket, return the corresponding DB key.
+   * @param volume - User name
+   * @param bucket - Bucket name
+   */
+  byte[] getBucketKey(String volume, String bucket);
+
+  /**
+   * Given a volume, bucket and a key, return the corresponding DB key.
+   * @param volume - volume name
+   * @param bucket - bucket name
+   * @param key - key name
+   * @return bytes of DB key.
+   */
+  byte[] getDBKeyBytes(String volume, String bucket, String key);
+
+  /**
+   * Returns the DB key name of a deleted key in OM metadata store.
+   * The name for a deleted key has prefix #deleting# followed by
+   * the actual key name.
+   * @param keyName - key name
+   * @return bytes of DB key.
+   */
+  byte[] getDeletedKeyName(byte[] keyName);
+
+  /**
+   * Returns the DB key name of a open key in OM metadata store.
+   * Should be #open# prefix followed by actual key name.
+   * @param keyName - key name
+   * @param id - the id for this open
+   * @return bytes of DB key.
+   */
+  byte[] getOpenKeyNameBytes(String keyName, int id);
+
+  /**
+   * Returns the full name of a key given volume name, bucket name and key name.
+   * Generally done by padding certain delimiters.
+   *
+   * @param volumeName - volume name
+   * @param bucketName - bucket name
+   * @param keyName - key name
+   * @return the full key name.
+   */
+  String getKeyWithDBPrefix(String volumeName, String bucketName,
+      String keyName);
+
+  /**
+   * Given a volume, check if it is empty,
+   * i.e there are no buckets inside it.
+   * @param volume - Volume name
+   */
+  boolean isVolumeEmpty(String volume) throws IOException;
+
+  /**
+   * Given a volume/bucket, check if it is empty,
+   * i.e there are no keys inside it.
+   * @param volume - Volume name
+   * @param  bucket - Bucket name
+   * @return true if the bucket is empty
+   */
+  boolean isBucketEmpty(String volume, String bucket) throws IOException;
+
+  /**
+   * Returns a list of buckets represented by {@link OmBucketInfo}
+   * in the given volume.
+   *
+   * @param volumeName
+   *   the name of the volume. This argument is required,
+   *   this method returns buckets in this given volume.
+   * @param startBucket
+   *   the start bucket name. Only the buckets whose name is
+   *   after this value will be included in the result.
+   *   This key is excluded from the result.
+   * @param bucketPrefix
+   *   bucket name prefix. Only the buckets whose name has
+   *   this prefix will be included in the result.
+   * @param maxNumOfBuckets
+   *   the maximum number of buckets to return. It ensures
+   *   the size of the result will not exceed this limit.
+   * @return a list of buckets.
+   * @throws IOException
+   */
+  List<OmBucketInfo> listBuckets(String volumeName, String startBucket,
+                                 String bucketPrefix, int maxNumOfBuckets) throws IOException;
+
+  /**
+   * Returns a list of keys represented by {@link OmKeyInfo}
+   * in the given bucket.
+   *
+   * @param volumeName
+   *   the name of the volume.
+   * @param bucketName
+   *   the name of the bucket.
+   * @param startKey
+   *   the start key name, only the keys whose name is
+   *   after this value will be included in the result.
+   *   This key is excluded from the result.
+   * @param keyPrefix
+   *   key name prefix, only the keys whose name has
+   *   this prefix will be included in the result.
+   * @param maxKeys
+   *   the maximum number of keys to return. It ensures
+   *   the size of the result will not exceed this limit.
+   * @return a list of keys.
+   * @throws IOException
+   */
+  List<OmKeyInfo> listKeys(String volumeName,
+                           String bucketName, String startKey, String keyPrefix, int maxKeys)
+      throws IOException;
+
+  /**
+   * Returns a list of volumes owned by a given user; if user is null,
+   * returns all volumes.
+   *
+   * @param userName
+   *   volume owner
+   * @param prefix
+   *   the volume prefix used to filter the listing result.
+   * @param startKey
+   *   the start volume name determines where to start listing from,
+   *   this key is excluded from the result.
+   * @param maxKeys
+   *   the maximum number of volumes to return.
+   * @return a list of {@link OmVolumeArgs}
+   * @throws IOException
+   */
+  List<OmVolumeArgs> listVolumes(String userName, String prefix,
+                                 String startKey, int maxKeys) throws IOException;
+
+  /**
+   * Returns a list of pending deletion key info that ups to the given count.
+   * Each entry is a {@link BlockGroup}, which contains the info about the
+   * key name and all its associated block IDs. A pending deletion key is
+   * stored with #deleting# prefix in OM DB.
+   *
+   * @param count max number of keys to return.
+   * @return a list of {@link BlockGroup} represent keys and blocks.
+   * @throws IOException
+   */
+  List<BlockGroup> getPendingDeletionKeys(int count) throws IOException;
+
+  /**
+   * Returns a list of all still open key info. Which contains the info about
+   * the key name and all its associated block IDs. A pending open key has
+   * prefix #open# in OM DB.
+   *
+   * @return a list of {@link BlockGroup} representing keys and blocks.
+   * @throws IOException
+   */
+  List<BlockGroup> getExpiredOpenKeys() throws IOException;
+}


---------------------------------------------------------------------
To unsubscribe, e-mail: common-commits-unsubscribe@hadoop.apache.org
For additional commands, e-mail: common-commits-help@hadoop.apache.org


[40/50] [abbrv] hadoop git commit: HDDS-167. Rename KeySpaceManager to OzoneManager. Contributed by Arpit Agarwal.

Posted by vi...@apache.org.
http://git-wip-us.apache.org/repos/asf/hadoop/blob/061b1685/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/rest/RestServerSelector.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/rest/RestServerSelector.java b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/rest/RestServerSelector.java
index 54e219b..fbd6eb8 100644
--- a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/rest/RestServerSelector.java
+++ b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/rest/RestServerSelector.java
@@ -18,7 +18,7 @@
 
 package org.apache.hadoop.ozone.client.rest;
 
-import org.apache.hadoop.ozone.ksm.helpers.ServiceInfo;
+import org.apache.hadoop.ozone.om.helpers.ServiceInfo;
 
 import java.util.List;
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/061b1685/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/rpc/RpcClient.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/rpc/RpcClient.java b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/rpc/RpcClient.java
index 43b94a1..fc70514 100644
--- a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/rpc/RpcClient.java
+++ b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/rpc/RpcClient.java
@@ -27,7 +27,7 @@ import org.apache.hadoop.io.IOUtils;
 import org.apache.hadoop.ipc.Client;
 import org.apache.hadoop.ipc.ProtobufRpcEngine;
 import org.apache.hadoop.ipc.RPC;
-import org.apache.hadoop.ozone.KsmUtils;
+import org.apache.hadoop.ozone.OmUtils;
 import org.apache.hadoop.ozone.OzoneConsts;
 import org.apache.hadoop.ozone.client.BucketArgs;
 import org.apache.hadoop.ozone.client.OzoneBucket;
@@ -43,24 +43,22 @@ import org.apache.hadoop.ozone.client.io.LengthInputStream;
 import org.apache.hadoop.ozone.client.io.OzoneInputStream;
 import org.apache.hadoop.ozone.client.io.OzoneOutputStream;
 import org.apache.hadoop.ozone.client.protocol.ClientProtocol;
-import org.apache.hadoop.ozone.ksm.helpers.KsmBucketArgs;
-import org.apache.hadoop.ozone.ksm.helpers.KsmBucketInfo;
-import org.apache.hadoop.ozone.ksm.helpers.KsmKeyArgs;
-import org.apache.hadoop.ozone.ksm.helpers.KsmKeyInfo;
-import org.apache.hadoop.ozone.ksm.helpers.KsmVolumeArgs;
-import org.apache.hadoop.ozone.ksm.helpers.OpenKeySession;
-import org.apache.hadoop.ozone.ksm.helpers.ServiceInfo;
-import org.apache.hadoop.ozone.ksm.protocolPB
-    .KeySpaceManagerProtocolClientSideTranslatorPB;
-import org.apache.hadoop.ozone.ksm.protocolPB
-    .KeySpaceManagerProtocolPB;
+import org.apache.hadoop.ozone.om.helpers.OmBucketArgs;
+import org.apache.hadoop.ozone.om.helpers.OmBucketInfo;
+import org.apache.hadoop.ozone.om.helpers.OmKeyArgs;
+import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;
+import org.apache.hadoop.ozone.om.helpers.OmVolumeArgs;
+import org.apache.hadoop.ozone.om.helpers.OpenKeySession;
+import org.apache.hadoop.ozone.om.helpers.ServiceInfo;
+import org.apache.hadoop.ozone.om.protocolPB.OzoneManagerProtocolClientSideTranslatorPB;
+import org.apache.hadoop.ozone.om.protocolPB.OzoneManagerProtocolPB;
 import org.apache.hadoop.net.NetUtils;
 import org.apache.hadoop.ozone.OzoneAcl;
-import org.apache.hadoop.ozone.ksm.KSMConfigKeys;
+import org.apache.hadoop.ozone.om.OMConfigKeys;
 import org.apache.hadoop.ozone.protocol.proto
-    .KeySpaceManagerProtocolProtos.ServicePort;
+    .OzoneManagerProtocolProtos.ServicePort;
 import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
-import org.apache.hadoop.ozone.protocolPB.KSMPBHelper;
+import org.apache.hadoop.ozone.protocolPB.OMPBHelper;
 import org.apache.hadoop.hdds.scm.ScmConfigKeys;
 import org.apache.hadoop.hdds.scm.XceiverClientManager;
 import org.apache.hadoop.hdds.scm.protocolPB
@@ -80,7 +78,7 @@ import java.util.UUID;
 import java.util.stream.Collectors;
 
 /**
- * Ozone RPC Client Implementation, it connects to KSM, SCM and DataNode
+ * Ozone RPC Client Implementation, it connects to OM, SCM and DataNode
  * to execute client calls. This uses RPC protocol for communication
  * with the servers.
  */
@@ -92,8 +90,8 @@ public class RpcClient implements ClientProtocol {
   private final OzoneConfiguration conf;
   private final StorageContainerLocationProtocolClientSideTranslatorPB
       storageContainerLocationClient;
-  private final KeySpaceManagerProtocolClientSideTranslatorPB
-      keySpaceManagerClient;
+  private final OzoneManagerProtocolClientSideTranslatorPB
+      ozoneManagerClient;
   private final XceiverClientManager xceiverClientManager;
   private final int chunkSize;
   private final UserGroupInformation ugi;
@@ -109,20 +107,20 @@ public class RpcClient implements ClientProtocol {
     Preconditions.checkNotNull(conf);
     this.conf = new OzoneConfiguration(conf);
     this.ugi = UserGroupInformation.getCurrentUser();
-    this.userRights = conf.getEnum(KSMConfigKeys.OZONE_KSM_USER_RIGHTS,
-        KSMConfigKeys.OZONE_KSM_USER_RIGHTS_DEFAULT);
-    this.groupRights = conf.getEnum(KSMConfigKeys.OZONE_KSM_GROUP_RIGHTS,
-        KSMConfigKeys.OZONE_KSM_GROUP_RIGHTS_DEFAULT);
-    long ksmVersion =
-        RPC.getProtocolVersion(KeySpaceManagerProtocolPB.class);
-    InetSocketAddress ksmAddress = KsmUtils
-        .getKsmAddressForClients(conf);
-    RPC.setProtocolEngine(conf, KeySpaceManagerProtocolPB.class,
+    this.userRights = conf.getEnum(OMConfigKeys.OZONE_OM_USER_RIGHTS,
+        OMConfigKeys.OZONE_OM_USER_RIGHTS_DEFAULT);
+    this.groupRights = conf.getEnum(OMConfigKeys.OZONE_OM_GROUP_RIGHTS,
+        OMConfigKeys.OZONE_OM_GROUP_RIGHTS_DEFAULT);
+    long omVersion =
+        RPC.getProtocolVersion(OzoneManagerProtocolPB.class);
+    InetSocketAddress omAddress = OmUtils
+        .getOmAddressForClients(conf);
+    RPC.setProtocolEngine(conf, OzoneManagerProtocolPB.class,
         ProtobufRpcEngine.class);
-    this.keySpaceManagerClient =
-        new KeySpaceManagerProtocolClientSideTranslatorPB(
-            RPC.getProxy(KeySpaceManagerProtocolPB.class, ksmVersion,
-                ksmAddress, UserGroupInformation.getCurrentUser(), conf,
+    this.ozoneManagerClient =
+        new OzoneManagerProtocolClientSideTranslatorPB(
+            RPC.getProxy(OzoneManagerProtocolPB.class, omVersion,
+                omAddress, UserGroupInformation.getCurrentUser(), conf,
                 NetUtils.getDefaultSocketFactory(conf),
                 Client.getRpcTimeout(conf)));
 
@@ -155,7 +153,7 @@ public class RpcClient implements ClientProtocol {
   }
 
   private InetSocketAddress getScmAddressForClient() throws IOException {
-    List<ServiceInfo> services = keySpaceManagerClient.getServiceList();
+    List<ServiceInfo> services = ozoneManagerClient.getServiceList();
     ServiceInfo scmInfo = services.stream().filter(
         a -> a.getNodeType().equals(HddsProtos.NodeType.SCM))
         .collect(Collectors.toList()).get(0);
@@ -195,7 +193,7 @@ public class RpcClient implements ClientProtocol {
       listOfAcls.addAll(volArgs.getAcls());
     }
 
-    KsmVolumeArgs.Builder builder = KsmVolumeArgs.newBuilder();
+    OmVolumeArgs.Builder builder = OmVolumeArgs.newBuilder();
     builder.setVolume(volumeName);
     builder.setAdminName(admin);
     builder.setOwnerName(owner);
@@ -204,12 +202,12 @@ public class RpcClient implements ClientProtocol {
     //Remove duplicates and add ACLs
     for (OzoneAcl ozoneAcl :
         listOfAcls.stream().distinct().collect(Collectors.toList())) {
-      builder.addOzoneAcls(KSMPBHelper.convertOzoneAcl(ozoneAcl));
+      builder.addOzoneAcls(OMPBHelper.convertOzoneAcl(ozoneAcl));
     }
 
     LOG.info("Creating Volume: {}, with {} as owner and quota set to {} bytes.",
         volumeName, owner, quota);
-    keySpaceManagerClient.createVolume(builder.build());
+    ozoneManagerClient.createVolume(builder.build());
   }
 
   @Override
@@ -217,7 +215,7 @@ public class RpcClient implements ClientProtocol {
       throws IOException {
     HddsClientUtils.verifyResourceName(volumeName);
     Preconditions.checkNotNull(owner);
-    keySpaceManagerClient.setOwner(volumeName, owner);
+    ozoneManagerClient.setOwner(volumeName, owner);
   }
 
   @Override
@@ -226,14 +224,14 @@ public class RpcClient implements ClientProtocol {
     HddsClientUtils.verifyResourceName(volumeName);
     Preconditions.checkNotNull(quota);
     long quotaInBytes = quota.sizeInBytes();
-    keySpaceManagerClient.setQuota(volumeName, quotaInBytes);
+    ozoneManagerClient.setQuota(volumeName, quotaInBytes);
   }
 
   @Override
   public OzoneVolume getVolumeDetails(String volumeName)
       throws IOException {
     HddsClientUtils.verifyResourceName(volumeName);
-    KsmVolumeArgs volume = keySpaceManagerClient.getVolumeInfo(volumeName);
+    OmVolumeArgs volume = ozoneManagerClient.getVolumeInfo(volumeName);
     return new OzoneVolume(
         conf,
         this,
@@ -243,7 +241,7 @@ public class RpcClient implements ClientProtocol {
         volume.getQuotaInBytes(),
         volume.getCreationTime(),
         volume.getAclMap().ozoneAclGetProtobuf().stream().
-            map(KSMPBHelper::convertOzoneAcl).collect(Collectors.toList()));
+            map(OMPBHelper::convertOzoneAcl).collect(Collectors.toList()));
   }
 
   @Override
@@ -255,14 +253,14 @@ public class RpcClient implements ClientProtocol {
   @Override
   public void deleteVolume(String volumeName) throws IOException {
     HddsClientUtils.verifyResourceName(volumeName);
-    keySpaceManagerClient.deleteVolume(volumeName);
+    ozoneManagerClient.deleteVolume(volumeName);
   }
 
   @Override
   public List<OzoneVolume> listVolumes(String volumePrefix, String prevVolume,
                                        int maxListResult)
       throws IOException {
-    List<KsmVolumeArgs> volumes = keySpaceManagerClient.listAllVolumes(
+    List<OmVolumeArgs> volumes = ozoneManagerClient.listAllVolumes(
         volumePrefix, prevVolume, maxListResult);
 
     return volumes.stream().map(volume -> new OzoneVolume(
@@ -274,7 +272,7 @@ public class RpcClient implements ClientProtocol {
         volume.getQuotaInBytes(),
         volume.getCreationTime(),
         volume.getAclMap().ozoneAclGetProtobuf().stream().
-            map(KSMPBHelper::convertOzoneAcl).collect(Collectors.toList())))
+            map(OMPBHelper::convertOzoneAcl).collect(Collectors.toList())))
         .collect(Collectors.toList());
   }
 
@@ -282,7 +280,7 @@ public class RpcClient implements ClientProtocol {
   public List<OzoneVolume> listVolumes(String user, String volumePrefix,
                                        String prevVolume, int maxListResult)
       throws IOException {
-    List<KsmVolumeArgs> volumes = keySpaceManagerClient.listVolumeByUser(
+    List<OmVolumeArgs> volumes = ozoneManagerClient.listVolumeByUser(
         user, volumePrefix, prevVolume, maxListResult);
 
     return volumes.stream().map(volume -> new OzoneVolume(
@@ -294,7 +292,7 @@ public class RpcClient implements ClientProtocol {
         volume.getQuotaInBytes(),
         volume.getCreationTime(),
         volume.getAclMap().ozoneAclGetProtobuf().stream().
-            map(KSMPBHelper::convertOzoneAcl).collect(Collectors.toList())))
+            map(OMPBHelper::convertOzoneAcl).collect(Collectors.toList())))
         .collect(Collectors.toList());
   }
 
@@ -329,7 +327,7 @@ public class RpcClient implements ClientProtocol {
       listOfAcls.addAll(bucketArgs.getAcls());
     }
 
-    KsmBucketInfo.Builder builder = KsmBucketInfo.newBuilder();
+    OmBucketInfo.Builder builder = OmBucketInfo.newBuilder();
     builder.setVolumeName(volumeName)
         .setBucketName(bucketName)
         .setIsVersionEnabled(isVersionEnabled)
@@ -339,7 +337,7 @@ public class RpcClient implements ClientProtocol {
     LOG.info("Creating Bucket: {}/{}, with Versioning {} and " +
             "Storage Type set to {}", volumeName, bucketName, isVersionEnabled,
             storageType);
-    keySpaceManagerClient.createBucket(builder.build());
+    ozoneManagerClient.createBucket(builder.build());
   }
 
   @Override
@@ -348,11 +346,11 @@ public class RpcClient implements ClientProtocol {
       throws IOException {
     HddsClientUtils.verifyResourceName(volumeName, bucketName);
     Preconditions.checkNotNull(addAcls);
-    KsmBucketArgs.Builder builder = KsmBucketArgs.newBuilder();
+    OmBucketArgs.Builder builder = OmBucketArgs.newBuilder();
     builder.setVolumeName(volumeName)
         .setBucketName(bucketName)
         .setAddAcls(addAcls);
-    keySpaceManagerClient.setBucketProperty(builder.build());
+    ozoneManagerClient.setBucketProperty(builder.build());
   }
 
   @Override
@@ -361,11 +359,11 @@ public class RpcClient implements ClientProtocol {
       throws IOException {
     HddsClientUtils.verifyResourceName(volumeName, bucketName);
     Preconditions.checkNotNull(removeAcls);
-    KsmBucketArgs.Builder builder = KsmBucketArgs.newBuilder();
+    OmBucketArgs.Builder builder = OmBucketArgs.newBuilder();
     builder.setVolumeName(volumeName)
         .setBucketName(bucketName)
         .setRemoveAcls(removeAcls);
-    keySpaceManagerClient.setBucketProperty(builder.build());
+    ozoneManagerClient.setBucketProperty(builder.build());
   }
 
   @Override
@@ -374,11 +372,11 @@ public class RpcClient implements ClientProtocol {
       throws IOException {
     HddsClientUtils.verifyResourceName(volumeName, bucketName);
     Preconditions.checkNotNull(versioning);
-    KsmBucketArgs.Builder builder = KsmBucketArgs.newBuilder();
+    OmBucketArgs.Builder builder = OmBucketArgs.newBuilder();
     builder.setVolumeName(volumeName)
         .setBucketName(bucketName)
         .setIsVersionEnabled(versioning);
-    keySpaceManagerClient.setBucketProperty(builder.build());
+    ozoneManagerClient.setBucketProperty(builder.build());
   }
 
   @Override
@@ -387,18 +385,18 @@ public class RpcClient implements ClientProtocol {
       throws IOException {
     HddsClientUtils.verifyResourceName(volumeName, bucketName);
     Preconditions.checkNotNull(storageType);
-    KsmBucketArgs.Builder builder = KsmBucketArgs.newBuilder();
+    OmBucketArgs.Builder builder = OmBucketArgs.newBuilder();
     builder.setVolumeName(volumeName)
         .setBucketName(bucketName)
         .setStorageType(storageType);
-    keySpaceManagerClient.setBucketProperty(builder.build());
+    ozoneManagerClient.setBucketProperty(builder.build());
   }
 
   @Override
   public void deleteBucket(
       String volumeName, String bucketName) throws IOException {
     HddsClientUtils.verifyResourceName(volumeName, bucketName);
-    keySpaceManagerClient.deleteBucket(volumeName, bucketName);
+    ozoneManagerClient.deleteBucket(volumeName, bucketName);
   }
 
   @Override
@@ -411,8 +409,8 @@ public class RpcClient implements ClientProtocol {
   public OzoneBucket getBucketDetails(
       String volumeName, String bucketName) throws IOException {
     HddsClientUtils.verifyResourceName(volumeName, bucketName);
-    KsmBucketInfo bucketArgs =
-        keySpaceManagerClient.getBucketInfo(volumeName, bucketName);
+    OmBucketInfo bucketArgs =
+        ozoneManagerClient.getBucketInfo(volumeName, bucketName);
     return new OzoneBucket(
         conf,
         this,
@@ -428,7 +426,7 @@ public class RpcClient implements ClientProtocol {
   public List<OzoneBucket> listBuckets(String volumeName, String bucketPrefix,
                                        String prevBucket, int maxListResult)
       throws IOException {
-    List<KsmBucketInfo> buckets = keySpaceManagerClient.listBuckets(
+    List<OmBucketInfo> buckets = ozoneManagerClient.listBuckets(
         volumeName, prevBucket, bucketPrefix, maxListResult);
 
     return buckets.stream().map(bucket -> new OzoneBucket(
@@ -451,7 +449,7 @@ public class RpcClient implements ClientProtocol {
     HddsClientUtils.verifyResourceName(volumeName, bucketName);
     HddsClientUtils.checkNotNull(keyName, type, factor);
     String requestId = UUID.randomUUID().toString();
-    KsmKeyArgs keyArgs = new KsmKeyArgs.Builder()
+    OmKeyArgs keyArgs = new OmKeyArgs.Builder()
         .setVolumeName(volumeName)
         .setBucketName(bucketName)
         .setKeyName(keyName)
@@ -460,13 +458,13 @@ public class RpcClient implements ClientProtocol {
         .setFactor(HddsProtos.ReplicationFactor.valueOf(factor.getValue()))
         .build();
 
-    OpenKeySession openKey = keySpaceManagerClient.openKey(keyArgs);
+    OpenKeySession openKey = ozoneManagerClient.openKey(keyArgs);
     ChunkGroupOutputStream groupOutputStream =
         new ChunkGroupOutputStream.Builder()
             .setHandler(openKey)
             .setXceiverClientManager(xceiverClientManager)
             .setScmClient(storageContainerLocationClient)
-            .setKsmClient(keySpaceManagerClient)
+            .setOmClient(ozoneManagerClient)
             .setChunkSize(chunkSize)
             .setRequestID(requestId)
             .setType(HddsProtos.ReplicationType.valueOf(type.toString()))
@@ -485,14 +483,14 @@ public class RpcClient implements ClientProtocol {
     HddsClientUtils.verifyResourceName(volumeName, bucketName);
     Preconditions.checkNotNull(keyName);
     String requestId = UUID.randomUUID().toString();
-    KsmKeyArgs keyArgs = new KsmKeyArgs.Builder()
+    OmKeyArgs keyArgs = new OmKeyArgs.Builder()
         .setVolumeName(volumeName)
         .setBucketName(bucketName)
         .setKeyName(keyName)
         .build();
-    KsmKeyInfo keyInfo = keySpaceManagerClient.lookupKey(keyArgs);
+    OmKeyInfo keyInfo = ozoneManagerClient.lookupKey(keyArgs);
     LengthInputStream lengthInputStream =
-        ChunkGroupInputStream.getFromKsmKeyInfo(
+        ChunkGroupInputStream.getFromOmKeyInfo(
             keyInfo, xceiverClientManager, storageContainerLocationClient,
             requestId);
     return new OzoneInputStream(
@@ -505,12 +503,12 @@ public class RpcClient implements ClientProtocol {
       throws IOException {
     HddsClientUtils.verifyResourceName(volumeName, bucketName);
     Preconditions.checkNotNull(keyName);
-    KsmKeyArgs keyArgs = new KsmKeyArgs.Builder()
+    OmKeyArgs keyArgs = new OmKeyArgs.Builder()
         .setVolumeName(volumeName)
         .setBucketName(bucketName)
         .setKeyName(keyName)
         .build();
-    keySpaceManagerClient.deleteKey(keyArgs);
+    ozoneManagerClient.deleteKey(keyArgs);
   }
 
   @Override
@@ -518,12 +516,12 @@ public class RpcClient implements ClientProtocol {
       String fromKeyName, String toKeyName) throws IOException {
     HddsClientUtils.verifyResourceName(volumeName, bucketName);
     HddsClientUtils.checkNotNull(fromKeyName, toKeyName);
-    KsmKeyArgs keyArgs = new KsmKeyArgs.Builder()
+    OmKeyArgs keyArgs = new OmKeyArgs.Builder()
         .setVolumeName(volumeName)
         .setBucketName(bucketName)
         .setKeyName(fromKeyName)
         .build();
-    keySpaceManagerClient.renameKey(keyArgs, toKeyName);
+    ozoneManagerClient.renameKey(keyArgs, toKeyName);
   }
 
   @Override
@@ -531,7 +529,7 @@ public class RpcClient implements ClientProtocol {
                                  String keyPrefix, String prevKey,
                                  int maxListResult)
       throws IOException {
-    List<KsmKeyInfo> keys = keySpaceManagerClient.listKeys(
+    List<OmKeyInfo> keys = ozoneManagerClient.listKeys(
         volumeName, bucketName, prevKey, keyPrefix, maxListResult);
 
     return keys.stream().map(key -> new OzoneKey(
@@ -551,12 +549,12 @@ public class RpcClient implements ClientProtocol {
     Preconditions.checkNotNull(volumeName);
     Preconditions.checkNotNull(bucketName);
     Preconditions.checkNotNull(keyName);
-    KsmKeyArgs keyArgs = new KsmKeyArgs.Builder()
+    OmKeyArgs keyArgs = new OmKeyArgs.Builder()
         .setVolumeName(volumeName)
         .setBucketName(bucketName)
         .setKeyName(keyName)
         .build();
-    KsmKeyInfo keyInfo = keySpaceManagerClient.lookupKey(keyArgs);
+    OmKeyInfo keyInfo = ozoneManagerClient.lookupKey(keyArgs);
     return new OzoneKey(keyInfo.getVolumeName(),
                         keyInfo.getBucketName(),
                         keyInfo.getKeyName(),
@@ -568,7 +566,7 @@ public class RpcClient implements ClientProtocol {
   @Override
   public void close() throws IOException {
     IOUtils.cleanupWithLogger(LOG, storageContainerLocationClient);
-    IOUtils.cleanupWithLogger(LOG, keySpaceManagerClient);
+    IOUtils.cleanupWithLogger(LOG, ozoneManagerClient);
     IOUtils.cleanupWithLogger(LOG, xceiverClientManager);
   }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/061b1685/hadoop-ozone/client/src/test/java/org/apache/hadoop/ozone/client/TestHddsClientUtils.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/client/src/test/java/org/apache/hadoop/ozone/client/TestHddsClientUtils.java b/hadoop-ozone/client/src/test/java/org/apache/hadoop/ozone/client/TestHddsClientUtils.java
index a270f61..3aefe8a 100644
--- a/hadoop-ozone/client/src/test/java/org/apache/hadoop/ozone/client/TestHddsClientUtils.java
+++ b/hadoop-ozone/client/src/test/java/org/apache/hadoop/ozone/client/TestHddsClientUtils.java
@@ -20,7 +20,7 @@ package org.apache.hadoop.ozone.client;
 
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hdds.conf.OzoneConfiguration;
-import org.apache.hadoop.ozone.ksm.KSMConfigKeys;
+import org.apache.hadoop.ozone.om.OMConfigKeys;
 import org.apache.hadoop.hdds.scm.ScmConfigKeys;
 import org.junit.Rule;
 import org.junit.Test;
@@ -30,7 +30,7 @@ import org.junit.rules.Timeout;
 import java.net.InetSocketAddress;
 
 import static org.apache.hadoop.hdds.HddsUtils.getScmAddressForClients;
-import static org.apache.hadoop.ozone.KsmUtils.getKsmAddress;
+import static org.apache.hadoop.ozone.OmUtils.getOmAddress;
 import static org.hamcrest.core.Is.is;
 import static org.junit.Assert.assertThat;
 
@@ -79,27 +79,27 @@ public class TestHddsClientUtils {
   }
 
   @Test
-  public void testGetKSMAddress() {
+  public void testGetOmAddress() {
     final Configuration conf = new OzoneConfiguration();
 
     // First try a client address with just a host name. Verify it falls
     // back to the default port.
-    conf.set(KSMConfigKeys.OZONE_KSM_ADDRESS_KEY, "1.2.3.4");
-    InetSocketAddress addr = getKsmAddress(conf);
+    conf.set(OMConfigKeys.OZONE_OM_ADDRESS_KEY, "1.2.3.4");
+    InetSocketAddress addr = getOmAddress(conf);
     assertThat(addr.getHostString(), is("1.2.3.4"));
-    assertThat(addr.getPort(), is(KSMConfigKeys.OZONE_KSM_PORT_DEFAULT));
+    assertThat(addr.getPort(), is(OMConfigKeys.OZONE_OM_PORT_DEFAULT));
 
     // Next try a client address with just a host name and port. Verify the port
-    // is ignored and the default KSM port is used.
-    conf.set(KSMConfigKeys.OZONE_KSM_ADDRESS_KEY, "1.2.3.4:100");
-    addr = getKsmAddress(conf);
+    // is ignored and the default OM port is used.
+    conf.set(OMConfigKeys.OZONE_OM_ADDRESS_KEY, "1.2.3.4:100");
+    addr = getOmAddress(conf);
     assertThat(addr.getHostString(), is("1.2.3.4"));
     assertThat(addr.getPort(), is(100));
 
     // Assert the we are able to use default configs if no value is specified.
-    conf.set(KSMConfigKeys.OZONE_KSM_ADDRESS_KEY, "");
-    addr = getKsmAddress(conf);
+    conf.set(OMConfigKeys.OZONE_OM_ADDRESS_KEY, "");
+    addr = getOmAddress(conf);
     assertThat(addr.getHostString(), is("0.0.0.0"));
-    assertThat(addr.getPort(), is(KSMConfigKeys.OZONE_KSM_PORT_DEFAULT));
+    assertThat(addr.getPort(), is(OMConfigKeys.OZONE_OM_PORT_DEFAULT));
   }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/061b1685/hadoop-ozone/common/pom.xml
----------------------------------------------------------------------
diff --git a/hadoop-ozone/common/pom.xml b/hadoop-ozone/common/pom.xml
index d8581d1..83d023e 100644
--- a/hadoop-ozone/common/pom.xml
+++ b/hadoop-ozone/common/pom.xml
@@ -69,7 +69,7 @@ http://maven.apache.org/xsd/maven-4.0.0.xsd">
               <source>
                 <directory>${basedir}/src/main/proto</directory>
                 <includes>
-                  <include>KeySpaceManagerProtocol.proto</include>
+                  <include>OzoneManagerProtocol.proto</include>
                 </includes>
               </source>
             </configuration>

http://git-wip-us.apache.org/repos/asf/hadoop/blob/061b1685/hadoop-ozone/common/src/main/bin/ozone
----------------------------------------------------------------------
diff --git a/hadoop-ozone/common/src/main/bin/ozone b/hadoop-ozone/common/src/main/bin/ozone
index 390f089..9495eff 100755
--- a/hadoop-ozone/common/src/main/bin/ozone
+++ b/hadoop-ozone/common/src/main/bin/ozone
@@ -38,10 +38,9 @@ function hadoop_usage
   hadoop_add_subcommand "envvars" client "display computed Hadoop environment variables"
   hadoop_add_subcommand "freon" client "runs an ozone data generator"
   hadoop_add_subcommand "genesis" client "runs a collection of ozone benchmarks to help with tuning."
-  hadoop_add_subcommand "getozoneconf" client "get ozone config values from
-  configuration"
+  hadoop_add_subcommand "getozoneconf" client "get ozone config values from configuration"
   hadoop_add_subcommand "jmxget" admin "get JMX exported values from NameNode or DataNode."
-  hadoop_add_subcommand "ksm" daemon "Ozone keyspace manager"
+  hadoop_add_subcommand "om" daemon "Ozone Manager"
   hadoop_add_subcommand "o3" client "command line interface for ozone"
   hadoop_add_subcommand "noz" client "ozone debug tool, convert ozone metadata into relational data"
   hadoop_add_subcommand "scm" daemon "run the Storage Container Manager service"
@@ -94,9 +93,9 @@ function ozonecmd_case
     getozoneconf)
       HADOOP_CLASSNAME=org.apache.hadoop.ozone.freon.OzoneGetConf;
     ;;
-    ksm)
+    om)
       HADOOP_SUBCMD_SUPPORTDAEMONIZATION="true"
-      HADOOP_CLASSNAME=org.apache.hadoop.ozone.ksm.KeySpaceManager
+      HADOOP_CLASSNAME=org.apache.hadoop.ozone.om.OzoneManager
     ;;
     oz)
       HADOOP_CLASSNAME=org.apache.hadoop.ozone.web.ozShell.Shell

http://git-wip-us.apache.org/repos/asf/hadoop/blob/061b1685/hadoop-ozone/common/src/main/bin/start-ozone.sh
----------------------------------------------------------------------
diff --git a/hadoop-ozone/common/src/main/bin/start-ozone.sh b/hadoop-ozone/common/src/main/bin/start-ozone.sh
index 92bc4a8..29c3674 100644
--- a/hadoop-ozone/common/src/main/bin/start-ozone.sh
+++ b/hadoop-ozone/common/src/main/bin/start-ozone.sh
@@ -179,19 +179,19 @@ if [[ "${AUTOHA_ENABLED}" = "true" ]]; then
 fi
 
 #---------------------------------------------------------
-# Ozone keyspacemanager nodes
-KSM_NODES=$("${HADOOP_HDFS_HOME}/bin/ozone" getozoneconf -keyspacemanagers 2>/dev/null)
-echo "Starting key space manager nodes [${KSM_NODES}]"
-if [[ "${KSM_NODES}" == "0.0.0.0" ]]; then
-  KSM_NODES=$(hostname)
+# Ozone ozonemanager nodes
+OM_NODES=$("${HADOOP_HDFS_HOME}/bin/ozone" getozoneconf -ozonemanagers 2>/dev/null)
+echo "Starting Ozone Manager nodes [${OM_NODES}]"
+if [[ "${OM_NODES}" == "0.0.0.0" ]]; then
+  OM_NODES=$(hostname)
 fi
 
-hadoop_uservar_su hdfs ksm "${HADOOP_HDFS_HOME}/bin/ozone" \
+hadoop_uservar_su hdfs om "${HADOOP_HDFS_HOME}/bin/ozone" \
   --workers \
   --config "${HADOOP_CONF_DIR}" \
-  --hostnames "${KSM_NODES}" \
+  --hostnames "${OM_NODES}" \
   --daemon start \
-  ksm
+  om
 
 HADOOP_JUMBO_RETCOUNTER=$?
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/061b1685/hadoop-ozone/common/src/main/bin/stop-ozone.sh
----------------------------------------------------------------------
diff --git a/hadoop-ozone/common/src/main/bin/stop-ozone.sh b/hadoop-ozone/common/src/main/bin/stop-ozone.sh
index be55be4..5f5faf0 100644
--- a/hadoop-ozone/common/src/main/bin/stop-ozone.sh
+++ b/hadoop-ozone/common/src/main/bin/stop-ozone.sh
@@ -73,19 +73,19 @@ else
 fi
 
 #---------------------------------------------------------
-# Ozone keyspacemanager nodes
-KSM_NODES=$("${HADOOP_HDFS_HOME}/bin/ozone" getozoneconf -keyspacemanagers 2>/dev/null)
-echo "Stopping key space manager nodes [${KSM_NODES}]"
-if [[ "${KSM_NODES}" == "0.0.0.0" ]]; then
-  KSM_NODES=$(hostname)
+# Ozone Manager nodes
+OM_NODES=$("${HADOOP_HDFS_HOME}/bin/ozone" getozoneconf -ozonemanagers 2>/dev/null)
+echo "Stopping Ozone Manager nodes [${OM_NODES}]"
+if [[ "${OM_NODES}" == "0.0.0.0" ]]; then
+  OM_NODES=$(hostname)
 fi
 
-hadoop_uservar_su hdfs ksm "${HADOOP_HDFS_HOME}/bin/ozone" \
+hadoop_uservar_su hdfs om "${HADOOP_HDFS_HOME}/bin/ozone" \
   --workers \
   --config "${HADOOP_CONF_DIR}" \
-  --hostnames "${KSM_NODES}" \
+  --hostnames "${OM_NODES}" \
   --daemon stop \
-  ksm
+  om
 
 #---------------------------------------------------------
 # Ozone storagecontainermanager nodes

http://git-wip-us.apache.org/repos/asf/hadoop/blob/061b1685/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/KsmUtils.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/KsmUtils.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/KsmUtils.java
deleted file mode 100644
index 1025963..0000000
--- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/KsmUtils.java
+++ /dev/null
@@ -1,95 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with this
- * work for additional information regarding copyright ownership.  The ASF
- * licenses this file to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations under
- * the License.
- */
-
-package org.apache.hadoop.ozone;
-
-import java.net.InetSocketAddress;
-
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.net.NetUtils;
-
-import com.google.common.base.Optional;
-import static org.apache.hadoop.hdds.HddsUtils.getHostNameFromConfigKeys;
-import static org.apache.hadoop.hdds.HddsUtils.getPortNumberFromConfigKeys;
-import static org.apache.hadoop.ozone.ksm.KSMConfigKeys.OZONE_KSM_ADDRESS_KEY;
-import static org.apache.hadoop.ozone.ksm.KSMConfigKeys.OZONE_KSM_HTTP_ADDRESS_KEY;
-import static org.apache.hadoop.ozone.ksm.KSMConfigKeys.OZONE_KSM_HTTP_BIND_PORT_DEFAULT;
-import static org.apache.hadoop.ozone.ksm.KSMConfigKeys
-    .OZONE_KSM_BIND_HOST_DEFAULT;
-import static org.apache.hadoop.ozone.ksm.KSMConfigKeys.OZONE_KSM_PORT_DEFAULT;
-
-/**
- * Stateless helper functions for the server and client side of KSM
- * communication.
- */
-public final class KsmUtils {
-
-  private KsmUtils() {
-  }
-
-  /**
-   * Retrieve the socket address that is used by KSM.
-   * @param conf
-   * @return Target InetSocketAddress for the SCM service endpoint.
-   */
-  public static InetSocketAddress getKsmAddress(
-      Configuration conf) {
-    final Optional<String> host = getHostNameFromConfigKeys(conf,
-        OZONE_KSM_ADDRESS_KEY);
-
-    return NetUtils.createSocketAddr(
-        host.or(OZONE_KSM_BIND_HOST_DEFAULT) + ":" +
-            getKsmRpcPort(conf));
-  }
-
-  /**
-   * Retrieve the socket address that should be used by clients to connect
-   * to KSM.
-   * @param conf
-   * @return Target InetSocketAddress for the KSM service endpoint.
-   */
-  public static InetSocketAddress getKsmAddressForClients(
-      Configuration conf) {
-    final Optional<String> host = getHostNameFromConfigKeys(conf,
-        OZONE_KSM_ADDRESS_KEY);
-
-    if (!host.isPresent()) {
-      throw new IllegalArgumentException(
-          OZONE_KSM_ADDRESS_KEY + " must be defined. See" +
-              " https://wiki.apache.org/hadoop/Ozone#Configuration for" +
-              " details on configuring Ozone.");
-    }
-
-    return NetUtils.createSocketAddr(
-        host.get() + ":" + getKsmRpcPort(conf));
-  }
-
-  public static int getKsmRpcPort(Configuration conf) {
-    // If no port number is specified then we'll just try the defaultBindPort.
-    final Optional<Integer> port = getPortNumberFromConfigKeys(conf,
-        OZONE_KSM_ADDRESS_KEY);
-    return port.or(OZONE_KSM_PORT_DEFAULT);
-  }
-
-  public static int getKsmRestPort(Configuration conf) {
-    // If no port number is specified then we'll just try the default
-    // HTTP BindPort.
-    final Optional<Integer> port =
-        getPortNumberFromConfigKeys(conf, OZONE_KSM_HTTP_ADDRESS_KEY);
-    return port.or(OZONE_KSM_HTTP_BIND_PORT_DEFAULT);
-  }
-}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/061b1685/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/OmUtils.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/OmUtils.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/OmUtils.java
new file mode 100644
index 0000000..0974104
--- /dev/null
+++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/OmUtils.java
@@ -0,0 +1,94 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with this
+ * work for additional information regarding copyright ownership.  The ASF
+ * licenses this file to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations under
+ * the License.
+ */
+
+package org.apache.hadoop.ozone;
+
+import java.net.InetSocketAddress;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.net.NetUtils;
+
+import com.google.common.base.Optional;
+import static org.apache.hadoop.hdds.HddsUtils.getHostNameFromConfigKeys;
+import static org.apache.hadoop.hdds.HddsUtils.getPortNumberFromConfigKeys;
+import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_ADDRESS_KEY;
+import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_BIND_HOST_DEFAULT;
+import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_HTTP_ADDRESS_KEY;
+import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_HTTP_BIND_PORT_DEFAULT;
+import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_PORT_DEFAULT;
+
+/**
+ * Stateless helper functions for the server and client side of OM
+ * communication.
+ */
+public final class OmUtils {
+
+  private OmUtils() {
+  }
+
+  /**
+   * Retrieve the socket address that is used by OM.
+   * @param conf
+   * @return Target InetSocketAddress for the SCM service endpoint.
+   */
+  public static InetSocketAddress getOmAddress(
+      Configuration conf) {
+    final Optional<String> host = getHostNameFromConfigKeys(conf,
+        OZONE_OM_ADDRESS_KEY);
+
+    return NetUtils.createSocketAddr(
+        host.or(OZONE_OM_BIND_HOST_DEFAULT) + ":" +
+            getOmRpcPort(conf));
+  }
+
+  /**
+   * Retrieve the socket address that should be used by clients to connect
+   * to OM.
+   * @param conf
+   * @return Target InetSocketAddress for the OM service endpoint.
+   */
+  public static InetSocketAddress getOmAddressForClients(
+      Configuration conf) {
+    final Optional<String> host = getHostNameFromConfigKeys(conf,
+        OZONE_OM_ADDRESS_KEY);
+
+    if (!host.isPresent()) {
+      throw new IllegalArgumentException(
+          OZONE_OM_ADDRESS_KEY + " must be defined. See" +
+              " https://wiki.apache.org/hadoop/Ozone#Configuration for" +
+              " details on configuring Ozone.");
+    }
+
+    return NetUtils.createSocketAddr(
+        host.get() + ":" + getOmRpcPort(conf));
+  }
+
+  public static int getOmRpcPort(Configuration conf) {
+    // If no port number is specified then we'll just try the defaultBindPort.
+    final Optional<Integer> port = getPortNumberFromConfigKeys(conf,
+        OZONE_OM_ADDRESS_KEY);
+    return port.or(OZONE_OM_PORT_DEFAULT);
+  }
+
+  public static int getOmRestPort(Configuration conf) {
+    // If no port number is specified then we'll just try the default
+    // HTTP BindPort.
+    final Optional<Integer> port =
+        getPortNumberFromConfigKeys(conf, OZONE_OM_HTTP_ADDRESS_KEY);
+    return port.or(OZONE_OM_HTTP_BIND_PORT_DEFAULT);
+  }
+}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/061b1685/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/freon/OzoneGetConf.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/freon/OzoneGetConf.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/freon/OzoneGetConf.java
index d5f9093..ffbca6a 100644
--- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/freon/OzoneGetConf.java
+++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/freon/OzoneGetConf.java
@@ -32,7 +32,7 @@ import org.apache.hadoop.hdfs.DFSUtil;
 import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.hdds.HddsUtils;
 import org.apache.hadoop.hdds.conf.OzoneConfiguration;
-import org.apache.hadoop.ozone.KsmUtils;
+import org.apache.hadoop.ozone.OmUtils;
 import org.apache.hadoop.security.SecurityUtil;
 import org.apache.hadoop.util.StringUtils;
 import org.apache.hadoop.util.Tool;
@@ -53,8 +53,8 @@ public class OzoneGetConf extends Configured implements Tool {
     EXCLUDE_FILE("-excludeFile",
         "gets the exclude file path that defines the datanodes " +
             "that need to decommissioned."),
-    KEYSPACEMANAGER("-keyspacemanagers",
-        "gets list of ozone key space manager nodes in the cluster"),
+    OZONEMANAGER("-ozonemanagers",
+        "gets list of Ozone Manager nodes in the cluster"),
     STORAGECONTAINERMANAGER("-storagecontainermanagers",
         "gets list of ozone storage container manager nodes in the cluster"),
     CONFKEY("-confKey [key]", "gets a specific key from the configuration");
@@ -63,8 +63,8 @@ public class OzoneGetConf extends Configured implements Tool {
 
     static {
       HANDLERS = new HashMap<String, OzoneGetConf.CommandHandler>();
-      HANDLERS.put(StringUtils.toLowerCase(KEYSPACEMANAGER.getName()),
-          new KeySpaceManagersCommandHandler());
+      HANDLERS.put(StringUtils.toLowerCase(OZONEMANAGER.getName()),
+          new OzoneManagersCommandHandler());
       HANDLERS.put(StringUtils.toLowerCase(STORAGECONTAINERMANAGER.getName()),
           new StorageContainerManagersCommandHandler());
       HANDLERS.put(StringUtils.toLowerCase(CONFKEY.getName()),
@@ -245,13 +245,13 @@ public class OzoneGetConf extends Configured implements Tool {
   }
 
   /**
-   * Handler for {@link Command#KEYSPACEMANAGER}.
+   * Handler for {@link Command#OZONEMANAGER}.
    */
-  static class KeySpaceManagersCommandHandler extends CommandHandler {
+  static class OzoneManagersCommandHandler extends CommandHandler {
     @Override
     public int doWorkInternal(OzoneGetConf tool, String[] args)
         throws IOException {
-      tool.printOut(KsmUtils.getKsmAddress(tool.getConf()).getHostName());
+      tool.printOut(OmUtils.getOmAddress(tool.getConf()).getHostName());
       return 0;
     }
   }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/061b1685/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/ksm/KSMConfigKeys.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/ksm/KSMConfigKeys.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/ksm/KSMConfigKeys.java
deleted file mode 100644
index 75cf613..0000000
--- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/ksm/KSMConfigKeys.java
+++ /dev/null
@@ -1,81 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with this
- * work for additional information regarding copyright ownership.  The ASF
- * licenses this file to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations under
- * the License.
- */
-
-package org.apache.hadoop.ozone.ksm;
-
-import org.apache.hadoop.ozone.OzoneAcl;
-/**
- * KSM Constants.
- */
-public final class KSMConfigKeys {
-  /**
-   * Never constructed.
-   */
-  private KSMConfigKeys() {
-  }
-
-
-  public static final String OZONE_KSM_HANDLER_COUNT_KEY =
-      "ozone.ksm.handler.count.key";
-  public static final int OZONE_KSM_HANDLER_COUNT_DEFAULT = 20;
-
-  public static final String OZONE_KSM_ADDRESS_KEY =
-      "ozone.ksm.address";
-  public static final String OZONE_KSM_BIND_HOST_DEFAULT =
-      "0.0.0.0";
-  public static final int OZONE_KSM_PORT_DEFAULT = 9862;
-
-  public static final String OZONE_KSM_HTTP_ENABLED_KEY =
-      "ozone.ksm.http.enabled";
-  public static final String OZONE_KSM_HTTP_BIND_HOST_KEY =
-      "ozone.ksm.http-bind-host";
-  public static final String OZONE_KSM_HTTPS_BIND_HOST_KEY =
-      "ozone.ksm.https-bind-host";
-  public static final String OZONE_KSM_HTTP_ADDRESS_KEY =
-      "ozone.ksm.http-address";
-  public static final String OZONE_KSM_HTTPS_ADDRESS_KEY =
-      "ozone.ksm.https-address";
-  public static final String OZONE_KSM_KEYTAB_FILE =
-      "ozone.ksm.keytab.file";
-  public static final String OZONE_KSM_HTTP_BIND_HOST_DEFAULT = "0.0.0.0";
-  public static final int OZONE_KSM_HTTP_BIND_PORT_DEFAULT = 9874;
-  public static final int OZONE_KSM_HTTPS_BIND_PORT_DEFAULT = 9875;
-
-  // LevelDB cache file uses an off-heap cache in LevelDB of 128 MB.
-  public static final String OZONE_KSM_DB_CACHE_SIZE_MB =
-      "ozone.ksm.db.cache.size.mb";
-  public static final int OZONE_KSM_DB_CACHE_SIZE_DEFAULT = 128;
-
-  public static final String OZONE_KSM_USER_MAX_VOLUME =
-      "ozone.ksm.user.max.volume";
-  public static final int OZONE_KSM_USER_MAX_VOLUME_DEFAULT = 1024;
-
-  // KSM Default user/group permissions
-  public static final String OZONE_KSM_USER_RIGHTS =
-      "ozone.ksm.user.rights";
-  public static final OzoneAcl.OzoneACLRights OZONE_KSM_USER_RIGHTS_DEFAULT =
-      OzoneAcl.OzoneACLRights.READ_WRITE;
-
-  public static final String OZONE_KSM_GROUP_RIGHTS =
-      "ozone.ksm.group.rights";
-  public static final OzoneAcl.OzoneACLRights OZONE_KSM_GROUP_RIGHTS_DEFAULT =
-      OzoneAcl.OzoneACLRights.READ_WRITE;
-
-  public static final String OZONE_KEY_DELETING_LIMIT_PER_TASK =
-      "ozone.key.deleting.limit.per.task";
-  public static final int OZONE_KEY_DELETING_LIMIT_PER_TASK_DEFAULT = 1000;
-}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/061b1685/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/ksm/helpers/KsmBucketArgs.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/ksm/helpers/KsmBucketArgs.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/ksm/helpers/KsmBucketArgs.java
deleted file mode 100644
index 1211b50..0000000
--- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/ksm/helpers/KsmBucketArgs.java
+++ /dev/null
@@ -1,233 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.ozone.ksm.helpers;
-
-import java.util.List;
-import java.util.stream.Collectors;
-
-import com.google.common.base.Preconditions;
-import org.apache.hadoop.fs.StorageType;
-import org.apache.hadoop.hdfs.protocolPB.PBHelperClient;
-import org.apache.hadoop.ozone.OzoneAcl;
-import org.apache.hadoop.ozone.protocol.proto
-    .KeySpaceManagerProtocolProtos.BucketArgs;
-import org.apache.hadoop.ozone.protocolPB.KSMPBHelper;
-
-/**
- * A class that encapsulates Bucket Arguments.
- */
-public final class KsmBucketArgs {
-  /**
-   * Name of the volume in which the bucket belongs to.
-   */
-  private final String volumeName;
-  /**
-   * Name of the bucket.
-   */
-  private final String bucketName;
-  /**
-   * ACL's that are to be added for the bucket.
-   */
-  private List<OzoneAcl> addAcls;
-  /**
-   * ACL's that are to be removed from the bucket.
-   */
-  private List<OzoneAcl> removeAcls;
-  /**
-   * Bucket Version flag.
-   */
-  private Boolean isVersionEnabled;
-  /**
-   * Type of storage to be used for this bucket.
-   * [RAM_DISK, SSD, DISK, ARCHIVE]
-   */
-  private StorageType storageType;
-
-  /**
-   * Private constructor, constructed via builder.
-   * @param volumeName - Volume name.
-   * @param bucketName - Bucket name.
-   * @param addAcls - ACL's to be added.
-   * @param removeAcls - ACL's to be removed.
-   * @param isVersionEnabled - Bucket version flag.
-   * @param storageType - Storage type to be used.
-   */
-  private KsmBucketArgs(String volumeName, String bucketName,
-      List<OzoneAcl> addAcls, List<OzoneAcl> removeAcls,
-      Boolean isVersionEnabled, StorageType storageType) {
-    this.volumeName = volumeName;
-    this.bucketName = bucketName;
-    this.addAcls = addAcls;
-    this.removeAcls = removeAcls;
-    this.isVersionEnabled = isVersionEnabled;
-    this.storageType = storageType;
-  }
-
-  /**
-   * Returns the Volume Name.
-   * @return String.
-   */
-  public String getVolumeName() {
-    return volumeName;
-  }
-
-  /**
-   * Returns the Bucket Name.
-   * @return String
-   */
-  public String getBucketName() {
-    return bucketName;
-  }
-
-  /**
-   * Returns the ACL's that are to be added.
-   * @return List<OzoneAclInfo>
-   */
-  public List<OzoneAcl> getAddAcls() {
-    return addAcls;
-  }
-
-  /**
-   * Returns the ACL's that are to be removed.
-   * @return List<OzoneAclInfo>
-   */
-  public List<OzoneAcl> getRemoveAcls() {
-    return removeAcls;
-  }
-
-  /**
-   * Returns true if bucket version is enabled, else false.
-   * @return isVersionEnabled
-   */
-  public Boolean getIsVersionEnabled() {
-    return isVersionEnabled;
-  }
-
-  /**
-   * Returns the type of storage to be used.
-   * @return StorageType
-   */
-  public StorageType getStorageType() {
-    return storageType;
-  }
-
-  /**
-   * Returns new builder class that builds a KsmBucketArgs.
-   *
-   * @return Builder
-   */
-  public static Builder newBuilder() {
-    return new Builder();
-  }
-
-  /**
-   * Builder for KsmBucketArgs.
-   */
-  public static class Builder {
-    private String volumeName;
-    private String bucketName;
-    private List<OzoneAcl> addAcls;
-    private List<OzoneAcl> removeAcls;
-    private Boolean isVersionEnabled;
-    private StorageType storageType;
-
-    public Builder setVolumeName(String volume) {
-      this.volumeName = volume;
-      return this;
-    }
-
-    public Builder setBucketName(String bucket) {
-      this.bucketName = bucket;
-      return this;
-    }
-
-    public Builder setAddAcls(List<OzoneAcl> acls) {
-      this.addAcls = acls;
-      return this;
-    }
-
-    public Builder setRemoveAcls(List<OzoneAcl> acls) {
-      this.removeAcls = acls;
-      return this;
-    }
-
-    public Builder setIsVersionEnabled(Boolean versionFlag) {
-      this.isVersionEnabled = versionFlag;
-      return this;
-    }
-
-    public Builder setStorageType(StorageType storage) {
-      this.storageType = storage;
-      return this;
-    }
-
-    /**
-     * Constructs the KsmBucketArgs.
-     * @return instance of KsmBucketArgs.
-     */
-    public KsmBucketArgs build() {
-      Preconditions.checkNotNull(volumeName);
-      Preconditions.checkNotNull(bucketName);
-      return new KsmBucketArgs(volumeName, bucketName, addAcls,
-          removeAcls, isVersionEnabled, storageType);
-    }
-  }
-
-  /**
-   * Creates BucketArgs protobuf from KsmBucketArgs.
-   */
-  public BucketArgs getProtobuf() {
-    BucketArgs.Builder builder = BucketArgs.newBuilder();
-    builder.setVolumeName(volumeName)
-        .setBucketName(bucketName);
-    if(addAcls != null && !addAcls.isEmpty()) {
-      builder.addAllAddAcls(addAcls.stream().map(
-          KSMPBHelper::convertOzoneAcl).collect(Collectors.toList()));
-    }
-    if(removeAcls != null && !removeAcls.isEmpty()) {
-      builder.addAllRemoveAcls(removeAcls.stream().map(
-          KSMPBHelper::convertOzoneAcl).collect(Collectors.toList()));
-    }
-    if(isVersionEnabled != null) {
-      builder.setIsVersionEnabled(isVersionEnabled);
-    }
-    if(storageType != null) {
-      builder.setStorageType(
-          PBHelperClient.convertStorageType(storageType));
-    }
-    return builder.build();
-  }
-
-  /**
-   * Parses BucketInfo protobuf and creates KsmBucketArgs.
-   * @param bucketArgs
-   * @return instance of KsmBucketArgs
-   */
-  public static KsmBucketArgs getFromProtobuf(BucketArgs bucketArgs) {
-    return new KsmBucketArgs(bucketArgs.getVolumeName(),
-        bucketArgs.getBucketName(),
-        bucketArgs.getAddAclsList().stream().map(
-            KSMPBHelper::convertOzoneAcl).collect(Collectors.toList()),
-        bucketArgs.getRemoveAclsList().stream().map(
-            KSMPBHelper::convertOzoneAcl).collect(Collectors.toList()),
-        bucketArgs.hasIsVersionEnabled() ?
-            bucketArgs.getIsVersionEnabled() : null,
-        bucketArgs.hasStorageType() ? PBHelperClient.convertStorageType(
-            bucketArgs.getStorageType()) : null);
-  }
-}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hadoop/blob/061b1685/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/ksm/helpers/KsmBucketInfo.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/ksm/helpers/KsmBucketInfo.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/ksm/helpers/KsmBucketInfo.java
deleted file mode 100644
index a49137a..0000000
--- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/ksm/helpers/KsmBucketInfo.java
+++ /dev/null
@@ -1,235 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.ozone.ksm.helpers;
-
-import com.google.common.base.Preconditions;
-import org.apache.hadoop.fs.StorageType;
-import org.apache.hadoop.hdfs.protocolPB.PBHelperClient;
-import org.apache.hadoop.ozone.OzoneAcl;
-import org.apache.hadoop.ozone.protocol.proto
-    .KeySpaceManagerProtocolProtos.BucketInfo;
-import org.apache.hadoop.ozone.protocolPB.KSMPBHelper;
-
-import java.util.LinkedList;
-import java.util.List;
-import java.util.stream.Collectors;
-
-/**
- * A class that encapsulates Bucket Info.
- */
-public final class KsmBucketInfo {
-  /**
-   * Name of the volume in which the bucket belongs to.
-   */
-  private final String volumeName;
-  /**
-   * Name of the bucket.
-   */
-  private final String bucketName;
-  /**
-   * ACL Information.
-   */
-  private List<OzoneAcl> acls;
-  /**
-   * Bucket Version flag.
-   */
-  private Boolean isVersionEnabled;
-  /**
-   * Type of storage to be used for this bucket.
-   * [RAM_DISK, SSD, DISK, ARCHIVE]
-   */
-  private StorageType storageType;
-  /**
-   * Creation time of bucket.
-   */
-  private final long creationTime;
-
-  /**
-   * Private constructor, constructed via builder.
-   * @param volumeName - Volume name.
-   * @param bucketName - Bucket name.
-   * @param acls - list of ACLs.
-   * @param isVersionEnabled - Bucket version flag.
-   * @param storageType - Storage type to be used.
-   * @param creationTime - Bucket creation time.
-   */
-  private KsmBucketInfo(String volumeName, String bucketName,
-                        List<OzoneAcl> acls, boolean isVersionEnabled,
-                        StorageType storageType, long creationTime) {
-    this.volumeName = volumeName;
-    this.bucketName = bucketName;
-    this.acls = acls;
-    this.isVersionEnabled = isVersionEnabled;
-    this.storageType = storageType;
-    this.creationTime = creationTime;
-  }
-
-  /**
-   * Returns the Volume Name.
-   * @return String.
-   */
-  public String getVolumeName() {
-    return volumeName;
-  }
-
-  /**
-   * Returns the Bucket Name.
-   * @return String
-   */
-  public String getBucketName() {
-    return bucketName;
-  }
-
-  /**
-   * Returns the ACL's associated with this bucket.
-   * @return List<OzoneAcl>
-   */
-  public List<OzoneAcl> getAcls() {
-    return acls;
-  }
-
-  /**
-   * Returns true if bucket version is enabled, else false.
-   * @return isVersionEnabled
-   */
-  public boolean getIsVersionEnabled() {
-    return isVersionEnabled;
-  }
-
-  /**
-   * Returns the type of storage to be used.
-   * @return StorageType
-   */
-  public StorageType getStorageType() {
-    return storageType;
-  }
-
-  /**
-   * Returns creation time.
-   *
-   * @return long
-   */
-  public long getCreationTime() {
-    return creationTime;
-  }
-
-  /**
-   * Returns new builder class that builds a KsmBucketInfo.
-   *
-   * @return Builder
-   */
-  public static Builder newBuilder() {
-    return new Builder();
-  }
-
-  /**
-   * Builder for KsmBucketInfo.
-   */
-  public static class Builder {
-    private String volumeName;
-    private String bucketName;
-    private List<OzoneAcl> acls;
-    private Boolean isVersionEnabled;
-    private StorageType storageType;
-    private long creationTime;
-
-    Builder() {
-      //Default values
-      this.acls = new LinkedList<>();
-      this.isVersionEnabled = false;
-      this.storageType = StorageType.DISK;
-    }
-
-    public Builder setVolumeName(String volume) {
-      this.volumeName = volume;
-      return this;
-    }
-
-    public Builder setBucketName(String bucket) {
-      this.bucketName = bucket;
-      return this;
-    }
-
-    public Builder setAcls(List<OzoneAcl> listOfAcls) {
-      this.acls = listOfAcls;
-      return this;
-    }
-
-    public Builder setIsVersionEnabled(Boolean versionFlag) {
-      this.isVersionEnabled = versionFlag;
-      return this;
-    }
-
-    public Builder setStorageType(StorageType storage) {
-      this.storageType = storage;
-      return this;
-    }
-
-    public Builder setCreationTime(long createdOn) {
-      this.creationTime = createdOn;
-      return this;
-    }
-
-    /**
-     * Constructs the KsmBucketInfo.
-     * @return instance of KsmBucketInfo.
-     */
-    public KsmBucketInfo build() {
-      Preconditions.checkNotNull(volumeName);
-      Preconditions.checkNotNull(bucketName);
-      Preconditions.checkNotNull(acls);
-      Preconditions.checkNotNull(isVersionEnabled);
-      Preconditions.checkNotNull(storageType);
-
-      return new KsmBucketInfo(volumeName, bucketName, acls,
-          isVersionEnabled, storageType, creationTime);
-    }
-  }
-
-  /**
-   * Creates BucketInfo protobuf from KsmBucketInfo.
-   */
-  public BucketInfo getProtobuf() {
-    return BucketInfo.newBuilder()
-        .setVolumeName(volumeName)
-        .setBucketName(bucketName)
-        .addAllAcls(acls.stream().map(
-            KSMPBHelper::convertOzoneAcl).collect(Collectors.toList()))
-        .setIsVersionEnabled(isVersionEnabled)
-        .setStorageType(PBHelperClient.convertStorageType(
-            storageType))
-        .setCreationTime(creationTime)
-        .build();
-  }
-
-  /**
-   * Parses BucketInfo protobuf and creates KsmBucketInfo.
-   * @param bucketInfo
-   * @return instance of KsmBucketInfo
-   */
-  public static KsmBucketInfo getFromProtobuf(BucketInfo bucketInfo) {
-    return new KsmBucketInfo(
-        bucketInfo.getVolumeName(),
-        bucketInfo.getBucketName(),
-        bucketInfo.getAclsList().stream().map(
-            KSMPBHelper::convertOzoneAcl).collect(Collectors.toList()),
-        bucketInfo.getIsVersionEnabled(),
-        PBHelperClient.convertStorageType(
-            bucketInfo.getStorageType()), bucketInfo.getCreationTime());
-  }
-}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hadoop/blob/061b1685/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/ksm/helpers/KsmKeyArgs.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/ksm/helpers/KsmKeyArgs.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/ksm/helpers/KsmKeyArgs.java
deleted file mode 100644
index cd17e28..0000000
--- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/ksm/helpers/KsmKeyArgs.java
+++ /dev/null
@@ -1,119 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.ozone.ksm.helpers;
-import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationType;
-import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor;
-
-/**
- * Args for key. Client use this to specify key's attributes on  key creation
- * (putKey()).
- */
-public final class KsmKeyArgs {
-  private final String volumeName;
-  private final String bucketName;
-  private final String keyName;
-  private long dataSize;
-  private final ReplicationType type;
-  private final ReplicationFactor factor;
-
-  private KsmKeyArgs(String volumeName, String bucketName, String keyName,
-      long dataSize, ReplicationType type, ReplicationFactor factor) {
-    this.volumeName = volumeName;
-    this.bucketName = bucketName;
-    this.keyName = keyName;
-    this.dataSize = dataSize;
-    this.type = type;
-    this.factor = factor;
-  }
-
-  public ReplicationType getType() {
-    return type;
-  }
-
-  public ReplicationFactor getFactor() {
-    return factor;
-  }
-
-  public String getVolumeName() {
-    return volumeName;
-  }
-
-  public String getBucketName() {
-    return bucketName;
-  }
-
-  public String getKeyName() {
-    return keyName;
-  }
-
-  public long getDataSize() {
-    return dataSize;
-  }
-
-  public void setDataSize(long size) {
-    dataSize = size;
-  }
-
-  /**
-   * Builder class of KsmKeyArgs.
-   */
-  public static class Builder {
-    private String volumeName;
-    private String bucketName;
-    private String keyName;
-    private long dataSize;
-    private ReplicationType type;
-    private ReplicationFactor factor;
-
-
-    public Builder setVolumeName(String volume) {
-      this.volumeName = volume;
-      return this;
-    }
-
-    public Builder setBucketName(String bucket) {
-      this.bucketName = bucket;
-      return this;
-    }
-
-    public Builder setKeyName(String key) {
-      this.keyName = key;
-      return this;
-    }
-
-    public Builder setDataSize(long size) {
-      this.dataSize = size;
-      return this;
-    }
-
-    public Builder setType(ReplicationType replicationType) {
-      this.type = replicationType;
-      return this;
-    }
-
-    public Builder setFactor(ReplicationFactor replicationFactor) {
-      this.factor = replicationFactor;
-      return this;
-    }
-
-    public KsmKeyArgs build() {
-      return new KsmKeyArgs(volumeName, bucketName, keyName, dataSize,
-          type, factor);
-    }
-  }
-}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/061b1685/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/ksm/helpers/KsmKeyInfo.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/ksm/helpers/KsmKeyInfo.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/ksm/helpers/KsmKeyInfo.java
deleted file mode 100644
index 5d6e633..0000000
--- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/ksm/helpers/KsmKeyInfo.java
+++ /dev/null
@@ -1,277 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.ozone.ksm.helpers;
-
-import com.google.common.base.Preconditions;
-import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
-import org.apache.hadoop.ozone.protocol.proto.KeySpaceManagerProtocolProtos.KeyInfo;
-import org.apache.hadoop.util.Time;
-
-import java.io.IOException;
-import java.util.List;
-import java.util.stream.Collectors;
-
-/**
- * Args for key block. The block instance for the key requested in putKey.
- * This is returned from KSM to client, and client use class to talk to
- * datanode. Also, this is the metadata written to ksm.db on server side.
- */
-public final class KsmKeyInfo {
-  private final String volumeName;
-  private final String bucketName;
-  // name of key client specified
-  private String keyName;
-  private long dataSize;
-  private List<KsmKeyLocationInfoGroup> keyLocationVersions;
-  private final long creationTime;
-  private long modificationTime;
-  private HddsProtos.ReplicationType type;
-  private HddsProtos.ReplicationFactor factor;
-
-  private KsmKeyInfo(String volumeName, String bucketName, String keyName,
-      List<KsmKeyLocationInfoGroup> versions, long dataSize,
-      long creationTime, long modificationTime, HddsProtos.ReplicationType type,
-      HddsProtos.ReplicationFactor factor) {
-    this.volumeName = volumeName;
-    this.bucketName = bucketName;
-    this.keyName = keyName;
-    this.dataSize = dataSize;
-    // it is important that the versions are ordered from old to new.
-    // Do this sanity check when versions got loaded on creating KsmKeyInfo.
-    // TODO : this is not necessary, here only because versioning is still a
-    // work in-progress, remove this following check when versioning is
-    // complete and prove correctly functioning
-    long currentVersion = -1;
-    for (KsmKeyLocationInfoGroup version : versions) {
-      Preconditions.checkArgument(
-            currentVersion + 1 == version.getVersion());
-      currentVersion = version.getVersion();
-    }
-    this.keyLocationVersions = versions;
-    this.creationTime = creationTime;
-    this.modificationTime = modificationTime;
-    this.factor = factor;
-    this.type = type;
-  }
-
-  public String getVolumeName() {
-    return volumeName;
-  }
-
-  public String getBucketName() {
-    return bucketName;
-  }
-
-  public HddsProtos.ReplicationType getType() {
-    return type;
-  }
-
-  public HddsProtos.ReplicationFactor getFactor() {
-    return factor;
-  }
-
-  public String getKeyName() {
-    return keyName;
-  }
-
-  public void setKeyName(String keyName) {
-    this.keyName = keyName;
-  }
-
-  public long getDataSize() {
-    return dataSize;
-  }
-
-  public void setDataSize(long size) {
-    this.dataSize = size;
-  }
-
-  public synchronized KsmKeyLocationInfoGroup getLatestVersionLocations()
-      throws IOException {
-    return keyLocationVersions.size() == 0? null :
-        keyLocationVersions.get(keyLocationVersions.size() - 1);
-  }
-
-  public List<KsmKeyLocationInfoGroup> getKeyLocationVersions() {
-    return keyLocationVersions;
-  }
-
-  public void updateModifcationTime() {
-    this.modificationTime = Time.monotonicNow();
-  }
-
-  /**
-   * Append a set of blocks to the latest version. Note that these blocks are
-   * part of the latest version, not a new version.
-   *
-   * @param newLocationList the list of new blocks to be added.
-   * @throws IOException
-   */
-  public synchronized void appendNewBlocks(
-      List<KsmKeyLocationInfo> newLocationList) throws IOException {
-    if (keyLocationVersions.size() == 0) {
-      throw new IOException("Appending new block, but no version exist");
-    }
-    KsmKeyLocationInfoGroup currentLatestVersion =
-        keyLocationVersions.get(keyLocationVersions.size() - 1);
-    currentLatestVersion.appendNewBlocks(newLocationList);
-    setModificationTime(Time.now());
-  }
-
-  /**
-   * Add a new set of blocks. The new blocks will be added as appending a new
-   * version to the all version list.
-   *
-   * @param newLocationList the list of new blocks to be added.
-   * @throws IOException
-   */
-  public synchronized long addNewVersion(
-      List<KsmKeyLocationInfo> newLocationList) throws IOException {
-    long latestVersionNum;
-    if (keyLocationVersions.size() == 0) {
-      // no version exist, these blocks are the very first version.
-      keyLocationVersions.add(new KsmKeyLocationInfoGroup(0, newLocationList));
-      latestVersionNum = 0;
-    } else {
-      // it is important that the new version are always at the tail of the list
-      KsmKeyLocationInfoGroup currentLatestVersion =
-          keyLocationVersions.get(keyLocationVersions.size() - 1);
-      // the new version is created based on the current latest version
-      KsmKeyLocationInfoGroup newVersion =
-          currentLatestVersion.generateNextVersion(newLocationList);
-      keyLocationVersions.add(newVersion);
-      latestVersionNum = newVersion.getVersion();
-    }
-    setModificationTime(Time.now());
-    return latestVersionNum;
-  }
-
-  public long getCreationTime() {
-    return creationTime;
-  }
-
-  public long getModificationTime() {
-    return modificationTime;
-  }
-
-  public void setModificationTime(long modificationTime) {
-    this.modificationTime = modificationTime;
-  }
-
-  /**
-   * Builder of KsmKeyInfo.
-   */
-  public static class Builder {
-    private String volumeName;
-    private String bucketName;
-    private String keyName;
-    private long dataSize;
-    private List<KsmKeyLocationInfoGroup> ksmKeyLocationInfoGroups;
-    private long creationTime;
-    private long modificationTime;
-    private HddsProtos.ReplicationType type;
-    private HddsProtos.ReplicationFactor factor;
-
-    public Builder setVolumeName(String volume) {
-      this.volumeName = volume;
-      return this;
-    }
-
-    public Builder setBucketName(String bucket) {
-      this.bucketName = bucket;
-      return this;
-    }
-
-    public Builder setKeyName(String key) {
-      this.keyName = key;
-      return this;
-    }
-
-    public Builder setKsmKeyLocationInfos(
-        List<KsmKeyLocationInfoGroup> ksmKeyLocationInfoList) {
-      this.ksmKeyLocationInfoGroups = ksmKeyLocationInfoList;
-      return this;
-    }
-
-    public Builder setDataSize(long size) {
-      this.dataSize = size;
-      return this;
-    }
-
-    public Builder setCreationTime(long crTime) {
-      this.creationTime = crTime;
-      return this;
-    }
-
-    public Builder setModificationTime(long mTime) {
-      this.modificationTime = mTime;
-      return this;
-    }
-
-    public Builder setReplicationFactor(HddsProtos.ReplicationFactor factor) {
-      this.factor = factor;
-      return this;
-    }
-
-    public Builder setReplicationType(HddsProtos.ReplicationType type) {
-      this.type = type;
-      return this;
-    }
-
-    public KsmKeyInfo build() {
-      return new KsmKeyInfo(
-          volumeName, bucketName, keyName, ksmKeyLocationInfoGroups,
-          dataSize, creationTime, modificationTime, type, factor);
-    }
-  }
-
-  public KeyInfo getProtobuf() {
-    long latestVersion = keyLocationVersions.size() == 0 ? -1 :
-        keyLocationVersions.get(keyLocationVersions.size() - 1).getVersion();
-    return KeyInfo.newBuilder()
-        .setVolumeName(volumeName)
-        .setBucketName(bucketName)
-        .setKeyName(keyName)
-        .setDataSize(dataSize)
-        .setFactor(factor)
-        .setType(type)
-        .addAllKeyLocationList(keyLocationVersions.stream()
-            .map(KsmKeyLocationInfoGroup::getProtobuf)
-            .collect(Collectors.toList()))
-        .setLatestVersion(latestVersion)
-        .setCreationTime(creationTime)
-        .setModificationTime(modificationTime)
-        .build();
-  }
-
-  public static KsmKeyInfo getFromProtobuf(KeyInfo keyInfo) {
-    return new KsmKeyInfo(
-        keyInfo.getVolumeName(),
-        keyInfo.getBucketName(),
-        keyInfo.getKeyName(),
-        keyInfo.getKeyLocationListList().stream()
-            .map(KsmKeyLocationInfoGroup::getFromProtobuf)
-            .collect(Collectors.toList()),
-        keyInfo.getDataSize(),
-        keyInfo.getCreationTime(),
-        keyInfo.getModificationTime(),
-        keyInfo.getType(),
-        keyInfo.getFactor());
-  }
-
-}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/061b1685/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/ksm/helpers/KsmKeyLocationInfo.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/ksm/helpers/KsmKeyLocationInfo.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/ksm/helpers/KsmKeyLocationInfo.java
deleted file mode 100644
index 45feda0..0000000
--- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/ksm/helpers/KsmKeyLocationInfo.java
+++ /dev/null
@@ -1,129 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with this
- * work for additional information regarding copyright ownership.  The ASF
- * licenses this file to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations under
- * the License.
- */
-package org.apache.hadoop.ozone.ksm.helpers;
-
-import org.apache.hadoop.hdds.client.BlockID;
-import org.apache.hadoop.ozone.protocol.proto.KeySpaceManagerProtocolProtos.KeyLocation;
-
-/**
- * One key can be too huge to fit in one container. In which case it gets split
- * into a number of subkeys. This class represents one such subkey instance.
- */
-public final class KsmKeyLocationInfo {
-  private final BlockID blockID;
-  private final boolean shouldCreateContainer;
-  // the id of this subkey in all the subkeys.
-  private final long length;
-  private final long offset;
-  // the version number indicating when this block was added
-  private long createVersion;
-
-  private KsmKeyLocationInfo(BlockID blockID, boolean shouldCreateContainer,
-      long length, long offset) {
-    this.blockID = blockID;
-    this.shouldCreateContainer = shouldCreateContainer;
-    this.length = length;
-    this.offset = offset;
-  }
-
-  public void setCreateVersion(long version) {
-    createVersion = version;
-  }
-
-  public long getCreateVersion() {
-    return createVersion;
-  }
-
-  public BlockID getBlockID() {
-    return blockID;
-  }
-
-  public long getContainerID() {
-    return blockID.getContainerID();
-  }
-
-  public long getLocalID() {
-    return blockID.getLocalID();
-  }
-
-  public boolean getShouldCreateContainer() {
-    return shouldCreateContainer;
-  }
-
-  public long getLength() {
-    return length;
-  }
-
-  public long getOffset() {
-    return offset;
-  }
-
-  /**
-   * Builder of KsmKeyLocationInfo.
-   */
-  public static class Builder {
-    private BlockID blockID;
-    private boolean shouldCreateContainer;
-    private long length;
-    private long offset;
-
-    public Builder setBlockID(BlockID blockId) {
-      this.blockID = blockId;
-      return this;
-    }
-
-    public Builder setShouldCreateContainer(boolean create) {
-      this.shouldCreateContainer = create;
-      return this;
-    }
-
-    public Builder setLength(long len) {
-      this.length = len;
-      return this;
-    }
-
-    public Builder setOffset(long off) {
-      this.offset = off;
-      return this;
-    }
-
-    public KsmKeyLocationInfo build() {
-      return new KsmKeyLocationInfo(blockID,
-          shouldCreateContainer, length, offset);
-    }
-  }
-
-  public KeyLocation getProtobuf() {
-    return KeyLocation.newBuilder()
-        .setBlockID(blockID.getProtobuf())
-        .setShouldCreateContainer(shouldCreateContainer)
-        .setLength(length)
-        .setOffset(offset)
-        .setCreateVersion(createVersion)
-        .build();
-  }
-
-  public static KsmKeyLocationInfo getFromProtobuf(KeyLocation keyLocation) {
-    KsmKeyLocationInfo info = new KsmKeyLocationInfo(
-        BlockID.getFromProtobuf(keyLocation.getBlockID()),
-        keyLocation.getShouldCreateContainer(),
-        keyLocation.getLength(),
-        keyLocation.getOffset());
-    info.setCreateVersion(keyLocation.getCreateVersion());
-    return info;
-  }
-}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/061b1685/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/ksm/helpers/KsmKeyLocationInfoGroup.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/ksm/helpers/KsmKeyLocationInfoGroup.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/ksm/helpers/KsmKeyLocationInfoGroup.java
deleted file mode 100644
index 0facf3c..0000000
--- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/ksm/helpers/KsmKeyLocationInfoGroup.java
+++ /dev/null
@@ -1,118 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with this
- * work for additional information regarding copyright ownership.  The ASF
- * licenses this file to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations under
- * the License.
- */
-package org.apache.hadoop.ozone.ksm.helpers;
-
-import org.apache.hadoop.ozone.protocol.proto.KeySpaceManagerProtocolProtos.KeyLocationList;
-
-import java.io.IOException;
-import java.util.ArrayList;
-import java.util.List;
-import java.util.stream.Collectors;
-
-/**
- * A list of key locations. This class represents one single version of the
- * blocks of a key.
- */
-public class KsmKeyLocationInfoGroup {
-  private final long version;
-  private final List<KsmKeyLocationInfo> locationList;
-
-  public KsmKeyLocationInfoGroup(long version,
-      List<KsmKeyLocationInfo> locations) {
-    this.version = version;
-    this.locationList = locations;
-  }
-
-  /**
-   * Return only the blocks that are created in the most recent version.
-   *
-   * @return the list of blocks that are created in the latest version.
-   */
-  public List<KsmKeyLocationInfo> getBlocksLatestVersionOnly() {
-    List<KsmKeyLocationInfo> list = new ArrayList<>();
-    locationList.stream().filter(x -> x.getCreateVersion() == version)
-        .forEach(list::add);
-    return list;
-  }
-
-  public long getVersion() {
-    return version;
-  }
-
-  public List<KsmKeyLocationInfo> getLocationList() {
-    return locationList;
-  }
-
-  public KeyLocationList getProtobuf() {
-    return KeyLocationList.newBuilder()
-        .setVersion(version)
-        .addAllKeyLocations(
-            locationList.stream().map(KsmKeyLocationInfo::getProtobuf)
-                .collect(Collectors.toList()))
-        .build();
-  }
-
-  public static KsmKeyLocationInfoGroup getFromProtobuf(
-      KeyLocationList keyLocationList) {
-    return new KsmKeyLocationInfoGroup(
-        keyLocationList.getVersion(),
-        keyLocationList.getKeyLocationsList().stream()
-            .map(KsmKeyLocationInfo::getFromProtobuf)
-            .collect(Collectors.toList()));
-  }
-
-  /**
-   * Given a new block location, generate a new version list based upon this
-   * one.
-   *
-   * @param newLocationList a list of new location to be added.
-   * @return
-   */
-  KsmKeyLocationInfoGroup generateNextVersion(
-      List<KsmKeyLocationInfo> newLocationList) throws IOException {
-    // TODO : revisit if we can do this method more efficiently
-    // one potential inefficiency here is that later version always include
-    // older ones. e.g. v1 has B1, then v2, v3...will all have B1 and only add
-    // more
-    List<KsmKeyLocationInfo> newList = new ArrayList<>();
-    newList.addAll(locationList);
-    for (KsmKeyLocationInfo newInfo : newLocationList) {
-      // all these new blocks will have addVersion of current version + 1
-      newInfo.setCreateVersion(version + 1);
-      newList.add(newInfo);
-    }
-    return new KsmKeyLocationInfoGroup(version + 1, newList);
-  }
-
-  void appendNewBlocks(List<KsmKeyLocationInfo> newLocationList)
-      throws IOException {
-    for (KsmKeyLocationInfo info : newLocationList) {
-      info.setCreateVersion(version);
-      locationList.add(info);
-    }
-  }
-
-  @Override
-  public String toString() {
-    StringBuilder sb = new StringBuilder();
-    sb.append("version:").append(version).append(" ");
-    for (KsmKeyLocationInfo kli : locationList) {
-      sb.append(kli.getLocalID()).append(" || ");
-    }
-    return sb.toString();
-  }
-}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/061b1685/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/ksm/helpers/KsmOzoneAclMap.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/ksm/helpers/KsmOzoneAclMap.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/ksm/helpers/KsmOzoneAclMap.java
deleted file mode 100644
index 7d9efad..0000000
--- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/ksm/helpers/KsmOzoneAclMap.java
+++ /dev/null
@@ -1,110 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- *  with the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- *  Unless required by applicable law or agreed to in writing, software
- *  distributed under the License is distributed on an "AS IS" BASIS,
- *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- *  See the License for the specific language governing permissions and
- *  limitations under the License.
- */
-
-package org.apache.hadoop.ozone.ksm.helpers;
-
-import org.apache.hadoop.ozone.protocol.proto
-    .KeySpaceManagerProtocolProtos.OzoneAclInfo;
-import org.apache.hadoop.ozone.protocol.proto
-    .KeySpaceManagerProtocolProtos.OzoneAclInfo.OzoneAclRights;
-import org.apache.hadoop.ozone.protocol.proto
-    .KeySpaceManagerProtocolProtos.OzoneAclInfo.OzoneAclType;
-
-import java.util.List;
-import java.util.LinkedList;
-import java.util.Map;
-import java.util.ArrayList;
-import java.util.HashMap;
-
-/**
- * This helper class keeps a map of all user and their permissions.
- */
-public class KsmOzoneAclMap {
-  // per Acl Type user:rights map
-  private ArrayList<Map<String, OzoneAclRights>> aclMaps;
-
-  KsmOzoneAclMap() {
-    aclMaps = new ArrayList<>();
-    for (OzoneAclType aclType : OzoneAclType.values()) {
-      aclMaps.add(aclType.ordinal(), new HashMap<>());
-    }
-  }
-
-  private Map<String, OzoneAclRights> getMap(OzoneAclType type) {
-    return aclMaps.get(type.ordinal());
-  }
-
-  // For a given acl type and user, get the stored acl
-  private OzoneAclRights getAcl(OzoneAclType type, String user) {
-    return getMap(type).get(user);
-  }
-
-  // Add a new acl to the map
-  public void addAcl(OzoneAclInfo acl) {
-    getMap(acl.getType()).put(acl.getName(), acl.getRights());
-  }
-
-  // for a given acl, check if the user has access rights
-  public boolean hasAccess(OzoneAclInfo acl) {
-    OzoneAclRights storedRights = getAcl(acl.getType(), acl.getName());
-    if (storedRights != null) {
-      switch (acl.getRights()) {
-      case READ:
-        return (storedRights == OzoneAclRights.READ)
-            || (storedRights == OzoneAclRights.READ_WRITE);
-      case WRITE:
-        return (storedRights == OzoneAclRights.WRITE)
-            || (storedRights == OzoneAclRights.READ_WRITE);
-      case READ_WRITE:
-        return (storedRights == OzoneAclRights.READ_WRITE);
-      default:
-        return false;
-      }
-    } else {
-      return false;
-    }
-  }
-
-  // Convert this map to OzoneAclInfo Protobuf List
-  public List<OzoneAclInfo> ozoneAclGetProtobuf() {
-    List<OzoneAclInfo> aclList = new LinkedList<>();
-    for (OzoneAclType type: OzoneAclType.values()) {
-      for (Map.Entry<String, OzoneAclRights> entry :
-          aclMaps.get(type.ordinal()).entrySet()) {
-        OzoneAclInfo aclInfo = OzoneAclInfo.newBuilder()
-            .setName(entry.getKey())
-            .setType(type)
-            .setRights(entry.getValue())
-            .build();
-        aclList.add(aclInfo);
-      }
-    }
-
-    return aclList;
-  }
-
-  // Create map from list of OzoneAclInfos
-  public static KsmOzoneAclMap ozoneAclGetFromProtobuf(
-      List<OzoneAclInfo> aclList) {
-    KsmOzoneAclMap aclMap = new KsmOzoneAclMap();
-    for (OzoneAclInfo acl : aclList) {
-      aclMap.addAcl(acl);
-    }
-    return aclMap;
-  }
-}


---------------------------------------------------------------------
To unsubscribe, e-mail: common-commits-unsubscribe@hadoop.apache.org
For additional commands, e-mail: common-commits-help@hadoop.apache.org


[45/50] [abbrv] hadoop git commit: HDDS-217. Move all SCMEvents to a package. Contributed by Anu Engineer.

Posted by vi...@apache.org.
HDDS-217. Move all SCMEvents to a package.
Contributed by Anu Engineer.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/2f51cd60
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/2f51cd60
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/2f51cd60

Branch: refs/heads/HDFS-12090
Commit: 2f51cd60ef082cd0360fe46e9d2a4ec9b8ed979a
Parents: 936e0df
Author: Anu Engineer <ae...@apache.org>
Authored: Sun Jul 8 11:11:21 2018 -0700
Committer: Anu Engineer <ae...@apache.org>
Committed: Sun Jul 8 11:11:21 2018 -0700

----------------------------------------------------------------------
 .../container/CloseContainerEventHandler.java   | 13 ++--
 .../hadoop/hdds/scm/events/SCMEvents.java       | 80 ++++++++++++++++++++
 .../hadoop/hdds/scm/events/package-info.java    | 23 ++++++
 .../hadoop/hdds/scm/node/SCMNodeManager.java    |  5 +-
 .../server/SCMDatanodeHeartbeatDispatcher.java  | 11 ++-
 .../scm/server/StorageContainerManager.java     |  7 +-
 .../TestCloseContainerEventHandler.java         | 27 ++++---
 .../hadoop/hdds/scm/node/TestNodeManager.java   | 10 +--
 .../TestSCMDatanodeHeartbeatDispatcher.java     | 20 +++--
 9 files changed, 147 insertions(+), 49 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/2f51cd60/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/CloseContainerEventHandler.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/CloseContainerEventHandler.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/CloseContainerEventHandler.java
index 7b24538..f1053d5 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/CloseContainerEventHandler.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/CloseContainerEventHandler.java
@@ -24,15 +24,14 @@ import org.apache.hadoop.hdds.scm.container.common.helpers.ContainerWithPipeline
 import org.apache.hadoop.hdds.scm.exceptions.SCMException;
 import org.apache.hadoop.hdds.server.events.EventHandler;
 import org.apache.hadoop.hdds.server.events.EventPublisher;
-import org.apache.hadoop.hdds.server.events.TypedEvent;
 import org.apache.hadoop.ozone.protocol.commands.CloseContainerCommand;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
 /**
  * In case of a node failure, volume failure, volume out of spapce, node
- * out of space etc, CLOSE_CONTAINER_EVENT will be triggered.
- * CloseContainerEventHandler is the handler for CLOSE_CONTAINER_EVENT.
+ * out of space etc, CLOSE_CONTAINER will be triggered.
+ * CloseContainerEventHandler is the handler for CLOSE_CONTAINER.
  * When a close container event is fired, a close command for the container
  * should be sent to all the datanodes in the pipeline and containerStateManager
  * needs to update the container state to Closing.
@@ -42,8 +41,6 @@ public class CloseContainerEventHandler implements EventHandler<ContainerID> {
   public static final Logger LOG =
       LoggerFactory.getLogger(CloseContainerEventHandler.class);
 
-  public static final TypedEvent<ContainerID> CLOSE_CONTAINER_EVENT =
-            new TypedEvent<>(ContainerID.class);
 
   private final Mapping containerManager;
 
@@ -59,7 +56,8 @@ public class CloseContainerEventHandler implements EventHandler<ContainerID> {
     ContainerWithPipeline containerWithPipeline = null;
     ContainerInfo info;
     try {
-      containerWithPipeline = containerManager.getContainerWithPipeline(containerID.getId());
+      containerWithPipeline =
+          containerManager.getContainerWithPipeline(containerID.getId());
       info = containerWithPipeline.getContainerInfo();
       if (info == null) {
         LOG.info("Failed to update the container state. Container with id : {} "
@@ -73,7 +71,8 @@ public class CloseContainerEventHandler implements EventHandler<ContainerID> {
     }
 
     if (info.getState() == HddsProtos.LifeCycleState.OPEN) {
-      for (DatanodeDetails datanode : containerWithPipeline.getPipeline().getMachines()) {
+      for (DatanodeDetails datanode :
+          containerWithPipeline.getPipeline().getMachines()) {
         containerManager.getNodeManager().addDatanodeCommand(datanode.getUuid(),
             new CloseContainerCommand(containerID.getId(),
                 info.getReplicationType()));

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2f51cd60/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/events/SCMEvents.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/events/SCMEvents.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/events/SCMEvents.java
new file mode 100644
index 0000000..2c9c431
--- /dev/null
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/events/SCMEvents.java
@@ -0,0 +1,80 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ *  with the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package org.apache.hadoop.hdds.scm.events;
+
+import org.apache.hadoop.hdds.scm.container.ContainerID;
+import org.apache.hadoop.hdds.scm.server.SCMDatanodeHeartbeatDispatcher.ContainerReportFromDatanode;
+import org.apache.hadoop.hdds.scm.server.SCMDatanodeHeartbeatDispatcher.NodeReportFromDatanode;
+
+import org.apache.hadoop.hdds.server.events.Event;
+import org.apache.hadoop.hdds.server.events.TypedEvent;
+import org.apache.hadoop.ozone.protocol.commands.CommandForDatanode;
+
+/**
+ * Class that acts as the namespace for all SCM Events.
+ */
+public final class SCMEvents {
+
+  /**
+   * NodeReports are  sent out by Datanodes. This report is
+   * received by SCMDatanodeHeartbeatDispatcher and NodeReport Event is
+   * generated.
+   */
+  public static final TypedEvent<NodeReportFromDatanode> NODE_REPORT =
+      new TypedEvent<>(NodeReportFromDatanode.class, "Node_Report");
+  /**
+   * ContainerReports are send out by Datanodes. This report
+   * is received by SCMDatanodeHeartbeatDispatcher and Container_Report Event
+   * i generated.
+   */
+  public static final TypedEvent<ContainerReportFromDatanode> CONTAINER_REPORT =
+      new TypedEvent<>(ContainerReportFromDatanode.class, "Container_Report");
+
+  /**
+   * When ever a command for the Datanode needs to be issued by any component
+   * inside SCM, a Datanode_Command event is generated. NodeManager listens
+   * to these events and dispatches them to Datanode for further processing.
+   */
+  public static final Event<CommandForDatanode> DATANODE_COMMAND =
+      new TypedEvent<>(CommandForDatanode.class, "Datanode_Command");
+
+  /**
+   * A Close Container Event can be triggered under many condition.
+   * Some of them are:
+   *    1. A Container is full, then we stop writing further information to
+   *    that container. DN's let SCM know that current state and sends a
+   *    informational message that allows SCM to close the container.
+   *
+   *    2. If a pipeline is open; for example Ratis; if a single node fails,
+   *    we will proactively close these containers.
+   *
+   *  Once a command is dispatched to DN, we will also listen to updates from
+   *  the datanode which lets us know that this command completed or timed out.
+   */
+  public static final TypedEvent<ContainerID> CLOSE_CONTAINER =
+      new TypedEvent<>(ContainerID.class, "Close_Container");
+
+  /**
+   * Private Ctor. Never Constructed.
+   */
+  private SCMEvents() {
+  }
+
+}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2f51cd60/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/events/package-info.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/events/package-info.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/events/package-info.java
new file mode 100644
index 0000000..46181a3
--- /dev/null
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/events/package-info.java
@@ -0,0 +1,23 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ *  with the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+/**
+ * Events Package contains all the Events used by SCM internally to
+ * communicate between different sub-systems that make up SCM.
+ */
+package org.apache.hadoop.hdds.scm.events;
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2f51cd60/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/SCMNodeManager.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/SCMNodeManager.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/SCMNodeManager.java
index 15ac3f2..664a80f 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/SCMNodeManager.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/SCMNodeManager.java
@@ -25,10 +25,8 @@ import org.apache.hadoop.hdds.scm.server.StorageContainerManager;
 import org.apache.hadoop.hdds.scm.VersionInfo;
 import org.apache.hadoop.hdds.scm.container.placement.metrics.SCMNodeMetric;
 import org.apache.hadoop.hdds.scm.container.placement.metrics.SCMNodeStat;
-import org.apache.hadoop.hdds.server.events.Event;
 import org.apache.hadoop.hdds.server.events.EventHandler;
 import org.apache.hadoop.hdds.server.events.EventPublisher;
-import org.apache.hadoop.hdds.server.events.TypedEvent;
 import org.apache.hadoop.hdds.conf.OzoneConfiguration;
 import org.apache.hadoop.hdds.protocol.DatanodeDetails;
 import org.apache.hadoop.hdds.protocol.proto.HddsProtos.NodeState;
@@ -118,8 +116,7 @@ public class SCMNodeManager
   // Node pool manager.
   private final StorageContainerManager scmManager;
 
-  public static final Event<CommandForDatanode> DATANODE_COMMAND =
-      new TypedEvent<>(CommandForDatanode.class, "DATANODE_COMMAND");
+
 
   /**
    * Constructs SCM machine Manager.

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2f51cd60/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMDatanodeHeartbeatDispatcher.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMDatanodeHeartbeatDispatcher.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMDatanodeHeartbeatDispatcher.java
index f221584..a6354af 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMDatanodeHeartbeatDispatcher.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMDatanodeHeartbeatDispatcher.java
@@ -25,12 +25,14 @@ import org.apache.hadoop.hdds.protocol.proto
 import org.apache.hadoop.hdds.protocol.proto
     .StorageContainerDatanodeProtocolProtos.SCMHeartbeatRequestProto;
 import org.apache.hadoop.hdds.server.events.EventPublisher;
-import org.apache.hadoop.hdds.server.events.TypedEvent;
 
 import com.google.protobuf.GeneratedMessage;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
+import static org.apache.hadoop.hdds.scm.events.SCMEvents.CONTAINER_REPORT;
+import static org.apache.hadoop.hdds.scm.events.SCMEvents.NODE_REPORT;
+
 /**
  * This class is responsible for dispatching heartbeat from datanode to
  * appropriate EventHandler at SCM.
@@ -42,11 +44,6 @@ public final class SCMDatanodeHeartbeatDispatcher {
 
   private EventPublisher eventPublisher;
 
-  public static final TypedEvent<NodeReportFromDatanode> NODE_REPORT =
-      new TypedEvent<>(NodeReportFromDatanode.class);
-
-  public static final TypedEvent<ContainerReportFromDatanode> CONTAINER_REPORT =
-      new TypedEvent<ContainerReportFromDatanode>(ContainerReportFromDatanode.class);
 
   public SCMDatanodeHeartbeatDispatcher(EventPublisher eventPublisher) {
     this.eventPublisher = eventPublisher;
@@ -63,12 +60,14 @@ public final class SCMDatanodeHeartbeatDispatcher {
         DatanodeDetails.getFromProtoBuf(heartbeat.getDatanodeDetails());
     // should we dispatch heartbeat through eventPublisher?
     if (heartbeat.hasNodeReport()) {
+      LOG.debug("Dispatching Node Report.");
       eventPublisher.fireEvent(NODE_REPORT,
           new NodeReportFromDatanode(datanodeDetails,
               heartbeat.getNodeReport()));
     }
 
     if (heartbeat.hasContainerReport()) {
+      LOG.debug("Dispatching Container Report.");
       eventPublisher.fireEvent(CONTAINER_REPORT,
           new ContainerReportFromDatanode(datanodeDetails,
               heartbeat.getContainerReport()));

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2f51cd60/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/StorageContainerManager.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/StorageContainerManager.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/StorageContainerManager.java
index 568a86a..49d3a40 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/StorageContainerManager.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/StorageContainerManager.java
@@ -70,6 +70,8 @@ import java.util.concurrent.TimeUnit;
 
 import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_DB_CACHE_SIZE_DEFAULT;
 import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_DB_CACHE_SIZE_MB;
+
+import static org.apache.hadoop.hdds.scm.events.SCMEvents.DATANODE_COMMAND;
 import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_ENABLED;
 import static org.apache.hadoop.util.ExitUtil.terminate;
 
@@ -164,9 +166,10 @@ public final class StorageContainerManager extends ServiceRuntimeInfoImpl
     }
     EventQueue eventQueue = new EventQueue();
 
-    SCMNodeManager nm = new SCMNodeManager(conf, scmStorage.getClusterID(), this);
+    SCMNodeManager nm =
+        new SCMNodeManager(conf, scmStorage.getClusterID(), this);
     scmNodeManager = nm;
-    eventQueue.addHandler(SCMNodeManager.DATANODE_COMMAND, nm);
+    eventQueue.addHandler(DATANODE_COMMAND, nm);
 
     scmContainerManager = new ContainerMapping(conf, getScmNodeManager(),
         cacheSize);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2f51cd60/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/TestCloseContainerEventHandler.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/TestCloseContainerEventHandler.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/TestCloseContainerEventHandler.java
index 721dbf6..0d46ffa 100644
--- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/TestCloseContainerEventHandler.java
+++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/TestCloseContainerEventHandler.java
@@ -17,11 +17,13 @@
 
 package org.apache.hadoop.hdds.scm.container;
 
+import org.apache.commons.lang3.RandomUtils;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileUtil;
 import org.apache.hadoop.hdds.protocol.DatanodeDetails;
 import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
-import org.apache.hadoop.hdds.scm.container.common.helpers.ContainerWithPipeline;
+import org.apache.hadoop.hdds.scm.container.common.helpers
+    .ContainerWithPipeline;
 import org.apache.hadoop.hdds.server.events.EventQueue;
 import org.apache.hadoop.ozone.OzoneConfigKeys;
 import org.apache.hadoop.ozone.container.common.SCMTestUtils;
@@ -33,12 +35,12 @@ import org.junit.Test;
 
 import java.io.File;
 import java.io.IOException;
-import java.util.Random;
 
 import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.LifeCycleEvent.CREATE;
 import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.LifeCycleEvent.CREATED;
 import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_CONTAINER_SIZE_DEFAULT;
 import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_CONTAINER_SIZE_GB;
+import static org.apache.hadoop.hdds.scm.events.SCMEvents.CLOSE_CONTAINER;
 
 /**
  * Tests the closeContainerEventHandler class.
@@ -65,7 +67,7 @@ public class TestCloseContainerEventHandler {
     nodeManager = new MockNodeManager(true, 10);
     mapping = new ContainerMapping(configuration, nodeManager, 128);
     eventQueue = new EventQueue();
-    eventQueue.addHandler(CloseContainerEventHandler.CLOSE_CONTAINER_EVENT,
+    eventQueue.addHandler(CLOSE_CONTAINER,
         new CloseContainerEventHandler(mapping));
   }
 
@@ -81,8 +83,8 @@ public class TestCloseContainerEventHandler {
   public void testIfCloseContainerEventHadnlerInvoked() {
     GenericTestUtils.LogCapturer logCapturer = GenericTestUtils.LogCapturer
         .captureLogs(CloseContainerEventHandler.LOG);
-    eventQueue.fireEvent(CloseContainerEventHandler.CLOSE_CONTAINER_EVENT,
-        new ContainerID(Math.abs(new Random().nextLong())));
+    eventQueue.fireEvent(CLOSE_CONTAINER,
+        new ContainerID(Math.abs(RandomUtils.nextInt())));
     eventQueue.processAll(1000);
     Assert.assertTrue(logCapturer.getOutput()
         .contains("Close container Event triggered for container"));
@@ -90,10 +92,10 @@ public class TestCloseContainerEventHandler {
 
   @Test
   public void testCloseContainerEventWithInvalidContainer() {
-    long id = Math.abs(new Random().nextLong());
+    long id = Math.abs(RandomUtils.nextInt());
     GenericTestUtils.LogCapturer logCapturer = GenericTestUtils.LogCapturer
         .captureLogs(CloseContainerEventHandler.LOG);
-    eventQueue.fireEvent(CloseContainerEventHandler.CLOSE_CONTAINER_EVENT,
+    eventQueue.fireEvent(CLOSE_CONTAINER,
         new ContainerID(id));
     eventQueue.processAll(1000);
     Assert.assertTrue(logCapturer.getOutput()
@@ -112,7 +114,7 @@ public class TestCloseContainerEventHandler {
         containerWithPipeline.getContainerInfo().getContainerID());
     DatanodeDetails datanode = containerWithPipeline.getPipeline().getLeader();
     int closeCount = nodeManager.getCommandCount(datanode);
-    eventQueue.fireEvent(CloseContainerEventHandler.CLOSE_CONTAINER_EVENT, id);
+    eventQueue.fireEvent(CLOSE_CONTAINER, id);
     eventQueue.processAll(1000);
     // At this point of time, the allocated container is not in open
     // state, so firing close container event should not queue CLOSE
@@ -125,11 +127,12 @@ public class TestCloseContainerEventHandler {
     //Execute these state transitions so that we can close the container.
     mapping.updateContainerState(id.getId(), CREATE);
     mapping.updateContainerState(id.getId(), CREATED);
-    eventQueue.fireEvent(CloseContainerEventHandler.CLOSE_CONTAINER_EVENT,
+    eventQueue.fireEvent(CLOSE_CONTAINER,
         new ContainerID(
             containerWithPipeline.getContainerInfo().getContainerID()));
     eventQueue.processAll(1000);
-    Assert.assertEquals(closeCount + 1, nodeManager.getCommandCount(datanode));
+    Assert.assertEquals(closeCount + 1,
+        nodeManager.getCommandCount(datanode));
     Assert.assertEquals(HddsProtos.LifeCycleState.CLOSING,
         mapping.getStateManager().getContainer(id).getState());
   }
@@ -145,7 +148,7 @@ public class TestCloseContainerEventHandler {
     ContainerID id = new ContainerID(
         containerWithPipeline.getContainerInfo().getContainerID());
     int[] closeCount = new int[3];
-    eventQueue.fireEvent(CloseContainerEventHandler.CLOSE_CONTAINER_EVENT, id);
+    eventQueue.fireEvent(CLOSE_CONTAINER, id);
     eventQueue.processAll(1000);
     int i = 0;
     for (DatanodeDetails details : containerWithPipeline.getPipeline()
@@ -166,7 +169,7 @@ public class TestCloseContainerEventHandler {
     //Execute these state transitions so that we can close the container.
     mapping.updateContainerState(id.getId(), CREATE);
     mapping.updateContainerState(id.getId(), CREATED);
-    eventQueue.fireEvent(CloseContainerEventHandler.CLOSE_CONTAINER_EVENT, id);
+    eventQueue.fireEvent(CLOSE_CONTAINER, id);
     eventQueue.processAll(1000);
     i = 0;
     // Make sure close is queued for each datanode on the pipeline

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2f51cd60/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestNodeManager.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestNodeManager.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestNodeManager.java
index 0a4e33d..d72309e 100644
--- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestNodeManager.java
+++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestNodeManager.java
@@ -68,6 +68,7 @@ import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.NodeState.DEAD;
 import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.NodeState
     .HEALTHY;
 import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.NodeState.STALE;
+import static org.apache.hadoop.hdds.scm.events.SCMEvents.DATANODE_COMMAND;
 import static org.hamcrest.CoreMatchers.containsString;
 import static org.hamcrest.core.StringStartsWith.startsWith;
 import static org.junit.Assert.assertEquals;
@@ -1068,11 +1069,6 @@ public class TestNodeManager {
       foundRemaining = nodeManager.getStats().getRemaining().get();
       assertEquals(0, foundRemaining);
 
-      // Send a new report to bring the dead node back to healthy
-      String storagePath = testDir.getAbsolutePath() + "/" + dnId;
-      List<StorageReportProto> reports = TestUtils
-          .createStorageReport(capacity, expectedScmUsed, expectedRemaining,
-              storagePath, null, dnId, 1);
       nodeManager.processHeartbeat(datanodeDetails);
 
       // Wait up to 5 seconds so that the dead node becomes healthy
@@ -1111,11 +1107,11 @@ public class TestNodeManager {
 
     EventQueue eq = new EventQueue();
     try (SCMNodeManager nodemanager = createNodeManager(conf)) {
-      eq.addHandler(SCMNodeManager.DATANODE_COMMAND, nodemanager);
+      eq.addHandler(DATANODE_COMMAND, nodemanager);
 
       nodemanager
           .register(datanodeDetails, TestUtils.createNodeReport(reports));
-      eq.fireEvent(SCMNodeManager.DATANODE_COMMAND,
+      eq.fireEvent(DATANODE_COMMAND,
           new CommandForDatanode(datanodeDetails.getUuid(),
               new CloseContainerCommand(1L, ReplicationType.STAND_ALONE)));
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2f51cd60/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/server/TestSCMDatanodeHeartbeatDispatcher.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/server/TestSCMDatanodeHeartbeatDispatcher.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/server/TestSCMDatanodeHeartbeatDispatcher.java
index 326a34b..a77ed04 100644
--- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/server/TestSCMDatanodeHeartbeatDispatcher.java
+++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/server/TestSCMDatanodeHeartbeatDispatcher.java
@@ -20,8 +20,6 @@ package org.apache.hadoop.hdds.scm.server;
 import java.io.IOException;
 import java.util.concurrent.atomic.AtomicInteger;
 
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hdds.conf.OzoneConfiguration;
 import org.apache.hadoop.hdds.protocol.DatanodeDetails;
 import org.apache.hadoop.hdds.protocol.proto
     .StorageContainerDatanodeProtocolProtos.ContainerReportsProto;
@@ -40,6 +38,9 @@ import org.apache.hadoop.hdds.server.events.EventPublisher;
 import org.junit.Assert;
 import org.junit.Test;
 
+import static org.apache.hadoop.hdds.scm.events.SCMEvents.CONTAINER_REPORT;
+import static org.apache.hadoop.hdds.scm.events.SCMEvents.NODE_REPORT;
+
 /**
  * This class tests the behavior of SCMDatanodeHeartbeatDispatcher.
  */
@@ -49,8 +50,6 @@ public class TestSCMDatanodeHeartbeatDispatcher {
   @Test
   public void testNodeReportDispatcher() throws IOException {
 
-    Configuration conf = new OzoneConfiguration();
-
     AtomicInteger eventReceived = new AtomicInteger();
 
     NodeReportProto nodeReport = NodeReportProto.getDefaultInstance();
@@ -60,10 +59,10 @@ public class TestSCMDatanodeHeartbeatDispatcher {
           @Override
           public <PAYLOAD, EVENT_TYPE extends Event<PAYLOAD>> void fireEvent(
               EVENT_TYPE event, PAYLOAD payload) {
-            Assert.assertEquals(event,
-                SCMDatanodeHeartbeatDispatcher.NODE_REPORT);
+            Assert.assertEquals(event, NODE_REPORT);
             eventReceived.incrementAndGet();
-            Assert.assertEquals(nodeReport, ((NodeReportFromDatanode)payload).getReport());
+            Assert.assertEquals(nodeReport,
+                ((NodeReportFromDatanode)payload).getReport());
 
           }
         });
@@ -84,7 +83,6 @@ public class TestSCMDatanodeHeartbeatDispatcher {
   @Test
   public void testContainerReportDispatcher() throws IOException {
 
-    Configuration conf = new OzoneConfiguration();
 
     AtomicInteger eventReceived = new AtomicInteger();
 
@@ -96,9 +94,9 @@ public class TestSCMDatanodeHeartbeatDispatcher {
           @Override
           public <PAYLOAD, EVENT_TYPE extends Event<PAYLOAD>> void fireEvent(
               EVENT_TYPE event, PAYLOAD payload) {
-            Assert.assertEquals(event,
-                SCMDatanodeHeartbeatDispatcher.CONTAINER_REPORT);
-            Assert.assertEquals(containerReport, ((ContainerReportFromDatanode)payload).getReport());
+            Assert.assertEquals(event, CONTAINER_REPORT);
+            Assert.assertEquals(containerReport,
+                ((ContainerReportFromDatanode)payload).getReport());
             eventReceived.incrementAndGet();
           }
         });


---------------------------------------------------------------------
To unsubscribe, e-mail: common-commits-unsubscribe@hadoop.apache.org
For additional commands, e-mail: common-commits-help@hadoop.apache.org


[24/50] [abbrv] hadoop git commit: YARN-7451. Add missing tests to verify the presence of custom resources of RM apps and scheduler webservice endpoints (snemeth via rkanter)

Posted by vi...@apache.org.
http://git-wip-us.apache.org/repos/asf/hadoop/blob/a129e3e7/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/helper/AppInfoXmlVerifications.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/helper/AppInfoXmlVerifications.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/helper/AppInfoXmlVerifications.java
new file mode 100644
index 0000000..7c5b6db
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/helper/AppInfoXmlVerifications.java
@@ -0,0 +1,132 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.server.resourcemanager.webapp.helper;
+
+import org.apache.hadoop.yarn.server.resourcemanager.ResourceManager;
+import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMApp;
+import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.AppInfo;
+import org.apache.hadoop.yarn.webapp.WebServicesTestUtils;
+import org.w3c.dom.Element;
+import static org.apache.hadoop.yarn.webapp.WebServicesTestUtils.checkStringMatch;
+import static org.apache.hadoop.yarn.webapp.WebServicesTestUtils.getXmlBoolean;
+import static org.apache.hadoop.yarn.webapp.WebServicesTestUtils.getXmlFloat;
+import static org.apache.hadoop.yarn.webapp.WebServicesTestUtils.getXmlInt;
+import static org.apache.hadoop.yarn.webapp.WebServicesTestUtils.getXmlLong;
+import static org.apache.hadoop.yarn.webapp.WebServicesTestUtils.getXmlString;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertNotNull;
+import static org.junit.Assert.assertTrue;
+
+/**
+ * Contains all value verifications that are needed to verify {@link AppInfo}
+ * XML documents.
+ */
+public final class AppInfoXmlVerifications {
+
+  private AppInfoXmlVerifications() {
+    //utility class
+  }
+
+  /**
+   * Tests whether {@link AppInfo} representation object contains the required
+   * values as per defined in the specified app parameter.
+   * @param info
+   * @param  app  an RMApp instance that contains the required values
+   */
+  public static void verify(Element info, RMApp app) {
+    checkStringMatch("id", app.getApplicationId()
+            .toString(), getXmlString(info, "id"));
+    checkStringMatch("user", app.getUser(),
+            getXmlString(info, "user"));
+    checkStringMatch("name", app.getName(),
+            getXmlString(info, "name"));
+    checkStringMatch("applicationType",
+            app.getApplicationType(), getXmlString(info, "applicationType"));
+    checkStringMatch("queue", app.getQueue(),
+            getXmlString(info, "queue"));
+    assertEquals("priority doesn't match", 0, getXmlInt(info, "priority"));
+    checkStringMatch("state", app.getState().toString(),
+            getXmlString(info, "state"));
+    checkStringMatch("finalStatus", app
+            .getFinalApplicationStatus().toString(),
+            getXmlString(info, "finalStatus"));
+    assertEquals("progress doesn't match", 0, getXmlFloat(info, "progress"),
+        0.0);
+    if ("UNASSIGNED".equals(getXmlString(info, "trackingUI"))) {
+      checkStringMatch("trackingUI", "UNASSIGNED",
+              getXmlString(info, "trackingUI"));
+    }
+    WebServicesTestUtils.checkStringEqual("diagnostics",
+            app.getDiagnostics().toString(), getXmlString(info, "diagnostics"));
+    assertEquals("clusterId doesn't match",
+            ResourceManager.getClusterTimeStamp(),
+            getXmlLong(info, "clusterId"));
+    assertEquals("startedTime doesn't match", app.getStartTime(),
+            getXmlLong(info, "startedTime"));
+    assertEquals("finishedTime doesn't match", app.getFinishTime(),
+            getXmlLong(info, "finishedTime"));
+    assertTrue("elapsed time not greater than 0",
+            getXmlLong(info, "elapsedTime") > 0);
+    checkStringMatch("amHostHttpAddress", app
+                    .getCurrentAppAttempt().getMasterContainer()
+                    .getNodeHttpAddress(),
+            getXmlString(info, "amHostHttpAddress"));
+    assertTrue("amContainerLogs doesn't match",
+        getXmlString(info, "amContainerLogs").startsWith("http://"));
+    assertTrue("amContainerLogs doesn't contain user info",
+        getXmlString(info, "amContainerLogs").endsWith("/" + app.getUser()));
+    assertEquals("allocatedMB doesn't match", 1024,
+            getXmlInt(info, "allocatedMB"));
+    assertEquals("allocatedVCores doesn't match", 1,
+            getXmlInt(info, "allocatedVCores"));
+    assertEquals("queueUsagePerc doesn't match", 50.0f,
+            getXmlFloat(info, "queueUsagePercentage"), 0.01f);
+    assertEquals("clusterUsagePerc doesn't match", 50.0f,
+            getXmlFloat(info, "clusterUsagePercentage"), 0.01f);
+    assertEquals("numContainers doesn't match", 1,
+        getXmlInt(info, "runningContainers"));
+    assertNotNull("preemptedResourceSecondsMap should not be null",
+            info.getElementsByTagName("preemptedResourceSecondsMap"));
+    assertEquals("preemptedResourceMB doesn't match", app
+                    .getRMAppMetrics().getResourcePreempted().getMemorySize(),
+            getXmlInt(info, "preemptedResourceMB"));
+    assertEquals("preemptedResourceVCores doesn't match", app
+                    .getRMAppMetrics().getResourcePreempted().getVirtualCores(),
+            getXmlInt(info, "preemptedResourceVCores"));
+    assertEquals("numNonAMContainerPreempted doesn't match", app
+                    .getRMAppMetrics().getNumNonAMContainersPreempted(),
+            getXmlInt(info, "numNonAMContainerPreempted"));
+    assertEquals("numAMContainerPreempted doesn't match", app
+                    .getRMAppMetrics().getNumAMContainersPreempted(),
+            getXmlInt(info, "numAMContainerPreempted"));
+    assertEquals("Log aggregation Status doesn't match", app
+                    .getLogAggregationStatusForAppReport().toString(),
+            getXmlString(info, "logAggregationStatus"));
+    assertEquals("unmanagedApplication doesn't match", app
+                    .getApplicationSubmissionContext().getUnmanagedAM(),
+            getXmlBoolean(info, "unmanagedApplication"));
+    assertEquals("unmanagedApplication doesn't match",
+            app.getApplicationSubmissionContext().getNodeLabelExpression(),
+            getXmlString(info, "appNodeLabelExpression"));
+    assertEquals("unmanagedApplication doesn't match",
+            app.getAMResourceRequests().get(0).getNodeLabelExpression(),
+            getXmlString(info, "amNodeLabelExpression"));
+    assertEquals("amRPCAddress",
+            AppInfo.getAmRPCAddressFromRMAppAttempt(app.getCurrentAppAttempt()),
+            getXmlString(info, "amRPCAddress"));
+  }
+}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a129e3e7/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/helper/BufferedClientResponse.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/helper/BufferedClientResponse.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/helper/BufferedClientResponse.java
new file mode 100644
index 0000000..a8990ca
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/helper/BufferedClientResponse.java
@@ -0,0 +1,57 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.server.resourcemanager.webapp.helper;
+
+
+import com.sun.jersey.api.client.ClientHandlerException;
+import com.sun.jersey.api.client.ClientResponse;
+import com.sun.jersey.api.client.UniformInterfaceException;
+
+import javax.ws.rs.core.MediaType;
+import java.io.IOException;
+
+/**
+ * This class is merely a wrapper for {@link ClientResponse}. Given that the
+ * entity input stream of {@link ClientResponse} can be read only once by
+ * default and for some tests it is convenient to read the input stream many
+ * times, this class hides the details of how to do that and prevents
+ * unnecessary code duplication in tests.
+ */
+public class BufferedClientResponse {
+  private ClientResponse response;
+
+  public BufferedClientResponse(ClientResponse response) {
+    response.bufferEntity();
+    this.response = response;
+  }
+
+  public <T> T getEntity(Class<T> clazz)
+          throws ClientHandlerException, UniformInterfaceException {
+    try {
+      response.getEntityInputStream().reset();
+    } catch (IOException e) {
+      throw new RuntimeException(e);
+    }
+    return response.getEntity(clazz);
+  }
+
+  public MediaType getType() {
+    return response.getType();
+  }
+}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a129e3e7/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/helper/JsonCustomResourceTypeTestcase.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/helper/JsonCustomResourceTypeTestcase.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/helper/JsonCustomResourceTypeTestcase.java
new file mode 100644
index 0000000..9d6a111
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/helper/JsonCustomResourceTypeTestcase.java
@@ -0,0 +1,77 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.server.resourcemanager.webapp.helper;
+
+import com.sun.jersey.api.client.WebResource;
+import org.apache.hadoop.http.JettyUtils;
+import org.codehaus.jettison.json.JSONObject;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import javax.ws.rs.core.MediaType;
+
+import java.util.function.Consumer;
+
+import static org.junit.Assert.*;
+
+/**
+ * This class hides the implementation details of how to verify the structure of
+ * JSON responses. Tests should only provide the path of the
+ * {@link WebResource}, the response from the resource and
+ * the verifier Consumer to
+ * {@link JsonCustomResourceTypeTestcase#verify(Consumer)}. An instance of
+ * {@link JSONObject} will be passed to that consumer to be able to
+ * verify the response.
+ */
+public class JsonCustomResourceTypeTestcase {
+  private static final Logger LOG =
+      LoggerFactory.getLogger(JsonCustomResourceTypeTestcase.class);
+
+  private final WebResource path;
+  private final BufferedClientResponse response;
+  private final JSONObject parsedResponse;
+
+  public JsonCustomResourceTypeTestcase(WebResource path,
+                                        BufferedClientResponse response) {
+    this.path = path;
+    this.response = response;
+    this.parsedResponse = response.getEntity(JSONObject.class);
+  }
+
+  public void verify(Consumer<JSONObject> verifier) {
+    assertEquals(MediaType.APPLICATION_JSON_TYPE + "; " + JettyUtils.UTF_8,
+        response.getType().toString());
+
+    logResponse();
+
+    String responseStr = response.getEntity(String.class);
+    if (responseStr == null || responseStr.isEmpty()) {
+      throw new IllegalStateException("Response is null or empty!");
+    }
+    verifier.accept(parsedResponse);
+  }
+
+  private void logResponse() {
+    String responseStr = response.getEntity(String.class);
+    LOG.info("Raw response from service URL {}: {}", path.toString(),
+        responseStr);
+    LOG.info("Parsed response from service URL {}: {}", path.toString(),
+        parsedResponse);
+  }
+}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a129e3e7/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/helper/ResourceRequestsJsonVerifications.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/helper/ResourceRequestsJsonVerifications.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/helper/ResourceRequestsJsonVerifications.java
new file mode 100644
index 0000000..6e58a89
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/helper/ResourceRequestsJsonVerifications.java
@@ -0,0 +1,252 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.server.resourcemanager.webapp.helper;
+
+import com.google.common.collect.Lists;
+import com.google.common.collect.Maps;
+import org.apache.hadoop.yarn.api.protocolrecords.ResourceTypes;
+import org.apache.hadoop.yarn.api.records.ResourceInformation;
+import org.apache.hadoop.yarn.api.records.ResourceRequest;
+import org.codehaus.jettison.json.JSONArray;
+import org.codehaus.jettison.json.JSONException;
+import org.codehaus.jettison.json.JSONObject;
+
+import java.util.List;
+import java.util.Map;
+
+import static junit.framework.TestCase.assertTrue;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertNotNull;
+
+/**
+ * Performs value verifications on
+ * {@link org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.ResourceRequestInfo}
+ * objects against the values of {@link ResourceRequest}. With the help of the
+ * {@link Builder}, users can also make verifications of the custom resource
+ * types and its values.
+ */
+public class ResourceRequestsJsonVerifications {
+  private final ResourceRequest resourceRequest;
+  private final JSONObject requestInfo;
+  private final Map<String, Long> customResourceTypes;
+  private final List<String> expectedCustomResourceTypes;
+
+  ResourceRequestsJsonVerifications(Builder builder) {
+    this.resourceRequest = builder.resourceRequest;
+    this.requestInfo = builder.requestInfo;
+    this.customResourceTypes = builder.customResourceTypes;
+    this.expectedCustomResourceTypes = builder.expectedCustomResourceTypes;
+  }
+
+  public static void verify(JSONObject requestInfo, ResourceRequest rr)
+      throws JSONException {
+    createDefaultBuilder(requestInfo, rr).build().verify();
+  }
+
+  public static void verifyWithCustomResourceTypes(JSONObject requestInfo,
+      ResourceRequest resourceRequest, List<String> expectedResourceTypes)
+      throws JSONException {
+
+    createDefaultBuilder(requestInfo, resourceRequest)
+        .withExpectedCustomResourceTypes(expectedResourceTypes)
+        .withCustomResourceTypes(
+            extractActualCustomResourceTypes(requestInfo, expectedResourceTypes))
+        .build().verify();
+  }
+
+  private static Builder createDefaultBuilder(JSONObject requestInfo,
+      ResourceRequest resourceRequest) {
+    return new ResourceRequestsJsonVerifications.Builder()
+            .withRequest(resourceRequest)
+            .withRequestInfoJson(requestInfo);
+  }
+
+  private static Map<String, Long> extractActualCustomResourceTypes(
+      JSONObject requestInfo, List<String> expectedResourceTypes)
+      throws JSONException {
+    JSONObject capability = requestInfo.getJSONObject("capability");
+    Map<String, Long> resourceAndValue =
+        extractCustomResorceTypeValues(capability, expectedResourceTypes);
+    Map.Entry<String, Long> resourceEntry =
+        resourceAndValue.entrySet().iterator().next();
+
+    assertTrue(
+        "Found resource type: " + resourceEntry.getKey()
+            + " is not in expected resource types: " + expectedResourceTypes,
+        expectedResourceTypes.contains(resourceEntry.getKey()));
+
+    return resourceAndValue;
+  }
+
+  private static Map<String, Long> extractCustomResorceTypeValues(
+      JSONObject capability, List<String> expectedResourceTypes)
+      throws JSONException {
+    assertTrue(
+        "resourceCategory does not have resourceInformations: " + capability,
+        capability.has("resourceInformations"));
+
+    JSONObject resourceInformations =
+        capability.getJSONObject("resourceInformations");
+    assertTrue(
+        "resourceInformations does not have resourceInformation object: "
+            + resourceInformations,
+        resourceInformations.has("resourceInformation"));
+    JSONArray customResources =
+        resourceInformations.getJSONArray("resourceInformation");
+
+    // customResources will include vcores / memory as well
+    assertEquals(
+        "Different number of custom resource types found than expected",
+        expectedResourceTypes.size(), customResources.length() - 2);
+
+    Map<String, Long> resourceValues = Maps.newHashMap();
+    for (int i = 0; i < customResources.length(); i++) {
+      JSONObject customResource = customResources.getJSONObject(i);
+      assertTrue("Resource type does not have name field: " + customResource,
+          customResource.has("name"));
+      assertTrue("Resource type does not have name resourceType field: "
+          + customResource, customResource.has("resourceType"));
+      assertTrue(
+          "Resource type does not have name units field: " + customResource,
+          customResource.has("units"));
+      assertTrue(
+          "Resource type does not have name value field: " + customResource,
+          customResource.has("value"));
+
+      String name = customResource.getString("name");
+      String unit = customResource.getString("units");
+      String resourceType = customResource.getString("resourceType");
+      Long value = customResource.getLong("value");
+
+      if (ResourceInformation.MEMORY_URI.equals(name)
+          || ResourceInformation.VCORES_URI.equals(name)) {
+        continue;
+      }
+
+      assertTrue("Custom resource type " + name + " not found",
+          expectedResourceTypes.contains(name));
+      assertEquals("k", unit);
+      assertEquals(ResourceTypes.COUNTABLE,
+          ResourceTypes.valueOf(resourceType));
+      assertNotNull("Custom resource value " + value + " is null!", value);
+      resourceValues.put(name, value);
+    }
+
+    return resourceValues;
+  }
+
+  private void verify() throws JSONException {
+    assertEquals("nodeLabelExpression doesn't match",
+        resourceRequest.getNodeLabelExpression(),
+            requestInfo.getString("nodeLabelExpression"));
+    assertEquals("numContainers doesn't match",
+            resourceRequest.getNumContainers(),
+            requestInfo.getInt("numContainers"));
+    assertEquals("relaxLocality doesn't match",
+            resourceRequest.getRelaxLocality(),
+            requestInfo.getBoolean("relaxLocality"));
+    assertEquals("priority does not match",
+            resourceRequest.getPriority().getPriority(),
+            requestInfo.getInt("priority"));
+    assertEquals("resourceName does not match",
+            resourceRequest.getResourceName(),
+            requestInfo.getString("resourceName"));
+    assertEquals("memory does not match",
+        resourceRequest.getCapability().getMemorySize(),
+            requestInfo.getJSONObject("capability").getLong("memory"));
+    assertEquals("vCores does not match",
+        resourceRequest.getCapability().getVirtualCores(),
+            requestInfo.getJSONObject("capability").getLong("vCores"));
+
+    verifyAtLeastOneCustomResourceIsSerialized();
+
+    JSONObject executionTypeRequest =
+            requestInfo.getJSONObject("executionTypeRequest");
+    assertEquals("executionType does not match",
+        resourceRequest.getExecutionTypeRequest().getExecutionType().name(),
+            executionTypeRequest.getString("executionType"));
+    assertEquals("enforceExecutionType does not match",
+            resourceRequest.getExecutionTypeRequest().getEnforceExecutionType(),
+            executionTypeRequest.getBoolean("enforceExecutionType"));
+  }
+
+  /**
+   * JSON serialization produces "invalid JSON" by default as maps are
+   * serialized like this:
+   * "customResources":{"entry":{"key":"customResource-1","value":"0"}}
+   * If the map has multiple keys then multiple entries will be serialized.
+   * Our json parser in tests cannot handle duplicates therefore only one
+   * custom resource will be in the parsed json. See:
+   * https://issues.apache.org/jira/browse/YARN-7505
+   */
+  private void verifyAtLeastOneCustomResourceIsSerialized() {
+    boolean resourceFound = false;
+    for (String expectedCustomResourceType : expectedCustomResourceTypes) {
+      if (customResourceTypes.containsKey(expectedCustomResourceType)) {
+        resourceFound = true;
+        Long resourceValue =
+            customResourceTypes.get(expectedCustomResourceType);
+        assertNotNull("Resource value should not be null!", resourceValue);
+      }
+    }
+    assertTrue("No custom resource type can be found in the response!",
+        resourceFound);
+  }
+
+  /**
+   * Builder class for {@link ResourceRequestsJsonVerifications}.
+   */
+  public static final class Builder {
+    private List<String> expectedCustomResourceTypes = Lists.newArrayList();
+    private Map<String, Long> customResourceTypes;
+    private ResourceRequest resourceRequest;
+    private JSONObject requestInfo;
+
+    Builder() {
+    }
+
+    public static Builder create() {
+      return new Builder();
+    }
+
+    Builder withExpectedCustomResourceTypes(
+            List<String> expectedCustomResourceTypes) {
+      this.expectedCustomResourceTypes = expectedCustomResourceTypes;
+      return this;
+    }
+
+    Builder withCustomResourceTypes(
+            Map<String, Long> customResourceTypes) {
+      this.customResourceTypes = customResourceTypes;
+      return this;
+    }
+
+    Builder withRequest(ResourceRequest resourceRequest) {
+      this.resourceRequest = resourceRequest;
+      return this;
+    }
+
+    Builder withRequestInfoJson(JSONObject requestInfo) {
+      this.requestInfo = requestInfo;
+      return this;
+    }
+
+    public ResourceRequestsJsonVerifications build() {
+      return new ResourceRequestsJsonVerifications(this);
+    }
+  }
+}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a129e3e7/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/helper/ResourceRequestsXmlVerifications.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/helper/ResourceRequestsXmlVerifications.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/helper/ResourceRequestsXmlVerifications.java
new file mode 100644
index 0000000..af9b0f3
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/helper/ResourceRequestsXmlVerifications.java
@@ -0,0 +1,215 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.server.resourcemanager.webapp.helper;
+
+import com.google.common.collect.Lists;
+import com.google.common.collect.Maps;
+import com.google.common.collect.Sets;
+import org.apache.hadoop.yarn.api.protocolrecords.ResourceTypes;
+import org.apache.hadoop.yarn.api.records.ResourceInformation;
+import org.apache.hadoop.yarn.api.records.ResourceRequest;
+import org.w3c.dom.Element;
+import org.w3c.dom.NodeList;
+
+import java.util.List;
+import java.util.Map;
+import java.util.Set;
+
+import static junit.framework.TestCase.assertTrue;
+import static org.apache.hadoop.yarn.server.resourcemanager.webapp.helper.XmlCustomResourceTypeTestCase.toXml;
+import static org.apache.hadoop.yarn.webapp.WebServicesTestUtils.getXmlBoolean;
+import static org.apache.hadoop.yarn.webapp.WebServicesTestUtils.getXmlInt;
+import static org.apache.hadoop.yarn.webapp.WebServicesTestUtils.getXmlLong;
+import static org.apache.hadoop.yarn.webapp.WebServicesTestUtils.getXmlString;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertNotNull;
+
+/**
+ * Performs value verifications on
+ * {@link org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.ResourceRequestInfo}
+ * objects against the values of {@link ResourceRequest}. With the help of the
+ * {@link Builder}, users can also make verifications of the custom resource
+ * types and its values.
+ */
+public class ResourceRequestsXmlVerifications {
+  private final ResourceRequest resourceRequest;
+  private final Element requestInfo;
+  private final Map<String, Long> customResourceTypes;
+  private final List<String> expectedCustomResourceTypes;
+
+  ResourceRequestsXmlVerifications(Builder builder) {
+    this.resourceRequest = builder.resourceRequest;
+    this.requestInfo = builder.requestInfo;
+    this.customResourceTypes = builder.customResourceTypes;
+    this.expectedCustomResourceTypes = builder.expectedCustomResourceTypes;
+  }
+
+  public static void verifyWithCustomResourceTypes(Element requestInfo,
+      ResourceRequest resourceRequest, List<String> expectedResourceTypes) {
+
+    createDefaultBuilder(requestInfo, resourceRequest)
+        .withExpectedCustomResourceTypes(expectedResourceTypes)
+        .withCustomResourceTypes(extractActualCustomResourceType(requestInfo,
+            expectedResourceTypes))
+        .build().verify();
+  }
+
+  private static Builder createDefaultBuilder(Element requestInfo,
+      ResourceRequest resourceRequest) {
+    return new ResourceRequestsXmlVerifications.Builder()
+        .withRequest(resourceRequest).withRequestInfo(requestInfo);
+  }
+
+  private static Map<String, Long> extractActualCustomResourceType(
+      Element requestInfo, List<String> expectedResourceTypes) {
+    Element capability =
+        (Element) requestInfo.getElementsByTagName("capability").item(0);
+
+    return extractCustomResorceTypes(capability,
+        Sets.newHashSet(expectedResourceTypes));
+  }
+
+  private static Map<String, Long> extractCustomResorceTypes(Element capability,
+      Set<String> expectedResourceTypes) {
+    assertEquals(
+        toXml(capability) + " should have only one resourceInformations child!",
+        1, capability.getElementsByTagName("resourceInformations").getLength());
+    Element resourceInformations = (Element) capability
+        .getElementsByTagName("resourceInformations").item(0);
+
+    NodeList customResources =
+        resourceInformations.getElementsByTagName("resourceInformation");
+
+    // customResources will include vcores / memory as well
+    assertEquals(
+        "Different number of custom resource types found than expected",
+        expectedResourceTypes.size(), customResources.getLength() - 2);
+
+    Map<String, Long> resourceTypesAndValues = Maps.newHashMap();
+    for (int i = 0; i < customResources.getLength(); i++) {
+      Element customResource = (Element) customResources.item(i);
+      String name = getXmlString(customResource, "name");
+      String unit = getXmlString(customResource, "units");
+      String resourceType = getXmlString(customResource, "resourceType");
+      Long value = getXmlLong(customResource, "value");
+
+      if (ResourceInformation.MEMORY_URI.equals(name)
+          || ResourceInformation.VCORES_URI.equals(name)) {
+        continue;
+      }
+
+      assertTrue("Custom resource type " + name + " not found",
+          expectedResourceTypes.contains(name));
+      assertEquals("k", unit);
+      assertEquals(ResourceTypes.COUNTABLE,
+          ResourceTypes.valueOf(resourceType));
+      assertNotNull("Resource value should not be null for resource type "
+          + resourceType + ", listing xml contents: " + toXml(customResource),
+          value);
+      resourceTypesAndValues.put(name, value);
+    }
+
+    return resourceTypesAndValues;
+  }
+
+  private void verify() {
+    assertEquals("nodeLabelExpression doesn't match",
+        resourceRequest.getNodeLabelExpression(),
+        getXmlString(requestInfo, "nodeLabelExpression"));
+    assertEquals("numContainers doesn't match",
+        resourceRequest.getNumContainers(),
+        getXmlInt(requestInfo, "numContainers"));
+    assertEquals("relaxLocality doesn't match",
+        resourceRequest.getRelaxLocality(),
+        getXmlBoolean(requestInfo, "relaxLocality"));
+    assertEquals("priority does not match",
+        resourceRequest.getPriority().getPriority(),
+        getXmlInt(requestInfo, "priority"));
+    assertEquals("resourceName does not match",
+        resourceRequest.getResourceName(),
+        getXmlString(requestInfo, "resourceName"));
+    Element capability = (Element) requestInfo
+            .getElementsByTagName("capability").item(0);
+    assertEquals("memory does not match",
+        resourceRequest.getCapability().getMemorySize(),
+        getXmlLong(capability, "memory"));
+    assertEquals("vCores does not match",
+        resourceRequest.getCapability().getVirtualCores(),
+        getXmlLong(capability, "vCores"));
+
+    for (String expectedCustomResourceType : expectedCustomResourceTypes) {
+      assertTrue(
+          "Custom resource type " + expectedCustomResourceType
+              + " cannot be found!",
+          customResourceTypes.containsKey(expectedCustomResourceType));
+
+      Long resourceValue = customResourceTypes.get(expectedCustomResourceType);
+      assertNotNull("Resource value should not be null!", resourceValue);
+    }
+
+    Element executionTypeRequest = (Element) requestInfo
+        .getElementsByTagName("executionTypeRequest").item(0);
+    assertEquals("executionType does not match",
+        resourceRequest.getExecutionTypeRequest().getExecutionType().name(),
+        getXmlString(executionTypeRequest, "executionType"));
+    assertEquals("enforceExecutionType does not match",
+        resourceRequest.getExecutionTypeRequest().getEnforceExecutionType(),
+        getXmlBoolean(executionTypeRequest, "enforceExecutionType"));
+  }
+
+  /**
+   * Builder class for {@link ResourceRequestsXmlVerifications}.
+   */
+  public static final class Builder {
+    private List<String> expectedCustomResourceTypes = Lists.newArrayList();
+    private Map<String, Long> customResourceTypes;
+    private ResourceRequest resourceRequest;
+    private Element requestInfo;
+
+    Builder() {
+    }
+
+    public static Builder create() {
+      return new Builder();
+    }
+
+    Builder withExpectedCustomResourceTypes(
+        List<String> expectedCustomResourceTypes) {
+      this.expectedCustomResourceTypes = expectedCustomResourceTypes;
+      return this;
+    }
+
+    Builder withCustomResourceTypes(Map<String, Long> customResourceTypes) {
+      this.customResourceTypes = customResourceTypes;
+      return this;
+    }
+
+    Builder withRequest(ResourceRequest resourceRequest) {
+      this.resourceRequest = resourceRequest;
+      return this;
+    }
+
+    Builder withRequestInfo(Element requestInfo) {
+      this.requestInfo = requestInfo;
+      return this;
+    }
+
+    public ResourceRequestsXmlVerifications build() {
+      return new ResourceRequestsXmlVerifications(this);
+    }
+  }
+}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a129e3e7/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/helper/XmlCustomResourceTypeTestCase.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/helper/XmlCustomResourceTypeTestCase.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/helper/XmlCustomResourceTypeTestCase.java
new file mode 100644
index 0000000..29260aa
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/helper/XmlCustomResourceTypeTestCase.java
@@ -0,0 +1,112 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.server.resourcemanager.webapp.helper;
+
+import com.sun.jersey.api.client.WebResource;
+import org.apache.hadoop.http.JettyUtils;
+import org.codehaus.jettison.json.JSONObject;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+import org.w3c.dom.Document;
+import org.w3c.dom.Node;
+import org.xml.sax.InputSource;
+
+import javax.ws.rs.core.MediaType;
+import javax.xml.parsers.DocumentBuilder;
+import javax.xml.parsers.DocumentBuilderFactory;
+import javax.xml.transform.*;
+import javax.xml.transform.dom.DOMSource;
+import javax.xml.transform.stream.StreamResult;
+import java.io.StringReader;
+import java.io.StringWriter;
+import java.util.function.Consumer;
+
+import static org.junit.Assert.assertEquals;
+
+/**
+ * This class hides the implementation details of how to verify the structure of
+ * XML responses. Tests should only provide the path of the
+ * {@link WebResource}, the response from the resource and
+ * the verifier Consumer to
+ * {@link XmlCustomResourceTypeTestCase#verify(Consumer)}. An instance of
+ * {@link JSONObject} will be passed to that consumer to be able to
+ * verify the response.
+ */
+public class XmlCustomResourceTypeTestCase {
+  private static final Logger LOG =
+      LoggerFactory.getLogger(XmlCustomResourceTypeTestCase.class);
+
+  private WebResource path;
+  private BufferedClientResponse response;
+  private Document parsedResponse;
+
+  public XmlCustomResourceTypeTestCase(WebResource path,
+                                       BufferedClientResponse response) {
+    this.path = path;
+    this.response = response;
+  }
+
+  public void verify(Consumer<Document> verifier) {
+    assertEquals(MediaType.APPLICATION_XML + "; " + JettyUtils.UTF_8,
+        response.getType().toString());
+
+    parsedResponse = parseXml(response);
+    logResponse(parsedResponse);
+    verifier.accept(parsedResponse);
+  }
+
+  private Document parseXml(BufferedClientResponse response) {
+    try {
+      String xml = response.getEntity(String.class);
+      DocumentBuilder db =
+          DocumentBuilderFactory.newInstance().newDocumentBuilder();
+      InputSource is = new InputSource();
+      is.setCharacterStream(new StringReader(xml));
+
+      return db.parse(is);
+    } catch (Exception e) {
+      throw new RuntimeException(e);
+    }
+  }
+
+  private void logResponse(Document doc) {
+    String responseStr = response.getEntity(String.class);
+    LOG.info("Raw response from service URL {}: {}", path.toString(),
+        responseStr);
+    LOG.info("Parsed response from service URL {}: {}", path.toString(),
+        toXml(doc));
+  }
+
+  public static String toXml(Node node) {
+    StringWriter writer;
+    try {
+      TransformerFactory tf = TransformerFactory.newInstance();
+      Transformer transformer = tf.newTransformer();
+      transformer.setOutputProperty(OutputKeys.INDENT, "yes");
+      transformer.setOutputProperty(
+          "{http://xml.apache.org/xslt}indent" + "-amount", "2");
+      writer = new StringWriter();
+      transformer.transform(new DOMSource(node), new StreamResult(writer));
+    } catch (TransformerException e) {
+      throw new RuntimeException(e);
+    }
+
+    return writer.getBuffer().toString();
+  }
+}


---------------------------------------------------------------------
To unsubscribe, e-mail: common-commits-unsubscribe@hadoop.apache.org
For additional commands, e-mail: common-commits-help@hadoop.apache.org


[49/50] [abbrv] hadoop git commit: HDFS-13719. Docs around dfs.image.transfer.timeout are misleading. Contributed by Kitti Nansi.

Posted by vi...@apache.org.
HDFS-13719. Docs around dfs.image.transfer.timeout are misleading. Contributed by Kitti Nansi.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/eecb5baa
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/eecb5baa
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/eecb5baa

Branch: refs/heads/HDFS-12090
Commit: eecb5baaaaa54599aeae758abd4007e55e5b531f
Parents: 43f7fe8
Author: Andrew Wang <wa...@apache.org>
Authored: Mon Jul 9 15:17:21 2018 +0200
Committer: Andrew Wang <wa...@apache.org>
Committed: Mon Jul 9 15:17:21 2018 +0200

----------------------------------------------------------------------
 .../hadoop-hdfs/src/main/resources/hdfs-default.xml    | 13 +++++--------
 1 file changed, 5 insertions(+), 8 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/eecb5baa/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml b/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml
index 6dd2d92..384cedf 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml
@@ -1289,11 +1289,10 @@
   <name>dfs.image.transfer.timeout</name>
   <value>60000</value>
   <description>
-        Socket timeout for image transfer in milliseconds. This timeout and the related
-        dfs.image.transfer.bandwidthPerSec parameter should be configured such
-        that normal image transfer can complete successfully.
-        This timeout prevents client hangs when the sender fails during
-        image transfer. This is socket timeout during image transfer.
+        Socket timeout for the HttpURLConnection instance used in the image
+        transfer. This is measured in milliseconds.
+        This timeout prevents client hangs if the connection is idle
+        for this configured timeout, during image transfer.
   </description>
 </property>
 
@@ -1304,9 +1303,7 @@
         Maximum bandwidth used for regular image transfers (instead of
         bootstrapping the standby namenode), in bytes per second.
         This can help keep normal namenode operations responsive during
-        checkpointing. The maximum bandwidth and timeout in
-        dfs.image.transfer.timeout should be set such that normal image
-        transfers can complete successfully.
+        checkpointing.
         A default value of 0 indicates that throttling is disabled.
         The maximum bandwidth used for bootstrapping standby namenode is
         configured with dfs.image.transfer-bootstrap-standby.bandwidthPerSec.


---------------------------------------------------------------------
To unsubscribe, e-mail: common-commits-unsubscribe@hadoop.apache.org
For additional commands, e-mail: common-commits-help@hadoop.apache.org


[09/50] [abbrv] hadoop git commit: HADOOP-15215 s3guard set-capacity command to fail on read/write of 0 (Gabor Bota)

Posted by vi...@apache.org.
HADOOP-15215 s3guard set-capacity command to fail on read/write of 0 (Gabor Bota)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/93ac01cb
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/93ac01cb
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/93ac01cb

Branch: refs/heads/HDFS-12090
Commit: 93ac01cb59b99b84b4f1ff26c089dcb5ce1b7c89
Parents: c0ef7e7
Author: Aaron Fabbri <fa...@apache.org>
Authored: Tue Jul 3 13:50:11 2018 -0700
Committer: Aaron Fabbri <fa...@apache.org>
Committed: Tue Jul 3 13:50:11 2018 -0700

----------------------------------------------------------------------
 .../apache/hadoop/fs/s3a/s3guard/S3GuardTool.java | 10 ++++++++++
 .../s3a/s3guard/AbstractS3GuardToolTestBase.java  | 18 ++++++++++++++++++
 2 files changed, 28 insertions(+)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/93ac01cb/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/s3guard/S3GuardTool.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/s3guard/S3GuardTool.java b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/s3guard/S3GuardTool.java
index 527697f..19dc32a 100644
--- a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/s3guard/S3GuardTool.java
+++ b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/s3guard/S3GuardTool.java
@@ -439,6 +439,10 @@ public abstract class S3GuardTool extends Configured implements Tool {
   static class SetCapacity extends S3GuardTool {
     public static final String NAME = "set-capacity";
     public static final String PURPOSE = "Alter metadata store IO capacity";
+    public static final String READ_CAP_INVALID = "Read capacity must have "
+        + "value greater than or equal to 1.";
+    public static final String WRITE_CAP_INVALID = "Write capacity must have "
+        + "value greater than or equal to 1.";
     private static final String USAGE = NAME + " [OPTIONS] [s3a://BUCKET]\n" +
         "\t" + PURPOSE + "\n\n" +
         "Common options:\n" +
@@ -478,11 +482,17 @@ public abstract class S3GuardTool extends Configured implements Tool {
 
       String readCap = getCommandFormat().getOptValue(READ_FLAG);
       if (StringUtils.isNotEmpty(readCap)) {
+        Preconditions.checkArgument(Integer.parseInt(readCap) > 0,
+            READ_CAP_INVALID);
+
         S3GuardTool.println(out, "Read capacity set to %s", readCap);
         options.put(S3GUARD_DDB_TABLE_CAPACITY_READ_KEY, readCap);
       }
       String writeCap = getCommandFormat().getOptValue(WRITE_FLAG);
       if (StringUtils.isNotEmpty(writeCap)) {
+        Preconditions.checkArgument(Integer.parseInt(writeCap) > 0,
+            WRITE_CAP_INVALID);
+
         S3GuardTool.println(out, "Write capacity set to %s", writeCap);
         options.put(S3GUARD_DDB_TABLE_CAPACITY_WRITE_KEY, writeCap);
       }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/93ac01cb/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/s3guard/AbstractS3GuardToolTestBase.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/s3guard/AbstractS3GuardToolTestBase.java b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/s3guard/AbstractS3GuardToolTestBase.java
index 7d75f52..f591e32 100644
--- a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/s3guard/AbstractS3GuardToolTestBase.java
+++ b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/s3guard/AbstractS3GuardToolTestBase.java
@@ -51,6 +51,7 @@ import org.apache.hadoop.io.IOUtils;
 import org.apache.hadoop.util.ExitUtil;
 import org.apache.hadoop.util.StringUtils;
 
+import static org.apache.hadoop.fs.s3a.Constants.S3GUARD_DDB_TABLE_NAME_KEY;
 import static org.apache.hadoop.fs.s3a.Constants.S3GUARD_METASTORE_NULL;
 import static org.apache.hadoop.fs.s3a.Constants.S3_METADATA_STORE_IMPL;
 import static org.apache.hadoop.fs.s3a.s3guard.S3GuardTool.E_BAD_STATE;
@@ -287,6 +288,23 @@ public abstract class AbstractS3GuardToolTestBase extends AbstractS3ATestBase {
   }
 
   @Test
+  public void testSetCapacityFailFast() throws Exception{
+    Configuration conf = getConfiguration();
+    conf.set(S3GUARD_DDB_TABLE_NAME_KEY, getFileSystem().getBucket());
+
+    S3GuardTool.SetCapacity cmdR = new S3GuardTool.SetCapacity(conf);
+    String[] argsR = new String[]{cmdR.getName(), "-read", "0", "s3a://bucket"};
+    intercept(IllegalArgumentException.class,
+        S3GuardTool.SetCapacity.READ_CAP_INVALID, () -> cmdR.run(argsR));
+
+    S3GuardTool.SetCapacity cmdW = new S3GuardTool.SetCapacity(conf);
+    String[] argsW = new String[]{cmdW.getName(), "-write", "0",
+        "s3a://bucket"};
+    intercept(IllegalArgumentException.class,
+        S3GuardTool.SetCapacity.WRITE_CAP_INVALID, () -> cmdW.run(argsW));
+  }
+
+  @Test
   public void testDestroyNoBucket() throws Throwable {
     intercept(FileNotFoundException.class,
         new Callable<Integer>() {


---------------------------------------------------------------------
To unsubscribe, e-mail: common-commits-unsubscribe@hadoop.apache.org
For additional commands, e-mail: common-commits-help@hadoop.apache.org


[14/50] [abbrv] hadoop git commit: HDDS-212. Introduce NodeStateManager to manage the state of Datanodes in SCM. Contributed by Nanda kumar.

Posted by vi...@apache.org.
http://git-wip-us.apache.org/repos/asf/hadoop/blob/71df8c27/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/states/NodeStateMap.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/states/NodeStateMap.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/states/NodeStateMap.java
new file mode 100644
index 0000000..dd91866
--- /dev/null
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/states/NodeStateMap.java
@@ -0,0 +1,281 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hdds.scm.node.states;
+
+import org.apache.hadoop.hdds.protocol.DatanodeDetails;
+import org.apache.hadoop.hdds.protocol.proto.HddsProtos.NodeState;
+import org.apache.hadoop.hdds.scm.node.DatanodeInfo;
+
+import java.util.HashSet;
+import java.util.LinkedList;
+import java.util.List;
+import java.util.Map;
+import java.util.Set;
+import java.util.UUID;
+import java.util.concurrent.ConcurrentHashMap;
+import java.util.concurrent.locks.ReadWriteLock;
+import java.util.concurrent.locks.ReentrantReadWriteLock;
+
+/**
+ * Maintains the state of datanodes in SCM. This class should only be used by
+ * NodeStateManager to maintain the state. If anyone wants to change the
+ * state of a node they should call NodeStateManager, do not directly use
+ * this class.
+ */
+public class NodeStateMap {
+
+  /**
+   * Node id to node info map.
+   */
+  private final ConcurrentHashMap<UUID, DatanodeInfo> nodeMap;
+  /**
+   * Represents the current state of node.
+   */
+  private final ConcurrentHashMap<NodeState, Set<UUID>> stateMap;
+  private final ReadWriteLock lock;
+
+  /**
+   * Creates a new instance of NodeStateMap with no nodes.
+   */
+  public NodeStateMap() {
+    lock = new ReentrantReadWriteLock();
+    nodeMap = new ConcurrentHashMap<>();
+    stateMap = new ConcurrentHashMap<>();
+    initStateMap();
+  }
+
+  /**
+   * Initializes the state map with available states.
+   */
+  private void initStateMap() {
+    for (NodeState state : NodeState.values()) {
+      stateMap.put(state, new HashSet<>());
+    }
+  }
+
+  /**
+   * Adds a node to NodeStateMap.
+   *
+   * @param datanodeDetails DatanodeDetails
+   * @param nodeState initial NodeState
+   *
+   * @throws NodeAlreadyExistsException if the node already exist
+   */
+  public void addNode(DatanodeDetails datanodeDetails, NodeState nodeState)
+      throws NodeAlreadyExistsException {
+    lock.writeLock().lock();
+    try {
+      UUID id = datanodeDetails.getUuid();
+      if (nodeMap.containsKey(id)) {
+        throw new NodeAlreadyExistsException("Node UUID: " + id);
+      }
+      nodeMap.put(id, new DatanodeInfo(datanodeDetails));
+      stateMap.get(nodeState).add(id);
+    } finally {
+      lock.writeLock().unlock();
+    }
+  }
+
+  /**
+   * Updates the node state.
+   *
+   * @param nodeId Node Id
+   * @param currentState current state
+   * @param newState new state
+   *
+   * @throws NodeNotFoundException if the node is not present
+   */
+  public void updateNodeState(UUID nodeId, NodeState currentState,
+                              NodeState newState)throws NodeNotFoundException {
+    lock.writeLock().lock();
+    try {
+      if (stateMap.get(currentState).remove(nodeId)) {
+        stateMap.get(newState).add(nodeId);
+      } else {
+        throw new NodeNotFoundException("Node UUID: " + nodeId +
+            ", not found in state: " + currentState);
+      }
+    } finally {
+      lock.writeLock().unlock();
+    }
+  }
+
+  /**
+   * Returns DatanodeDetails for the given node id.
+   *
+   * @param uuid Node Id
+   *
+   * @return DatanodeDetails of the node
+   *
+   * @throws NodeNotFoundException if the node is not present
+   */
+  public DatanodeDetails getNodeDetails(UUID uuid)
+      throws NodeNotFoundException {
+    return getNodeInfo(uuid);
+  }
+
+  /**
+   * Returns DatanodeInfo for the given node id.
+   *
+   * @param uuid Node Id
+   *
+   * @return DatanodeInfo of the node
+   *
+   * @throws NodeNotFoundException if the node is not present
+   */
+  public DatanodeInfo getNodeInfo(UUID uuid) throws NodeNotFoundException {
+    lock.readLock().lock();
+    try {
+      if (nodeMap.containsKey(uuid)) {
+        return nodeMap.get(uuid);
+      }
+      throw new NodeNotFoundException("Node UUID: " + uuid);
+    } finally {
+      lock.readLock().unlock();
+    }
+  }
+
+
+  /**
+   * Returns the list of node ids which are in the specified state.
+   *
+   * @param state NodeState
+   *
+   * @return list of node ids
+   */
+  public List<UUID> getNodes(NodeState state) {
+    lock.readLock().lock();
+    try {
+      return new LinkedList<>(stateMap.get(state));
+    } finally {
+      lock.readLock().unlock();
+    }
+  }
+
+  /**
+   * Returns the list of all the node ids.
+   *
+   * @return list of all the node ids
+   */
+  public List<UUID> getAllNodes() {
+    lock.readLock().lock();
+    try {
+      return new LinkedList<>(nodeMap.keySet());
+    } finally {
+      lock.readLock().unlock();
+    }
+  }
+
+  /**
+   * Returns the count of nodes in the specified state.
+   *
+   * @param state NodeState
+   *
+   * @return Number of nodes in the specified state
+   */
+  public int getNodeCount(NodeState state) {
+    lock.readLock().lock();
+    try {
+      return stateMap.get(state).size();
+    } finally {
+      lock.readLock().unlock();
+    }
+  }
+
+  /**
+   * Returns the total node count.
+   *
+   * @return node count
+   */
+  public int getTotalNodeCount() {
+    lock.readLock().lock();
+    try {
+      return nodeMap.size();
+    } finally {
+      lock.readLock().unlock();
+    }
+  }
+
+  /**
+   * Returns the current state of the node.
+   *
+   * @param uuid node id
+   *
+   * @return NodeState
+   *
+   * @throws NodeNotFoundException if the node is not found
+   */
+  public NodeState getNodeState(UUID uuid) throws NodeNotFoundException {
+    lock.readLock().lock();
+    try {
+      for (Map.Entry<NodeState, Set<UUID>> entry : stateMap.entrySet()) {
+        if (entry.getValue().contains(uuid)) {
+          return entry.getKey();
+        }
+      }
+      throw new NodeNotFoundException("Node UUID: " + uuid);
+    } finally {
+      lock.readLock().unlock();
+    }
+  }
+
+  /**
+   * Removes the node from NodeStateMap.
+   *
+   * @param uuid node id
+   *
+   * @throws NodeNotFoundException if the node is not found
+   */
+  public void removeNode(UUID uuid) throws NodeNotFoundException {
+    lock.writeLock().lock();
+    try {
+      if (nodeMap.containsKey(uuid)) {
+        for (Map.Entry<NodeState, Set<UUID>> entry : stateMap.entrySet()) {
+          if(entry.getValue().remove(uuid)) {
+            break;
+          }
+          nodeMap.remove(uuid);
+        }
+        throw new NodeNotFoundException("Node UUID: " + uuid);
+      }
+    } finally {
+      lock.writeLock().unlock();
+    }
+  }
+
+  /**
+   * Since we don't hold a global lock while constructing this string,
+   * the result might be inconsistent. If someone has changed the state of node
+   * while we are constructing the string, the result will be inconsistent.
+   * This should only be used for logging. We should not parse this string and
+   * use it for any critical calculations.
+   *
+   * @return current state of NodeStateMap
+   */
+  @Override
+  public String toString() {
+    StringBuilder builder = new StringBuilder();
+    builder.append("Total number of nodes: ").append(getTotalNodeCount());
+    for (NodeState state : NodeState.values()) {
+      builder.append("Number of nodes in ").append(state).append(" state: ")
+          .append(getNodeCount(state));
+    }
+    return builder.toString();
+  }
+}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/71df8c27/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMClientProtocolServer.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMClientProtocolServer.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMClientProtocolServer.java
index e1d478f..aefcf1b 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMClientProtocolServer.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMClientProtocolServer.java
@@ -47,7 +47,7 @@ import org.slf4j.LoggerFactory;
 
 import java.io.IOException;
 import java.net.InetSocketAddress;
-import java.util.EnumSet;
+import java.util.ArrayList;
 import java.util.LinkedList;
 import java.util.List;
 import java.util.Set;
@@ -188,27 +188,21 @@ public class SCMClientProtocolServer implements
   }
 
   @Override
-  public HddsProtos.NodePool queryNode(EnumSet<HddsProtos.NodeState>
-      nodeStatuses, HddsProtos.QueryScope queryScope, String poolName) throws
+  public List<HddsProtos.Node> queryNode(HddsProtos.NodeState state,
+      HddsProtos.QueryScope queryScope, String poolName) throws
       IOException {
 
     if (queryScope == HddsProtos.QueryScope.POOL) {
       throw new IllegalArgumentException("Not Supported yet");
     }
 
-    List<DatanodeDetails> datanodes = queryNode(nodeStatuses);
-    HddsProtos.NodePool.Builder poolBuilder = HddsProtos.NodePool.newBuilder();
+    List<HddsProtos.Node> result = new ArrayList<>();
+    queryNode(state).forEach(node -> result.add(HddsProtos.Node.newBuilder()
+        .setNodeID(node.getProtoBufMessage())
+        .addNodeStates(state)
+        .build()));
 
-    for (DatanodeDetails datanode : datanodes) {
-      HddsProtos.Node node =
-          HddsProtos.Node.newBuilder()
-              .setNodeID(datanode.getProtoBufMessage())
-              .addAllNodeStates(nodeStatuses)
-              .build();
-      poolBuilder.addNodes(node);
-    }
-
-    return poolBuilder.build();
+    return result;
 
   }
 
@@ -282,35 +276,12 @@ public class SCMClientProtocolServer implements
    * operation between the
    * operators.
    *
-   * @param nodeStatuses - A set of NodeStates.
+   * @param state - NodeStates.
    * @return List of Datanodes.
    */
-  public List<DatanodeDetails> queryNode(EnumSet<HddsProtos.NodeState>
-      nodeStatuses) {
-    Preconditions.checkNotNull(nodeStatuses, "Node Query set cannot be null");
-    Preconditions.checkState(nodeStatuses.size() > 0, "No valid arguments " +
-        "in the query set");
-    List<DatanodeDetails> resultList = new LinkedList<>();
-    Set<DatanodeDetails> currentSet = new TreeSet<>();
-
-    for (HddsProtos.NodeState nodeState : nodeStatuses) {
-      Set<DatanodeDetails> nextSet = queryNodeState(nodeState);
-      if ((nextSet == null) || (nextSet.size() == 0)) {
-        // Right now we only support AND operation. So intersect with
-        // any empty set is null.
-        return resultList;
-      }
-      // First time we have to add all the elements, next time we have to
-      // do an intersection operation on the set.
-      if (currentSet.size() == 0) {
-        currentSet.addAll(nextSet);
-      } else {
-        currentSet.retainAll(nextSet);
-      }
-    }
-
-    resultList.addAll(currentSet);
-    return resultList;
+  public List<DatanodeDetails> queryNode(HddsProtos.NodeState state) {
+    Preconditions.checkNotNull(state, "Node Query set cannot be null");
+    return new LinkedList<>(queryNodeState(state));
   }
 
   @VisibleForTesting
@@ -325,11 +296,6 @@ public class SCMClientProtocolServer implements
    * @return Set of Datanodes that match the NodeState.
    */
   private Set<DatanodeDetails> queryNodeState(HddsProtos.NodeState nodeState) {
-    if (nodeState == HddsProtos.NodeState.RAFT_MEMBER || nodeState ==
-        HddsProtos.NodeState
-        .FREE_NODE) {
-      throw new IllegalStateException("Not implemented yet");
-    }
     Set<DatanodeDetails> returnSet = new TreeSet<>();
     List<DatanodeDetails> tmp = scm.getScmNodeManager().getNodes(nodeState);
     if ((tmp != null) && (tmp.size() > 0)) {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/71df8c27/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMDatanodeHeartbeatDispatcher.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMDatanodeHeartbeatDispatcher.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMDatanodeHeartbeatDispatcher.java
index 36f10a9..f221584 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMDatanodeHeartbeatDispatcher.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMDatanodeHeartbeatDispatcher.java
@@ -61,7 +61,7 @@ public final class SCMDatanodeHeartbeatDispatcher {
   public void dispatch(SCMHeartbeatRequestProto heartbeat) {
     DatanodeDetails datanodeDetails =
         DatanodeDetails.getFromProtoBuf(heartbeat.getDatanodeDetails());
-
+    // should we dispatch heartbeat through eventPublisher?
     if (heartbeat.hasNodeReport()) {
       eventPublisher.fireEvent(NODE_REPORT,
           new NodeReportFromDatanode(datanodeDetails,

http://git-wip-us.apache.org/repos/asf/hadoop/blob/71df8c27/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMDatanodeProtocolServer.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMDatanodeProtocolServer.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMDatanodeProtocolServer.java
index 56b0719..aef5b03 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMDatanodeProtocolServer.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMDatanodeProtocolServer.java
@@ -223,7 +223,7 @@ public class SCMDatanodeProtocolServer implements
         .getFromProtoBuf(heartbeat.getDatanodeDetails());
     NodeReportProto nodeReport = heartbeat.getNodeReport();
     List<SCMCommand> commands =
-        scm.getScmNodeManager().sendHeartbeat(datanodeDetails, nodeReport);
+        scm.getScmNodeManager().processHeartbeat(datanodeDetails);
     List<SCMCommandProto> cmdResponses = new LinkedList<>();
     for (SCMCommand cmd : commands) {
       cmdResponses.add(getCommandResponse(cmd));

http://git-wip-us.apache.org/repos/asf/hadoop/blob/71df8c27/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/MockNodeManager.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/MockNodeManager.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/MockNodeManager.java
index 80b5d6e..3357992 100644
--- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/MockNodeManager.java
+++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/MockNodeManager.java
@@ -19,21 +19,18 @@ package org.apache.hadoop.hdds.scm.container;
 import org.apache.hadoop.hdds.scm.container.placement.metrics.SCMNodeMetric;
 import org.apache.hadoop.hdds.scm.container.placement.metrics.SCMNodeStat;
 import org.apache.hadoop.hdds.scm.node.NodeManager;
-import org.apache.hadoop.hdfs.protocol.UnregisteredNodeException;
+import org.apache.hadoop.hdds.scm.node.states.NodeNotFoundException;
 import org.apache.hadoop.hdds.protocol.DatanodeDetails;
 import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
 import org.apache.hadoop.hdds.protocol.proto
     .StorageContainerDatanodeProtocolProtos.NodeReportProto;
 import org.apache.hadoop.hdds.protocol.proto
-    .StorageContainerDatanodeProtocolProtos.StorageReportProto;
-import org.apache.hadoop.hdds.protocol.proto
     .StorageContainerDatanodeProtocolProtos.SCMVersionRequestProto;
 import org.apache.hadoop.ozone.OzoneConsts;
 import org.apache.hadoop.ozone.protocol.VersionResponse;
 import org.apache.hadoop.ozone.protocol.commands.RegisteredCommand;
 import org.apache.hadoop.ozone.protocol.commands.SCMCommand;
 import org.assertj.core.util.Preconditions;
-import org.mockito.Mockito;
 
 import java.io.IOException;
 import java.util.HashMap;
@@ -130,11 +127,11 @@ public class MockNodeManager implements NodeManager {
    * Removes a data node from the management of this Node Manager.
    *
    * @param node - DataNode.
-   * @throws UnregisteredNodeException
+   * @throws NodeNotFoundException
    */
   @Override
   public void removeNode(DatanodeDetails node)
-      throws UnregisteredNodeException {
+      throws NodeNotFoundException {
 
   }
 
@@ -273,16 +270,6 @@ public class MockNodeManager implements NodeManager {
   }
 
   /**
-   * Used for testing.
-   *
-   * @return true if the HB check is done.
-   */
-  @Override
-  public boolean waitForHeartbeatProcessed() {
-    return false;
-  }
-
-  /**
    * Returns the node state of a specific node.
    *
    * @param dd - DatanodeDetails
@@ -335,21 +322,6 @@ public class MockNodeManager implements NodeManager {
   }
 
   /**
-   * When an object implementing interface <code>Runnable</code> is used to
-   * create a thread, starting the thread causes the object's <code>run</code>
-   * method to be called in that separately executing thread.
-   * <p>
-   * The general contract of the method <code>run</code> is that it may take any
-   * action whatsoever.
-   *
-   * @see Thread#run()
-   */
-  @Override
-  public void run() {
-
-  }
-
-  /**
    * Gets the version info from SCM.
    *
    * @param versionRequest - version Request.
@@ -379,32 +351,10 @@ public class MockNodeManager implements NodeManager {
    * Send heartbeat to indicate the datanode is alive and doing well.
    *
    * @param datanodeDetails - Datanode ID.
-   * @param nodeReport - node report.
    * @return SCMheartbeat response list
    */
   @Override
-  public List<SCMCommand> sendHeartbeat(DatanodeDetails datanodeDetails,
-      NodeReportProto nodeReport) {
-    if ((datanodeDetails != null) && (nodeReport != null) && (nodeReport
-        .getStorageReportCount() > 0)) {
-      SCMNodeStat stat = this.nodeMetricMap.get(datanodeDetails.getUuid());
-
-      long totalCapacity = 0L;
-      long totalRemaining = 0L;
-      long totalScmUsed = 0L;
-      List<StorageReportProto> storageReports = nodeReport
-          .getStorageReportList();
-      for (StorageReportProto report : storageReports) {
-        totalCapacity += report.getCapacity();
-        totalRemaining += report.getRemaining();
-        totalScmUsed += report.getScmUsed();
-      }
-      aggregateStat.subtract(stat);
-      stat.set(totalCapacity, totalScmUsed, totalRemaining);
-      aggregateStat.add(stat);
-      nodeMetricMap.put(datanodeDetails.getUuid(), stat);
-
-    }
+  public List<SCMCommand> processHeartbeat(DatanodeDetails datanodeDetails) {
     return null;
   }
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/71df8c27/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestContainerPlacement.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestContainerPlacement.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestContainerPlacement.java
index 98b0a28..c6ea2af 100644
--- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestContainerPlacement.java
+++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestContainerPlacement.java
@@ -36,8 +36,8 @@ import org.apache.hadoop.hdds.protocol.proto
     .StorageContainerDatanodeProtocolProtos.StorageReportProto;
 import org.apache.hadoop.ozone.OzoneConfigKeys;
 import org.apache.hadoop.ozone.OzoneConsts;
-import org.apache.hadoop.test.GenericTestUtils;
 import org.apache.hadoop.test.PathUtils;
+import org.junit.Ignore;
 import org.junit.Rule;
 import org.junit.Test;
 import org.junit.rules.ExpectedException;
@@ -109,6 +109,7 @@ public class TestContainerPlacement {
    * @throws TimeoutException
    */
   @Test
+  @Ignore
   public void testContainerPlacementCapacity() throws IOException,
       InterruptedException, TimeoutException {
     OzoneConfiguration conf = getConf();
@@ -135,12 +136,11 @@ public class TestContainerPlacement {
         String path = testDir.getAbsolutePath() + "/" + id;
         List<StorageReportProto> reports = TestUtils
             .createStorageReport(capacity, used, remaining, path, null, id, 1);
-        nodeManager.sendHeartbeat(datanodeDetails,
-            TestUtils.createNodeReport(reports));
+        nodeManager.processHeartbeat(datanodeDetails);
       }
 
-      GenericTestUtils.waitFor(() -> nodeManager.waitForHeartbeatProcessed(),
-          100, 4 * 1000);
+      //TODO: wait for heartbeat to be processed
+      Thread.sleep(4 * 1000);
       assertEquals(nodeCount, nodeManager.getNodeCount(HEALTHY));
       assertEquals(capacity * nodeCount,
           (long) nodeManager.getStats().getCapacity().get());

http://git-wip-us.apache.org/repos/asf/hadoop/blob/71df8c27/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestNodeManager.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestNodeManager.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestNodeManager.java
index 824a135..0a4e33d 100644
--- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestNodeManager.java
+++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestNodeManager.java
@@ -41,6 +41,7 @@ import org.junit.After;
 import org.junit.Assert;
 import org.junit.Before;
 import org.junit.BeforeClass;
+import org.junit.Ignore;
 import org.junit.Rule;
 import org.junit.Test;
 import org.junit.rules.ExpectedException;
@@ -62,8 +63,6 @@ import static org.apache.hadoop.hdds.scm.ScmConfigKeys
 import static org.apache.hadoop.hdds.scm.ScmConfigKeys
     .OZONE_SCM_HEARTBEAT_PROCESS_INTERVAL;
 import static org.apache.hadoop.hdds.scm.ScmConfigKeys
-    .OZONE_SCM_MAX_HB_COUNT_TO_PROCESS;
-import static org.apache.hadoop.hdds.scm.ScmConfigKeys
     .OZONE_SCM_STALENODE_INTERVAL;
 import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.NodeState.DEAD;
 import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.NodeState
@@ -148,14 +147,11 @@ public class TestNodeManager {
       for (int x = 0; x < nodeManager.getMinimumChillModeNodes(); x++) {
         DatanodeDetails datanodeDetails = TestUtils.getDatanodeDetails(
             nodeManager);
-        nodeManager.sendHeartbeat(datanodeDetails,
-            null);
+        nodeManager.processHeartbeat(datanodeDetails);
       }
 
-      // Wait for 4 seconds max.
-      GenericTestUtils.waitFor(() -> nodeManager.waitForHeartbeatProcessed(),
-          100, 4 * 1000);
-
+      //TODO: wait for heartbeat to be processed
+      Thread.sleep(4 * 1000);
       assertTrue("Heartbeat thread should have picked up the" +
               "scheduled heartbeats and transitioned out of chill mode.",
           nodeManager.isOutOfChillMode());
@@ -174,8 +170,8 @@ public class TestNodeManager {
       InterruptedException, TimeoutException {
 
     try (SCMNodeManager nodeManager = createNodeManager(getConf())) {
-      GenericTestUtils.waitFor(() -> nodeManager.waitForHeartbeatProcessed(),
-          100, 4 * 1000);
+      //TODO: wait for heartbeat to be processed
+      Thread.sleep(4 * 1000);
       assertFalse("No heartbeats, Node manager should have been in" +
           " chill mode.", nodeManager.isOutOfChillMode());
     }
@@ -195,10 +191,9 @@ public class TestNodeManager {
 
       // Need 100 nodes to come out of chill mode, only one node is sending HB.
       nodeManager.setMinimumChillModeNodes(100);
-      nodeManager.sendHeartbeat(TestUtils.getDatanodeDetails(nodeManager),
-          null);
-      GenericTestUtils.waitFor(() -> nodeManager.waitForHeartbeatProcessed(),
-          100, 4 * 1000);
+      nodeManager.processHeartbeat(TestUtils.getDatanodeDetails(nodeManager));
+      //TODO: wait for heartbeat to be processed
+      Thread.sleep(4 * 1000);
       assertFalse("Not enough heartbeat, Node manager should have" +
           "been in chillmode.", nodeManager.isOutOfChillMode());
     }
@@ -223,12 +218,11 @@ public class TestNodeManager {
 
       // Send 10 heartbeat from same node, and assert we never leave chill mode.
       for (int x = 0; x < 10; x++) {
-        nodeManager.sendHeartbeat(datanodeDetails,
-            null);
+        nodeManager.processHeartbeat(datanodeDetails);
       }
 
-      GenericTestUtils.waitFor(() -> nodeManager.waitForHeartbeatProcessed(),
-          100, 4 * 1000);
+      //TODO: wait for heartbeat to be processed
+      Thread.sleep(4 * 1000);
       assertFalse("Not enough nodes have send heartbeat to node" +
           "manager.", nodeManager.isOutOfChillMode());
     }
@@ -254,14 +248,12 @@ public class TestNodeManager {
     nodeManager.close();
 
     // These should never be processed.
-    nodeManager.sendHeartbeat(datanodeDetails,
-        null);
+    nodeManager.processHeartbeat(datanodeDetails);
 
     // Let us just wait for 2 seconds to prove that HBs are not processed.
     Thread.sleep(2 * 1000);
 
-    assertEquals("Assert new HBs were never processed", 0,
-        nodeManager.getLastHBProcessedCount());
+    //TODO: add assertion
   }
 
   /**
@@ -283,8 +275,7 @@ public class TestNodeManager {
     try (SCMNodeManager nodemanager = createNodeManager(conf)) {
       nodemanager.register(datanodeDetails,
           TestUtils.createNodeReport(reports));
-      List<SCMCommand> command = nodemanager.sendHeartbeat(
-          datanodeDetails, null);
+      List<SCMCommand> command = nodemanager.processHeartbeat(datanodeDetails);
       Assert.assertTrue(nodemanager.getAllNodes().contains(datanodeDetails));
       Assert.assertTrue("On regular HB calls, SCM responses a "
           + "datanode with an empty command list", command.isEmpty());
@@ -302,8 +293,7 @@ public class TestNodeManager {
         GenericTestUtils.waitFor(new Supplier<Boolean>() {
           @Override public Boolean get() {
             List<SCMCommand> command =
-                nodemanager.sendHeartbeat(datanodeDetails,
-                    null);
+                nodemanager.processHeartbeat(datanodeDetails);
             return command.size() == 1 && command.get(0).getType()
                 .equals(SCMCommandProto.Type.reregisterCommand);
           }
@@ -334,11 +324,10 @@ public class TestNodeManager {
       for (int x = 0; x < count; x++) {
         DatanodeDetails datanodeDetails = TestUtils.getDatanodeDetails(
             nodeManager);
-        nodeManager.sendHeartbeat(datanodeDetails,
-            null);
+        nodeManager.processHeartbeat(datanodeDetails);
       }
-      GenericTestUtils.waitFor(() -> nodeManager.waitForHeartbeatProcessed(),
-          100, 4 * 1000);
+      //TODO: wait for heartbeat to be processed
+      Thread.sleep(4 * 1000);
       assertEquals(count, nodeManager.getNodeCount(HEALTHY));
     }
   }
@@ -426,19 +415,18 @@ public class TestNodeManager {
       DatanodeDetails staleNode = TestUtils.getDatanodeDetails(nodeManager);
 
       // Heartbeat once
-      nodeManager.sendHeartbeat(staleNode,
-          null);
+      nodeManager.processHeartbeat(staleNode);
 
       // Heartbeat all other nodes.
       for (DatanodeDetails dn : nodeList) {
-        nodeManager.sendHeartbeat(dn, null);
+        nodeManager.processHeartbeat(dn);
       }
 
       // Wait for 2 seconds .. and heartbeat good nodes again.
       Thread.sleep(2 * 1000);
 
       for (DatanodeDetails dn : nodeList) {
-        nodeManager.sendHeartbeat(dn, null);
+        nodeManager.processHeartbeat(dn);
       }
 
       // Wait for 2 seconds, wait a total of 4 seconds to make sure that the
@@ -455,7 +443,7 @@ public class TestNodeManager {
 
       // heartbeat good nodes again.
       for (DatanodeDetails dn : nodeList) {
-        nodeManager.sendHeartbeat(dn, null);
+        nodeManager.processHeartbeat(dn);
       }
 
       //  6 seconds is the dead window for this test , so we wait a total of
@@ -491,7 +479,7 @@ public class TestNodeManager {
   public void testScmCheckForErrorOnNullDatanodeDetails() throws IOException,
       InterruptedException, TimeoutException {
     try (SCMNodeManager nodeManager = createNodeManager(getConf())) {
-      nodeManager.sendHeartbeat(null, null);
+      nodeManager.processHeartbeat(null);
     } catch (NullPointerException npe) {
       GenericTestUtils.assertExceptionContains("Heartbeat is missing " +
           "DatanodeDetails.", npe);
@@ -568,12 +556,9 @@ public class TestNodeManager {
           TestUtils.getDatanodeDetails(nodeManager);
       DatanodeDetails deadNode =
           TestUtils.getDatanodeDetails(nodeManager);
-      nodeManager.sendHeartbeat(
-          healthyNode, null);
-      nodeManager.sendHeartbeat(
-          staleNode, null);
-      nodeManager.sendHeartbeat(
-          deadNode, null);
+      nodeManager.processHeartbeat(healthyNode);
+      nodeManager.processHeartbeat(staleNode);
+      nodeManager.processHeartbeat(deadNode);
 
       // Sleep so that heartbeat processing thread gets to run.
       Thread.sleep(500);
@@ -599,16 +584,12 @@ public class TestNodeManager {
        * the 3 second windows.
        */
 
-      nodeManager.sendHeartbeat(
-          healthyNode, null);
-      nodeManager.sendHeartbeat(
-          staleNode, null);
-      nodeManager.sendHeartbeat(
-          deadNode, null);
+      nodeManager.processHeartbeat(healthyNode);
+      nodeManager.processHeartbeat(staleNode);
+      nodeManager.processHeartbeat(deadNode);
 
       Thread.sleep(1500);
-      nodeManager.sendHeartbeat(
-          healthyNode, null);
+      nodeManager.processHeartbeat(healthyNode);
       Thread.sleep(2 * 1000);
       assertEquals(1, nodeManager.getNodeCount(HEALTHY));
 
@@ -628,13 +609,10 @@ public class TestNodeManager {
        * staleNode to move to stale state and deadNode to move to dead state.
        */
 
-      nodeManager.sendHeartbeat(
-          healthyNode, null);
-      nodeManager.sendHeartbeat(
-          staleNode, null);
+      nodeManager.processHeartbeat(healthyNode);
+      nodeManager.processHeartbeat(staleNode);
       Thread.sleep(1500);
-      nodeManager.sendHeartbeat(
-          healthyNode, null);
+      nodeManager.processHeartbeat(healthyNode);
       Thread.sleep(2 * 1000);
 
       // 3.5 seconds have elapsed for stale node, so it moves into Stale.
@@ -667,12 +645,9 @@ public class TestNodeManager {
        * Cluster State : let us heartbeat all the nodes and verify that we get
        * back all the nodes in healthy state.
        */
-      nodeManager.sendHeartbeat(
-          healthyNode, null);
-      nodeManager.sendHeartbeat(
-          staleNode, null);
-      nodeManager.sendHeartbeat(
-          deadNode, null);
+      nodeManager.processHeartbeat(healthyNode);
+      nodeManager.processHeartbeat(staleNode);
+      nodeManager.processHeartbeat(deadNode);
       Thread.sleep(500);
       //Assert all nodes are healthy.
       assertEquals(3, nodeManager.getAllNodes().size());
@@ -693,7 +668,7 @@ public class TestNodeManager {
                                 int sleepDuration) throws InterruptedException {
     while (!Thread.currentThread().isInterrupted()) {
       for (DatanodeDetails dn : list) {
-        manager.sendHeartbeat(dn, null);
+        manager.processHeartbeat(dn);
       }
       Thread.sleep(sleepDuration);
     }
@@ -747,7 +722,6 @@ public class TestNodeManager {
     conf.setTimeDuration(OZONE_SCM_HEARTBEAT_INTERVAL, 1, SECONDS);
     conf.setTimeDuration(OZONE_SCM_STALENODE_INTERVAL, 3, SECONDS);
     conf.setTimeDuration(OZONE_SCM_DEADNODE_INTERVAL, 6, SECONDS);
-    conf.setInt(OZONE_SCM_MAX_HB_COUNT_TO_PROCESS, 7000);
 
 
     try (SCMNodeManager nodeManager = createNodeManager(conf)) {
@@ -779,7 +753,7 @@ public class TestNodeManager {
       // No Thread just one time HBs the node manager, so that these will be
       // marked as dead nodes eventually.
       for (DatanodeDetails dn : deadNodeList) {
-        nodeManager.sendHeartbeat(dn, null);
+        nodeManager.processHeartbeat(dn);
       }
 
 
@@ -883,54 +857,6 @@ public class TestNodeManager {
     }
   }
 
-  /**
-   * Asserts that SCM backs off from HB processing instead of going into an
-   * infinite loop if SCM is flooded with too many heartbeats. This many not be
-   * the best thing to do, but SCM tries to protect itself and logs an error
-   * saying that it is getting flooded with heartbeats. In real world this can
-   * lead to many nodes becoming stale or dead due to the fact that SCM is not
-   * able to keep up with heartbeat processing. This test just verifies that SCM
-   * will log that information.
-   * @throws TimeoutException
-   */
-  @Test
-  public void testScmLogsHeartbeatFlooding() throws IOException,
-      InterruptedException, TimeoutException {
-    final int healthyCount = 3000;
-
-    // Make the HB process thread run slower.
-    OzoneConfiguration conf = getConf();
-    conf.setTimeDuration(OZONE_SCM_HEARTBEAT_PROCESS_INTERVAL, 500,
-        TimeUnit.MILLISECONDS);
-    conf.setTimeDuration(OZONE_SCM_HEARTBEAT_INTERVAL, 1, SECONDS);
-    conf.setInt(OZONE_SCM_MAX_HB_COUNT_TO_PROCESS, 500);
-
-    try (SCMNodeManager nodeManager = createNodeManager(conf)) {
-      List<DatanodeDetails> healthyList = createNodeSet(nodeManager,
-          healthyCount);
-      GenericTestUtils.LogCapturer logCapturer =
-          GenericTestUtils.LogCapturer.captureLogs(SCMNodeManager.LOG);
-      Runnable healthyNodeTask = () -> {
-        try {
-          // No wait in the HB sending loop.
-          heartbeatNodeSet(nodeManager, healthyList, 0);
-        } catch (InterruptedException ignored) {
-        }
-      };
-      Thread thread1 = new Thread(healthyNodeTask);
-      thread1.setDaemon(true);
-      thread1.start();
-
-      GenericTestUtils.waitFor(() -> logCapturer.getOutput()
-          .contains("SCM is being "
-              + "flooded by heartbeats. Not able to keep up"
-              + " with the heartbeat counts."),
-          500, 20 * 1000);
-
-      thread1.interrupt();
-      logCapturer.stopCapturing();
-    }
-  }
 
   @Test
   public void testScmEnterAndExitChillMode() throws IOException,
@@ -943,8 +869,7 @@ public class TestNodeManager {
       nodeManager.setMinimumChillModeNodes(10);
       DatanodeDetails datanodeDetails = TestUtils.getDatanodeDetails(
           nodeManager);
-      nodeManager.sendHeartbeat(
-          datanodeDetails, null);
+      nodeManager.processHeartbeat(datanodeDetails);
       String status = nodeManager.getChillModeStatus();
       Assert.assertThat(status, containsString("Still in chill " +
           "mode, waiting on nodes to report in."));
@@ -971,7 +896,7 @@ public class TestNodeManager {
       // Assert that node manager force enter cannot be overridden by nodes HBs.
       for (int x = 0; x < 20; x++) {
         DatanodeDetails datanode = TestUtils.getDatanodeDetails(nodeManager);
-        nodeManager.sendHeartbeat(datanode, null);
+        nodeManager.processHeartbeat(datanode);
       }
 
       Thread.sleep(500);
@@ -995,6 +920,8 @@ public class TestNodeManager {
    * @throws TimeoutException
    */
   @Test
+  @Ignore
+  // TODO: Enable this after we implement NodeReportEvent handler.
   public void testScmStatsFromNodeReport() throws IOException,
       InterruptedException, TimeoutException {
     OzoneConfiguration conf = getConf();
@@ -1015,11 +942,10 @@ public class TestNodeManager {
         List<StorageReportProto> reports = TestUtils
             .createStorageReport(capacity, used, free, storagePath,
                 null, dnId, 1);
-        nodeManager.sendHeartbeat(datanodeDetails,
-            TestUtils.createNodeReport(reports));
+        nodeManager.processHeartbeat(datanodeDetails);
       }
-      GenericTestUtils.waitFor(() -> nodeManager.waitForHeartbeatProcessed(),
-          100, 4 * 1000);
+      //TODO: wait for heartbeat to be processed
+      Thread.sleep(4 * 1000);
       assertEquals(nodeCount, nodeManager.getNodeCount(HEALTHY));
       assertEquals(capacity * nodeCount, (long) nodeManager.getStats()
           .getCapacity().get());
@@ -1038,6 +964,8 @@ public class TestNodeManager {
    * @throws TimeoutException
    */
   @Test
+  @Ignore
+  // TODO: Enable this after we implement NodeReportEvent handler.
   public void testScmNodeReportUpdate() throws IOException,
       InterruptedException, TimeoutException {
     OzoneConfiguration conf = getConf();
@@ -1065,8 +993,7 @@ public class TestNodeManager {
             .createStorageReport(capacity, scmUsed, remaining, storagePath,
                 null, dnId, 1);
 
-        nodeManager.sendHeartbeat(datanodeDetails,
-            TestUtils.createNodeReport(reports));
+        nodeManager.processHeartbeat(datanodeDetails);
         Thread.sleep(100);
       }
 
@@ -1146,8 +1073,7 @@ public class TestNodeManager {
       List<StorageReportProto> reports = TestUtils
           .createStorageReport(capacity, expectedScmUsed, expectedRemaining,
               storagePath, null, dnId, 1);
-      nodeManager.sendHeartbeat(datanodeDetails,
-          TestUtils.createNodeReport(reports));
+      nodeManager.processHeartbeat(datanodeDetails);
 
       // Wait up to 5 seconds so that the dead node becomes healthy
       // Verify usage info should be updated.
@@ -1195,7 +1121,7 @@ public class TestNodeManager {
 
       eq.processAll(1000L);
       List<SCMCommand> command =
-          nodemanager.sendHeartbeat(datanodeDetails, null);
+          nodemanager.processHeartbeat(datanodeDetails);
       Assert.assertEquals(1, command.size());
       Assert
           .assertEquals(command.get(0).getClass(), CloseContainerCommand.class);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/71df8c27/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/ozone/container/testutils/ReplicationNodeManagerMock.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/ozone/container/testutils/ReplicationNodeManagerMock.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/ozone/container/testutils/ReplicationNodeManagerMock.java
index 1a4dcd7..e15e0fc 100644
--- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/ozone/container/testutils/ReplicationNodeManagerMock.java
+++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/ozone/container/testutils/ReplicationNodeManagerMock.java
@@ -21,7 +21,7 @@ import org.apache.hadoop.hdds.scm.container.placement.metrics.SCMNodeMetric;
 import org.apache.hadoop.hdds.scm.container.placement.metrics.SCMNodeStat;
 import org.apache.hadoop.hdds.scm.node.CommandQueue;
 import org.apache.hadoop.hdds.scm.node.NodeManager;
-import org.apache.hadoop.hdfs.protocol.UnregisteredNodeException;
+import org.apache.hadoop.hdds.scm.node.states.NodeNotFoundException;
 import org.apache.hadoop.hdds.protocol.DatanodeDetails;
 import org.apache.hadoop.hdds.protocol.proto.HddsProtos.NodeState;
 import org.apache.hadoop.hdds.protocol.proto
@@ -31,7 +31,6 @@ import org.apache.hadoop.hdds.protocol.proto
 import org.apache.hadoop.ozone.protocol.VersionResponse;
 import org.apache.hadoop.ozone.protocol.commands.RegisteredCommand;
 import org.apache.hadoop.ozone.protocol.commands.SCMCommand;
-import org.mockito.Mockito;
 
 import java.io.IOException;
 import java.util.List;
@@ -90,11 +89,11 @@ public class ReplicationNodeManagerMock implements NodeManager {
    * Removes a data node from the management of this Node Manager.
    *
    * @param node - DataNode.
-   * @throws UnregisteredNodeException
+   * @throws NodeNotFoundException
    */
   @Override
   public void removeNode(DatanodeDetails node)
-      throws UnregisteredNodeException {
+      throws NodeNotFoundException {
     nodeStateMap.remove(node);
 
   }
@@ -202,16 +201,6 @@ public class ReplicationNodeManagerMock implements NodeManager {
 
 
   /**
-   * Wait for the heartbeat is processed by NodeManager.
-   *
-   * @return true if heartbeat has been processed.
-   */
-  @Override
-  public boolean waitForHeartbeatProcessed() {
-    return false;
-  }
-
-  /**
    * Returns the node state of a specific node.
    *
    * @param dd - DatanodeDetails
@@ -241,22 +230,6 @@ public class ReplicationNodeManagerMock implements NodeManager {
   }
 
   /**
-   * When an object implementing interface <code>Runnable</code> is used
-   * to create a thread, starting the thread causes the object's
-   * <code>run</code> method to be called in that separately executing
-   * thread.
-   * <p>
-   * The general contract of the method <code>run</code> is that it may
-   * take any action whatsoever.
-   *
-   * @see Thread#run()
-   */
-  @Override
-  public void run() {
-
-  }
-
-  /**
    * Gets the version info from SCM.
    *
    * @param versionRequest - version Request.
@@ -285,12 +258,10 @@ public class ReplicationNodeManagerMock implements NodeManager {
    * Send heartbeat to indicate the datanode is alive and doing well.
    *
    * @param dd - Datanode Details.
-   * @param nodeReport - node report.
    * @return SCMheartbeat response list
    */
   @Override
-  public List<SCMCommand> sendHeartbeat(DatanodeDetails dd,
-      NodeReportProto nodeReport) {
+  public List<SCMCommand> processHeartbeat(DatanodeDetails dd) {
     return null;
   }
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/71df8c27/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestStorageContainerManager.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestStorageContainerManager.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestStorageContainerManager.java
index d07097c..dd1a8de 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestStorageContainerManager.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestStorageContainerManager.java
@@ -303,8 +303,8 @@ public class TestStorageContainerManager {
     GenericTestUtils.waitFor(() -> {
       NodeManager nodeManager = cluster.getStorageContainerManager()
           .getScmNodeManager();
-      List<SCMCommand> commands = nodeManager.sendHeartbeat(
-          nodeManager.getNodes(NodeState.HEALTHY).get(0), null);
+      List<SCMCommand> commands = nodeManager.processHeartbeat(
+          nodeManager.getNodes(NodeState.HEALTHY).get(0));
 
       if (commands != null) {
         for (SCMCommand cmd : commands) {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/71df8c27/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/node/TestQueryNode.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/node/TestQueryNode.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/node/TestQueryNode.java
index b999c92..22528e4 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/node/TestQueryNode.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/node/TestQueryNode.java
@@ -26,7 +26,7 @@ import org.junit.After;
 import org.junit.Before;
 import org.junit.Test;
 
-import java.util.EnumSet;
+import java.util.List;
 import java.util.concurrent.TimeUnit;
 
 import static java.util.concurrent.TimeUnit.SECONDS;
@@ -83,11 +83,10 @@ public class TestQueryNode {
 
   @Test
   public void testHealthyNodesCount() throws Exception {
-    HddsProtos.NodePool pool = scmClient.queryNode(
-        EnumSet.of(HEALTHY),
+    List<HddsProtos.Node> nodes = scmClient.queryNode(HEALTHY,
         HddsProtos.QueryScope.CLUSTER, "");
     assertEquals("Expected  live nodes", numOfDatanodes,
-        pool.getNodesCount());
+        nodes.size());
   }
 
   @Test(timeout = 10 * 1000L)
@@ -99,8 +98,8 @@ public class TestQueryNode {
             cluster.getStorageContainerManager().getNodeCount(STALE) == 2,
         100, 4 * 1000);
 
-    int nodeCount = scmClient.queryNode(EnumSet.of(STALE),
-        HddsProtos.QueryScope.CLUSTER, "").getNodesCount();
+    int nodeCount = scmClient.queryNode(STALE,
+        HddsProtos.QueryScope.CLUSTER, "").size();
     assertEquals("Mismatch of expected nodes count", 2, nodeCount);
 
     GenericTestUtils.waitFor(() ->
@@ -108,13 +107,13 @@ public class TestQueryNode {
         100, 4 * 1000);
 
     // Assert that we don't find any stale nodes.
-    nodeCount = scmClient.queryNode(EnumSet.of(STALE),
-        HddsProtos.QueryScope.CLUSTER, "").getNodesCount();
+    nodeCount = scmClient.queryNode(STALE,
+        HddsProtos.QueryScope.CLUSTER, "").size();
     assertEquals("Mismatch of expected nodes count", 0, nodeCount);
 
     // Assert that we find the expected number of dead nodes.
-    nodeCount = scmClient.queryNode(EnumSet.of(DEAD),
-        HddsProtos.QueryScope.CLUSTER, "").getNodesCount();
+    nodeCount = scmClient.queryNode(DEAD,
+        HddsProtos.QueryScope.CLUSTER, "").size();
     assertEquals("Mismatch of expected nodes count", 2, nodeCount);
   }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/71df8c27/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/ksm/KeySpaceManager.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/ksm/KeySpaceManager.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/ksm/KeySpaceManager.java
index dc8fc91..5fa313b 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/ksm/KeySpaceManager.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/ksm/KeySpaceManager.java
@@ -78,7 +78,6 @@ import java.io.IOException;
 import java.io.PrintStream;
 import java.net.InetSocketAddress;
 import java.util.ArrayList;
-import java.util.EnumSet;
 import java.util.HashMap;
 import java.util.List;
 import java.util.Map;
@@ -884,9 +883,8 @@ public final class KeySpaceManager extends ServiceRuntimeInfoImpl
             .setValue(scmAddr.getPort()).build());
     services.add(scmServiceInfoBuilder.build());
 
-    List<HddsProtos.Node> nodes = scmContainerClient.queryNode(
-        EnumSet.of(HEALTHY), HddsProtos.QueryScope.CLUSTER, "")
-        .getNodesList();
+    List<HddsProtos.Node> nodes = scmContainerClient.queryNode(HEALTHY,
+        HddsProtos.QueryScope.CLUSTER, "");
 
     for (HddsProtos.Node node : nodes) {
       HddsProtos.DatanodeDetailsProto datanode = node.getNodeID();


---------------------------------------------------------------------
To unsubscribe, e-mail: common-commits-unsubscribe@hadoop.apache.org
For additional commands, e-mail: common-commits-help@hadoop.apache.org


[29/50] [abbrv] hadoop git commit: HDDS-167. Rename KeySpaceManager to OzoneManager. Contributed by Arpit Agarwal.

Posted by vi...@apache.org.
http://git-wip-us.apache.org/repos/asf/hadoop/blob/061b1685/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/VolumeManagerImpl.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/VolumeManagerImpl.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/VolumeManagerImpl.java
new file mode 100644
index 0000000..e50145d
--- /dev/null
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/VolumeManagerImpl.java
@@ -0,0 +1,390 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with this
+ * work for additional information regarding copyright ownership.  The ASF
+ * licenses this file to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations under
+ * the License.
+ */
+package org.apache.hadoop.ozone.om;
+
+import com.google.common.base.Preconditions;
+import org.apache.hadoop.ozone.om.helpers.OmVolumeArgs;
+import org.apache.hadoop.hdds.conf.OzoneConfiguration;
+import org.apache.hadoop.ozone.om.exceptions.OMException;
+import org.apache.hadoop.ozone.protocol.proto
+    .OzoneManagerProtocolProtos.OzoneAclInfo;
+import org.apache.hadoop.ozone.protocol.proto
+    .OzoneManagerProtocolProtos.VolumeList;
+import org.apache.hadoop.ozone.protocol.proto
+    .OzoneManagerProtocolProtos.VolumeInfo;
+import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
+import org.apache.hadoop.util.Time;
+import org.apache.hadoop.utils.BatchOperation;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.io.IOException;
+import java.util.LinkedList;
+import java.util.List;
+import java.util.Map;
+
+import static org.apache.hadoop.ozone.om.OMConfigKeys
+    .OZONE_OM_USER_MAX_VOLUME_DEFAULT;
+import static org.apache.hadoop.ozone.om.OMConfigKeys
+    .OZONE_OM_USER_MAX_VOLUME;
+import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes;
+
+/**
+ * OM volume management code.
+ */
+public class VolumeManagerImpl implements VolumeManager {
+  private static final Logger LOG =
+      LoggerFactory.getLogger(VolumeManagerImpl.class);
+
+  private final OMMetadataManager metadataManager;
+  private final int maxUserVolumeCount;
+
+  /**
+   * Constructor.
+   * @param conf - Ozone configuration.
+   * @throws IOException
+   */
+  public VolumeManagerImpl(OMMetadataManager metadataManager,
+      OzoneConfiguration conf) throws IOException {
+    this.metadataManager = metadataManager;
+    this.maxUserVolumeCount = conf.getInt(OZONE_OM_USER_MAX_VOLUME,
+        OZONE_OM_USER_MAX_VOLUME_DEFAULT);
+  }
+
+  // Helpers to add and delete volume from user list
+  private void addVolumeToOwnerList(String volume, String owner,
+      BatchOperation batchOperation) throws IOException {
+    // Get the volume list
+    byte[] dbUserKey = metadataManager.getUserKey(owner);
+    byte[] volumeList  = metadataManager.get(dbUserKey);
+    List<String> prevVolList = new LinkedList<>();
+    if (volumeList != null) {
+      VolumeList vlist = VolumeList.parseFrom(volumeList);
+      prevVolList.addAll(vlist.getVolumeNamesList());
+    }
+
+    // Check the volume count
+    if (prevVolList.size() >= maxUserVolumeCount) {
+      LOG.debug("Too many volumes for user:{}", owner);
+      throw new OMException(ResultCodes.FAILED_TOO_MANY_USER_VOLUMES);
+    }
+
+    // Add the new volume to the list
+    prevVolList.add(volume);
+    VolumeList newVolList = VolumeList.newBuilder()
+        .addAllVolumeNames(prevVolList).build();
+    batchOperation.put(dbUserKey, newVolList.toByteArray());
+  }
+
+  private void delVolumeFromOwnerList(String volume, String owner,
+                                      BatchOperation batchOperation)
+      throws IOException {
+    // Get the volume list
+    byte[] dbUserKey = metadataManager.getUserKey(owner);
+    byte[] volumeList  = metadataManager.get(dbUserKey);
+    List<String> prevVolList = new LinkedList<>();
+    if (volumeList != null) {
+      VolumeList vlist = VolumeList.parseFrom(volumeList);
+      prevVolList.addAll(vlist.getVolumeNamesList());
+    } else {
+      LOG.debug("volume:{} not found for user:{}");
+      throw new OMException(ResultCodes.FAILED_USER_NOT_FOUND);
+    }
+
+    // Remove the volume from the list
+    prevVolList.remove(volume);
+    if (prevVolList.size() == 0) {
+      batchOperation.delete(dbUserKey);
+    } else {
+      VolumeList newVolList = VolumeList.newBuilder()
+          .addAllVolumeNames(prevVolList).build();
+      batchOperation.put(dbUserKey, newVolList.toByteArray());
+    }
+  }
+
+  /**
+   * Creates a volume.
+   * @param args - OmVolumeArgs.
+   */
+  @Override
+  public void createVolume(OmVolumeArgs args) throws IOException {
+    Preconditions.checkNotNull(args);
+    metadataManager.writeLock().lock();
+    try {
+      byte[] dbVolumeKey = metadataManager.getVolumeKey(args.getVolume());
+      byte[] volumeInfo = metadataManager.get(dbVolumeKey);
+
+      // Check of the volume already exists
+      if (volumeInfo != null) {
+        LOG.debug("volume:{} already exists", args.getVolume());
+        throw new OMException(ResultCodes.FAILED_VOLUME_ALREADY_EXISTS);
+      }
+
+      BatchOperation batch = new BatchOperation();
+      // Write the vol info
+      List<HddsProtos.KeyValue> metadataList = new LinkedList<>();
+      for (Map.Entry<String, String> entry : args.getKeyValueMap().entrySet()) {
+        metadataList.add(HddsProtos.KeyValue.newBuilder()
+            .setKey(entry.getKey()).setValue(entry.getValue()).build());
+      }
+      List<OzoneAclInfo> aclList = args.getAclMap().ozoneAclGetProtobuf();
+
+      VolumeInfo newVolumeInfo = VolumeInfo.newBuilder()
+          .setAdminName(args.getAdminName())
+          .setOwnerName(args.getOwnerName())
+          .setVolume(args.getVolume())
+          .setQuotaInBytes(args.getQuotaInBytes())
+          .addAllMetadata(metadataList)
+          .addAllVolumeAcls(aclList)
+          .setCreationTime(Time.now())
+          .build();
+      batch.put(dbVolumeKey, newVolumeInfo.toByteArray());
+
+      // Add volume to user list
+      addVolumeToOwnerList(args.getVolume(), args.getOwnerName(), batch);
+      metadataManager.writeBatch(batch);
+      LOG.debug("created volume:{} user:{}", args.getVolume(),
+          args.getOwnerName());
+    } catch (IOException ex) {
+      if (!(ex instanceof OMException)) {
+        LOG.error("Volume creation failed for user:{} volume:{}",
+            args.getOwnerName(), args.getVolume(), ex);
+      }
+      throw ex;
+    } finally {
+      metadataManager.writeLock().unlock();
+    }
+  }
+
+  /**
+   * Changes the owner of a volume.
+   *
+   * @param volume - Name of the volume.
+   * @param owner - Name of the owner.
+   * @throws IOException
+   */
+  @Override
+  public void setOwner(String volume, String owner) throws IOException {
+    Preconditions.checkNotNull(volume);
+    Preconditions.checkNotNull(owner);
+    metadataManager.writeLock().lock();
+    try {
+      byte[] dbVolumeKey = metadataManager.getVolumeKey(volume);
+      byte[] volInfo = metadataManager.get(dbVolumeKey);
+      if (volInfo == null) {
+        LOG.debug("Changing volume ownership failed for user:{} volume:{}",
+            owner, volume);
+        throw  new OMException(ResultCodes.FAILED_VOLUME_NOT_FOUND);
+      }
+
+      VolumeInfo volumeInfo = VolumeInfo.parseFrom(volInfo);
+      OmVolumeArgs volumeArgs = OmVolumeArgs.getFromProtobuf(volumeInfo);
+      Preconditions.checkState(volume.equals(volumeInfo.getVolume()));
+
+      BatchOperation batch = new BatchOperation();
+      delVolumeFromOwnerList(volume, volumeArgs.getOwnerName(), batch);
+      addVolumeToOwnerList(volume, owner, batch);
+
+      OmVolumeArgs newVolumeArgs =
+          OmVolumeArgs.newBuilder().setVolume(volumeArgs.getVolume())
+              .setAdminName(volumeArgs.getAdminName())
+              .setOwnerName(owner)
+              .setQuotaInBytes(volumeArgs.getQuotaInBytes())
+              .setCreationTime(volumeArgs.getCreationTime())
+              .build();
+
+      VolumeInfo newVolumeInfo = newVolumeArgs.getProtobuf();
+      batch.put(dbVolumeKey, newVolumeInfo.toByteArray());
+
+      metadataManager.writeBatch(batch);
+    } catch (IOException ex) {
+      if (!(ex instanceof OMException)) {
+        LOG.error("Changing volume ownership failed for user:{} volume:{}",
+            owner, volume, ex);
+      }
+      throw ex;
+    } finally {
+      metadataManager.writeLock().unlock();
+    }
+  }
+
+  /**
+   * Changes the Quota on a volume.
+   *
+   * @param volume - Name of the volume.
+   * @param quota - Quota in bytes.
+   * @throws IOException
+   */
+  public void setQuota(String volume, long quota) throws IOException {
+    Preconditions.checkNotNull(volume);
+    metadataManager.writeLock().lock();
+    try {
+      byte[] dbVolumeKey = metadataManager.getVolumeKey(volume);
+      byte[] volInfo = metadataManager.get(dbVolumeKey);
+      if (volInfo == null) {
+        LOG.debug("volume:{} does not exist", volume);
+        throw new OMException(ResultCodes.FAILED_VOLUME_NOT_FOUND);
+      }
+
+      VolumeInfo volumeInfo = VolumeInfo.parseFrom(volInfo);
+      OmVolumeArgs volumeArgs = OmVolumeArgs.getFromProtobuf(volumeInfo);
+      Preconditions.checkState(volume.equals(volumeInfo.getVolume()));
+
+      OmVolumeArgs newVolumeArgs =
+          OmVolumeArgs.newBuilder()
+              .setVolume(volumeArgs.getVolume())
+              .setAdminName(volumeArgs.getAdminName())
+              .setOwnerName(volumeArgs.getOwnerName())
+              .setQuotaInBytes(quota)
+              .setCreationTime(volumeArgs.getCreationTime()).build();
+
+      VolumeInfo newVolumeInfo = newVolumeArgs.getProtobuf();
+      metadataManager.put(dbVolumeKey, newVolumeInfo.toByteArray());
+    } catch (IOException ex) {
+      if (!(ex instanceof OMException)) {
+        LOG.error("Changing volume quota failed for volume:{} quota:{}", volume,
+            quota, ex);
+      }
+      throw ex;
+    } finally {
+      metadataManager.writeLock().unlock();
+    }
+  }
+
+  /**
+   * Gets the volume information.
+   * @param volume - Volume name.
+   * @return VolumeArgs or exception is thrown.
+   * @throws IOException
+   */
+  public OmVolumeArgs getVolumeInfo(String volume) throws IOException {
+    Preconditions.checkNotNull(volume);
+    metadataManager.readLock().lock();
+    try {
+      byte[] dbVolumeKey = metadataManager.getVolumeKey(volume);
+      byte[] volInfo = metadataManager.get(dbVolumeKey);
+      if (volInfo == null) {
+        LOG.debug("volume:{} does not exist", volume);
+        throw new OMException(ResultCodes.FAILED_VOLUME_NOT_FOUND);
+      }
+
+      VolumeInfo volumeInfo = VolumeInfo.parseFrom(volInfo);
+      OmVolumeArgs volumeArgs = OmVolumeArgs.getFromProtobuf(volumeInfo);
+      Preconditions.checkState(volume.equals(volumeInfo.getVolume()));
+      return volumeArgs;
+    } catch (IOException ex) {
+      if (!(ex instanceof OMException)) {
+        LOG.warn("Info volume failed for volume:{}", volume, ex);
+      }
+      throw ex;
+    } finally {
+      metadataManager.readLock().unlock();
+    }
+  }
+
+  /**
+   * Deletes an existing empty volume.
+   *
+   * @param volume - Name of the volume.
+   * @throws IOException
+   */
+  @Override
+  public void deleteVolume(String volume) throws IOException {
+    Preconditions.checkNotNull(volume);
+    metadataManager.writeLock().lock();
+    try {
+      BatchOperation batch = new BatchOperation();
+      byte[] dbVolumeKey = metadataManager.getVolumeKey(volume);
+      byte[] volInfo = metadataManager.get(dbVolumeKey);
+      if (volInfo == null) {
+        LOG.debug("volume:{} does not exist", volume);
+        throw new OMException(ResultCodes.FAILED_VOLUME_NOT_FOUND);
+      }
+
+      if (!metadataManager.isVolumeEmpty(volume)) {
+        LOG.debug("volume:{} is not empty", volume);
+        throw new OMException(ResultCodes.FAILED_VOLUME_NOT_EMPTY);
+      }
+
+      VolumeInfo volumeInfo = VolumeInfo.parseFrom(volInfo);
+      Preconditions.checkState(volume.equals(volumeInfo.getVolume()));
+      // delete the volume from the owner list
+      // as well as delete the volume entry
+      delVolumeFromOwnerList(volume, volumeInfo.getOwnerName(), batch);
+      batch.delete(dbVolumeKey);
+      metadataManager.writeBatch(batch);
+    } catch (IOException ex) {
+      if (!(ex instanceof OMException)) {
+        LOG.error("Delete volume failed for volume:{}", volume, ex);
+      }
+      throw ex;
+    } finally {
+      metadataManager.writeLock().unlock();
+    }
+  }
+
+  /**
+   * Checks if the specified user with a role can access this volume.
+   *
+   * @param volume - volume
+   * @param userAcl - user acl which needs to be checked for access
+   * @return true if the user has access for the volume, false otherwise
+   * @throws IOException
+   */
+  public boolean checkVolumeAccess(String volume, OzoneAclInfo userAcl)
+      throws IOException {
+    Preconditions.checkNotNull(volume);
+    Preconditions.checkNotNull(userAcl);
+    metadataManager.readLock().lock();
+    try {
+      byte[] dbVolumeKey = metadataManager.getVolumeKey(volume);
+      byte[] volInfo = metadataManager.get(dbVolumeKey);
+      if (volInfo == null) {
+        LOG.debug("volume:{} does not exist", volume);
+        throw  new OMException(ResultCodes.FAILED_VOLUME_NOT_FOUND);
+      }
+
+      VolumeInfo volumeInfo = VolumeInfo.parseFrom(volInfo);
+      OmVolumeArgs volumeArgs = OmVolumeArgs.getFromProtobuf(volumeInfo);
+      Preconditions.checkState(volume.equals(volumeInfo.getVolume()));
+      return volumeArgs.getAclMap().hasAccess(userAcl);
+    } catch (IOException ex) {
+      if (!(ex instanceof OMException)) {
+        LOG.error("Check volume access failed for volume:{} user:{} rights:{}",
+            volume, userAcl.getName(), userAcl.getRights(), ex);
+      }
+      throw ex;
+    } finally {
+      metadataManager.readLock().unlock();
+    }
+  }
+
+  /**
+   * {@inheritDoc}
+   */
+  @Override
+  public List<OmVolumeArgs> listVolumes(String userName,
+                                        String prefix, String startKey, int maxKeys) throws IOException {
+    metadataManager.readLock().lock();
+    try {
+      return metadataManager.listVolumes(
+          userName, prefix, startKey, maxKeys);
+    } finally {
+      metadataManager.readLock().unlock();
+    }
+  }
+}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/061b1685/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/exceptions/OMException.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/exceptions/OMException.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/exceptions/OMException.java
new file mode 100644
index 0000000..55cef97
--- /dev/null
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/exceptions/OMException.java
@@ -0,0 +1,118 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.ozone.om.exceptions;
+
+import java.io.IOException;
+
+/**
+ * Exception thrown by Ozone Manager.
+ */
+public class OMException extends IOException {
+  private final OMException.ResultCodes result;
+
+  /**
+   * Constructs an {@code IOException} with {@code null}
+   * as its error detail message.
+   */
+  public OMException(OMException.ResultCodes result) {
+    this.result = result;
+  }
+
+  /**
+   * Constructs an {@code IOException} with the specified detail message.
+   *
+   * @param message The detail message (which is saved for later retrieval by
+   * the
+   * {@link #getMessage()} method)
+   */
+  public OMException(String message, OMException.ResultCodes result) {
+    super(message);
+    this.result = result;
+  }
+
+  /**
+   * Constructs an {@code IOException} with the specified detail message
+   * and cause.
+   * <p>
+   * <p> Note that the detail message associated with {@code cause} is
+   * <i>not</i> automatically incorporated into this exception's detail
+   * message.
+   *
+   * @param message The detail message (which is saved for later retrieval by
+   * the
+   * {@link #getMessage()} method)
+   * @param cause The cause (which is saved for later retrieval by the {@link
+   * #getCause()} method).  (A null value is permitted, and indicates that the
+   * cause is nonexistent or unknown.)
+   * @since 1.6
+   */
+  public OMException(String message, Throwable cause,
+                     OMException.ResultCodes result) {
+    super(message, cause);
+    this.result = result;
+  }
+
+  /**
+   * Constructs an {@code IOException} with the specified cause and a
+   * detail message of {@code (cause==null ? null : cause.toString())}
+   * (which typically contains the class and detail message of {@code cause}).
+   * This constructor is useful for IO exceptions that are little more
+   * than wrappers for other throwables.
+   *
+   * @param cause The cause (which is saved for later retrieval by the {@link
+   * #getCause()} method).  (A null value is permitted, and indicates that the
+   * cause is nonexistent or unknown.)
+   * @since 1.6
+   */
+  public OMException(Throwable cause, OMException.ResultCodes result) {
+    super(cause);
+    this.result = result;
+  }
+
+  /**
+   * Returns resultCode.
+   * @return ResultCode
+   */
+  public OMException.ResultCodes getResult() {
+    return result;
+  }
+
+  /**
+   * Error codes to make it easy to decode these exceptions.
+   */
+  public enum ResultCodes {
+    FAILED_TOO_MANY_USER_VOLUMES,
+    FAILED_VOLUME_ALREADY_EXISTS,
+    FAILED_VOLUME_NOT_FOUND,
+    FAILED_VOLUME_NOT_EMPTY,
+    FAILED_USER_NOT_FOUND,
+    FAILED_BUCKET_ALREADY_EXISTS,
+    FAILED_BUCKET_NOT_FOUND,
+    FAILED_BUCKET_NOT_EMPTY,
+    FAILED_KEY_ALREADY_EXISTS,
+    FAILED_KEY_NOT_FOUND,
+    FAILED_KEY_ALLOCATION,
+    FAILED_KEY_DELETION,
+    FAILED_KEY_RENAME,
+    FAILED_INVALID_KEY_NAME,
+    FAILED_METADATA_ERROR,
+    FAILED_INTERNAL_ERROR,
+    OM_NOT_INITIALIZED,
+    SCM_VERSION_MISMATCH_ERROR
+  }
+}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/061b1685/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/exceptions/package-info.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/exceptions/package-info.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/exceptions/package-info.java
new file mode 100644
index 0000000..5091545
--- /dev/null
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/exceptions/package-info.java
@@ -0,0 +1,19 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with this
+ * work for additional information regarding copyright ownership.  The ASF
+ * licenses this file to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations under
+ * the License.
+ */
+
+package org.apache.hadoop.ozone.om.exceptions;
+// Exception thrown by OM.

http://git-wip-us.apache.org/repos/asf/hadoop/blob/061b1685/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/package-info.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/package-info.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/package-info.java
new file mode 100644
index 0000000..7904d5d
--- /dev/null
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/package-info.java
@@ -0,0 +1,21 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with this
+ * work for additional information regarding copyright ownership.  The ASF
+ * licenses this file to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations under
+ * the License.
+ */
+
+package org.apache.hadoop.ozone.om;
+/*
+ This package contains the Ozone Manager classes.
+ */
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hadoop/blob/061b1685/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/protocolPB/KeySpaceManagerProtocolServerSideTranslatorPB.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/protocolPB/KeySpaceManagerProtocolServerSideTranslatorPB.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/protocolPB/KeySpaceManagerProtocolServerSideTranslatorPB.java
deleted file mode 100644
index 38e7797..0000000
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/protocolPB/KeySpaceManagerProtocolServerSideTranslatorPB.java
+++ /dev/null
@@ -1,559 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with this
- * work for additional information regarding copyright ownership.  The ASF
- * licenses this file to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations under
- * the License.
- */
-package org.apache.hadoop.ozone.protocolPB;
-
-import com.google.common.collect.Lists;
-import com.google.protobuf.RpcController;
-import com.google.protobuf.ServiceException;
-import org.apache.hadoop.ozone.ksm.helpers.KsmBucketArgs;
-import org.apache.hadoop.ozone.ksm.helpers.KsmBucketInfo;
-import org.apache.hadoop.ozone.ksm.helpers.KsmKeyArgs;
-import org.apache.hadoop.ozone.ksm.helpers.KsmKeyInfo;
-import org.apache.hadoop.ozone.ksm.helpers.KsmKeyLocationInfo;
-import org.apache.hadoop.ozone.ksm.helpers.KsmVolumeArgs;
-import org.apache.hadoop.ozone.ksm.helpers.OpenKeySession;
-import org.apache.hadoop.ozone.ksm.helpers.ServiceInfo;
-import org.apache.hadoop.ozone.ksm.protocol.KeySpaceManagerProtocol;
-import org.apache.hadoop.ozone.ksm.protocolPB.KeySpaceManagerProtocolPB;
-import org.apache.hadoop.ozone.ksm.exceptions.KSMException;
-import org.apache.hadoop.ozone.protocol.proto
-    .KeySpaceManagerProtocolProtos.AllocateBlockRequest;
-import org.apache.hadoop.ozone.protocol.proto
-    .KeySpaceManagerProtocolProtos.AllocateBlockResponse;
-import org.apache.hadoop.ozone.protocol.proto
-    .KeySpaceManagerProtocolProtos.CommitKeyRequest;
-import org.apache.hadoop.ozone.protocol.proto
-    .KeySpaceManagerProtocolProtos.CommitKeyResponse;
-import org.apache.hadoop.ozone.protocol.proto
-    .KeySpaceManagerProtocolProtos.CreateBucketRequest;
-import org.apache.hadoop.ozone.protocol.proto
-    .KeySpaceManagerProtocolProtos.CreateBucketResponse;
-import org.apache.hadoop.ozone.protocol.proto
-    .KeySpaceManagerProtocolProtos.InfoBucketRequest;
-import org.apache.hadoop.ozone.protocol.proto
-    .KeySpaceManagerProtocolProtos.InfoBucketResponse;
-import org.apache.hadoop.ozone.protocol.proto
-    .KeySpaceManagerProtocolProtos.SetBucketPropertyRequest;
-import org.apache.hadoop.ozone.protocol.proto
-    .KeySpaceManagerProtocolProtos.SetBucketPropertyResponse;
-import org.apache.hadoop.ozone.protocol.proto
-    .KeySpaceManagerProtocolProtos.DeleteBucketRequest;
-import org.apache.hadoop.ozone.protocol.proto
-    .KeySpaceManagerProtocolProtos.DeleteBucketResponse;
-import org.apache.hadoop.ozone.protocol.proto
-    .KeySpaceManagerProtocolProtos.CreateVolumeRequest;
-import org.apache.hadoop.ozone.protocol.proto
-    .KeySpaceManagerProtocolProtos.CreateVolumeResponse;
-import org.apache.hadoop.ozone.protocol.proto
-    .KeySpaceManagerProtocolProtos.LocateKeyRequest;
-import org.apache.hadoop.ozone.protocol.proto
-    .KeySpaceManagerProtocolProtos.LocateKeyResponse;
-import org.apache.hadoop.ozone.protocol.proto
-    .KeySpaceManagerProtocolProtos.RenameKeyRequest;
-import org.apache.hadoop.ozone.protocol.proto
-    .KeySpaceManagerProtocolProtos.RenameKeyResponse;
-import org.apache.hadoop.ozone.protocol.proto
-    .KeySpaceManagerProtocolProtos.KeyArgs;
-import org.apache.hadoop.ozone.protocol.proto
-    .KeySpaceManagerProtocolProtos.SetVolumePropertyRequest;
-import org.apache.hadoop.ozone.protocol.proto
-    .KeySpaceManagerProtocolProtos.SetVolumePropertyResponse;
-import org.apache.hadoop.ozone.protocol.proto
-    .KeySpaceManagerProtocolProtos.CheckVolumeAccessRequest;
-import org.apache.hadoop.ozone.protocol.proto
-    .KeySpaceManagerProtocolProtos.CheckVolumeAccessResponse;
-import org.apache.hadoop.ozone.protocol.proto
-    .KeySpaceManagerProtocolProtos.InfoVolumeRequest;
-import org.apache.hadoop.ozone.protocol.proto
-    .KeySpaceManagerProtocolProtos.InfoVolumeResponse;
-import org.apache.hadoop.ozone.protocol.proto
-    .KeySpaceManagerProtocolProtos.DeleteVolumeRequest;
-import org.apache.hadoop.ozone.protocol.proto
-    .KeySpaceManagerProtocolProtos.DeleteVolumeResponse;
-import org.apache.hadoop.ozone.protocol.proto
-    .KeySpaceManagerProtocolProtos.ListVolumeRequest;
-import org.apache.hadoop.ozone.protocol.proto
-    .KeySpaceManagerProtocolProtos.ListVolumeResponse;
-import org.apache.hadoop.ozone.protocol.proto
-    .KeySpaceManagerProtocolProtos.ListBucketsRequest;
-import org.apache.hadoop.ozone.protocol.proto
-    .KeySpaceManagerProtocolProtos.ListBucketsResponse;
-import org.apache.hadoop.ozone.protocol.proto
-    .KeySpaceManagerProtocolProtos.ListKeysRequest;
-import org.apache.hadoop.ozone.protocol.proto
-    .KeySpaceManagerProtocolProtos.ListKeysResponse;
-import org.apache.hadoop.ozone.protocol.proto
-    .KeySpaceManagerProtocolProtos.Status;
-import org.apache.hadoop.ozone.protocol.proto
-    .KeySpaceManagerProtocolProtos.ServiceListRequest;
-import org.apache.hadoop.ozone.protocol.proto
-    .KeySpaceManagerProtocolProtos.ServiceListResponse;
-import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import java.io.IOException;
-import java.util.List;
-import java.util.stream.Collectors;
-
-/**
- * This class is the server-side translator that forwards requests received on
- * {@link org.apache.hadoop.ozone.ksm.protocolPB.KeySpaceManagerProtocolPB}
- * to the KeySpaceManagerService server implementation.
- */
-public class KeySpaceManagerProtocolServerSideTranslatorPB implements
-    KeySpaceManagerProtocolPB {
-  private static final Logger LOG = LoggerFactory
-      .getLogger(KeySpaceManagerProtocolServerSideTranslatorPB.class);
-  private final KeySpaceManagerProtocol impl;
-
-  /**
-   * Constructs an instance of the server handler.
-   *
-   * @param impl KeySpaceManagerProtocolPB
-   */
-  public KeySpaceManagerProtocolServerSideTranslatorPB(
-      KeySpaceManagerProtocol impl) {
-    this.impl = impl;
-  }
-
-  // Convert and exception to corresponding status code
-  private Status exceptionToResponseStatus(IOException ex) {
-    if (ex instanceof KSMException) {
-      KSMException ksmException = (KSMException)ex;
-      switch (ksmException.getResult()) {
-      case FAILED_VOLUME_ALREADY_EXISTS:
-        return Status.VOLUME_ALREADY_EXISTS;
-      case FAILED_TOO_MANY_USER_VOLUMES:
-        return Status.USER_TOO_MANY_VOLUMES;
-      case FAILED_VOLUME_NOT_FOUND:
-        return Status.VOLUME_NOT_FOUND;
-      case FAILED_VOLUME_NOT_EMPTY:
-        return Status.VOLUME_NOT_EMPTY;
-      case FAILED_USER_NOT_FOUND:
-        return Status.USER_NOT_FOUND;
-      case FAILED_BUCKET_ALREADY_EXISTS:
-        return Status.BUCKET_ALREADY_EXISTS;
-      case FAILED_BUCKET_NOT_FOUND:
-        return Status.BUCKET_NOT_FOUND;
-      case FAILED_BUCKET_NOT_EMPTY:
-        return Status.BUCKET_NOT_EMPTY;
-      case FAILED_KEY_ALREADY_EXISTS:
-        return Status.KEY_ALREADY_EXISTS;
-      case FAILED_KEY_NOT_FOUND:
-        return Status.KEY_NOT_FOUND;
-      case FAILED_INVALID_KEY_NAME:
-        return Status.INVALID_KEY_NAME;
-      default:
-        return Status.INTERNAL_ERROR;
-      }
-    } else {
-      if (LOG.isDebugEnabled()) {
-        LOG.debug("Unknown error occurs", ex);
-      }
-      return Status.INTERNAL_ERROR;
-    }
-  }
-
-  @Override
-  public CreateVolumeResponse createVolume(
-      RpcController controller, CreateVolumeRequest request)
-      throws ServiceException {
-    CreateVolumeResponse.Builder resp = CreateVolumeResponse.newBuilder();
-    resp.setStatus(Status.OK);
-    try {
-      impl.createVolume(KsmVolumeArgs.getFromProtobuf(request.getVolumeInfo()));
-    } catch (IOException e) {
-      resp.setStatus(exceptionToResponseStatus(e));
-    }
-    return resp.build();
-  }
-
-  @Override
-  public SetVolumePropertyResponse setVolumeProperty(
-      RpcController controller, SetVolumePropertyRequest request)
-      throws ServiceException {
-    SetVolumePropertyResponse.Builder resp =
-        SetVolumePropertyResponse.newBuilder();
-    resp.setStatus(Status.OK);
-    String volume = request.getVolumeName();
-
-    try {
-      if (request.hasQuotaInBytes()) {
-        long quota = request.getQuotaInBytes();
-        impl.setQuota(volume, quota);
-      } else {
-        String owner = request.getOwnerName();
-        impl.setOwner(volume, owner);
-      }
-    } catch (IOException e) {
-      resp.setStatus(exceptionToResponseStatus(e));
-    }
-    return resp.build();
-  }
-
-  @Override
-  public CheckVolumeAccessResponse checkVolumeAccess(
-      RpcController controller, CheckVolumeAccessRequest request)
-      throws ServiceException {
-    CheckVolumeAccessResponse.Builder resp =
-        CheckVolumeAccessResponse.newBuilder();
-    resp.setStatus(Status.OK);
-    try {
-      boolean access = impl.checkVolumeAccess(request.getVolumeName(),
-          request.getUserAcl());
-      // if no access, set the response status as access denied
-      if (!access) {
-        resp.setStatus(Status.ACCESS_DENIED);
-      }
-    } catch (IOException e) {
-      resp.setStatus(exceptionToResponseStatus(e));
-    }
-
-    return resp.build();
-  }
-
-  @Override
-  public InfoVolumeResponse infoVolume(
-      RpcController controller, InfoVolumeRequest request)
-      throws ServiceException {
-    InfoVolumeResponse.Builder resp = InfoVolumeResponse.newBuilder();
-    resp.setStatus(Status.OK);
-    String volume = request.getVolumeName();
-    try {
-      KsmVolumeArgs ret = impl.getVolumeInfo(volume);
-      resp.setVolumeInfo(ret.getProtobuf());
-    } catch (IOException e) {
-      resp.setStatus(exceptionToResponseStatus(e));
-    }
-    return resp.build();
-  }
-
-  @Override
-  public DeleteVolumeResponse deleteVolume(
-      RpcController controller, DeleteVolumeRequest request)
-      throws ServiceException {
-    DeleteVolumeResponse.Builder resp = DeleteVolumeResponse.newBuilder();
-    resp.setStatus(Status.OK);
-    try {
-      impl.deleteVolume(request.getVolumeName());
-    } catch (IOException e) {
-      resp.setStatus(exceptionToResponseStatus(e));
-    }
-    return resp.build();
-  }
-
-  @Override
-  public ListVolumeResponse listVolumes(
-      RpcController controller, ListVolumeRequest request)
-      throws ServiceException {
-    ListVolumeResponse.Builder resp = ListVolumeResponse.newBuilder();
-    List<KsmVolumeArgs> result = Lists.newArrayList();
-    try {
-      if (request.getScope()
-          == ListVolumeRequest.Scope.VOLUMES_BY_USER) {
-        result = impl.listVolumeByUser(request.getUserName(),
-            request.getPrefix(), request.getPrevKey(), request.getMaxKeys());
-      } else if (request.getScope()
-          == ListVolumeRequest.Scope.VOLUMES_BY_CLUSTER) {
-        result = impl.listAllVolumes(request.getPrefix(), request.getPrevKey(),
-            request.getMaxKeys());
-      }
-
-      if (result == null) {
-        throw new ServiceException("Failed to get volumes for given scope "
-            + request.getScope());
-      }
-
-      result.forEach(item -> resp.addVolumeInfo(item.getProtobuf()));
-      resp.setStatus(Status.OK);
-    } catch (IOException e) {
-      resp.setStatus(exceptionToResponseStatus(e));
-    }
-    return resp.build();
-  }
-
-  @Override
-  public CreateBucketResponse createBucket(
-      RpcController controller, CreateBucketRequest
-      request) throws ServiceException {
-    CreateBucketResponse.Builder resp =
-        CreateBucketResponse.newBuilder();
-    try {
-      impl.createBucket(KsmBucketInfo.getFromProtobuf(
-          request.getBucketInfo()));
-      resp.setStatus(Status.OK);
-    } catch (IOException e) {
-      resp.setStatus(exceptionToResponseStatus(e));
-    }
-    return resp.build();
-  }
-
-  @Override
-  public InfoBucketResponse infoBucket(
-      RpcController controller, InfoBucketRequest request)
-      throws ServiceException {
-    InfoBucketResponse.Builder resp =
-        InfoBucketResponse.newBuilder();
-    try {
-      KsmBucketInfo ksmBucketInfo = impl.getBucketInfo(
-          request.getVolumeName(), request.getBucketName());
-      resp.setStatus(Status.OK);
-      resp.setBucketInfo(ksmBucketInfo.getProtobuf());
-    } catch(IOException e) {
-      resp.setStatus(exceptionToResponseStatus(e));
-    }
-    return resp.build();
-  }
-
-  @Override
-  public LocateKeyResponse createKey(
-      RpcController controller, LocateKeyRequest request
-  ) throws ServiceException {
-    LocateKeyResponse.Builder resp =
-        LocateKeyResponse.newBuilder();
-    try {
-      KeyArgs keyArgs = request.getKeyArgs();
-      HddsProtos.ReplicationType type =
-          keyArgs.hasType()? keyArgs.getType() : null;
-      HddsProtos.ReplicationFactor factor =
-          keyArgs.hasFactor()? keyArgs.getFactor() : null;
-      KsmKeyArgs ksmKeyArgs = new KsmKeyArgs.Builder()
-          .setVolumeName(keyArgs.getVolumeName())
-          .setBucketName(keyArgs.getBucketName())
-          .setKeyName(keyArgs.getKeyName())
-          .setDataSize(keyArgs.getDataSize())
-          .setType(type)
-          .setFactor(factor)
-          .build();
-      if (keyArgs.hasDataSize()) {
-        ksmKeyArgs.setDataSize(keyArgs.getDataSize());
-      } else {
-        ksmKeyArgs.setDataSize(0);
-      }
-      OpenKeySession openKey = impl.openKey(ksmKeyArgs);
-      resp.setKeyInfo(openKey.getKeyInfo().getProtobuf());
-      resp.setID(openKey.getId());
-      resp.setOpenVersion(openKey.getOpenVersion());
-      resp.setStatus(Status.OK);
-    } catch (IOException e) {
-      resp.setStatus(exceptionToResponseStatus(e));
-    }
-    return resp.build();
-  }
-
-  @Override
-  public LocateKeyResponse lookupKey(
-      RpcController controller, LocateKeyRequest request
-  ) throws ServiceException {
-    LocateKeyResponse.Builder resp =
-        LocateKeyResponse.newBuilder();
-    try {
-      KeyArgs keyArgs = request.getKeyArgs();
-      KsmKeyArgs ksmKeyArgs = new KsmKeyArgs.Builder()
-          .setVolumeName(keyArgs.getVolumeName())
-          .setBucketName(keyArgs.getBucketName())
-          .setKeyName(keyArgs.getKeyName())
-          .build();
-      KsmKeyInfo keyInfo = impl.lookupKey(ksmKeyArgs);
-      resp.setKeyInfo(keyInfo.getProtobuf());
-      resp.setStatus(Status.OK);
-    } catch (IOException e) {
-      resp.setStatus(exceptionToResponseStatus(e));
-    }
-    return resp.build();
-  }
-
-  @Override
-  public RenameKeyResponse renameKey(
-      RpcController controller, RenameKeyRequest request)
-      throws ServiceException {
-    RenameKeyResponse.Builder resp = RenameKeyResponse.newBuilder();
-    try {
-      KeyArgs keyArgs = request.getKeyArgs();
-      KsmKeyArgs ksmKeyArgs = new KsmKeyArgs.Builder()
-          .setVolumeName(keyArgs.getVolumeName())
-          .setBucketName(keyArgs.getBucketName())
-          .setKeyName(keyArgs.getKeyName())
-          .build();
-      impl.renameKey(ksmKeyArgs, request.getToKeyName());
-      resp.setStatus(Status.OK);
-    } catch (IOException e){
-      resp.setStatus(exceptionToResponseStatus(e));
-    }
-    return resp.build();
-  }
-
-  @Override
-  public SetBucketPropertyResponse setBucketProperty(
-      RpcController controller, SetBucketPropertyRequest request)
-      throws ServiceException {
-    SetBucketPropertyResponse.Builder resp =
-        SetBucketPropertyResponse.newBuilder();
-    try {
-      impl.setBucketProperty(KsmBucketArgs.getFromProtobuf(
-          request.getBucketArgs()));
-      resp.setStatus(Status.OK);
-    } catch(IOException e) {
-      resp.setStatus(exceptionToResponseStatus(e));
-    }
-    return resp.build();
-  }
-
-  @Override
-  public LocateKeyResponse deleteKey(RpcController controller,
-      LocateKeyRequest request) throws ServiceException {
-    LocateKeyResponse.Builder resp =
-        LocateKeyResponse.newBuilder();
-    try {
-      KeyArgs keyArgs = request.getKeyArgs();
-      KsmKeyArgs ksmKeyArgs = new KsmKeyArgs.Builder()
-          .setVolumeName(keyArgs.getVolumeName())
-          .setBucketName(keyArgs.getBucketName())
-          .setKeyName(keyArgs.getKeyName())
-          .build();
-      impl.deleteKey(ksmKeyArgs);
-      resp.setStatus(Status.OK);
-    } catch (IOException e) {
-      resp.setStatus(exceptionToResponseStatus(e));
-    }
-    return resp.build();
-  }
-
-  @Override
-  public DeleteBucketResponse deleteBucket(
-      RpcController controller, DeleteBucketRequest request)
-      throws ServiceException {
-    DeleteBucketResponse.Builder resp = DeleteBucketResponse.newBuilder();
-    resp.setStatus(Status.OK);
-    try {
-      impl.deleteBucket(request.getVolumeName(), request.getBucketName());
-    } catch (IOException e) {
-      resp.setStatus(exceptionToResponseStatus(e));
-    }
-    return resp.build();
-  }
-
-  @Override
-  public ListBucketsResponse listBuckets(
-      RpcController controller, ListBucketsRequest request)
-      throws ServiceException {
-    ListBucketsResponse.Builder resp =
-        ListBucketsResponse.newBuilder();
-    try {
-      List<KsmBucketInfo> buckets = impl.listBuckets(
-          request.getVolumeName(),
-          request.getStartKey(),
-          request.getPrefix(),
-          request.getCount());
-      for(KsmBucketInfo bucket : buckets) {
-        resp.addBucketInfo(bucket.getProtobuf());
-      }
-      resp.setStatus(Status.OK);
-    } catch (IOException e) {
-      resp.setStatus(exceptionToResponseStatus(e));
-    }
-    return resp.build();
-  }
-
-  @Override
-  public ListKeysResponse listKeys(RpcController controller,
-      ListKeysRequest request) throws ServiceException {
-    ListKeysResponse.Builder resp =
-        ListKeysResponse.newBuilder();
-    try {
-      List<KsmKeyInfo> keys = impl.listKeys(
-          request.getVolumeName(),
-          request.getBucketName(),
-          request.getStartKey(),
-          request.getPrefix(),
-          request.getCount());
-      for(KsmKeyInfo key : keys) {
-        resp.addKeyInfo(key.getProtobuf());
-      }
-      resp.setStatus(Status.OK);
-    } catch (IOException e) {
-      resp.setStatus(exceptionToResponseStatus(e));
-    }
-    return resp.build();
-  }
-
-  @Override
-  public CommitKeyResponse commitKey(RpcController controller,
-      CommitKeyRequest request) throws ServiceException {
-    CommitKeyResponse.Builder resp =
-        CommitKeyResponse.newBuilder();
-    try {
-      KeyArgs keyArgs = request.getKeyArgs();
-      HddsProtos.ReplicationType type =
-          keyArgs.hasType()? keyArgs.getType() : null;
-      HddsProtos.ReplicationFactor factor =
-          keyArgs.hasFactor()? keyArgs.getFactor() : null;
-      KsmKeyArgs ksmKeyArgs = new KsmKeyArgs.Builder()
-          .setVolumeName(keyArgs.getVolumeName())
-          .setBucketName(keyArgs.getBucketName())
-          .setKeyName(keyArgs.getKeyName())
-          .setDataSize(keyArgs.getDataSize())
-          .setType(type)
-          .setFactor(factor)
-          .build();
-      int id = request.getClientID();
-      impl.commitKey(ksmKeyArgs, id);
-      resp.setStatus(Status.OK);
-    } catch (IOException e) {
-      resp.setStatus(exceptionToResponseStatus(e));
-    }
-    return resp.build();
-  }
-
-  @Override
-  public AllocateBlockResponse allocateBlock(RpcController controller,
-      AllocateBlockRequest request) throws ServiceException {
-    AllocateBlockResponse.Builder resp =
-        AllocateBlockResponse.newBuilder();
-    try {
-      KeyArgs keyArgs = request.getKeyArgs();
-      KsmKeyArgs ksmKeyArgs = new KsmKeyArgs.Builder()
-          .setVolumeName(keyArgs.getVolumeName())
-          .setBucketName(keyArgs.getBucketName())
-          .setKeyName(keyArgs.getKeyName())
-          .build();
-      int id = request.getClientID();
-      KsmKeyLocationInfo newLocation = impl.allocateBlock(ksmKeyArgs, id);
-      resp.setKeyLocation(newLocation.getProtobuf());
-      resp.setStatus(Status.OK);
-    } catch (IOException e) {
-      resp.setStatus(exceptionToResponseStatus(e));
-    }
-    return resp.build();
-  }
-
-  @Override
-  public ServiceListResponse getServiceList(RpcController controller,
-      ServiceListRequest request) throws ServiceException {
-    ServiceListResponse.Builder resp = ServiceListResponse.newBuilder();
-    try {
-      resp.addAllServiceInfo(impl.getServiceList().stream()
-          .map(ServiceInfo::getProtobuf)
-          .collect(Collectors.toList()));
-      resp.setStatus(Status.OK);
-    } catch (IOException e) {
-      resp.setStatus(exceptionToResponseStatus(e));
-    }
-    return resp.build();
-  }
-}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/061b1685/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/protocolPB/OzoneManagerProtocolServerSideTranslatorPB.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/protocolPB/OzoneManagerProtocolServerSideTranslatorPB.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/protocolPB/OzoneManagerProtocolServerSideTranslatorPB.java
new file mode 100644
index 0000000..40a88b6
--- /dev/null
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/protocolPB/OzoneManagerProtocolServerSideTranslatorPB.java
@@ -0,0 +1,571 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with this
+ * work for additional information regarding copyright ownership.  The ASF
+ * licenses this file to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations under
+ * the License.
+ */
+package org.apache.hadoop.ozone.protocolPB;
+
+import com.google.common.collect.Lists;
+import com.google.protobuf.RpcController;
+import com.google.protobuf.ServiceException;
+import org.apache.hadoop.ozone.om.helpers.OmBucketArgs;
+import org.apache.hadoop.ozone.om.helpers.OmBucketInfo;
+import org.apache.hadoop.ozone.om.helpers.OmKeyArgs;
+import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;
+import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfo;
+import org.apache.hadoop.ozone.om.helpers.OmVolumeArgs;
+import org.apache.hadoop.ozone.om.helpers.OpenKeySession;
+import org.apache.hadoop.ozone.om.helpers.ServiceInfo;
+import org.apache.hadoop.ozone.om.protocol.OzoneManagerProtocol;
+import org.apache.hadoop.ozone.om.protocolPB.OzoneManagerProtocolPB;
+import org.apache.hadoop.ozone.om.exceptions.OMException;
+import org.apache.hadoop.ozone.protocol.proto
+    .OzoneManagerProtocolProtos.AllocateBlockRequest;
+import org.apache.hadoop.ozone.protocol.proto
+    .OzoneManagerProtocolProtos.AllocateBlockResponse;
+import org.apache.hadoop.ozone.protocol.proto
+    .OzoneManagerProtocolProtos.CommitKeyRequest;
+import org.apache.hadoop.ozone.protocol.proto
+    .OzoneManagerProtocolProtos.CommitKeyResponse;
+import org.apache.hadoop.ozone.protocol.proto
+    .OzoneManagerProtocolProtos.CreateBucketRequest;
+import org.apache.hadoop.ozone.protocol.proto
+    .OzoneManagerProtocolProtos.CreateBucketResponse;
+import org.apache.hadoop.ozone.protocol.proto
+    .OzoneManagerProtocolProtos.InfoBucketRequest;
+import org.apache.hadoop.ozone.protocol.proto
+    .OzoneManagerProtocolProtos.InfoBucketResponse;
+import org.apache.hadoop.ozone.protocol.proto
+    .OzoneManagerProtocolProtos.SetBucketPropertyRequest;
+import org.apache.hadoop.ozone.protocol.proto
+    .OzoneManagerProtocolProtos.SetBucketPropertyResponse;
+import org.apache.hadoop.ozone.protocol.proto
+    .OzoneManagerProtocolProtos.DeleteBucketRequest;
+import org.apache.hadoop.ozone.protocol.proto
+    .OzoneManagerProtocolProtos.DeleteBucketResponse;
+import org.apache.hadoop.ozone.protocol.proto
+    .OzoneManagerProtocolProtos.CreateVolumeRequest;
+import org.apache.hadoop.ozone.protocol.proto
+    .OzoneManagerProtocolProtos.CreateVolumeResponse;
+import org.apache.hadoop.ozone.protocol.proto
+    .OzoneManagerProtocolProtos.LocateKeyRequest;
+import org.apache.hadoop.ozone.protocol.proto
+    .OzoneManagerProtocolProtos.LocateKeyResponse;
+import org.apache.hadoop.ozone.protocol.proto
+    .OzoneManagerProtocolProtos.RenameKeyRequest;
+import org.apache.hadoop.ozone.protocol.proto
+    .OzoneManagerProtocolProtos.RenameKeyResponse;
+import org.apache.hadoop.ozone.protocol.proto
+    .OzoneManagerProtocolProtos.KeyArgs;
+import org.apache.hadoop.ozone.protocol.proto
+    .OzoneManagerProtocolProtos.SetVolumePropertyRequest;
+import org.apache.hadoop.ozone.protocol.proto
+    .OzoneManagerProtocolProtos.SetVolumePropertyResponse;
+import org.apache.hadoop.ozone.protocol.proto
+    .OzoneManagerProtocolProtos.CheckVolumeAccessRequest;
+import org.apache.hadoop.ozone.protocol.proto
+    .OzoneManagerProtocolProtos.CheckVolumeAccessResponse;
+import org.apache.hadoop.ozone.protocol.proto
+    .OzoneManagerProtocolProtos.InfoVolumeRequest;
+import org.apache.hadoop.ozone.protocol.proto
+    .OzoneManagerProtocolProtos.InfoVolumeResponse;
+import org.apache.hadoop.ozone.protocol.proto
+    .OzoneManagerProtocolProtos.DeleteVolumeRequest;
+import org.apache.hadoop.ozone.protocol.proto
+    .OzoneManagerProtocolProtos.DeleteVolumeResponse;
+import org.apache.hadoop.ozone.protocol.proto
+    .OzoneManagerProtocolProtos.ListVolumeRequest;
+import org.apache.hadoop.ozone.protocol.proto
+    .OzoneManagerProtocolProtos.ListVolumeResponse;
+import org.apache.hadoop.ozone.protocol.proto
+    .OzoneManagerProtocolProtos.ListBucketsRequest;
+import org.apache.hadoop.ozone.protocol.proto
+    .OzoneManagerProtocolProtos.ListBucketsResponse;
+import org.apache.hadoop.ozone.protocol.proto
+    .OzoneManagerProtocolProtos.ListKeysRequest;
+import org.apache.hadoop.ozone.protocol.proto
+    .OzoneManagerProtocolProtos.ListKeysResponse;
+import org.apache.hadoop.ozone.protocol.proto
+    .OzoneManagerProtocolProtos.Status;
+import org.apache.hadoop.ozone.protocol.proto
+    .OzoneManagerProtocolProtos.ServiceListRequest;
+import org.apache.hadoop.ozone.protocol.proto
+    .OzoneManagerProtocolProtos.ServiceListResponse;
+import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.io.IOException;
+import java.util.List;
+import java.util.stream.Collectors;
+
+/**
+ * This class is the server-side translator that forwards requests received on
+ * {@link OzoneManagerProtocolPB}
+ * to the OzoneManagerService server implementation.
+ */
+public class OzoneManagerProtocolServerSideTranslatorPB implements
+    OzoneManagerProtocolPB {
+  private static final Logger LOG = LoggerFactory
+      .getLogger(OzoneManagerProtocolServerSideTranslatorPB.class);
+  private final OzoneManagerProtocol impl;
+
+  /**
+   * Constructs an instance of the server handler.
+   *
+   * @param impl OzoneManagerProtocolPB
+   */
+  public OzoneManagerProtocolServerSideTranslatorPB(
+      OzoneManagerProtocol impl) {
+    this.impl = impl;
+  }
+
+  // Convert and exception to corresponding status code
+  private Status exceptionToResponseStatus(IOException ex) {
+    if (ex instanceof OMException) {
+      OMException omException = (OMException)ex;
+      switch (omException.getResult()) {
+      case FAILED_VOLUME_ALREADY_EXISTS:
+        return Status.VOLUME_ALREADY_EXISTS;
+      case FAILED_TOO_MANY_USER_VOLUMES:
+        return Status.USER_TOO_MANY_VOLUMES;
+      case FAILED_VOLUME_NOT_FOUND:
+        return Status.VOLUME_NOT_FOUND;
+      case FAILED_VOLUME_NOT_EMPTY:
+        return Status.VOLUME_NOT_EMPTY;
+      case FAILED_USER_NOT_FOUND:
+        return Status.USER_NOT_FOUND;
+      case FAILED_BUCKET_ALREADY_EXISTS:
+        return Status.BUCKET_ALREADY_EXISTS;
+      case FAILED_BUCKET_NOT_FOUND:
+        return Status.BUCKET_NOT_FOUND;
+      case FAILED_BUCKET_NOT_EMPTY:
+        return Status.BUCKET_NOT_EMPTY;
+      case FAILED_KEY_ALREADY_EXISTS:
+        return Status.KEY_ALREADY_EXISTS;
+      case FAILED_KEY_NOT_FOUND:
+        return Status.KEY_NOT_FOUND;
+      case FAILED_INVALID_KEY_NAME:
+        return Status.INVALID_KEY_NAME;
+      case FAILED_KEY_ALLOCATION:
+        return Status.KEY_ALLOCATION_ERROR;
+      case FAILED_KEY_DELETION:
+        return Status.KEY_DELETION_ERROR;
+      case FAILED_KEY_RENAME:
+        return Status.KEY_RENAME_ERROR;
+      case FAILED_METADATA_ERROR:
+        return Status.METADATA_ERROR;
+      case OM_NOT_INITIALIZED:
+        return Status.OM_NOT_INITIALIZED;
+      case SCM_VERSION_MISMATCH_ERROR:
+        return Status.SCM_VERSION_MISMATCH_ERROR;
+      default:
+        return Status.INTERNAL_ERROR;
+      }
+    } else {
+      if (LOG.isDebugEnabled()) {
+        LOG.debug("Unknown error occurs", ex);
+      }
+      return Status.INTERNAL_ERROR;
+    }
+  }
+
+  @Override
+  public CreateVolumeResponse createVolume(
+      RpcController controller, CreateVolumeRequest request)
+      throws ServiceException {
+    CreateVolumeResponse.Builder resp = CreateVolumeResponse.newBuilder();
+    resp.setStatus(Status.OK);
+    try {
+      impl.createVolume(OmVolumeArgs.getFromProtobuf(request.getVolumeInfo()));
+    } catch (IOException e) {
+      resp.setStatus(exceptionToResponseStatus(e));
+    }
+    return resp.build();
+  }
+
+  @Override
+  public SetVolumePropertyResponse setVolumeProperty(
+      RpcController controller, SetVolumePropertyRequest request)
+      throws ServiceException {
+    SetVolumePropertyResponse.Builder resp =
+        SetVolumePropertyResponse.newBuilder();
+    resp.setStatus(Status.OK);
+    String volume = request.getVolumeName();
+
+    try {
+      if (request.hasQuotaInBytes()) {
+        long quota = request.getQuotaInBytes();
+        impl.setQuota(volume, quota);
+      } else {
+        String owner = request.getOwnerName();
+        impl.setOwner(volume, owner);
+      }
+    } catch (IOException e) {
+      resp.setStatus(exceptionToResponseStatus(e));
+    }
+    return resp.build();
+  }
+
+  @Override
+  public CheckVolumeAccessResponse checkVolumeAccess(
+      RpcController controller, CheckVolumeAccessRequest request)
+      throws ServiceException {
+    CheckVolumeAccessResponse.Builder resp =
+        CheckVolumeAccessResponse.newBuilder();
+    resp.setStatus(Status.OK);
+    try {
+      boolean access = impl.checkVolumeAccess(request.getVolumeName(),
+          request.getUserAcl());
+      // if no access, set the response status as access denied
+      if (!access) {
+        resp.setStatus(Status.ACCESS_DENIED);
+      }
+    } catch (IOException e) {
+      resp.setStatus(exceptionToResponseStatus(e));
+    }
+
+    return resp.build();
+  }
+
+  @Override
+  public InfoVolumeResponse infoVolume(
+      RpcController controller, InfoVolumeRequest request)
+      throws ServiceException {
+    InfoVolumeResponse.Builder resp = InfoVolumeResponse.newBuilder();
+    resp.setStatus(Status.OK);
+    String volume = request.getVolumeName();
+    try {
+      OmVolumeArgs ret = impl.getVolumeInfo(volume);
+      resp.setVolumeInfo(ret.getProtobuf());
+    } catch (IOException e) {
+      resp.setStatus(exceptionToResponseStatus(e));
+    }
+    return resp.build();
+  }
+
+  @Override
+  public DeleteVolumeResponse deleteVolume(
+      RpcController controller, DeleteVolumeRequest request)
+      throws ServiceException {
+    DeleteVolumeResponse.Builder resp = DeleteVolumeResponse.newBuilder();
+    resp.setStatus(Status.OK);
+    try {
+      impl.deleteVolume(request.getVolumeName());
+    } catch (IOException e) {
+      resp.setStatus(exceptionToResponseStatus(e));
+    }
+    return resp.build();
+  }
+
+  @Override
+  public ListVolumeResponse listVolumes(
+      RpcController controller, ListVolumeRequest request)
+      throws ServiceException {
+    ListVolumeResponse.Builder resp = ListVolumeResponse.newBuilder();
+    List<OmVolumeArgs> result = Lists.newArrayList();
+    try {
+      if (request.getScope()
+          == ListVolumeRequest.Scope.VOLUMES_BY_USER) {
+        result = impl.listVolumeByUser(request.getUserName(),
+            request.getPrefix(), request.getPrevKey(), request.getMaxKeys());
+      } else if (request.getScope()
+          == ListVolumeRequest.Scope.VOLUMES_BY_CLUSTER) {
+        result = impl.listAllVolumes(request.getPrefix(), request.getPrevKey(),
+            request.getMaxKeys());
+      }
+
+      if (result == null) {
+        throw new ServiceException("Failed to get volumes for given scope "
+            + request.getScope());
+      }
+
+      result.forEach(item -> resp.addVolumeInfo(item.getProtobuf()));
+      resp.setStatus(Status.OK);
+    } catch (IOException e) {
+      resp.setStatus(exceptionToResponseStatus(e));
+    }
+    return resp.build();
+  }
+
+  @Override
+  public CreateBucketResponse createBucket(
+      RpcController controller, CreateBucketRequest
+      request) throws ServiceException {
+    CreateBucketResponse.Builder resp =
+        CreateBucketResponse.newBuilder();
+    try {
+      impl.createBucket(OmBucketInfo.getFromProtobuf(
+          request.getBucketInfo()));
+      resp.setStatus(Status.OK);
+    } catch (IOException e) {
+      resp.setStatus(exceptionToResponseStatus(e));
+    }
+    return resp.build();
+  }
+
+  @Override
+  public InfoBucketResponse infoBucket(
+      RpcController controller, InfoBucketRequest request)
+      throws ServiceException {
+    InfoBucketResponse.Builder resp =
+        InfoBucketResponse.newBuilder();
+    try {
+      OmBucketInfo omBucketInfo = impl.getBucketInfo(
+          request.getVolumeName(), request.getBucketName());
+      resp.setStatus(Status.OK);
+      resp.setBucketInfo(omBucketInfo.getProtobuf());
+    } catch(IOException e) {
+      resp.setStatus(exceptionToResponseStatus(e));
+    }
+    return resp.build();
+  }
+
+  @Override
+  public LocateKeyResponse createKey(
+      RpcController controller, LocateKeyRequest request
+  ) throws ServiceException {
+    LocateKeyResponse.Builder resp =
+        LocateKeyResponse.newBuilder();
+    try {
+      KeyArgs keyArgs = request.getKeyArgs();
+      HddsProtos.ReplicationType type =
+          keyArgs.hasType()? keyArgs.getType() : null;
+      HddsProtos.ReplicationFactor factor =
+          keyArgs.hasFactor()? keyArgs.getFactor() : null;
+      OmKeyArgs omKeyArgs = new OmKeyArgs.Builder()
+          .setVolumeName(keyArgs.getVolumeName())
+          .setBucketName(keyArgs.getBucketName())
+          .setKeyName(keyArgs.getKeyName())
+          .setDataSize(keyArgs.getDataSize())
+          .setType(type)
+          .setFactor(factor)
+          .build();
+      if (keyArgs.hasDataSize()) {
+        omKeyArgs.setDataSize(keyArgs.getDataSize());
+      } else {
+        omKeyArgs.setDataSize(0);
+      }
+      OpenKeySession openKey = impl.openKey(omKeyArgs);
+      resp.setKeyInfo(openKey.getKeyInfo().getProtobuf());
+      resp.setID(openKey.getId());
+      resp.setOpenVersion(openKey.getOpenVersion());
+      resp.setStatus(Status.OK);
+    } catch (IOException e) {
+      resp.setStatus(exceptionToResponseStatus(e));
+    }
+    return resp.build();
+  }
+
+  @Override
+  public LocateKeyResponse lookupKey(
+      RpcController controller, LocateKeyRequest request
+  ) throws ServiceException {
+    LocateKeyResponse.Builder resp =
+        LocateKeyResponse.newBuilder();
+    try {
+      KeyArgs keyArgs = request.getKeyArgs();
+      OmKeyArgs omKeyArgs = new OmKeyArgs.Builder()
+          .setVolumeName(keyArgs.getVolumeName())
+          .setBucketName(keyArgs.getBucketName())
+          .setKeyName(keyArgs.getKeyName())
+          .build();
+      OmKeyInfo keyInfo = impl.lookupKey(omKeyArgs);
+      resp.setKeyInfo(keyInfo.getProtobuf());
+      resp.setStatus(Status.OK);
+    } catch (IOException e) {
+      resp.setStatus(exceptionToResponseStatus(e));
+    }
+    return resp.build();
+  }
+
+  @Override
+  public RenameKeyResponse renameKey(
+      RpcController controller, RenameKeyRequest request)
+      throws ServiceException {
+    RenameKeyResponse.Builder resp = RenameKeyResponse.newBuilder();
+    try {
+      KeyArgs keyArgs = request.getKeyArgs();
+      OmKeyArgs omKeyArgs = new OmKeyArgs.Builder()
+          .setVolumeName(keyArgs.getVolumeName())
+          .setBucketName(keyArgs.getBucketName())
+          .setKeyName(keyArgs.getKeyName())
+          .build();
+      impl.renameKey(omKeyArgs, request.getToKeyName());
+      resp.setStatus(Status.OK);
+    } catch (IOException e){
+      resp.setStatus(exceptionToResponseStatus(e));
+    }
+    return resp.build();
+  }
+
+  @Override
+  public SetBucketPropertyResponse setBucketProperty(
+      RpcController controller, SetBucketPropertyRequest request)
+      throws ServiceException {
+    SetBucketPropertyResponse.Builder resp =
+        SetBucketPropertyResponse.newBuilder();
+    try {
+      impl.setBucketProperty(OmBucketArgs.getFromProtobuf(
+          request.getBucketArgs()));
+      resp.setStatus(Status.OK);
+    } catch(IOException e) {
+      resp.setStatus(exceptionToResponseStatus(e));
+    }
+    return resp.build();
+  }
+
+  @Override
+  public LocateKeyResponse deleteKey(RpcController controller,
+      LocateKeyRequest request) throws ServiceException {
+    LocateKeyResponse.Builder resp =
+        LocateKeyResponse.newBuilder();
+    try {
+      KeyArgs keyArgs = request.getKeyArgs();
+      OmKeyArgs omKeyArgs = new OmKeyArgs.Builder()
+          .setVolumeName(keyArgs.getVolumeName())
+          .setBucketName(keyArgs.getBucketName())
+          .setKeyName(keyArgs.getKeyName())
+          .build();
+      impl.deleteKey(omKeyArgs);
+      resp.setStatus(Status.OK);
+    } catch (IOException e) {
+      resp.setStatus(exceptionToResponseStatus(e));
+    }
+    return resp.build();
+  }
+
+  @Override
+  public DeleteBucketResponse deleteBucket(
+      RpcController controller, DeleteBucketRequest request)
+      throws ServiceException {
+    DeleteBucketResponse.Builder resp = DeleteBucketResponse.newBuilder();
+    resp.setStatus(Status.OK);
+    try {
+      impl.deleteBucket(request.getVolumeName(), request.getBucketName());
+    } catch (IOException e) {
+      resp.setStatus(exceptionToResponseStatus(e));
+    }
+    return resp.build();
+  }
+
+  @Override
+  public ListBucketsResponse listBuckets(
+      RpcController controller, ListBucketsRequest request)
+      throws ServiceException {
+    ListBucketsResponse.Builder resp =
+        ListBucketsResponse.newBuilder();
+    try {
+      List<OmBucketInfo> buckets = impl.listBuckets(
+          request.getVolumeName(),
+          request.getStartKey(),
+          request.getPrefix(),
+          request.getCount());
+      for(OmBucketInfo bucket : buckets) {
+        resp.addBucketInfo(bucket.getProtobuf());
+      }
+      resp.setStatus(Status.OK);
+    } catch (IOException e) {
+      resp.setStatus(exceptionToResponseStatus(e));
+    }
+    return resp.build();
+  }
+
+  @Override
+  public ListKeysResponse listKeys(RpcController controller,
+      ListKeysRequest request) throws ServiceException {
+    ListKeysResponse.Builder resp =
+        ListKeysResponse.newBuilder();
+    try {
+      List<OmKeyInfo> keys = impl.listKeys(
+          request.getVolumeName(),
+          request.getBucketName(),
+          request.getStartKey(),
+          request.getPrefix(),
+          request.getCount());
+      for(OmKeyInfo key : keys) {
+        resp.addKeyInfo(key.getProtobuf());
+      }
+      resp.setStatus(Status.OK);
+    } catch (IOException e) {
+      resp.setStatus(exceptionToResponseStatus(e));
+    }
+    return resp.build();
+  }
+
+  @Override
+  public CommitKeyResponse commitKey(RpcController controller,
+      CommitKeyRequest request) throws ServiceException {
+    CommitKeyResponse.Builder resp =
+        CommitKeyResponse.newBuilder();
+    try {
+      KeyArgs keyArgs = request.getKeyArgs();
+      HddsProtos.ReplicationType type =
+          keyArgs.hasType()? keyArgs.getType() : null;
+      HddsProtos.ReplicationFactor factor =
+          keyArgs.hasFactor()? keyArgs.getFactor() : null;
+      OmKeyArgs omKeyArgs = new OmKeyArgs.Builder()
+          .setVolumeName(keyArgs.getVolumeName())
+          .setBucketName(keyArgs.getBucketName())
+          .setKeyName(keyArgs.getKeyName())
+          .setDataSize(keyArgs.getDataSize())
+          .setType(type)
+          .setFactor(factor)
+          .build();
+      int id = request.getClientID();
+      impl.commitKey(omKeyArgs, id);
+      resp.setStatus(Status.OK);
+    } catch (IOException e) {
+      resp.setStatus(exceptionToResponseStatus(e));
+    }
+    return resp.build();
+  }
+
+  @Override
+  public AllocateBlockResponse allocateBlock(RpcController controller,
+      AllocateBlockRequest request) throws ServiceException {
+    AllocateBlockResponse.Builder resp =
+        AllocateBlockResponse.newBuilder();
+    try {
+      KeyArgs keyArgs = request.getKeyArgs();
+      OmKeyArgs omKeyArgs = new OmKeyArgs.Builder()
+          .setVolumeName(keyArgs.getVolumeName())
+          .setBucketName(keyArgs.getBucketName())
+          .setKeyName(keyArgs.getKeyName())
+          .build();
+      int id = request.getClientID();
+      OmKeyLocationInfo newLocation = impl.allocateBlock(omKeyArgs, id);
+      resp.setKeyLocation(newLocation.getProtobuf());
+      resp.setStatus(Status.OK);
+    } catch (IOException e) {
+      resp.setStatus(exceptionToResponseStatus(e));
+    }
+    return resp.build();
+  }
+
+  @Override
+  public ServiceListResponse getServiceList(RpcController controller,
+      ServiceListRequest request) throws ServiceException {
+    ServiceListResponse.Builder resp = ServiceListResponse.newBuilder();
+    try {
+      resp.addAllServiceInfo(impl.getServiceList().stream()
+          .map(ServiceInfo::getProtobuf)
+          .collect(Collectors.toList()));
+      resp.setStatus(Status.OK);
+    } catch (IOException e) {
+      resp.setStatus(exceptionToResponseStatus(e));
+    }
+    return resp.build();
+  }
+}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/061b1685/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/protocolPB/package-info.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/protocolPB/package-info.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/protocolPB/package-info.java
index e9c2430..9bc393d 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/protocolPB/package-info.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/protocolPB/package-info.java
@@ -18,5 +18,5 @@
 package org.apache.hadoop.ozone.protocolPB;
 
 /**
- * KSM protocol buffer translators.
+ * OM protocol buffer translators.
  */
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hadoop/blob/061b1685/hadoop-ozone/ozone-manager/src/main/webapps/ksm/index.html
----------------------------------------------------------------------
diff --git a/hadoop-ozone/ozone-manager/src/main/webapps/ksm/index.html b/hadoop-ozone/ozone-manager/src/main/webapps/ksm/index.html
deleted file mode 100644
index 7f18028..0000000
--- a/hadoop-ozone/ozone-manager/src/main/webapps/ksm/index.html
+++ /dev/null
@@ -1,70 +0,0 @@
-<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Strict//EN"
-        "http://www.w3.org/TR/xhtml1/DTD/xhtml1-strict.dtd">
-<!--
-   Licensed to the Apache Software Foundation (ASF) under one or more
-   contributor license agreements.  See the NOTICE file distributed with
-   this work for additional information regarding copyright ownership.
-   The ASF licenses this file to You under the Apache License, Version 2.0
-   (the "License"); you may not use this file except in compliance with
-   the License.  You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
--->
-<html lang="en">
-<head>
-    <meta charset="utf-8">
-    <meta http-equiv="X-UA-Compatible" content="IE=edge">
-    <meta name="viewport" content="width=device-width, initial-scale=1">
-    <!-- The above 3 meta tags *must* come first in the head; any other head content must come *after* these tags -->
-    <meta name="description" content="HDFS Key Space Manager">
-
-    <title>HDFS Key Space Manager</title>
-
-    <link href="static/bootstrap-3.3.7/css/bootstrap.min.css" rel="stylesheet">
-    <link href="static/hadoop.css" rel="stylesheet">
-    <link href="static/nvd3-1.8.5.min.css" rel="stylesheet">
-
-    <link href="static/ozone.css" rel="stylesheet">
-
-</head>
-
-<body ng-app="ksm">
-
-<header class="navbar navbar-inverse navbar-fixed-top bs-docs-nav">
-    <div class="container-fluid">
-        <div class="navbar-header">
-            <button type="button" class="navbar-toggle collapsed" data-toggle="collapse" data-target="#navbar"
-                    aria-expanded="false" aria-controls="navbar">
-                <span class="sr-only">Toggle navigation</span>
-                <span class="icon-bar"></span>
-                <span class="icon-bar"></span>
-                <span class="icon-bar"></span>
-            </button>
-            <a class="navbar-brand" href="#">HDFS KSM</a>
-        </div>
-        <navmenu
-                metrics="{ 'Ksm metrics' : '#!/metrics/ksm', 'Rpc metrics' : '#!/metrics/rpc'}"></navmenu>
-    </div>
-</header>
-
-<div class="container-fluid">
-    <ng-view></ng-view>
-</div><!-- /.container -->
-
-<script src="static/jquery-3.3.1.min.js"></script>
-<script src="static/angular-1.6.4.min.js"></script>
-<script src="static/angular-route-1.6.4.min.js"></script>
-<script src="static/d3-3.5.17.min.js"></script>
-<script src="static/nvd3-1.8.5.min.js"></script>
-<script src="static/angular-nvd3-1.0.9.min.js"></script>
-<script src="static/ozone.js"></script>
-<script src="ksm.js"></script>
-<script src="static/bootstrap-3.3.7/js/bootstrap.min.js"></script>
-</body>
-</html>

http://git-wip-us.apache.org/repos/asf/hadoop/blob/061b1685/hadoop-ozone/ozone-manager/src/main/webapps/ksm/ksm-metrics.html
----------------------------------------------------------------------
diff --git a/hadoop-ozone/ozone-manager/src/main/webapps/ksm/ksm-metrics.html b/hadoop-ozone/ozone-manager/src/main/webapps/ksm/ksm-metrics.html
deleted file mode 100644
index e63fb00..0000000
--- a/hadoop-ozone/ozone-manager/src/main/webapps/ksm/ksm-metrics.html
+++ /dev/null
@@ -1,44 +0,0 @@
-<!--
-   Licensed to the Apache Software Foundation (ASF) under one or more
-   contributor license agreements.  See the NOTICE file distributed with
-   this work for additional information regarding copyright ownership.
-   The ASF licenses this file to You under the Apache License, Version 2.0
-   (the "License"); you may not use this file except in compliance with
-   the License.  You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
--->
-<h1>KSM Metrics</h1>
-
-<div ng-repeat="(type,numbers) in $ctrl.metrics.nums">
-    <h2>{{type}}</h2>
-    <div class="container">
-        <div class="col-md-6">
-            <h3>Requests ({{numbers.ops}} ops)</h3>
-            <nvd3 options="$ctrl.graphOptions"
-                  data="numbers.all"></nvd3>
-        </div>
-        <div class="col-md-6">
-            <h3>Failures</h3>
-            <nvd3 options="$ctrl.graphOptions"
-                  data="numbers.failures"></nvd3>
-        </div>
-    </div>
-</div>
-
-<div ng-show="$ctrl.metrics.others.length > 0">
-    <h2>Other JMX properties</h2>
-
-    <table class="table">
-        <tr ng-repeat="metric in $ctrl.metrics.others">
-            <td>{{metric.key}}</td>
-            <td>{{metric.value}}</td>
-        </tr>
-    </table>
-</div>

http://git-wip-us.apache.org/repos/asf/hadoop/blob/061b1685/hadoop-ozone/ozone-manager/src/main/webapps/ksm/ksm.js
----------------------------------------------------------------------
diff --git a/hadoop-ozone/ozone-manager/src/main/webapps/ksm/ksm.js b/hadoop-ozone/ozone-manager/src/main/webapps/ksm/ksm.js
deleted file mode 100644
index ab6f73b..0000000
--- a/hadoop-ozone/ozone-manager/src/main/webapps/ksm/ksm.js
+++ /dev/null
@@ -1,110 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-(function () {
-    "use strict";
-
-    var isIgnoredJmxKeys = function (key) {
-        return key == 'name' || key == 'modelerType' || key.match(/tag.*/);
-    };
-
-    angular.module('ksm', ['ozone', 'nvd3']);
-    angular.module('ksm').config(function ($routeProvider) {
-        $routeProvider
-            .when("/metrics/ksm", {
-                template: "<ksm-metrics></ksm-metrics>"
-            });
-    });
-    angular.module('ksm').component('ksmMetrics', {
-        templateUrl: 'ksm-metrics.html',
-        controller: function ($http) {
-            var ctrl = this;
-
-            ctrl.graphOptions = {
-                chart: {
-                    type: 'pieChart',
-                    height: 500,
-                    x: function (d) {
-                        return d.key;
-                    },
-                    y: function (d) {
-                        return d.value;
-                    },
-                    showLabels: true,
-                    labelType: 'value',
-                    duration: 500,
-                    labelThreshold: 0.01,
-                    valueFormat: function(d) {
-                        return d3.format('d')(d);
-                    },
-                    legend: {
-                        margin: {
-                            top: 5,
-                            right: 35,
-                            bottom: 5,
-                            left: 0
-                        }
-                    }
-                }
-            };
-
-
-            $http.get("jmx?qry=Hadoop:service=KeySpaceManager,name=KSMMetrics")
-                .then(function (result) {
-
-                    var groupedMetrics = {others: [], nums: {}};
-                    var metrics = result.data.beans[0]
-                    for (var key in metrics) {
-                        var numericalStatistic = key.match(/Num([A-Z][a-z]+)(.+?)(Fails)?$/);
-                        if (numericalStatistic) {
-                            var type = numericalStatistic[1];
-                            var name = numericalStatistic[2];
-                            var failed = numericalStatistic[3];
-                            groupedMetrics.nums[type] = groupedMetrics.nums[type] || {
-                                    failures: [],
-                                    all: []
-                                };
-                            if (failed) {
-                                groupedMetrics.nums[type].failures.push({
-                                    key: name,
-                                    value: metrics[key]
-                                })
-                            } else {
-                                if (name == "Ops") {
-                                    groupedMetrics.nums[type].ops = metrics[key]
-                                } else {
-                                    groupedMetrics.nums[type].all.push({
-                                        key: name,
-                                        value: metrics[key]
-                                    })
-                                }
-                            }
-                        } else if (isIgnoredJmxKeys(key)) {
-                            //ignore
-                        } else {
-                            groupedMetrics.others.push({
-                                'key': key,
-                                'value': metrics[key]
-                            });
-                        }
-                    }
-                    ctrl.metrics = groupedMetrics;
-                })
-        }
-    });
-
-})();

http://git-wip-us.apache.org/repos/asf/hadoop/blob/061b1685/hadoop-ozone/ozone-manager/src/main/webapps/ksm/main.css
----------------------------------------------------------------------
diff --git a/hadoop-ozone/ozone-manager/src/main/webapps/ksm/main.css b/hadoop-ozone/ozone-manager/src/main/webapps/ksm/main.css
deleted file mode 100644
index e442adc..0000000
--- a/hadoop-ozone/ozone-manager/src/main/webapps/ksm/main.css
+++ /dev/null
@@ -1,23 +0,0 @@
-/**
- *   Licensed to the Apache Software Foundation (ASF) under one or more
- *  contributor license agreements.  See the NOTICE file distributed with
- *  this work for additional information regarding copyright ownership.
- *  The ASF licenses this file to You under the Apache License, Version 2.0
- *  (the "License"); you may not use this file except in compliance with
- *  the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- *  Unless required by applicable law or agreed to in writing, software
- *  distributed under the License is distributed on an "AS IS" BASIS,
- *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- *  See the License for the specific language governing permissions and
- *  limitations under the License.
-*/
-body {
-  padding-top: 50px;
-}
-.starter-template {
-  padding: 40px 15px;
-  text-align: center;
-}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hadoop/blob/061b1685/hadoop-ozone/ozone-manager/src/main/webapps/ksm/main.html
----------------------------------------------------------------------
diff --git a/hadoop-ozone/ozone-manager/src/main/webapps/ksm/main.html b/hadoop-ozone/ozone-manager/src/main/webapps/ksm/main.html
deleted file mode 100644
index 0821899..0000000
--- a/hadoop-ozone/ozone-manager/src/main/webapps/ksm/main.html
+++ /dev/null
@@ -1,18 +0,0 @@
-<!--
-    Licensed to the Apache Software Foundation (ASF) under one or more
-    contributor license agreements.  See the NOTICE file distributed with
-    this work for additional information regarding copyright ownership.
-    The ASF licenses this file to You under the Apache License, Version 2.0
-    (the "License"); you may not use this file except in compliance with
-    the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-    Unless required by applicable law or agreed to in writing, software
-    distributed under the License is distributed on an "AS IS" BASIS,
-    WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-    See the License for the specific language governing permissions and
-    limitations under the License.
-  -->
-<overview>
-</overview>
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hadoop/blob/061b1685/hadoop-ozone/ozone-manager/src/main/webapps/ozoneManager/index.html
----------------------------------------------------------------------
diff --git a/hadoop-ozone/ozone-manager/src/main/webapps/ozoneManager/index.html b/hadoop-ozone/ozone-manager/src/main/webapps/ozoneManager/index.html
new file mode 100644
index 0000000..ba54cb2
--- /dev/null
+++ b/hadoop-ozone/ozone-manager/src/main/webapps/ozoneManager/index.html
@@ -0,0 +1,70 @@
+<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Strict//EN"
+        "http://www.w3.org/TR/xhtml1/DTD/xhtml1-strict.dtd">
+<!--
+   Licensed to the Apache Software Foundation (ASF) under one or more
+   contributor license agreements.  See the NOTICE file distributed with
+   this work for additional information regarding copyright ownership.
+   The ASF licenses this file to You under the Apache License, Version 2.0
+   (the "License"); you may not use this file except in compliance with
+   the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+-->
+<html lang="en">
+<head>
+    <meta charset="utf-8">
+    <meta http-equiv="X-UA-Compatible" content="IE=edge">
+    <meta name="viewport" content="width=device-width, initial-scale=1">
+    <!-- The above 3 meta tags *must* come first in the head; any other head content must come *after* these tags -->
+    <meta name="description" content="Ozone Manager">
+
+    <title>Ozone Manager</title>
+
+    <link href="static/bootstrap-3.3.7/css/bootstrap.min.css" rel="stylesheet">
+    <link href="static/hadoop.css" rel="stylesheet">
+    <link href="static/nvd3-1.8.5.min.css" rel="stylesheet">
+
+    <link href="static/ozone.css" rel="stylesheet">
+
+</head>
+
+<body ng-app="ozoneManager">
+
+<header class="navbar navbar-inverse navbar-fixed-top bs-docs-nav">
+    <div class="container-fluid">
+        <div class="navbar-header">
+            <button type="button" class="navbar-toggle collapsed" data-toggle="collapse" data-target="#navbar"
+                    aria-expanded="false" aria-controls="navbar">
+                <span class="sr-only">Toggle navigation</span>
+                <span class="icon-bar"></span>
+                <span class="icon-bar"></span>
+                <span class="icon-bar"></span>
+            </button>
+            <a class="navbar-brand" href="#">Ozone Manager</a>
+        </div>
+        <navmenu
+                metrics="{ 'OM metrics' : '#!/metrics/ozoneManager', 'Rpc metrics' : '#!/metrics/rpc'}"></navmenu>
+    </div>
+</header>
+
+<div class="container-fluid">
+    <ng-view></ng-view>
+</div><!-- /.container -->
+
+<script src="static/jquery-3.3.1.min.js"></script>
+<script src="static/angular-1.6.4.min.js"></script>
+<script src="static/angular-route-1.6.4.min.js"></script>
+<script src="static/d3-3.5.17.min.js"></script>
+<script src="static/nvd3-1.8.5.min.js"></script>
+<script src="static/angular-nvd3-1.0.9.min.js"></script>
+<script src="static/ozone.js"></script>
+<script src="ozoneManager.js"></script>
+<script src="static/bootstrap-3.3.7/js/bootstrap.min.js"></script>
+</body>
+</html>

http://git-wip-us.apache.org/repos/asf/hadoop/blob/061b1685/hadoop-ozone/ozone-manager/src/main/webapps/ozoneManager/main.css
----------------------------------------------------------------------
diff --git a/hadoop-ozone/ozone-manager/src/main/webapps/ozoneManager/main.css b/hadoop-ozone/ozone-manager/src/main/webapps/ozoneManager/main.css
new file mode 100644
index 0000000..e442adc
--- /dev/null
+++ b/hadoop-ozone/ozone-manager/src/main/webapps/ozoneManager/main.css
@@ -0,0 +1,23 @@
+/**
+ *   Licensed to the Apache Software Foundation (ASF) under one or more
+ *  contributor license agreements.  See the NOTICE file distributed with
+ *  this work for additional information regarding copyright ownership.
+ *  The ASF licenses this file to You under the Apache License, Version 2.0
+ *  (the "License"); you may not use this file except in compliance with
+ *  the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ *  Unless required by applicable law or agreed to in writing, software
+ *  distributed under the License is distributed on an "AS IS" BASIS,
+ *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ *  See the License for the specific language governing permissions and
+ *  limitations under the License.
+*/
+body {
+  padding-top: 50px;
+}
+.starter-template {
+  padding: 40px 15px;
+  text-align: center;
+}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hadoop/blob/061b1685/hadoop-ozone/ozone-manager/src/main/webapps/ozoneManager/main.html
----------------------------------------------------------------------
diff --git a/hadoop-ozone/ozone-manager/src/main/webapps/ozoneManager/main.html b/hadoop-ozone/ozone-manager/src/main/webapps/ozoneManager/main.html
new file mode 100644
index 0000000..0821899
--- /dev/null
+++ b/hadoop-ozone/ozone-manager/src/main/webapps/ozoneManager/main.html
@@ -0,0 +1,18 @@
+<!--
+    Licensed to the Apache Software Foundation (ASF) under one or more
+    contributor license agreements.  See the NOTICE file distributed with
+    this work for additional information regarding copyright ownership.
+    The ASF licenses this file to You under the Apache License, Version 2.0
+    (the "License"); you may not use this file except in compliance with
+    the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+    Unless required by applicable law or agreed to in writing, software
+    distributed under the License is distributed on an "AS IS" BASIS,
+    WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+    See the License for the specific language governing permissions and
+    limitations under the License.
+  -->
+<overview>
+</overview>
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hadoop/blob/061b1685/hadoop-ozone/ozone-manager/src/main/webapps/ozoneManager/om-metrics.html
----------------------------------------------------------------------
diff --git a/hadoop-ozone/ozone-manager/src/main/webapps/ozoneManager/om-metrics.html b/hadoop-ozone/ozone-manager/src/main/webapps/ozoneManager/om-metrics.html
new file mode 100644
index 0000000..15fba2f
--- /dev/null
+++ b/hadoop-ozone/ozone-manager/src/main/webapps/ozoneManager/om-metrics.html
@@ -0,0 +1,44 @@
+<!--
+   Licensed to the Apache Software Foundation (ASF) under one or more
+   contributor license agreements.  See the NOTICE file distributed with
+   this work for additional information regarding copyright ownership.
+   The ASF licenses this file to You under the Apache License, Version 2.0
+   (the "License"); you may not use this file except in compliance with
+   the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+-->
+<h1>OzoneManager Metrics</h1>
+
+<div ng-repeat="(type,numbers) in $ctrl.metrics.nums">
+    <h2>{{type}}</h2>
+    <div class="container">
+        <div class="col-md-6">
+            <h3>Requests ({{numbers.ops}} ops)</h3>
+            <nvd3 options="$ctrl.graphOptions"
+                  data="numbers.all"></nvd3>
+        </div>
+        <div class="col-md-6">
+            <h3>Failures</h3>
+            <nvd3 options="$ctrl.graphOptions"
+                  data="numbers.failures"></nvd3>
+        </div>
+    </div>
+</div>
+
+<div ng-show="$ctrl.metrics.others.length > 0">
+    <h2>Other JMX properties</h2>
+
+    <table class="table">
+        <tr ng-repeat="metric in $ctrl.metrics.others">
+            <td>{{metric.key}}</td>
+            <td>{{metric.value}}</td>
+        </tr>
+    </table>
+</div>


---------------------------------------------------------------------
To unsubscribe, e-mail: common-commits-unsubscribe@hadoop.apache.org
For additional commands, e-mail: common-commits-help@hadoop.apache.org


[18/50] [abbrv] hadoop git commit: YARN-7451. Add missing tests to verify the presence of custom resources of RM apps and scheduler webservice endpoints (snemeth via rkanter)

Posted by vi...@apache.org.
YARN-7451. Add missing tests to verify the presence of custom resources of RM apps and scheduler webservice endpoints (snemeth via rkanter)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/99febe7f
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/99febe7f
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/99febe7f

Branch: refs/heads/HDFS-12090
Commit: 99febe7fd50c31c0f5dd40fa7f376f2c1f64f8c3
Parents: 1726247
Author: Robert Kanter <rk...@apache.org>
Authored: Thu Jul 5 10:54:19 2018 -0700
Committer: Robert Kanter <rk...@apache.org>
Committed: Thu Jul 5 10:54:19 2018 -0700

----------------------------------------------------------------------
 .../resourcemanager/webapp/dao/AppInfo.java     |   2 +-
 .../webapp/dao/SchedulerInfo.java               |   8 +-
 .../fair/TestFairSchedulerConfiguration.java    |   9 +-
 .../webapp/TestRMWebServices.java               |  31 ++-
 .../webapp/TestRMWebServicesApps.java           |  14 +-
 ...estRMWebServicesAppsCustomResourceTypes.java | 242 +++++++++++++++++
 .../webapp/TestRMWebServicesCapacitySched.java  |  30 +-
 .../TestRMWebServicesConfigurationMutation.java |   5 +
 .../webapp/TestRMWebServicesFairScheduler.java  |  95 +++----
 .../TestRMWebServicesSchedulerActivities.java   |   2 +-
 ...ustomResourceTypesConfigurationProvider.java | 138 ++++++++++
 .../FairSchedulerJsonVerifications.java         | 139 ++++++++++
 .../FairSchedulerXmlVerifications.java          | 153 +++++++++++
 ...ervicesFairSchedulerCustomResourceTypes.java | 271 +++++++++++++++++++
 .../webapp/helper/AppInfoJsonVerifications.java | 123 +++++++++
 .../webapp/helper/AppInfoXmlVerifications.java  | 132 +++++++++
 .../webapp/helper/BufferedClientResponse.java   |  57 ++++
 .../helper/JsonCustomResourceTypeTestcase.java  |  77 ++++++
 .../ResourceRequestsJsonVerifications.java      | 252 +++++++++++++++++
 .../ResourceRequestsXmlVerifications.java       | 215 +++++++++++++++
 .../helper/XmlCustomResourceTypeTestCase.java   | 112 ++++++++
 21 files changed, 2020 insertions(+), 87 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/99febe7f/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/AppInfo.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/AppInfo.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/AppInfo.java
index d47f13d..9d82bc7 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/AppInfo.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/AppInfo.java
@@ -479,7 +479,7 @@ public class AppInfo {
   public int getNumNonAMContainersPreempted() {
     return numNonAMContainerPreempted;
   }
-  
+
   public int getNumAMContainersPreempted() {
     return numAMContainerPreempted;
   }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/99febe7f/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/SchedulerInfo.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/SchedulerInfo.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/SchedulerInfo.java
index 81491b1..163f707 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/SchedulerInfo.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/SchedulerInfo.java
@@ -41,8 +41,9 @@ public class SchedulerInfo {
   protected EnumSet<SchedulerResourceTypes> schedulingResourceTypes;
   protected int maximumClusterPriority;
 
+  // JAXB needs this
   public SchedulerInfo() {
-  } // JAXB needs this
+  }
 
   public SchedulerInfo(final ResourceManager rm) {
     ResourceScheduler rs = rm.getResourceScheduler();
@@ -74,7 +75,10 @@ public class SchedulerInfo {
   }
 
   public String getSchedulerResourceTypes() {
-    return Arrays.toString(minAllocResource.getResource().getResources());
+    if (minAllocResource != null) {
+      return Arrays.toString(minAllocResource.getResource().getResources());
+    }
+    return null;
   }
 
   public int getMaxClusterLevelAppPriority() {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/99febe7f/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestFairSchedulerConfiguration.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestFairSchedulerConfiguration.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestFairSchedulerConfiguration.java
index 76a5af5..70f83ab 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestFairSchedulerConfiguration.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestFairSchedulerConfiguration.java
@@ -48,6 +48,9 @@ import org.apache.log4j.spi.LoggingEvent;
 import org.junit.Assert;
 import org.junit.Test;
 
+/**
+ * Tests fair scheduler configuration.
+ */
 public class TestFairSchedulerConfiguration {
 
   private static final String A_CUSTOM_RESOURCE = "a-custom-resource";
@@ -242,12 +245,12 @@ public class TestFairSchedulerConfiguration {
         parseResourceConfigValue(" vcores = 75 % , memory-mb = 40 % , "
             + "test1 = 50 % ").getResource(clusterResource));
   }
-  
+
   @Test(expected = AllocationConfigurationException.class)
   public void testNoUnits() throws Exception {
     parseResourceConfigValue("1024");
   }
-  
+
   @Test(expected = AllocationConfigurationException.class)
   public void testOnlyMemory() throws Exception {
     parseResourceConfigValue("1024mb");
@@ -257,7 +260,7 @@ public class TestFairSchedulerConfiguration {
   public void testOnlyCPU() throws Exception {
     parseResourceConfigValue("1024vcores");
   }
-  
+
   @Test(expected = AllocationConfigurationException.class)
   public void testGibberish() throws Exception {
     parseResourceConfigValue("1o24vc0res");

http://git-wip-us.apache.org/repos/asf/hadoop/blob/99febe7f/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServices.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServices.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServices.java
index 0702d65..3902889 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServices.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServices.java
@@ -53,11 +53,7 @@ import org.apache.hadoop.yarn.api.records.ApplicationReport;
 import org.apache.hadoop.yarn.api.records.QueueACL;
 import org.apache.hadoop.yarn.api.records.QueueState;
 import org.apache.hadoop.yarn.conf.YarnConfiguration;
-import org.apache.hadoop.yarn.server.resourcemanager.ClientRMService;
-import org.apache.hadoop.yarn.server.resourcemanager.ClusterMetrics;
-import org.apache.hadoop.yarn.server.resourcemanager.MockRM;
-import org.apache.hadoop.yarn.server.resourcemanager.RMContextImpl;
-import org.apache.hadoop.yarn.server.resourcemanager.ResourceManager;
+import org.apache.hadoop.yarn.server.resourcemanager.*;
 import org.apache.hadoop.yarn.server.resourcemanager.nodelabels.RMNodeLabelsManager;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.QueueMetrics;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.ResourceScheduler;
@@ -76,11 +72,12 @@ import org.apache.hadoop.yarn.webapp.JerseyTestBase;
 import org.apache.hadoop.yarn.webapp.WebServicesTestUtils;
 import org.codehaus.jettison.json.JSONException;
 import org.codehaus.jettison.json.JSONObject;
-import org.eclipse.jetty.server.Response;
 import org.junit.Assert;
 import org.junit.Before;
 import org.junit.BeforeClass;
 import org.junit.Test;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 import org.w3c.dom.Document;
 import org.w3c.dom.Element;
 import org.w3c.dom.NodeList;
@@ -96,6 +93,8 @@ import com.sun.jersey.guice.spi.container.servlet.GuiceContainer;
 import com.sun.jersey.test.framework.WebAppDescriptor;
 
 public class TestRMWebServices extends JerseyTestBase {
+  private static final Logger LOG =
+          LoggerFactory.getLogger(TestRMWebServices.class);
 
   private static MockRM rm;
 
@@ -472,19 +471,19 @@ public class TestRMWebServices extends JerseyTestBase {
     QueueMetrics metrics = rs.getRootQueueMetrics();
     ClusterMetrics clusterMetrics = ClusterMetrics.getMetrics();
 
-    long totalMBExpect = 
+    long totalMBExpect =
         metrics.getAvailableMB() + metrics.getAllocatedMB();
-    long totalVirtualCoresExpect = 
+    long totalVirtualCoresExpect =
         metrics.getAvailableVirtualCores() + metrics.getAllocatedVirtualCores();
-    assertEquals("appsSubmitted doesn't match", 
+    assertEquals("appsSubmitted doesn't match",
         metrics.getAppsSubmitted(), submittedApps);
-    assertEquals("appsCompleted doesn't match", 
+    assertEquals("appsCompleted doesn't match",
         metrics.getAppsCompleted(), completedApps);
     assertEquals("reservedMB doesn't match",
         metrics.getReservedMB(), reservedMB);
-    assertEquals("availableMB doesn't match", 
+    assertEquals("availableMB doesn't match",
         metrics.getAvailableMB(), availableMB);
-    assertEquals("allocatedMB doesn't match", 
+    assertEquals("allocatedMB doesn't match",
         metrics.getAllocatedMB(), allocMB);
     assertEquals("reservedVirtualCores doesn't match",
         metrics.getReservedVirtualCores(), reservedVirtualCores);
@@ -597,11 +596,13 @@ public class TestRMWebServices extends JerseyTestBase {
 
   public void verifyClusterSchedulerFifo(JSONObject json) throws JSONException,
       Exception {
-    assertEquals("incorrect number of elements", 1, json.length());
+    assertEquals("incorrect number of elements in: " + json, 1, json.length());
     JSONObject info = json.getJSONObject("scheduler");
-    assertEquals("incorrect number of elements", 1, info.length());
+    assertEquals("incorrect number of elements in: " + info, 1, info.length());
     info = info.getJSONObject("schedulerInfo");
-    assertEquals("incorrect number of elements", 11, info.length());
+
+    LOG.debug("schedulerInfo: {}", info);
+    assertEquals("incorrect number of elements in: " + info, 11, info.length());
 
     verifyClusterSchedulerFifoGeneric(info.getString("type"),
         info.getString("qstate"), (float) info.getDouble("capacity"),

http://git-wip-us.apache.org/repos/asf/hadoop/blob/99febe7f/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServicesApps.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServicesApps.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServicesApps.java
index 6c6f400..15f94e1 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServicesApps.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServicesApps.java
@@ -79,7 +79,7 @@ import com.sun.jersey.test.framework.WebAppDescriptor;
 public class TestRMWebServicesApps extends JerseyTestBase {
 
   private static MockRM rm;
-  
+
   private static final int CONTAINER_MB = 1024;
 
   private static class WebServletModule extends ServletModule {
@@ -324,7 +324,7 @@ public class TestRMWebServicesApps extends JerseyTestBase {
     assertEquals("incorrect number of elements", 1, apps.length());
     array = apps.getJSONArray("app");
     assertEquals("incorrect number of elements", 2, array.length());
-    assertTrue("both app states of ACCEPTED and KILLED are not present", 
+    assertTrue("both app states of ACCEPTED and KILLED are not present",
         (array.getJSONObject(0).getString("state").equals("ACCEPTED") &&
         array.getJSONObject(1).getString("state").equals("KILLED")) ||
         (array.getJSONObject(0).getString("state").equals("KILLED") &&
@@ -375,12 +375,12 @@ public class TestRMWebServicesApps extends JerseyTestBase {
     assertEquals("incorrect number of elements", 1, apps.length());
     array = apps.getJSONArray("app");
     assertEquals("incorrect number of elements", 2, array.length());
-    assertTrue("both app states of ACCEPTED and KILLED are not present", 
+    assertTrue("both app states of ACCEPTED and KILLED are not present",
         (array.getJSONObject(0).getString("state").equals("ACCEPTED") &&
         array.getJSONObject(1).getString("state").equals("KILLED")) ||
         (array.getJSONObject(0).getString("state").equals("KILLED") &&
         array.getJSONObject(1).getString("state").equals("ACCEPTED")));
-    
+
     rm.stop();
   }
 
@@ -511,7 +511,8 @@ public class TestRMWebServicesApps extends JerseyTestBase {
     WebResource r = resource();
 
     ClientResponse response = r.path("ws").path("v1").path("cluster")
-        .path("apps").queryParam("finalStatus", FinalApplicationStatus.UNDEFINED.toString())
+        .path("apps").queryParam("finalStatus",
+                    FinalApplicationStatus.UNDEFINED.toString())
         .accept(MediaType.APPLICATION_JSON).get(ClientResponse.class);
     assertEquals(MediaType.APPLICATION_JSON_TYPE + "; " + JettyUtils.UTF_8,
         response.getType().toString());
@@ -1804,7 +1805,8 @@ public class TestRMWebServicesApps extends JerseyTestBase {
     int numAttempt = 1;
     while (true) {
       // fail the AM by sending CONTAINER_FINISHED event without registering.
-      amNodeManager.nodeHeartbeat(am.getApplicationAttemptId(), 1, ContainerState.COMPLETE);
+      amNodeManager.nodeHeartbeat(am.getApplicationAttemptId(), 1,
+              ContainerState.COMPLETE);
       rm.waitForState(am.getApplicationAttemptId(), RMAppAttemptState.FAILED);
       if (numAttempt == maxAppAttempts) {
         rm.waitForState(app1.getApplicationId(), RMAppState.FAILED);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/99febe7f/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServicesAppsCustomResourceTypes.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServicesAppsCustomResourceTypes.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServicesAppsCustomResourceTypes.java
new file mode 100644
index 0000000..83e0056
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServicesAppsCustomResourceTypes.java
@@ -0,0 +1,242 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.server.resourcemanager.webapp;
+
+import com.google.inject.Guice;
+import com.google.inject.servlet.ServletModule;
+import com.sun.jersey.api.client.ClientResponse;
+import com.sun.jersey.api.client.WebResource;
+import com.sun.jersey.guice.spi.container.servlet.GuiceContainer;
+import com.sun.jersey.test.framework.WebAppDescriptor;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.yarn.api.records.ResourceRequest;
+import org.apache.hadoop.yarn.conf.YarnConfiguration;
+import org.apache.hadoop.yarn.server.resourcemanager.MockAM;
+import org.apache.hadoop.yarn.server.resourcemanager.MockNM;
+import org.apache.hadoop.yarn.server.resourcemanager.MockRM;
+import org.apache.hadoop.yarn.server.resourcemanager.ResourceManager;
+import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMApp;
+import org.apache.hadoop.yarn.server.resourcemanager.scheduler.AbstractYarnScheduler;
+import org.apache.hadoop.yarn.server.resourcemanager.scheduler.ResourceScheduler;
+import org.apache.hadoop.yarn.server.resourcemanager.scheduler.fifo.FifoScheduler;
+import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.AppInfo;
+import org.apache.hadoop.yarn.server.resourcemanager.webapp.fairscheduler.CustomResourceTypesConfigurationProvider;
+import org.apache.hadoop.yarn.server.resourcemanager.webapp.helper.AppInfoJsonVerifications;
+import org.apache.hadoop.yarn.server.resourcemanager.webapp.helper.AppInfoXmlVerifications;
+import org.apache.hadoop.yarn.server.resourcemanager.webapp.helper.BufferedClientResponse;
+import org.apache.hadoop.yarn.server.resourcemanager.webapp.helper.JsonCustomResourceTypeTestcase;
+import org.apache.hadoop.yarn.server.resourcemanager.webapp.helper.ResourceRequestsJsonVerifications;
+import org.apache.hadoop.yarn.server.resourcemanager.webapp.helper.ResourceRequestsXmlVerifications;
+import org.apache.hadoop.yarn.server.resourcemanager.webapp.helper.XmlCustomResourceTypeTestCase;
+import org.apache.hadoop.yarn.util.resource.ResourceUtils;
+import org.apache.hadoop.yarn.webapp.GenericExceptionHandler;
+import org.apache.hadoop.yarn.webapp.GuiceServletConfig;
+import org.apache.hadoop.yarn.webapp.JerseyTestBase;
+import org.codehaus.jettison.json.JSONArray;
+import org.codehaus.jettison.json.JSONException;
+import org.codehaus.jettison.json.JSONObject;
+import org.junit.Before;
+import org.junit.Test;
+import org.w3c.dom.Element;
+import org.w3c.dom.Node;
+import org.w3c.dom.NodeList;
+
+import javax.ws.rs.core.MediaType;
+import java.util.ArrayList;
+
+import static org.junit.Assert.assertEquals;
+
+/**
+ * This test verifies that custom resource types are correctly serialized to XML
+ * and JSON when HTTP GET request is sent to the resource: ws/v1/cluster/apps.
+ */
+public class TestRMWebServicesAppsCustomResourceTypes extends JerseyTestBase {
+
+  private static MockRM rm;
+  private static final int CONTAINER_MB = 1024;
+
+  private static class WebServletModule extends ServletModule {
+    @Override
+    protected void configureServlets() {
+      bind(JAXBContextResolver.class);
+      bind(RMWebServices.class);
+      bind(GenericExceptionHandler.class);
+      Configuration conf = new Configuration();
+      conf.setInt(YarnConfiguration.RM_AM_MAX_ATTEMPTS,
+          YarnConfiguration.DEFAULT_RM_AM_MAX_ATTEMPTS);
+      conf.setClass(YarnConfiguration.RM_SCHEDULER, FifoScheduler.class,
+          ResourceScheduler.class);
+      initResourceTypes(conf);
+      rm = new MockRM(conf);
+      bind(ResourceManager.class).toInstance(rm);
+      serve("/*").with(GuiceContainer.class);
+    }
+
+    private void initResourceTypes(Configuration conf) {
+      conf.set(YarnConfiguration.RM_CONFIGURATION_PROVIDER_CLASS,
+          CustomResourceTypesConfigurationProvider.class.getName());
+      ResourceUtils.resetResourceTypes(conf);
+    }
+  }
+
+  @Before
+  @Override
+  public void setUp() throws Exception {
+    super.setUp();
+    createInjectorForWebServletModule();
+  }
+
+  private void createInjectorForWebServletModule() {
+    GuiceServletConfig
+        .setInjector(Guice.createInjector(new WebServletModule()));
+  }
+
+  public TestRMWebServicesAppsCustomResourceTypes() {
+    super(new WebAppDescriptor.Builder(
+        "org.apache.hadoop.yarn.server.resourcemanager.webapp")
+            .contextListenerClass(GuiceServletConfig.class)
+            .filterClass(com.google.inject.servlet.GuiceFilter.class)
+            .contextPath("jersey-guice-filter").servletPath("/").build());
+  }
+
+  @Test
+  public void testRunningAppXml() throws Exception {
+    rm.start();
+    MockNM amNodeManager = rm.registerNode("127.0.0.1:1234", 2048);
+    RMApp app1 = rm.submitApp(CONTAINER_MB, "testwordcount", "user1");
+    MockAM am1 = MockRM.launchAndRegisterAM(app1, rm, amNodeManager);
+    am1.allocate("*", 2048, 1, new ArrayList<>());
+    amNodeManager.nodeHeartbeat(true);
+
+    WebResource r = resource();
+    WebResource path = r.path("ws").path("v1").path("cluster").path("apps");
+    ClientResponse response =
+        path.accept(MediaType.APPLICATION_XML).get(ClientResponse.class);
+
+    XmlCustomResourceTypeTestCase testCase =
+            new XmlCustomResourceTypeTestCase(path,
+                    new BufferedClientResponse(response));
+    testCase.verify(document -> {
+      NodeList apps = document.getElementsByTagName("apps");
+      assertEquals("incorrect number of apps elements", 1, apps.getLength());
+
+      NodeList appArray = ((Element)(apps.item(0)))
+              .getElementsByTagName("app");
+      assertEquals("incorrect number of app elements", 1, appArray.getLength());
+
+      verifyAppsXML(appArray, app1);
+    });
+
+    rm.stop();
+  }
+
+  @Test
+  public void testRunningAppJson() throws Exception {
+    rm.start();
+    MockNM amNodeManager = rm.registerNode("127.0.0.1:1234", 2048);
+    RMApp app1 = rm.submitApp(CONTAINER_MB, "testwordcount", "user1");
+    MockAM am1 = MockRM.launchAndRegisterAM(app1, rm, amNodeManager);
+    am1.allocate("*", 2048, 1, new ArrayList<>());
+    amNodeManager.nodeHeartbeat(true);
+
+    WebResource r = resource();
+    WebResource path = r.path("ws").path("v1").path("cluster").path("apps");
+    ClientResponse response =
+        path.accept(MediaType.APPLICATION_JSON).get(ClientResponse.class);
+
+    JsonCustomResourceTypeTestcase testCase =
+        new JsonCustomResourceTypeTestcase(path,
+            new BufferedClientResponse(response));
+    testCase.verify(json -> {
+      try {
+        assertEquals("incorrect number of apps elements", 1, json.length());
+        JSONObject apps = json.getJSONObject("apps");
+        assertEquals("incorrect number of app elements", 1, apps.length());
+        JSONArray array = apps.getJSONArray("app");
+        assertEquals("incorrect count of app", 1, array.length());
+
+        verifyAppInfoJson(array.getJSONObject(0), app1);
+      } catch (JSONException e) {
+        throw new RuntimeException(e);
+      }
+    });
+
+    rm.stop();
+  }
+
+  private void verifyAppsXML(NodeList appArray, RMApp app) {
+    for (int i = 0; i < appArray.getLength(); i++) {
+      Element element = (Element) appArray.item(i);
+      AppInfoXmlVerifications.verify(element, app);
+
+      NodeList resourceRequests =
+          element.getElementsByTagName("resourceRequests");
+      assertEquals(1, resourceRequests.getLength());
+      Node resourceRequest = resourceRequests.item(0);
+      ResourceRequest rr =
+          ((AbstractYarnScheduler) rm.getRMContext().getScheduler())
+              .getApplicationAttempt(
+                  app.getCurrentAppAttempt().getAppAttemptId())
+              .getAppSchedulingInfo().getAllResourceRequests().get(0);
+      ResourceRequestsXmlVerifications.verifyWithCustomResourceTypes(
+              (Element) resourceRequest, rr,
+          CustomResourceTypesConfigurationProvider.getCustomResourceTypes());
+    }
+  }
+
+  private void verifyAppInfoJson(JSONObject info, RMApp app) throws
+          JSONException {
+    int expectedNumberOfElements = getExpectedNumberOfElements(app);
+
+    assertEquals("incorrect number of elements", expectedNumberOfElements,
+        info.length());
+
+    AppInfoJsonVerifications.verify(info, app);
+
+    JSONArray resourceRequests = info.getJSONArray("resourceRequests");
+    JSONObject requestInfo = resourceRequests.getJSONObject(0);
+    ResourceRequest rr =
+        ((AbstractYarnScheduler) rm.getRMContext().getScheduler())
+            .getApplicationAttempt(app.getCurrentAppAttempt().getAppAttemptId())
+            .getAppSchedulingInfo().getAllResourceRequests().get(0);
+
+    ResourceRequestsJsonVerifications.verifyWithCustomResourceTypes(
+            requestInfo, rr,
+            CustomResourceTypesConfigurationProvider.getCustomResourceTypes());
+  }
+
+  private int getExpectedNumberOfElements(RMApp app) {
+    int expectedNumberOfElements = 40 + 2; // 2 -> resourceRequests
+    if (app.getApplicationSubmissionContext()
+        .getNodeLabelExpression() != null) {
+      expectedNumberOfElements++;
+    }
+
+    if (app.getAMResourceRequests().get(0).getNodeLabelExpression() != null) {
+      expectedNumberOfElements++;
+    }
+
+    if (AppInfo
+        .getAmRPCAddressFromRMAppAttempt(app.getCurrentAppAttempt()) != null) {
+      expectedNumberOfElements++;
+    }
+    return expectedNumberOfElements;
+  }
+
+}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/99febe7f/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServicesCapacitySched.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServicesCapacitySched.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServicesCapacitySched.java
index e37f76f..46d0a66 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServicesCapacitySched.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServicesCapacitySched.java
@@ -146,7 +146,7 @@ public class TestRMWebServicesCapacitySched extends JerseyTestBase {
     config.setUserLimitFactor(B2, 100.0f);
     config.setCapacity(B3, 0.5f);
     config.setUserLimitFactor(B3, 100.0f);
-    
+
     config.setQueues(A1, new String[] {"a1a", "a1b"});
     final String A1A = A1 + ".a1a";
     config.setCapacity(A1A, 85);
@@ -254,7 +254,7 @@ public class TestRMWebServicesCapacitySched extends JerseyTestBase {
     }
   }
 
-  public void verifySubQueueXML(Element qElem, String q, 
+  public void verifySubQueueXML(Element qElem, String q,
       float parentAbsCapacity, float parentAbsMaxCapacity)
       throws Exception {
     NodeList children = qElem.getChildNodes();
@@ -317,30 +317,34 @@ public class TestRMWebServicesCapacitySched extends JerseyTestBase {
 
   private void verifyClusterScheduler(JSONObject json) throws JSONException,
       Exception {
-    assertEquals("incorrect number of elements", 1, json.length());
+    assertEquals("incorrect number of elements in: " + json, 1, json.length());
     JSONObject info = json.getJSONObject("scheduler");
-    assertEquals("incorrect number of elements", 1, info.length());
+    assertEquals("incorrect number of elements in: " + info, 1, info.length());
     info = info.getJSONObject("schedulerInfo");
-    assertEquals("incorrect number of elements", 8, info.length());
+    assertEquals("incorrect number of elements in: " + info, 8, info.length());
     verifyClusterSchedulerGeneric(info.getString("type"),
         (float) info.getDouble("usedCapacity"),
         (float) info.getDouble("capacity"),
         (float) info.getDouble("maxCapacity"), info.getString("queueName"));
     JSONObject health = info.getJSONObject("health");
     assertNotNull(health);
-    assertEquals("incorrect number of elements", 3, health.length());
+    assertEquals("incorrect number of elements in: " + health, 3,
+        health.length());
     JSONArray operationsInfo = health.getJSONArray("operationsInfo");
-    assertEquals("incorrect number of elements", 4, operationsInfo.length());
+    assertEquals("incorrect number of elements in: " + health, 4,
+        operationsInfo.length());
     JSONArray lastRunDetails = health.getJSONArray("lastRunDetails");
-    assertEquals("incorrect number of elements", 3, lastRunDetails.length());
+    assertEquals("incorrect number of elements in: " + health, 3,
+        lastRunDetails.length());
 
     JSONArray arr = info.getJSONObject("queues").getJSONArray("queue");
-    assertEquals("incorrect number of elements", 2, arr.length());
+    assertEquals("incorrect number of elements in: " + arr, 2, arr.length());
 
     // test subqueues
     for (int i = 0; i < arr.length(); i++) {
       JSONObject obj = arr.getJSONObject(i);
-      String q = CapacitySchedulerConfiguration.ROOT + "." + obj.getString("queueName");
+      String q = CapacitySchedulerConfiguration.ROOT + "." +
+              obj.getString("queueName");
       verifySubQueue(obj, q, 100, 100);
     }
   }
@@ -355,7 +359,7 @@ public class TestRMWebServicesCapacitySched extends JerseyTestBase {
     assertTrue("queueName doesn't match", "root".matches(queueName));
   }
 
-  private void verifySubQueue(JSONObject info, String q, 
+  private void verifySubQueue(JSONObject info, String q,
       float parentAbsCapacity, float parentAbsMaxCapacity)
       throws JSONException, Exception {
     int numExpectedElements = 20;
@@ -464,7 +468,7 @@ public class TestRMWebServicesCapacitySched extends JerseyTestBase {
         csConf.getUserLimitFactor(q), info.userLimitFactor, 1e-3f);
   }
 
-  //Return a child Node of node with the tagname or null if none exists 
+  //Return a child Node of node with the tagname or null if none exists
   private Node getChildNodeByName(Node node, String tagname) {
     NodeList nodeList = node.getChildNodes();
     for (int i=0; i < nodeList.getLength(); ++i) {
@@ -514,7 +518,7 @@ public class TestRMWebServicesCapacitySched extends JerseyTestBase {
           for (int j=0; j<users.getLength(); ++j) {
             Node user = users.item(j);
             String username = getChildNodeByName(user, "username")
-              .getTextContent(); 
+                .getTextContent();
             assertTrue(username.equals("user1") || username.equals("user2"));
             //Should be a parsable integer
             Integer.parseInt(getChildNodeByName(getChildNodeByName(user,

http://git-wip-us.apache.org/repos/asf/hadoop/blob/99febe7f/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServicesConfigurationMutation.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServicesConfigurationMutation.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServicesConfigurationMutation.java
index 3d28f12..99b5648 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServicesConfigurationMutation.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServicesConfigurationMutation.java
@@ -42,6 +42,8 @@ import org.apache.hadoop.yarn.webapp.util.YarnWebServiceUtils;
 import org.junit.After;
 import org.junit.Before;
 import org.junit.Test;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 import javax.ws.rs.core.MediaType;
 import javax.ws.rs.core.Response.Status;
@@ -59,6 +61,8 @@ import static org.junit.Assert.assertNull;
  * Test scheduler configuration mutation via REST API.
  */
 public class TestRMWebServicesConfigurationMutation extends JerseyTestBase {
+  private static final Logger LOG = LoggerFactory
+          .getLogger(TestRMWebServicesConfigurationMutation.class);
 
   private static final File CONF_FILE = new File(new File("target",
       "test-classes"), YarnConfiguration.CS_CONFIGURATION_FILE);
@@ -396,6 +400,7 @@ public class TestRMWebServicesConfigurationMutation extends JerseyTestBase {
             .entity(YarnWebServiceUtils.toJson(updateInfo,
                 SchedConfUpdateInfo.class), MediaType.APPLICATION_JSON)
             .put(ClientResponse.class);
+    LOG.debug("Response headers: " + response.getHeaders());
     assertEquals(Status.OK.getStatusCode(), response.getStatus());
     CapacitySchedulerConfiguration newCSConf = cs.getConfiguration();
     assertEquals(0.2f, newCSConf

http://git-wip-us.apache.org/repos/asf/hadoop/blob/99febe7f/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServicesFairScheduler.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServicesFairScheduler.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServicesFairScheduler.java
index e77785b..58c72ee 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServicesFairScheduler.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServicesFairScheduler.java
@@ -6,9 +6,9 @@
  * to you under the Apache License, Version 2.0 (the
  * "License"); you may not use this file except in compliance
  * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
  * Unless required by applicable law or agreed to in writing, software
  * distributed under the License is distributed on an "AS IS" BASIS,
  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
@@ -16,13 +16,14 @@
  * limitations under the License.
  */
 
-package org.apache.hadoop.yarn.server.resourcemanager.webapp;
-
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.fail;
-
-import javax.ws.rs.core.MediaType;
+package org.apache.hadoop.yarn.server.resourcemanager.webapp.fairscheduler;
 
+import com.google.inject.Guice;
+import com.google.inject.servlet.ServletModule;
+import com.sun.jersey.api.client.ClientResponse;
+import com.sun.jersey.api.client.WebResource;
+import com.sun.jersey.guice.spi.container.servlet.GuiceContainer;
+import com.sun.jersey.test.framework.WebAppDescriptor;
 import org.apache.hadoop.http.JettyUtils;
 import org.apache.hadoop.yarn.conf.YarnConfiguration;
 import org.apache.hadoop.yarn.server.resourcemanager.MockRM;
@@ -30,6 +31,9 @@ import org.apache.hadoop.yarn.server.resourcemanager.ResourceManager;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.ResourceScheduler;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.fair.FairScheduler;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.fair.QueueManager;
+
+import org.apache.hadoop.yarn.server.resourcemanager.webapp.JAXBContextResolver;
+import org.apache.hadoop.yarn.server.resourcemanager.webapp.RMWebServices;
 import org.apache.hadoop.yarn.webapp.GenericExceptionHandler;
 import org.apache.hadoop.yarn.webapp.GuiceServletConfig;
 import org.apache.hadoop.yarn.webapp.JerseyTestBase;
@@ -38,18 +42,18 @@ import org.codehaus.jettison.json.JSONException;
 import org.codehaus.jettison.json.JSONObject;
 import org.junit.Before;
 import org.junit.Test;
+import javax.ws.rs.core.MediaType;
 
-import com.google.inject.Guice;
-import com.google.inject.servlet.ServletModule;
-import com.sun.jersey.api.client.ClientResponse;
-import com.sun.jersey.api.client.WebResource;
-import com.sun.jersey.guice.spi.container.servlet.GuiceContainer;
-import com.sun.jersey.test.framework.WebAppDescriptor;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.fail;
 
+/**
+ * Tests RM Webservices fair scheduler resources.
+ */
 public class TestRMWebServicesFairScheduler extends JerseyTestBase {
   private static MockRM rm;
   private static YarnConfiguration conf;
-  
+
   private static class WebServletModule extends ServletModule {
     @Override
     protected void configureServlets() {
@@ -58,7 +62,7 @@ public class TestRMWebServicesFairScheduler extends JerseyTestBase {
       bind(GenericExceptionHandler.class);
       conf = new YarnConfiguration();
       conf.setClass(YarnConfiguration.RM_SCHEDULER, FairScheduler.class,
-        ResourceScheduler.class);
+          ResourceScheduler.class);
       rm = new MockRM(conf);
       bind(ResourceManager.class).toInstance(rm);
       serve("/*").with(GuiceContainer.class);
@@ -66,32 +70,32 @@ public class TestRMWebServicesFairScheduler extends JerseyTestBase {
   }
 
   static {
-    GuiceServletConfig.setInjector(
-        Guice.createInjector(new WebServletModule()));
+    GuiceServletConfig
+        .setInjector(Guice.createInjector(new WebServletModule()));
   }
 
   @Before
   @Override
   public void setUp() throws Exception {
     super.setUp();
-    GuiceServletConfig.setInjector(
-        Guice.createInjector(new WebServletModule()));
+    GuiceServletConfig
+        .setInjector(Guice.createInjector(new WebServletModule()));
   }
 
   public TestRMWebServicesFairScheduler() {
     super(new WebAppDescriptor.Builder(
         "org.apache.hadoop.yarn.server.resourcemanager.webapp")
-        .contextListenerClass(GuiceServletConfig.class)
-        .filterClass(com.google.inject.servlet.GuiceFilter.class)
-        .contextPath("jersey-guice-filter").servletPath("/").build());
+            .contextListenerClass(GuiceServletConfig.class)
+            .filterClass(com.google.inject.servlet.GuiceFilter.class)
+            .contextPath("jersey-guice-filter").servletPath("/").build());
   }
-  
+
   @Test
-  public void testClusterScheduler() throws JSONException, Exception {
+  public void testClusterScheduler() throws JSONException {
     WebResource r = resource();
-    ClientResponse response = r.path("ws").path("v1").path("cluster")
-        .path("scheduler").accept(MediaType.APPLICATION_JSON)
-        .get(ClientResponse.class);
+    ClientResponse response =
+        r.path("ws").path("v1").path("cluster").path("scheduler")
+            .accept(MediaType.APPLICATION_JSON).get(ClientResponse.class);
     assertEquals(MediaType.APPLICATION_JSON_TYPE + "; " + JettyUtils.UTF_8,
         response.getType().toString());
     JSONObject json = response.getEntity(JSONObject.class);
@@ -99,52 +103,51 @@ public class TestRMWebServicesFairScheduler extends JerseyTestBase {
   }
 
   @Test
-  public void testClusterSchedulerSlash() throws JSONException, Exception {
+  public void testClusterSchedulerSlash() throws JSONException {
     WebResource r = resource();
-    ClientResponse response = r.path("ws").path("v1").path("cluster")
-        .path("scheduler/").accept(MediaType.APPLICATION_JSON)
-        .get(ClientResponse.class);
+    ClientResponse response =
+        r.path("ws").path("v1").path("cluster").path("scheduler/")
+            .accept(MediaType.APPLICATION_JSON).get(ClientResponse.class);
     assertEquals(MediaType.APPLICATION_JSON_TYPE + "; " + JettyUtils.UTF_8,
         response.getType().toString());
     JSONObject json = response.getEntity(JSONObject.class);
     verifyClusterScheduler(json);
   }
-  
+
   @Test
-  public void testClusterSchedulerWithSubQueues() throws JSONException,
-      Exception {
-    FairScheduler scheduler = (FairScheduler)rm.getResourceScheduler();
+  public void testClusterSchedulerWithSubQueues()
+      throws JSONException {
+    FairScheduler scheduler = (FairScheduler) rm.getResourceScheduler();
     QueueManager queueManager = scheduler.getQueueManager();
     // create LeafQueue
     queueManager.getLeafQueue("root.q.subqueue1", true);
     queueManager.getLeafQueue("root.q.subqueue2", true);
 
     WebResource r = resource();
-    ClientResponse response = r.path("ws").path("v1").path("cluster")
-        .path("scheduler").accept(MediaType.APPLICATION_JSON)
-        .get(ClientResponse.class);
+    ClientResponse response =
+        r.path("ws").path("v1").path("cluster").path("scheduler")
+            .accept(MediaType.APPLICATION_JSON).get(ClientResponse.class);
     assertEquals(MediaType.APPLICATION_JSON_TYPE + "; " + JettyUtils.UTF_8,
         response.getType().toString());
     JSONObject json = response.getEntity(JSONObject.class);
     JSONArray subQueueInfo = json.getJSONObject("scheduler")
         .getJSONObject("schedulerInfo").getJSONObject("rootQueue")
-        .getJSONObject("childQueues").getJSONArray("queue")
-        .getJSONObject(1).getJSONObject("childQueues").getJSONArray("queue");
+        .getJSONObject("childQueues").getJSONArray("queue").getJSONObject(1)
+        .getJSONObject("childQueues").getJSONArray("queue");
     // subQueueInfo is consist of subqueue1 and subqueue2 info
     assertEquals(2, subQueueInfo.length());
 
     // Verify 'childQueues' field is omitted from FairSchedulerLeafQueueInfo.
     try {
       subQueueInfo.getJSONObject(1).getJSONObject("childQueues");
-      fail("FairSchedulerQueueInfo should omit field 'childQueues'" +
-           "if child queue is empty.");
+      fail("FairSchedulerQueueInfo should omit field 'childQueues'"
+          + "if child queue is empty.");
     } catch (JSONException je) {
       assertEquals("JSONObject[\"childQueues\"] not found.", je.getMessage());
     }
   }
 
-  private void verifyClusterScheduler(JSONObject json) throws JSONException,
-      Exception {
+  private void verifyClusterScheduler(JSONObject json) throws JSONException {
     assertEquals("incorrect number of elements", 1, json.length());
     JSONObject info = json.getJSONObject("scheduler");
     assertEquals("incorrect number of elements", 1, info.length());

http://git-wip-us.apache.org/repos/asf/hadoop/blob/99febe7f/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServicesSchedulerActivities.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServicesSchedulerActivities.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServicesSchedulerActivities.java
index 1e61186..40cf483 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServicesSchedulerActivities.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServicesSchedulerActivities.java
@@ -457,7 +457,7 @@ public class TestRMWebServicesSchedulerActivities
       if (object.getClass() == JSONObject.class) {
         assertEquals("Number of allocations is wrong", 1, realValue);
       } else if (object.getClass() == JSONArray.class) {
-        assertEquals("Number of allocations is wrong",
+        assertEquals("Number of allocations is wrong in: " + object,
             ((JSONArray) object).length(), realValue);
       }
     }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/99febe7f/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/fairscheduler/CustomResourceTypesConfigurationProvider.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/fairscheduler/CustomResourceTypesConfigurationProvider.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/fairscheduler/CustomResourceTypesConfigurationProvider.java
new file mode 100644
index 0000000..bb1fce0
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/fairscheduler/CustomResourceTypesConfigurationProvider.java
@@ -0,0 +1,138 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.server.resourcemanager.webapp.fairscheduler;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.yarn.LocalConfigurationProvider;
+import org.apache.hadoop.yarn.conf.YarnConfiguration;
+import org.apache.hadoop.yarn.exceptions.YarnException;
+
+import java.io.ByteArrayInputStream;
+import java.io.IOException;
+import java.io.InputStream;
+import java.util.List;
+import java.util.stream.Collectors;
+import java.util.stream.IntStream;
+
+import static java.util.stream.Collectors.toList;
+
+/**
+ * This class can generate an XML configuration file of custom resource types.
+ * See createInitialResourceTypes for the default values. All custom resource
+ * type is prefixed with CUSTOM_RESOURCE_PREFIX. Please use the
+ * getConfigurationInputStream method to get an InputStream of the XML. If you
+ * want to have different number of resources in your tests, please see usages
+ * of this class in this test class:
+ * {@link TestRMWebServicesFairSchedulerCustomResourceTypes}
+ *
+ */
+public class CustomResourceTypesConfigurationProvider
+    extends LocalConfigurationProvider {
+
+  private static class CustomResourceTypes {
+    private int count;
+    private String xml;
+
+    CustomResourceTypes(String xml, int count) {
+      this.xml = xml;
+      this.count = count;
+    }
+
+    public int getCount() {
+      return count;
+    }
+
+    public String getXml() {
+      return xml;
+    }
+  }
+
+  private static final String CUSTOM_RESOURCE_PREFIX = "customResource-";
+
+  private static CustomResourceTypes customResourceTypes =
+      createInitialResourceTypes();
+
+  private static CustomResourceTypes createInitialResourceTypes() {
+    return createCustomResourceTypes(2);
+  }
+
+  private static CustomResourceTypes createCustomResourceTypes(int count) {
+    List<String> resourceTypeNames = generateResourceTypeNames(count);
+
+    List<String> resourceUnitXmlElements = IntStream.range(0, count)
+            .boxed()
+            .map(i -> getResourceUnitsXml(resourceTypeNames.get(i)))
+            .collect(toList());
+
+    StringBuilder sb = new StringBuilder("<configuration>\n");
+    sb.append(getResourceTypesXml(resourceTypeNames));
+
+    for (String resourceUnitXml : resourceUnitXmlElements) {
+      sb.append(resourceUnitXml);
+
+    }
+    sb.append("</configuration>");
+
+    return new CustomResourceTypes(sb.toString(), count);
+  }
+
+  private static List<String> generateResourceTypeNames(int count) {
+    return IntStream.range(0, count)
+            .boxed()
+            .map(i -> CUSTOM_RESOURCE_PREFIX + i)
+            .collect(toList());
+  }
+
+  private static String getResourceUnitsXml(String resource) {
+    return "<property>\n" + "<name>yarn.resource-types." + resource
+        + ".units</name>\n" + "<value>k</value>\n" + "</property>\n";
+  }
+
+  private static String getResourceTypesXml(List<String> resources) {
+    final String resourceTypes = makeCommaSeparatedString(resources);
+
+    return "<property>\n" + "<name>yarn.resource-types</name>\n" + "<value>"
+        + resourceTypes + "</value>\n" + "</property>\n";
+  }
+
+  private static String makeCommaSeparatedString(List<String> resources) {
+    return resources.stream().collect(Collectors.joining(","));
+  }
+
+  @Override
+  public InputStream getConfigurationInputStream(Configuration bootstrapConf,
+      String name) throws YarnException, IOException {
+    if (YarnConfiguration.RESOURCE_TYPES_CONFIGURATION_FILE.equals(name)) {
+      return new ByteArrayInputStream(
+          customResourceTypes.getXml().getBytes());
+    } else {
+      return super.getConfigurationInputStream(bootstrapConf, name);
+    }
+  }
+
+  public static void reset() {
+    customResourceTypes = createInitialResourceTypes();
+  }
+
+  public static void setNumberOfResourceTypes(int count) {
+    customResourceTypes = createCustomResourceTypes(count);
+  }
+
+  public static List<String> getCustomResourceTypes() {
+    return generateResourceTypeNames(customResourceTypes.getCount());
+  }
+}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/99febe7f/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/fairscheduler/FairSchedulerJsonVerifications.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/fairscheduler/FairSchedulerJsonVerifications.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/fairscheduler/FairSchedulerJsonVerifications.java
new file mode 100644
index 0000000..924411a
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/fairscheduler/FairSchedulerJsonVerifications.java
@@ -0,0 +1,139 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.server.resourcemanager.webapp.fairscheduler;
+
+import com.google.common.collect.Sets;
+import org.apache.hadoop.yarn.api.protocolrecords.ResourceTypes;
+import org.apache.hadoop.yarn.api.records.ResourceInformation;
+import org.codehaus.jettison.json.JSONArray;
+import org.codehaus.jettison.json.JSONException;
+import org.codehaus.jettison.json.JSONObject;
+
+import java.util.List;
+import java.util.Set;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertNotNull;
+import static org.junit.Assert.assertTrue;
+
+/**
+ * This test helper class is primarily used by
+ * {@link TestRMWebServicesFairSchedulerCustomResourceTypes}.
+ */
+public class FairSchedulerJsonVerifications {
+
+  private static final Set<String> RESOURCE_FIELDS =
+      Sets.newHashSet("minResources", "amUsedResources", "amMaxResources",
+          "fairResources", "clusterResources", "reservedResources",
+              "maxResources", "usedResources", "steadyFairResources",
+              "demandResources");
+  private final Set<String> customResourceTypes;
+
+  FairSchedulerJsonVerifications(List<String> customResourceTypes) {
+    this.customResourceTypes = Sets.newHashSet(customResourceTypes);
+  }
+
+  public void verify(JSONObject jsonObject) {
+    try {
+      verifyResourcesContainDefaultResourceTypes(jsonObject, RESOURCE_FIELDS);
+      verifyResourcesContainCustomResourceTypes(jsonObject, RESOURCE_FIELDS);
+    } catch (JSONException e) {
+      throw new RuntimeException(e);
+    }
+  }
+
+  private void verifyResourcesContainDefaultResourceTypes(JSONObject queue,
+      Set<String> resourceCategories) throws JSONException {
+    for (String resourceCategory : resourceCategories) {
+      boolean hasResourceCategory = queue.has(resourceCategory);
+      assertTrue("Queue " + queue + " does not have resource category key: "
+          + resourceCategory, hasResourceCategory);
+      verifyResourceContainsDefaultResourceTypes(
+          queue.getJSONObject(resourceCategory));
+    }
+  }
+
+  private void verifyResourceContainsDefaultResourceTypes(
+      JSONObject jsonObject) {
+    Object memory = jsonObject.opt("memory");
+    Object vCores = jsonObject.opt("vCores");
+
+    assertNotNull("Key 'memory' not found in: " + jsonObject, memory);
+    assertNotNull("Key 'vCores' not found in: " + jsonObject, vCores);
+  }
+
+  private void verifyResourcesContainCustomResourceTypes(JSONObject queue,
+      Set<String> resourceCategories) throws JSONException {
+    for (String resourceCategory : resourceCategories) {
+      assertTrue("Queue " + queue + " does not have resource category key: "
+          + resourceCategory, queue.has(resourceCategory));
+      verifyResourceContainsAllCustomResourceTypes(
+          queue.getJSONObject(resourceCategory));
+    }
+  }
+
+  private void verifyResourceContainsAllCustomResourceTypes(
+      JSONObject resourceCategory) throws JSONException {
+    assertTrue("resourceCategory does not have resourceInformations: "
+        + resourceCategory, resourceCategory.has("resourceInformations"));
+
+    JSONObject resourceInformations =
+        resourceCategory.getJSONObject("resourceInformations");
+    assertTrue(
+        "resourceInformations does not have resourceInformation object: "
+            + resourceInformations,
+        resourceInformations.has("resourceInformation"));
+    JSONArray customResources =
+        resourceInformations.getJSONArray("resourceInformation");
+
+    // customResources will include vcores / memory as well
+    assertEquals(
+        "Different number of custom resource types found than expected",
+        customResourceTypes.size(), customResources.length() - 2);
+
+    for (int i = 0; i < customResources.length(); i++) {
+      JSONObject customResource = customResources.getJSONObject(i);
+      assertTrue("Resource type does not have name field: " + customResource,
+          customResource.has("name"));
+      assertTrue("Resource type does not have name resourceType field: "
+          + customResource, customResource.has("resourceType"));
+      assertTrue(
+          "Resource type does not have name units field: " + customResource,
+          customResource.has("units"));
+      assertTrue(
+          "Resource type does not have name value field: " + customResource,
+          customResource.has("value"));
+
+      String name = customResource.getString("name");
+      String unit = customResource.getString("units");
+      String resourceType = customResource.getString("resourceType");
+      Long value = customResource.getLong("value");
+
+      if (ResourceInformation.MEMORY_URI.equals(name)
+          || ResourceInformation.VCORES_URI.equals(name)) {
+        continue;
+      }
+
+      assertTrue("Custom resource type " + name + " not found",
+          customResourceTypes.contains(name));
+      assertEquals("k", unit);
+      assertEquals(ResourceTypes.COUNTABLE,
+          ResourceTypes.valueOf(resourceType));
+      assertNotNull("Custom resource value " + value + " is null!", value);
+    }
+  }
+}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/99febe7f/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/fairscheduler/FairSchedulerXmlVerifications.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/fairscheduler/FairSchedulerXmlVerifications.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/fairscheduler/FairSchedulerXmlVerifications.java
new file mode 100644
index 0000000..63ae7b7
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/fairscheduler/FairSchedulerXmlVerifications.java
@@ -0,0 +1,153 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.server.resourcemanager.webapp.fairscheduler;
+
+
+import com.google.common.collect.Sets;
+import org.apache.hadoop.yarn.api.protocolrecords.ResourceTypes;
+import org.apache.hadoop.yarn.api.records.ResourceInformation;
+import org.w3c.dom.Document;
+import org.w3c.dom.Element;
+import org.w3c.dom.Node;
+import org.w3c.dom.NodeList;
+
+import java.util.List;
+import java.util.Set;
+
+import static org.apache.hadoop.yarn.server.resourcemanager.webapp.helper.XmlCustomResourceTypeTestCase.toXml;
+import static org.apache.hadoop.yarn.webapp.WebServicesTestUtils.getXmlLong;
+import static org.apache.hadoop.yarn.webapp.WebServicesTestUtils.getXmlString;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertNotNull;
+import static org.junit.Assert.assertTrue;
+
+/**
+ * This test helper class is primarily used by
+ * {@link TestRMWebServicesFairSchedulerCustomResourceTypes}.
+ */
+public class FairSchedulerXmlVerifications {
+
+  private static final Set<String> RESOURCE_FIELDS = Sets.newHashSet(
+      "minResources", "amUsedResources", "amMaxResources", "fairResources",
+      "clusterResources", "reservedResources", "maxResources", "usedResources",
+      "steadyFairResources", "demandResources");
+  private final Set<String> customResourceTypes;
+
+  FairSchedulerXmlVerifications(List<String> customResourceTypes) {
+    this.customResourceTypes = Sets.newHashSet(customResourceTypes);
+  }
+
+  public void verify(Element element) {
+    verifyResourcesContainDefaultResourceTypes(element, RESOURCE_FIELDS);
+    verifyResourcesContainCustomResourceTypes(element, RESOURCE_FIELDS);
+  }
+
+  private void verifyResourcesContainDefaultResourceTypes(Element queue,
+      Set<String> resourceCategories) {
+    for (String resourceCategory : resourceCategories) {
+      boolean hasResourceCategory = hasChild(queue, resourceCategory);
+      assertTrue("Queue " + queue + " does not have resource category key: "
+          + resourceCategory, hasResourceCategory);
+      verifyResourceContainsDefaultResourceTypes(
+              (Element) queue.getElementsByTagName(resourceCategory).item(0));
+    }
+  }
+
+  private void verifyResourceContainsDefaultResourceTypes(
+      Element element) {
+    Object memory = opt(element, "memory");
+    Object vCores = opt(element, "vCores");
+
+    assertNotNull("Key 'memory' not found in: " + element, memory);
+    assertNotNull("Key 'vCores' not found in: " + element, vCores);
+  }
+
+  private void verifyResourcesContainCustomResourceTypes(Element queue,
+      Set<String> resourceCategories) {
+    for (String resourceCategory : resourceCategories) {
+      assertTrue("Queue " + queue + " does not have key for resourceCategory: "
+          + resourceCategory, hasChild(queue, resourceCategory));
+      verifyResourceContainsCustomResourceTypes(
+              (Element) queue.getElementsByTagName(resourceCategory).item(0));
+    }
+  }
+
+  private void verifyResourceContainsCustomResourceTypes(
+      Element resourceCategory) {
+    assertEquals(
+        toXml(resourceCategory)
+            + " should have only one resourceInformations child!",
+        1, resourceCategory.getElementsByTagName("resourceInformations")
+            .getLength());
+    Element resourceInformations = (Element) resourceCategory
+        .getElementsByTagName("resourceInformations").item(0);
+
+    NodeList customResources =
+        resourceInformations.getElementsByTagName("resourceInformation");
+
+    // customResources will include vcores / memory as well
+    assertEquals(
+        "Different number of custom resource types found than expected",
+        customResourceTypes.size(), customResources.getLength() - 2);
+
+    for (int i = 0; i < customResources.getLength(); i++) {
+      Element customResource = (Element) customResources.item(i);
+      String name = getXmlString(customResource, "name");
+      String unit = getXmlString(customResource, "units");
+      String resourceType = getXmlString(customResource, "resourceType");
+      Long value = getXmlLong(customResource, "value");
+
+      if (ResourceInformation.MEMORY_URI.equals(name)
+          || ResourceInformation.VCORES_URI.equals(name)) {
+        continue;
+      }
+
+      assertTrue("Custom resource type " + name + " not found",
+          customResourceTypes.contains(name));
+      assertEquals("k", unit);
+      assertEquals(ResourceTypes.COUNTABLE,
+          ResourceTypes.valueOf(resourceType));
+      assertNotNull("Resource value should not be null for resource type "
+          + resourceType + ", listing xml contents: " + toXml(customResource),
+          value);
+    }
+  }
+
+  private Object opt(Node node, String child) {
+    NodeList nodes = getElementsByTagNameInternal(node, child);
+    if (nodes.getLength() > 0) {
+      return nodes.item(0);
+    }
+
+    return null;
+  }
+
+  private boolean hasChild(Node node, String child) {
+    return getElementsByTagNameInternal(node, child).getLength() > 0;
+  }
+
+  private NodeList getElementsByTagNameInternal(Node node, String child) {
+    if (node instanceof Element) {
+      return ((Element) node).getElementsByTagName(child);
+    } else if (node instanceof Document) {
+      return ((Document) node).getElementsByTagName(child);
+    } else {
+      throw new IllegalStateException("Unknown type of wrappedObject: " + node
+          + ", type: " + node.getClass());
+    }
+  }
+}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/99febe7f/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/fairscheduler/TestRMWebServicesFairSchedulerCustomResourceTypes.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/fairscheduler/TestRMWebServicesFairSchedulerCustomResourceTypes.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/fairscheduler/TestRMWebServicesFairSchedulerCustomResourceTypes.java
new file mode 100644
index 0000000..de4d5a1
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/fairscheduler/TestRMWebServicesFairSchedulerCustomResourceTypes.java
@@ -0,0 +1,271 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.server.resourcemanager.webapp.fairscheduler;
+
+import com.google.inject.Guice;
+import com.google.inject.servlet.ServletModule;
+import com.sun.jersey.api.client.ClientResponse;
+import com.sun.jersey.api.client.WebResource;
+import com.sun.jersey.guice.spi.container.servlet.GuiceContainer;
+import com.sun.jersey.test.framework.WebAppDescriptor;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.yarn.api.records.Resource;
+import org.apache.hadoop.yarn.conf.YarnConfiguration;
+import org.apache.hadoop.yarn.server.resourcemanager.MockRM;
+import org.apache.hadoop.yarn.server.resourcemanager.ResourceManager;
+import org.apache.hadoop.yarn.server.resourcemanager.scheduler.ResourceScheduler;
+import org.apache.hadoop.yarn.server.resourcemanager.scheduler.fair.FSLeafQueue;
+import org.apache.hadoop.yarn.server.resourcemanager.scheduler.fair.FairScheduler;
+import org.apache.hadoop.yarn.server.resourcemanager.scheduler.fair.QueueManager;
+import org.apache.hadoop.yarn.server.resourcemanager.webapp.JAXBContextResolver;
+import org.apache.hadoop.yarn.server.resourcemanager.webapp.RMWebServices;
+import org.apache.hadoop.yarn.server.resourcemanager.webapp.helper.*;
+import org.apache.hadoop.yarn.util.resource.ResourceUtils;
+import org.apache.hadoop.yarn.webapp.GenericExceptionHandler;
+import org.apache.hadoop.yarn.webapp.GuiceServletConfig;
+import org.apache.hadoop.yarn.webapp.JerseyTestBase;
+import org.codehaus.jettison.json.JSONArray;
+import org.codehaus.jettison.json.JSONException;
+import org.codehaus.jettison.json.JSONObject;
+import org.junit.After;
+import org.junit.Before;
+import org.junit.Test;
+import org.w3c.dom.Element;
+import javax.ws.rs.core.MediaType;
+import java.lang.reflect.Method;
+import java.util.List;
+import java.util.Map;
+import java.util.function.Function;
+import java.util.stream.Collectors;
+
+import static org.junit.Assert.assertEquals;
+
+/**
+ * This class is to test response representations of queue resources,
+ * explicitly setting custom resource types. with the help of
+ * {@link CustomResourceTypesConfigurationProvider}
+ */
+public class TestRMWebServicesFairSchedulerCustomResourceTypes
+    extends JerseyTestBase {
+  private static MockRM rm;
+  private static YarnConfiguration conf;
+
+  private static class WebServletModule extends ServletModule {
+    @Override
+    protected void configureServlets() {
+      bind(JAXBContextResolver.class);
+      bind(RMWebServices.class);
+      bind(GenericExceptionHandler.class);
+      conf = new YarnConfiguration();
+      conf.setClass(YarnConfiguration.RM_SCHEDULER, FairScheduler.class,
+          ResourceScheduler.class);
+      initResourceTypes(conf);
+      rm = new MockRM(conf);
+      bind(ResourceManager.class).toInstance(rm);
+      serve("/*").with(GuiceContainer.class);
+    }
+
+    private void initResourceTypes(YarnConfiguration conf) {
+      conf.set(YarnConfiguration.RM_CONFIGURATION_PROVIDER_CLASS,
+          CustomResourceTypesConfigurationProvider.class.getName());
+      ResourceUtils.resetResourceTypes(conf);
+    }
+  }
+
+  @Before
+  @Override
+  public void setUp() throws Exception {
+    super.setUp();
+    createInjectorForWebServletModule();
+  }
+
+  @After
+  public void tearDown() {
+    ResourceUtils.resetResourceTypes(new Configuration());
+  }
+
+  private void createInjectorForWebServletModule() {
+    GuiceServletConfig
+        .setInjector(Guice.createInjector(new WebServletModule()));
+  }
+
+  @After
+  public void teardown() {
+    CustomResourceTypesConfigurationProvider.reset();
+  }
+
+  public TestRMWebServicesFairSchedulerCustomResourceTypes() {
+    super(new WebAppDescriptor.Builder(
+        "org.apache.hadoop.yarn.server.resourcemanager.webapp")
+            .contextListenerClass(GuiceServletConfig.class)
+            .filterClass(com.google.inject.servlet.GuiceFilter.class)
+            .contextPath("jersey-guice-filter").servletPath("/").build());
+  }
+
+  @Test
+  public void testClusterSchedulerWithCustomResourceTypesJson() {
+    FairScheduler scheduler = (FairScheduler) rm.getResourceScheduler();
+    QueueManager queueManager = scheduler.getQueueManager();
+    // create LeafQueues
+    queueManager.getLeafQueue("root.q.subqueue1", true);
+    queueManager.getLeafQueue("root.q.subqueue2", true);
+
+    FSLeafQueue subqueue1 =
+        queueManager.getLeafQueue("root.q.subqueue1", false);
+    incrementUsedResourcesOnQueue(subqueue1, 33L);
+
+    WebResource path =
+        resource().path("ws").path("v1").path("cluster").path("scheduler");
+    ClientResponse response =
+        path.accept(MediaType.APPLICATION_JSON).get(ClientResponse.class);
+
+    verifyJsonResponse(path, response,
+            CustomResourceTypesConfigurationProvider.getCustomResourceTypes());
+  }
+
+  @Test
+  public void testClusterSchedulerWithCustomResourceTypesXml() {
+    FairScheduler scheduler = (FairScheduler) rm.getResourceScheduler();
+    QueueManager queueManager = scheduler.getQueueManager();
+    // create LeafQueues
+    queueManager.getLeafQueue("root.q.subqueue1", true);
+    queueManager.getLeafQueue("root.q.subqueue2", true);
+
+    FSLeafQueue subqueue1 =
+        queueManager.getLeafQueue("root.q.subqueue1", false);
+    incrementUsedResourcesOnQueue(subqueue1, 33L);
+
+    WebResource path =
+        resource().path("ws").path("v1").path("cluster").path("scheduler");
+    ClientResponse response =
+        path.accept(MediaType.APPLICATION_XML).get(ClientResponse.class);
+
+    verifyXmlResponse(path, response,
+        CustomResourceTypesConfigurationProvider.getCustomResourceTypes());
+  }
+
+  @Test
+  public void testClusterSchedulerWithElevenCustomResourceTypesXml() {
+    CustomResourceTypesConfigurationProvider.setNumberOfResourceTypes(11);
+    createInjectorForWebServletModule();
+
+    FairScheduler scheduler = (FairScheduler) rm.getResourceScheduler();
+    QueueManager queueManager = scheduler.getQueueManager();
+    // create LeafQueues
+    queueManager.getLeafQueue("root.q.subqueue1", true);
+    queueManager.getLeafQueue("root.q.subqueue2", true);
+
+    FSLeafQueue subqueue1 =
+        queueManager.getLeafQueue("root.q.subqueue1", false);
+    incrementUsedResourcesOnQueue(subqueue1, 33L);
+
+    WebResource path =
+        resource().path("ws").path("v1").path("cluster").path("scheduler");
+    ClientResponse response =
+        path.accept(MediaType.APPLICATION_XML).get(ClientResponse.class);
+
+    verifyXmlResponse(path, response,
+        CustomResourceTypesConfigurationProvider.getCustomResourceTypes());
+  }
+
+  @Test
+  public void testClusterSchedulerElevenWithCustomResourceTypesJson() {
+    CustomResourceTypesConfigurationProvider.setNumberOfResourceTypes(11);
+    createInjectorForWebServletModule();
+
+    FairScheduler scheduler = (FairScheduler) rm.getResourceScheduler();
+    QueueManager queueManager = scheduler.getQueueManager();
+    // create LeafQueues
+    queueManager.getLeafQueue("root.q.subqueue1", true);
+    queueManager.getLeafQueue("root.q.subqueue2", true);
+
+    FSLeafQueue subqueue1 =
+        queueManager.getLeafQueue("root.q.subqueue1", false);
+    incrementUsedResourcesOnQueue(subqueue1, 33L);
+
+    WebResource path =
+        resource().path("ws").path("v1").path("cluster").path("scheduler");
+    ClientResponse response =
+        path.accept(MediaType.APPLICATION_JSON).get(ClientResponse.class);
+
+    verifyJsonResponse(path, response,
+        CustomResourceTypesConfigurationProvider.getCustomResourceTypes());
+  }
+
+  private void verifyJsonResponse(WebResource path, ClientResponse response,
+      List<String> customResourceTypes) {
+    JsonCustomResourceTypeTestcase testCase =
+        new JsonCustomResourceTypeTestcase(path,
+            new BufferedClientResponse(response));
+    testCase.verify(json -> {
+      try {
+        JSONArray queues = json.getJSONObject("scheduler")
+            .getJSONObject("schedulerInfo").getJSONObject("rootQueue")
+            .getJSONObject("childQueues").getJSONArray("queue");
+
+        // childQueueInfo consists of subqueue1 and subqueue2 info
+        assertEquals(2, queues.length());
+        JSONObject firstChildQueue = queues.getJSONObject(0);
+        new FairSchedulerJsonVerifications(customResourceTypes)
+            .verify(firstChildQueue);
+      } catch (JSONException e) {
+        throw new RuntimeException(e);
+      }
+    });
+  }
+
+  private void verifyXmlResponse(WebResource path, ClientResponse response,
+          List<String> customResourceTypes) {
+    XmlCustomResourceTypeTestCase testCase = new XmlCustomResourceTypeTestCase(
+        path, new BufferedClientResponse(response));
+
+    testCase.verify(xml -> {
+      Element scheduler =
+          (Element) xml.getElementsByTagName("scheduler").item(0);
+      Element schedulerInfo =
+          (Element) scheduler.getElementsByTagName("schedulerInfo").item(0);
+      Element rootQueue =
+          (Element) schedulerInfo.getElementsByTagName("rootQueue").item(0);
+
+      Element childQueues =
+          (Element) rootQueue.getElementsByTagName("childQueues").item(0);
+      Element queue =
+          (Element) childQueues.getElementsByTagName("queue").item(0);
+      new FairSchedulerXmlVerifications(customResourceTypes).verify(queue);
+    });
+  }
+
+  private void incrementUsedResourcesOnQueue(final FSLeafQueue queue,
+      final long value) {
+    try {
+      Method incUsedResourceMethod = queue.getClass().getSuperclass()
+          .getDeclaredMethod("incUsedResource", Resource.class);
+      incUsedResourceMethod.setAccessible(true);
+
+      Map<String, Long> customResources =
+          CustomResourceTypesConfigurationProvider.getCustomResourceTypes()
+              .stream()
+              .collect(Collectors.toMap(Function.identity(), v -> value));
+
+      incUsedResourceMethod.invoke(queue,
+          Resource.newInstance(20, 30, customResources));
+    } catch (Exception e) {
+      throw new RuntimeException(e);
+    }
+  }
+
+}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/99febe7f/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/helper/AppInfoJsonVerifications.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/helper/AppInfoJsonVerifications.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/helper/AppInfoJsonVerifications.java
new file mode 100644
index 0000000..4ab1443
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/helper/AppInfoJsonVerifications.java
@@ -0,0 +1,123 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.server.resourcemanager.webapp.helper;
+
+import org.apache.hadoop.yarn.server.resourcemanager.ResourceManager;
+import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMApp;
+import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.AppInfo;
+import org.codehaus.jettison.json.JSONException;
+import org.codehaus.jettison.json.JSONObject;
+import static org.apache.hadoop.yarn.webapp.WebServicesTestUtils.checkStringEqual;
+import static org.apache.hadoop.yarn.webapp.WebServicesTestUtils.checkStringMatch;
+import static org.junit.Assert.*;
+
+/**
+ * Contains all value verifications that are needed to verify {@link AppInfo}
+ * JSON objects.
+ */
+public final class AppInfoJsonVerifications {
+
+  private AppInfoJsonVerifications() {
+    //utility class
+  }
+
+  /**
+   * Tests whether {@link AppInfo} representation object contains the required
+   * values as per defined in the specified app parameter.
+   * @param  app  an RMApp instance that contains the required values
+   *              to test against.
+   */
+  public static void verify(JSONObject info, RMApp app) throws JSONException {
+    checkStringMatch("id", app.getApplicationId().toString(),
+        info.getString("id"));
+    checkStringMatch("user", app.getUser(), info.getString("user"));
+    checkStringMatch("name", app.getName(), info.getString("name"));
+    checkStringMatch("applicationType", app.getApplicationType(),
+        info.getString("applicationType"));
+    checkStringMatch("queue", app.getQueue(), info.getString("queue"));
+    assertEquals("priority doesn't match", 0, info.getInt("priority"));
+    checkStringMatch("state", app.getState().toString(),
+        info.getString("state"));
+    checkStringMatch("finalStatus", app.getFinalApplicationStatus().toString(),
+        info.getString("finalStatus"));
+    assertEquals("progress doesn't match", 0,
+        (float) info.getDouble("progress"), 0.0);
+    if ("UNASSIGNED".equals(info.getString("trackingUI"))) {
+      checkStringMatch("trackingUI", "UNASSIGNED",
+          info.getString("trackingUI"));
+    }
+    checkStringEqual("diagnostics", app.getDiagnostics().toString(),
+        info.getString("diagnostics"));
+    assertEquals("clusterId doesn't match",
+        ResourceManager.getClusterTimeStamp(), info.getLong("clusterId"));
+    assertEquals("startedTime doesn't match", app.getStartTime(),
+        info.getLong("startedTime"));
+    assertEquals("finishedTime doesn't match", app.getFinishTime(),
+        info.getLong("finishedTime"));
+    assertTrue("elapsed time not greater than 0",
+        info.getLong("elapsedTime") > 0);
+    checkStringMatch("amHostHttpAddress",
+        app.getCurrentAppAttempt().getMasterContainer().getNodeHttpAddress(),
+        info.getString("amHostHttpAddress"));
+    assertTrue("amContainerLogs doesn't match",
+        info.getString("amContainerLogs").startsWith("http://"));
+    assertTrue("amContainerLogs doesn't contain user info",
+        info.getString("amContainerLogs").endsWith("/" + app.getUser()));
+    assertEquals("allocatedMB doesn't match", 1024, info.getInt("allocatedMB"));
+    assertEquals("allocatedVCores doesn't match", 1,
+        info.getInt("allocatedVCores"));
+    assertEquals("queueUsagePerc doesn't match", 50.0f,
+        (float) info.getDouble("queueUsagePercentage"), 0.01f);
+    assertEquals("clusterUsagePerc doesn't match", 50.0f,
+        (float) info.getDouble("clusterUsagePercentage"), 0.01f);
+    assertEquals("numContainers doesn't match", 1,
+        info.getInt("runningContainers"));
+    assertNotNull("preemptedResourceSecondsMap should not be null",
+        info.getJSONObject("preemptedResourceSecondsMap"));
+    assertEquals("preemptedResourceMB doesn't match",
+        app.getRMAppMetrics().getResourcePreempted().getMemorySize(),
+        info.getInt("preemptedResourceMB"));
+    assertEquals("preemptedResourceVCores doesn't match",
+        app.getRMAppMetrics().getResourcePreempted().getVirtualCores(),
+        info.getInt("preemptedResourceVCores"));
+    assertEquals("numNonAMContainerPreempted doesn't match",
+        app.getRMAppMetrics().getNumNonAMContainersPreempted(),
+        info.getInt("numNonAMContainerPreempted"));
+    assertEquals("numAMContainerPreempted doesn't match",
+        app.getRMAppMetrics().getNumAMContainersPreempted(),
+        info.getInt("numAMContainerPreempted"));
+    assertEquals("Log aggregation Status doesn't match",
+        app.getLogAggregationStatusForAppReport().toString(),
+        info.getString("logAggregationStatus"));
+    assertEquals("unmanagedApplication doesn't match",
+        app.getApplicationSubmissionContext().getUnmanagedAM(),
+        info.getBoolean("unmanagedApplication"));
+
+    if (app.getApplicationSubmissionContext()
+        .getNodeLabelExpression() != null) {
+      assertEquals("appNodeLabelExpression doesn't match",
+          app.getApplicationSubmissionContext().getNodeLabelExpression(),
+          info.getString("appNodeLabelExpression"));
+    }
+    assertEquals("amNodeLabelExpression doesn't match",
+        app.getAMResourceRequests().get(0).getNodeLabelExpression(),
+        info.getString("amNodeLabelExpression"));
+    assertEquals("amRPCAddress",
+        AppInfo.getAmRPCAddressFromRMAppAttempt(app.getCurrentAppAttempt()),
+        info.getString("amRPCAddress"));
+  }
+}


---------------------------------------------------------------------
To unsubscribe, e-mail: common-commits-unsubscribe@hadoop.apache.org
For additional commands, e-mail: common-commits-help@hadoop.apache.org


[11/50] [abbrv] hadoop git commit: HDDS-175. Refactor ContainerInfo to remove Pipeline object from it. Contributed by Ajay Kumar.

Posted by vi...@apache.org.
http://git-wip-us.apache.org/repos/asf/hadoop/blob/7ca4f0ce/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/block/TestDeletedBlockLog.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/block/TestDeletedBlockLog.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/block/TestDeletedBlockLog.java
index d06d568..9255ec7 100644
--- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/block/TestDeletedBlockLog.java
+++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/block/TestDeletedBlockLog.java
@@ -22,6 +22,7 @@ import org.apache.commons.lang3.RandomUtils;
 import org.apache.hadoop.hdds.scm.container.ContainerMapping;
 import org.apache.hadoop.hdds.scm.container.Mapping;
 import org.apache.hadoop.hdds.scm.container.common.helpers.ContainerInfo;
+import org.apache.hadoop.hdds.scm.container.common.helpers.ContainerWithPipeline;
 import org.apache.hadoop.hdds.scm.container.common.helpers.Pipeline;
 import org.apache.hadoop.hdfs.DFSUtil;
 import org.apache.hadoop.hdds.conf.OzoneConfiguration;
@@ -362,10 +363,16 @@ public class TestDeletedBlockLog {
     pipeline.addMember(dd);
 
     ContainerInfo.Builder builder = new ContainerInfo.Builder();
-    builder.setPipeline(pipeline);
-
-    ContainerInfo conatinerInfo = builder.build();
-    Mockito.doReturn(conatinerInfo).when(mappingService)
+    builder.setPipelineName(pipeline.getPipelineName())
+        .setReplicationType(pipeline.getType())
+        .setReplicationFactor(pipeline.getFactor());
+
+    ContainerInfo containerInfo = builder.build();
+    ContainerWithPipeline containerWithPipeline = new ContainerWithPipeline(
+        containerInfo, pipeline);
+    Mockito.doReturn(containerInfo).when(mappingService)
         .getContainer(containerID);
+    Mockito.doReturn(containerWithPipeline).when(mappingService)
+        .getContainerWithPipeline(containerID);
   }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/7ca4f0ce/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/TestCloseContainerEventHandler.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/TestCloseContainerEventHandler.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/TestCloseContainerEventHandler.java
index 09ade3e..721dbf6 100644
--- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/TestCloseContainerEventHandler.java
+++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/TestCloseContainerEventHandler.java
@@ -21,7 +21,7 @@ import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileUtil;
 import org.apache.hadoop.hdds.protocol.DatanodeDetails;
 import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
-import org.apache.hadoop.hdds.scm.container.common.helpers.ContainerInfo;
+import org.apache.hadoop.hdds.scm.container.common.helpers.ContainerWithPipeline;
 import org.apache.hadoop.hdds.server.events.EventQueue;
 import org.apache.hadoop.ozone.OzoneConfigKeys;
 import org.apache.hadoop.ozone.container.common.SCMTestUtils;
@@ -97,7 +97,7 @@ public class TestCloseContainerEventHandler {
         new ContainerID(id));
     eventQueue.processAll(1000);
     Assert.assertTrue(logCapturer.getOutput()
-        .contains("Container with id : " + id + " does not exist"));
+        .contains("Failed to update the container state"));
   }
 
   @Test
@@ -105,11 +105,12 @@ public class TestCloseContainerEventHandler {
 
     GenericTestUtils.LogCapturer logCapturer = GenericTestUtils.LogCapturer
         .captureLogs(CloseContainerEventHandler.LOG);
-    ContainerInfo info = mapping
+    ContainerWithPipeline containerWithPipeline = mapping
         .allocateContainer(HddsProtos.ReplicationType.STAND_ALONE,
             HddsProtos.ReplicationFactor.ONE, "ozone");
-    ContainerID id = new ContainerID(info.getContainerID());
-    DatanodeDetails datanode = info.getPipeline().getLeader();
+    ContainerID id = new ContainerID(
+        containerWithPipeline.getContainerInfo().getContainerID());
+    DatanodeDetails datanode = containerWithPipeline.getPipeline().getLeader();
     int closeCount = nodeManager.getCommandCount(datanode);
     eventQueue.fireEvent(CloseContainerEventHandler.CLOSE_CONTAINER_EVENT, id);
     eventQueue.processAll(1000);
@@ -125,7 +126,8 @@ public class TestCloseContainerEventHandler {
     mapping.updateContainerState(id.getId(), CREATE);
     mapping.updateContainerState(id.getId(), CREATED);
     eventQueue.fireEvent(CloseContainerEventHandler.CLOSE_CONTAINER_EVENT,
-        new ContainerID(info.getContainerID()));
+        new ContainerID(
+            containerWithPipeline.getContainerInfo().getContainerID()));
     eventQueue.processAll(1000);
     Assert.assertEquals(closeCount + 1, nodeManager.getCommandCount(datanode));
     Assert.assertEquals(HddsProtos.LifeCycleState.CLOSING,
@@ -137,20 +139,23 @@ public class TestCloseContainerEventHandler {
 
     GenericTestUtils.LogCapturer logCapturer = GenericTestUtils.LogCapturer
         .captureLogs(CloseContainerEventHandler.LOG);
-    ContainerInfo info = mapping
+    ContainerWithPipeline containerWithPipeline = mapping
         .allocateContainer(HddsProtos.ReplicationType.RATIS,
             HddsProtos.ReplicationFactor.THREE, "ozone");
-    ContainerID id = new ContainerID(info.getContainerID());
+    ContainerID id = new ContainerID(
+        containerWithPipeline.getContainerInfo().getContainerID());
     int[] closeCount = new int[3];
     eventQueue.fireEvent(CloseContainerEventHandler.CLOSE_CONTAINER_EVENT, id);
     eventQueue.processAll(1000);
     int i = 0;
-    for (DatanodeDetails details : info.getPipeline().getMachines()) {
+    for (DatanodeDetails details : containerWithPipeline.getPipeline()
+        .getMachines()) {
       closeCount[i] = nodeManager.getCommandCount(details);
       i++;
     }
     i = 0;
-    for (DatanodeDetails details : info.getPipeline().getMachines()) {
+    for (DatanodeDetails details : containerWithPipeline.getPipeline()
+        .getMachines()) {
       Assert.assertEquals(closeCount[i], nodeManager.getCommandCount(details));
       i++;
     }
@@ -161,12 +166,12 @@ public class TestCloseContainerEventHandler {
     //Execute these state transitions so that we can close the container.
     mapping.updateContainerState(id.getId(), CREATE);
     mapping.updateContainerState(id.getId(), CREATED);
-    eventQueue.fireEvent(CloseContainerEventHandler.CLOSE_CONTAINER_EVENT,
-        new ContainerID(info.getContainerID()));
+    eventQueue.fireEvent(CloseContainerEventHandler.CLOSE_CONTAINER_EVENT, id);
     eventQueue.processAll(1000);
     i = 0;
     // Make sure close is queued for each datanode on the pipeline
-    for (DatanodeDetails details : info.getPipeline().getMachines()) {
+    for (DatanodeDetails details : containerWithPipeline.getPipeline()
+        .getMachines()) {
       Assert.assertEquals(closeCount[i] + 1,
           nodeManager.getCommandCount(details));
       Assert.assertEquals(HddsProtos.LifeCycleState.CLOSING,

http://git-wip-us.apache.org/repos/asf/hadoop/blob/7ca4f0ce/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/TestContainerMapping.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/TestContainerMapping.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/TestContainerMapping.java
index eefb639..42ab126 100644
--- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/TestContainerMapping.java
+++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/TestContainerMapping.java
@@ -22,6 +22,7 @@ import org.apache.hadoop.hdds.scm.ScmConfigKeys;
 import org.apache.hadoop.hdds.scm.TestUtils;
 import org.apache.hadoop.hdds.scm.XceiverClientManager;
 import org.apache.hadoop.hdds.scm.container.common.helpers.ContainerInfo;
+import org.apache.hadoop.hdds.scm.container.common.helpers.ContainerWithPipeline;
 import org.apache.hadoop.hdds.scm.container.common.helpers.Pipeline;
 import org.apache.hadoop.hdds.protocol.DatanodeDetails;
 import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
@@ -103,7 +104,7 @@ public class TestContainerMapping {
 
   @Test
   public void testallocateContainer() throws Exception {
-    ContainerInfo containerInfo = mapping.allocateContainer(
+    ContainerWithPipeline containerInfo = mapping.allocateContainer(
         xceiverClientManager.getType(),
         xceiverClientManager.getFactor(),
         containerOwner);
@@ -120,7 +121,7 @@ public class TestContainerMapping {
      */
     Set<UUID> pipelineList = new TreeSet<>();
     for (int x = 0; x < 30; x++) {
-      ContainerInfo containerInfo = mapping.allocateContainer(
+      ContainerWithPipeline containerInfo = mapping.allocateContainer(
           xceiverClientManager.getType(),
           xceiverClientManager.getFactor(),
           containerOwner);
@@ -135,14 +136,13 @@ public class TestContainerMapping {
 
   @Test
   public void testGetContainer() throws IOException {
-    ContainerInfo containerInfo = mapping.allocateContainer(
+    ContainerWithPipeline containerInfo = mapping.allocateContainer(
         xceiverClientManager.getType(),
         xceiverClientManager.getFactor(),
         containerOwner);
     Pipeline pipeline  = containerInfo.getPipeline();
     Assert.assertNotNull(pipeline);
-    Pipeline newPipeline = mapping.getContainer(
-        containerInfo.getContainerID()).getPipeline();
+    Pipeline newPipeline = containerInfo.getPipeline();
     Assert.assertEquals(pipeline.getLeader().getUuid(),
         newPipeline.getLeader().getUuid());
   }
@@ -165,12 +165,12 @@ public class TestContainerMapping {
   public void testContainerCreationLeaseTimeout() throws IOException,
       InterruptedException {
     nodeManager.setChillmode(false);
-    ContainerInfo containerInfo = mapping.allocateContainer(
+    ContainerWithPipeline containerInfo = mapping.allocateContainer(
         xceiverClientManager.getType(),
         xceiverClientManager.getFactor(),
         containerOwner);
-    mapping.updateContainerState(containerInfo.getContainerID(),
-        HddsProtos.LifeCycleEvent.CREATE);
+    mapping.updateContainerState(containerInfo.getContainerInfo()
+            .getContainerID(), HddsProtos.LifeCycleEvent.CREATE);
     Thread.sleep(TIMEOUT + 1000);
 
     NavigableSet<ContainerID> deleteContainers = mapping.getStateManager()
@@ -179,12 +179,14 @@ public class TestContainerMapping {
             xceiverClientManager.getType(),
             xceiverClientManager.getFactor(),
             HddsProtos.LifeCycleState.DELETING);
-    Assert.assertTrue(deleteContainers.contains(containerInfo.containerID()));
+    Assert.assertTrue(deleteContainers
+        .contains(containerInfo.getContainerInfo().containerID()));
 
     thrown.expect(IOException.class);
     thrown.expectMessage("Lease Exception");
-    mapping.updateContainerState(containerInfo.getContainerID(),
-        HddsProtos.LifeCycleEvent.CREATED);
+    mapping
+        .updateContainerState(containerInfo.getContainerInfo().getContainerID(),
+            HddsProtos.LifeCycleEvent.CREATED);
   }
 
   @Test
@@ -294,10 +296,11 @@ public class TestContainerMapping {
   private ContainerInfo createContainer()
       throws IOException {
     nodeManager.setChillmode(false);
-    ContainerInfo containerInfo = mapping.allocateContainer(
+    ContainerWithPipeline containerWithPipeline = mapping.allocateContainer(
         xceiverClientManager.getType(),
         xceiverClientManager.getFactor(),
         containerOwner);
+    ContainerInfo containerInfo = containerWithPipeline.getContainerInfo();
     mapping.updateContainerState(containerInfo.getContainerID(),
         HddsProtos.LifeCycleEvent.CREATE);
     mapping.updateContainerState(containerInfo.getContainerID(),

http://git-wip-us.apache.org/repos/asf/hadoop/blob/7ca4f0ce/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/closer/TestContainerCloser.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/closer/TestContainerCloser.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/closer/TestContainerCloser.java
index 0d7848f..74238a7 100644
--- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/closer/TestContainerCloser.java
+++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/closer/TestContainerCloser.java
@@ -24,6 +24,7 @@ import org.apache.hadoop.hdds.scm.TestUtils;
 import org.apache.hadoop.hdds.scm.container.ContainerMapping;
 import org.apache.hadoop.hdds.scm.container.MockNodeManager;
 import org.apache.hadoop.hdds.scm.container.TestContainerMapping;
+import org.apache.hadoop.hdds.scm.container.common.helpers.ContainerWithPipeline;
 import org.apache.hadoop.hdds.scm.container.common.helpers.ContainerInfo;
 import org.apache.hadoop.hdds.protocol.DatanodeDetails;
 import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
@@ -91,9 +92,10 @@ public class TestContainerCloser {
 
   @Test
   public void testClose() throws IOException {
-    ContainerInfo info = mapping.allocateContainer(
+    ContainerWithPipeline containerWithPipeline = mapping.allocateContainer(
         HddsProtos.ReplicationType.STAND_ALONE,
         HddsProtos.ReplicationFactor.ONE, "ozone");
+    ContainerInfo info = containerWithPipeline.getContainerInfo();
 
     //Execute these state transitions so that we can close the container.
     mapping.updateContainerState(info.getContainerID(), CREATE);
@@ -101,7 +103,7 @@ public class TestContainerCloser {
     long currentCount = mapping.getCloser().getCloseCount();
     long runCount = mapping.getCloser().getThreadRunCount();
 
-    DatanodeDetails datanode = info.getPipeline().getLeader();
+    DatanodeDetails datanode = containerWithPipeline.getPipeline().getLeader();
     // Send a container report with used set to 1 GB. This should not close.
     sendContainerReport(info, 1 * GIGABYTE);
 
@@ -138,9 +140,10 @@ public class TestContainerCloser {
     configuration.setTimeDuration(OZONE_CONTAINER_REPORT_INTERVAL, 1,
         TimeUnit.SECONDS);
 
-    ContainerInfo info = mapping.allocateContainer(
+    ContainerWithPipeline containerWithPipeline = mapping.allocateContainer(
         HddsProtos.ReplicationType.STAND_ALONE,
         HddsProtos.ReplicationFactor.ONE, "ozone");
+    ContainerInfo info = containerWithPipeline.getContainerInfo();
 
     //Execute these state transitions so that we can close the container.
     mapping.updateContainerState(info.getContainerID(), CREATE);
@@ -148,10 +151,10 @@ public class TestContainerCloser {
     long currentCount = mapping.getCloser().getCloseCount();
     long runCount = mapping.getCloser().getThreadRunCount();
 
+    DatanodeDetails datanodeDetails = containerWithPipeline.getPipeline()
+        .getLeader();
 
-    DatanodeDetails datanodeDetails = info.getPipeline().getLeader();
-
-    // Send this command twice and assert we have only one command in the queue.
+    // Send this command twice and assert we have only one command in queue.
     sendContainerReport(info, 5 * GIGABYTE);
     sendContainerReport(info, 5 * GIGABYTE);
 
@@ -183,9 +186,10 @@ public class TestContainerCloser {
     long runCount = mapping.getCloser().getThreadRunCount();
 
     for (int x = 0; x < ContainerCloser.getCleanupWaterMark() + 10; x++) {
-      ContainerInfo info = mapping.allocateContainer(
+      ContainerWithPipeline containerWithPipeline = mapping.allocateContainer(
           HddsProtos.ReplicationType.STAND_ALONE,
           HddsProtos.ReplicationFactor.ONE, "ozone");
+      ContainerInfo info = containerWithPipeline.getContainerInfo();
       mapping.updateContainerState(info.getContainerID(), CREATE);
       mapping.updateContainerState(info.getContainerID(), CREATED);
       sendContainerReport(info, 5 * GIGABYTE);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/7ca4f0ce/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestContainerPlacement.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestContainerPlacement.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestContainerPlacement.java
index 5ad28f6..98b0a28 100644
--- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestContainerPlacement.java
+++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestContainerPlacement.java
@@ -25,7 +25,7 @@ import org.apache.hadoop.hdds.scm.ScmConfigKeys;
 import org.apache.hadoop.hdds.scm.TestUtils;
 import org.apache.hadoop.hdds.scm.XceiverClientManager;
 import org.apache.hadoop.hdds.scm.container.ContainerMapping;
-import org.apache.hadoop.hdds.scm.container.common.helpers.ContainerInfo;
+import org.apache.hadoop.hdds.scm.container.common.helpers.ContainerWithPipeline;
 import org.apache.hadoop.hdds.scm.container.placement.algorithms
     .ContainerPlacementPolicy;
 import org.apache.hadoop.hdds.scm.container.placement.algorithms
@@ -151,11 +151,11 @@ public class TestContainerPlacement {
 
       assertTrue(nodeManager.isOutOfChillMode());
 
-      ContainerInfo containerInfo = containerManager.allocateContainer(
+      ContainerWithPipeline containerWithPipeline = containerManager.allocateContainer(
           xceiverClientManager.getType(),
           xceiverClientManager.getFactor(), "OZONE");
       assertEquals(xceiverClientManager.getFactor().getNumber(),
-          containerInfo.getPipeline().getMachines().size());
+          containerWithPipeline.getPipeline().getMachines().size());
     } finally {
       IOUtils.closeQuietly(containerManager);
       IOUtils.closeQuietly(nodeManager);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/7ca4f0ce/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/container/CloseContainerHandler.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/container/CloseContainerHandler.java b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/container/CloseContainerHandler.java
index 4f3b143..e2267da 100644
--- a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/container/CloseContainerHandler.java
+++ b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/container/CloseContainerHandler.java
@@ -24,9 +24,9 @@ import org.apache.commons.cli.Options;
 import org.apache.hadoop.hdds.scm.cli.OzoneCommandHandler;
 import org.apache.hadoop.hdds.scm.cli.SCMCLI;
 import org.apache.hadoop.hdds.scm.client.ScmClient;
-import org.apache.hadoop.hdds.scm.container.common.helpers.ContainerInfo;
 
 import java.io.IOException;
+import org.apache.hadoop.hdds.scm.container.common.helpers.ContainerWithPipeline;
 
 /**
  * The handler of close container command.
@@ -51,15 +51,15 @@ public class CloseContainerHandler extends OzoneCommandHandler {
     }
     String containerID = cmd.getOptionValue(OPT_CONTAINER_ID);
 
-    ContainerInfo container = getScmClient().
-        getContainer(Long.parseLong(containerID));
+    ContainerWithPipeline container = getScmClient().
+        getContainerWithPipeline(Long.parseLong(containerID));
     if (container == null) {
       throw new IOException("Cannot close an non-exist container "
           + containerID);
     }
     logOut("Closing container : %s.", containerID);
-    getScmClient().closeContainer(container.getContainerID(),
-        container.getPipeline());
+    getScmClient()
+        .closeContainer(container.getContainerInfo().getContainerID());
     logOut("Container closed.");
   }
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/7ca4f0ce/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/container/DeleteContainerHandler.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/container/DeleteContainerHandler.java b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/container/DeleteContainerHandler.java
index 20a6d9e..1b26665 100644
--- a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/container/DeleteContainerHandler.java
+++ b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/container/DeleteContainerHandler.java
@@ -25,9 +25,9 @@ import org.apache.commons.cli.Option;
 import org.apache.commons.cli.Options;
 import org.apache.hadoop.hdds.scm.cli.OzoneCommandHandler;
 import org.apache.hadoop.hdds.scm.client.ScmClient;
-import org.apache.hadoop.hdds.scm.container.common.helpers.ContainerInfo;
 
 import java.io.IOException;
+import org.apache.hadoop.hdds.scm.container.common.helpers.ContainerWithPipeline;
 
 import static org.apache.hadoop.hdds.scm.cli.SCMCLI.CMD_WIDTH;
 import static org.apache.hadoop.hdds.scm.cli.SCMCLI.HELP_OP;
@@ -60,7 +60,7 @@ public class DeleteContainerHandler extends OzoneCommandHandler {
 
     String containerID = cmd.getOptionValue(OPT_CONTAINER_ID);
 
-    ContainerInfo container = getScmClient().getContainer(
+    ContainerWithPipeline container = getScmClient().getContainerWithPipeline(
         Long.parseLong(containerID));
     if (container == null) {
       throw new IOException("Cannot delete an non-exist container "
@@ -68,8 +68,9 @@ public class DeleteContainerHandler extends OzoneCommandHandler {
     }
 
     logOut("Deleting container : %s.", containerID);
-    getScmClient().deleteContainer(container.getContainerID(),
-        container.getPipeline(), cmd.hasOption(OPT_FORCE));
+    getScmClient()
+        .deleteContainer(container.getContainerInfo().getContainerID(),
+            container.getPipeline(), cmd.hasOption(OPT_FORCE));
     logOut("Container %s deleted.", containerID);
   }
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/7ca4f0ce/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/container/InfoContainerHandler.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/container/InfoContainerHandler.java b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/container/InfoContainerHandler.java
index 6027bec..3716ace 100644
--- a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/container/InfoContainerHandler.java
+++ b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/container/InfoContainerHandler.java
@@ -24,7 +24,6 @@ import org.apache.commons.cli.Option;
 import org.apache.commons.cli.Options;
 import org.apache.hadoop.hdds.scm.cli.OzoneCommandHandler;
 import org.apache.hadoop.hdds.scm.client.ScmClient;
-import org.apache.hadoop.hdds.scm.container.common.helpers.ContainerInfo;
 import org.apache.hadoop.hdds.protocol.DatanodeDetails;
 import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos
     .ContainerData;
@@ -33,6 +32,7 @@ import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos
 
 import java.io.IOException;
 import java.util.stream.Collectors;
+import org.apache.hadoop.hdds.scm.container.common.helpers.ContainerWithPipeline;
 
 import static org.apache.hadoop.hdds.scm.cli.SCMCLI.CMD_WIDTH;
 import static org.apache.hadoop.hdds.scm.cli.SCMCLI.HELP_OP;
@@ -68,13 +68,12 @@ public class InfoContainerHandler extends OzoneCommandHandler {
       }
     }
     String containerID = cmd.getOptionValue(OPT_CONTAINER_ID);
-    ContainerInfo container = getScmClient().
-        getContainer(Long.parseLong(containerID));
+    ContainerWithPipeline container = getScmClient().
+        getContainerWithPipeline(Long.parseLong(containerID));
     Preconditions.checkNotNull(container, "Container cannot be null");
 
-    ContainerData containerData =
-        getScmClient().readContainer(container.getContainerID(),
-            container.getPipeline());
+    ContainerData containerData = getScmClient().readContainer(container
+        .getContainerInfo().getContainerID(), container.getPipeline());
 
     // Print container report info.
     logOut("Container id: %s", containerID);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/7ca4f0ce/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/ChunkGroupInputStream.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/ChunkGroupInputStream.java b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/ChunkGroupInputStream.java
index e1a2918..edd85aa 100644
--- a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/ChunkGroupInputStream.java
+++ b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/ChunkGroupInputStream.java
@@ -21,8 +21,8 @@ import com.google.common.annotations.VisibleForTesting;
 import org.apache.hadoop.fs.FSExceptionMessages;
 import org.apache.hadoop.fs.Seekable;
 import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos;
-import org.apache.hadoop.hdds.scm.container.common.helpers.ContainerInfo;
 import org.apache.hadoop.hdds.client.BlockID;
+import org.apache.hadoop.hdds.scm.container.common.helpers.ContainerWithPipeline;
 import org.apache.hadoop.ozone.ksm.helpers.KsmKeyInfo;
 import org.apache.hadoop.ozone.ksm.helpers.KsmKeyLocationInfo;
 import org.apache.hadoop.hdds.scm.XceiverClientManager;
@@ -271,17 +271,17 @@ public class ChunkGroupInputStream extends InputStream implements Seekable {
       KsmKeyLocationInfo ksmKeyLocationInfo = keyLocationInfos.get(i);
       BlockID blockID = ksmKeyLocationInfo.getBlockID();
       long containerID = blockID.getContainerID();
-      ContainerInfo container =
-          storageContainerLocationClient.getContainer(containerID);
-      XceiverClientSpi xceiverClient =
-          xceiverClientManager.acquireClient(container.getPipeline(), containerID);
+      ContainerWithPipeline containerWithPipeline =
+          storageContainerLocationClient.getContainerWithPipeline(containerID);
+      XceiverClientSpi xceiverClient = xceiverClientManager
+          .acquireClient(containerWithPipeline.getPipeline(), containerID);
       boolean success = false;
       containerKey = ksmKeyLocationInfo.getLocalID();
       try {
         LOG.debug("get key accessing {} {}",
             containerID, containerKey);
         groupInputStream.streamOffset[i] = length;
-          ContainerProtos.KeyData containerKeyData = OzoneContainerTranslation
+        ContainerProtos.KeyData containerKeyData = OzoneContainerTranslation
             .containerKeyDataForRead(blockID);
         ContainerProtos.GetKeyResponseProto response = ContainerProtocolCalls
             .getKey(xceiverClient, containerKeyData, requestId);
@@ -292,7 +292,8 @@ public class ChunkGroupInputStream extends InputStream implements Seekable {
         }
         success = true;
         ChunkInputStream inputStream = new ChunkInputStream(
-            ksmKeyLocationInfo.getBlockID(), xceiverClientManager, xceiverClient,
+            ksmKeyLocationInfo.getBlockID(), xceiverClientManager,
+            xceiverClient,
             chunks, requestId);
         groupInputStream.addStream(inputStream,
             ksmKeyLocationInfo.getLength());

http://git-wip-us.apache.org/repos/asf/hadoop/blob/7ca4f0ce/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/ChunkGroupOutputStream.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/ChunkGroupOutputStream.java b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/ChunkGroupOutputStream.java
index c6e56b3..d1a3b46 100644
--- a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/ChunkGroupOutputStream.java
+++ b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/ChunkGroupOutputStream.java
@@ -23,6 +23,7 @@ import org.apache.hadoop.fs.FSExceptionMessages;
 import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.Result;
 import org.apache.hadoop.hdds.scm.container.common.helpers.ContainerInfo;
 import org.apache.hadoop.hdds.client.BlockID;
+import org.apache.hadoop.hdds.scm.container.common.helpers.ContainerWithPipeline;
 import org.apache.hadoop.ozone.ksm.helpers.KsmKeyLocationInfoGroup;
 import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationType;
 import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor;
@@ -163,10 +164,12 @@ public class ChunkGroupOutputStream extends OutputStream {
 
   private void checkKeyLocationInfo(KsmKeyLocationInfo subKeyInfo)
       throws IOException {
-    ContainerInfo container = scmClient.getContainer(
-        subKeyInfo.getContainerID());
+    ContainerWithPipeline containerWithPipeline = scmClient
+        .getContainerWithPipeline(subKeyInfo.getContainerID());
+    ContainerInfo container = containerWithPipeline.getContainerInfo();
+
     XceiverClientSpi xceiverClient =
-        xceiverClientManager.acquireClient(container.getPipeline(),
+        xceiverClientManager.acquireClient(containerWithPipeline.getPipeline(),
             container.getContainerID());
     // create container if needed
     if (subKeyInfo.getShouldCreateContainer()) {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/7ca4f0ce/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/protocolPB/OzonePBHelper.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/protocolPB/OzonePBHelper.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/protocolPB/OzonePBHelper.java
new file mode 100644
index 0000000..8361bac
--- /dev/null
+++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/protocolPB/OzonePBHelper.java
@@ -0,0 +1,30 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.ozone.protocolPB;
+
+/**
+ * Helper class for converting protobuf objects.
+ */
+public final class OzonePBHelper {
+
+  private OzonePBHelper() {
+    /** Hidden constructor */
+  }
+
+
+}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/7ca4f0ce/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/container/TestContainerStateManager.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/container/TestContainerStateManager.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/container/TestContainerStateManager.java
index bedd5c4..bb85650 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/container/TestContainerStateManager.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/container/TestContainerStateManager.java
@@ -18,6 +18,7 @@ package org.apache.hadoop.hdds.scm.container;
 
 import com.google.common.primitives.Longs;
 import org.apache.hadoop.hdds.conf.OzoneConfiguration;
+import org.apache.hadoop.hdds.scm.container.common.helpers.ContainerWithPipeline;
 import org.apache.hadoop.ozone.MiniOzoneCluster;
 import org.apache.hadoop.ozone.OzoneConsts;
 import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
@@ -30,7 +31,6 @@ import org.junit.Before;
 import org.junit.Test;
 
 import java.io.IOException;
-import java.nio.charset.Charset;
 import java.util.ArrayList;
 import java.util.List;
 import java.util.NavigableSet;
@@ -71,31 +71,35 @@ public class TestContainerStateManager {
   @Test
   public void testAllocateContainer() throws IOException {
     // Allocate a container and verify the container info
-    ContainerInfo container1 = scm.getClientProtocolServer().allocateContainer(
-        xceiverClientManager.getType(),
-        xceiverClientManager.getFactor(), containerOwner);
+    ContainerWithPipeline container1 = scm.getClientProtocolServer()
+        .allocateContainer(
+            xceiverClientManager.getType(),
+            xceiverClientManager.getFactor(), containerOwner);
     ContainerInfo info = containerStateManager
         .getMatchingContainer(OzoneConsts.GB * 3, containerOwner,
             xceiverClientManager.getType(), xceiverClientManager.getFactor(),
             HddsProtos.LifeCycleState.ALLOCATED);
-    Assert.assertEquals(container1.getContainerID(), info.getContainerID());
+    Assert.assertEquals(container1.getContainerInfo().getContainerID(),
+        info.getContainerID());
     Assert.assertEquals(OzoneConsts.GB * 3, info.getAllocatedBytes());
     Assert.assertEquals(containerOwner, info.getOwner());
     Assert.assertEquals(xceiverClientManager.getType(),
-        info.getPipeline().getType());
+        info.getReplicationType());
     Assert.assertEquals(xceiverClientManager.getFactor(),
-        info.getPipeline().getFactor());
+        info.getReplicationFactor());
     Assert.assertEquals(HddsProtos.LifeCycleState.ALLOCATED, info.getState());
 
     // Check there are two containers in ALLOCATED state after allocation
-    ContainerInfo container2 = scm.getClientProtocolServer().allocateContainer(
-        xceiverClientManager.getType(),
-        xceiverClientManager.getFactor(), containerOwner);
+    ContainerWithPipeline container2 = scm.getClientProtocolServer()
+        .allocateContainer(
+            xceiverClientManager.getType(),
+            xceiverClientManager.getFactor(), containerOwner);
     int numContainers = containerStateManager
         .getMatchingContainerIDs(containerOwner,
             xceiverClientManager.getType(), xceiverClientManager.getFactor(),
             HddsProtos.LifeCycleState.ALLOCATED).size();
-    Assert.assertNotEquals(container1.getContainerID(), container2.getContainerID());
+    Assert.assertNotEquals(container1.getContainerInfo().getContainerID(),
+        container2.getContainerInfo().getContainerID());
     Assert.assertEquals(2, numContainers);
   }
 
@@ -105,14 +109,15 @@ public class TestContainerStateManager {
 
     List<ContainerInfo> containers = new ArrayList<>();
     for (int i = 0; i < 10; i++) {
-      ContainerInfo container = scm.getClientProtocolServer().allocateContainer(
-          xceiverClientManager.getType(),
-          xceiverClientManager.getFactor(), containerOwner);
-      containers.add(container);
+      ContainerWithPipeline container = scm.getClientProtocolServer()
+          .allocateContainer(
+              xceiverClientManager.getType(),
+              xceiverClientManager.getFactor(), containerOwner);
+      containers.add(container.getContainerInfo());
       if (i >= 5) {
-        scm.getScmContainerManager()
-            .updateContainerState(container.getContainerID(),
-                HddsProtos.LifeCycleEvent.CREATE);
+        scm.getScmContainerManager().updateContainerState(container
+                .getContainerInfo().getContainerID(),
+            HddsProtos.LifeCycleEvent.CREATE);
       }
     }
 
@@ -134,34 +139,40 @@ public class TestContainerStateManager {
 
   @Test
   public void testGetMatchingContainer() throws IOException {
-    ContainerInfo container1 = scm.getClientProtocolServer().
+    ContainerWithPipeline container1 = scm.getClientProtocolServer().
         allocateContainer(xceiverClientManager.getType(),
-        xceiverClientManager.getFactor(), containerOwner);
-    scmContainerMapping.updateContainerState(container1.getContainerID(),
-        HddsProtos.LifeCycleEvent.CREATE);
-    scmContainerMapping.updateContainerState(container1.getContainerID(),
-        HddsProtos.LifeCycleEvent.CREATED);
+            xceiverClientManager.getFactor(), containerOwner);
+    scmContainerMapping
+        .updateContainerState(container1.getContainerInfo().getContainerID(),
+            HddsProtos.LifeCycleEvent.CREATE);
+    scmContainerMapping
+        .updateContainerState(container1.getContainerInfo().getContainerID(),
+            HddsProtos.LifeCycleEvent.CREATED);
 
-    ContainerInfo container2 = scm.getClientProtocolServer().
+    ContainerWithPipeline container2 = scm.getClientProtocolServer().
         allocateContainer(xceiverClientManager.getType(),
-        xceiverClientManager.getFactor(), containerOwner);
+            xceiverClientManager.getFactor(), containerOwner);
 
     ContainerInfo info = containerStateManager
         .getMatchingContainer(OzoneConsts.GB * 3, containerOwner,
             xceiverClientManager.getType(), xceiverClientManager.getFactor(),
             HddsProtos.LifeCycleState.OPEN);
-    Assert.assertEquals(container1.getContainerID(), info.getContainerID());
+    Assert.assertEquals(container1.getContainerInfo().getContainerID(),
+        info.getContainerID());
 
     info = containerStateManager
         .getMatchingContainer(OzoneConsts.GB * 3, containerOwner,
             xceiverClientManager.getType(), xceiverClientManager.getFactor(),
             HddsProtos.LifeCycleState.ALLOCATED);
-    Assert.assertEquals(container2.getContainerID(), info.getContainerID());
+    Assert.assertEquals(container2.getContainerInfo().getContainerID(),
+        info.getContainerID());
 
-    scmContainerMapping.updateContainerState(container2.getContainerID(),
-        HddsProtos.LifeCycleEvent.CREATE);
-    scmContainerMapping.updateContainerState(container2.getContainerID(),
-        HddsProtos.LifeCycleEvent.CREATED);
+    scmContainerMapping
+        .updateContainerState(container2.getContainerInfo().getContainerID(),
+            HddsProtos.LifeCycleEvent.CREATE);
+    scmContainerMapping
+        .updateContainerState(container2.getContainerInfo().getContainerID(),
+            HddsProtos.LifeCycleEvent.CREATED);
 
     // space has already been allocated in container1, now container 2 should
     // be chosen.
@@ -169,7 +180,8 @@ public class TestContainerStateManager {
         .getMatchingContainer(OzoneConsts.GB * 3, containerOwner,
             xceiverClientManager.getType(), xceiverClientManager.getFactor(),
             HddsProtos.LifeCycleState.OPEN);
-    Assert.assertEquals(container2.getContainerID(), info.getContainerID());
+    Assert.assertEquals(container2.getContainerInfo().getContainerID(),
+        info.getContainerID());
   }
 
   @Test
@@ -183,30 +195,33 @@ public class TestContainerStateManager {
 
     // Allocate container1 and update its state from ALLOCATED -> CREATING ->
     // OPEN -> CLOSING -> CLOSED -> DELETING -> DELETED
-    ContainerInfo container1 = scm.getClientProtocolServer().allocateContainer(
-        xceiverClientManager.getType(),
-        xceiverClientManager.getFactor(), containerOwner);
+    ContainerWithPipeline container1 = scm.getClientProtocolServer()
+        .allocateContainer(
+            xceiverClientManager.getType(),
+            xceiverClientManager.getFactor(), containerOwner);
     containers = containerStateManager.getMatchingContainerIDs(containerOwner,
         xceiverClientManager.getType(), xceiverClientManager.getFactor(),
         HddsProtos.LifeCycleState.ALLOCATED).size();
     Assert.assertEquals(1, containers);
 
-    scmContainerMapping.updateContainerState(container1.getContainerID(),
-        HddsProtos.LifeCycleEvent.CREATE);
+    scmContainerMapping
+        .updateContainerState(container1.getContainerInfo().getContainerID(),
+            HddsProtos.LifeCycleEvent.CREATE);
     containers = containerStateManager.getMatchingContainerIDs(containerOwner,
         xceiverClientManager.getType(), xceiverClientManager.getFactor(),
         HddsProtos.LifeCycleState.CREATING).size();
     Assert.assertEquals(1, containers);
 
-    scmContainerMapping.updateContainerState(container1.getContainerID(),
-        HddsProtos.LifeCycleEvent.CREATED);
+    scmContainerMapping
+        .updateContainerState(container1.getContainerInfo().getContainerID(),
+            HddsProtos.LifeCycleEvent.CREATED);
     containers = containerStateManager.getMatchingContainerIDs(containerOwner,
         xceiverClientManager.getType(), xceiverClientManager.getFactor(),
         HddsProtos.LifeCycleState.OPEN).size();
     Assert.assertEquals(1, containers);
 
     scmContainerMapping
-        .updateContainerState(container1.getContainerID(),
+        .updateContainerState(container1.getContainerInfo().getContainerID(),
             HddsProtos.LifeCycleEvent.FINALIZE);
     containers = containerStateManager.getMatchingContainerIDs(containerOwner,
         xceiverClientManager.getType(), xceiverClientManager.getFactor(),
@@ -214,7 +229,7 @@ public class TestContainerStateManager {
     Assert.assertEquals(1, containers);
 
     scmContainerMapping
-        .updateContainerState(container1.getContainerID(),
+        .updateContainerState(container1.getContainerInfo().getContainerID(),
             HddsProtos.LifeCycleEvent.CLOSE);
     containers = containerStateManager.getMatchingContainerIDs(containerOwner,
         xceiverClientManager.getType(), xceiverClientManager.getFactor(),
@@ -222,7 +237,7 @@ public class TestContainerStateManager {
     Assert.assertEquals(1, containers);
 
     scmContainerMapping
-        .updateContainerState(container1.getContainerID(),
+        .updateContainerState(container1.getContainerInfo().getContainerID(),
             HddsProtos.LifeCycleEvent.DELETE);
     containers = containerStateManager.getMatchingContainerIDs(containerOwner,
         xceiverClientManager.getType(), xceiverClientManager.getFactor(),
@@ -230,7 +245,7 @@ public class TestContainerStateManager {
     Assert.assertEquals(1, containers);
 
     scmContainerMapping
-        .updateContainerState(container1.getContainerID(),
+        .updateContainerState(container1.getContainerInfo().getContainerID(),
             HddsProtos.LifeCycleEvent.CLEANUP);
     containers = containerStateManager.getMatchingContainerIDs(containerOwner,
         xceiverClientManager.getType(), xceiverClientManager.getFactor(),
@@ -239,13 +254,15 @@ public class TestContainerStateManager {
 
     // Allocate container1 and update its state from ALLOCATED -> CREATING ->
     // DELETING
-    ContainerInfo container2 = scm.getClientProtocolServer().allocateContainer(
-        xceiverClientManager.getType(),
-        xceiverClientManager.getFactor(), containerOwner);
-    scmContainerMapping.updateContainerState(container2.getContainerID(),
-        HddsProtos.LifeCycleEvent.CREATE);
+    ContainerWithPipeline container2 = scm.getClientProtocolServer()
+        .allocateContainer(
+            xceiverClientManager.getType(),
+            xceiverClientManager.getFactor(), containerOwner);
+    scmContainerMapping
+        .updateContainerState(container2.getContainerInfo().getContainerID(),
+            HddsProtos.LifeCycleEvent.CREATE);
     scmContainerMapping
-        .updateContainerState(container2.getContainerID(),
+        .updateContainerState(container2.getContainerInfo().getContainerID(),
             HddsProtos.LifeCycleEvent.TIMEOUT);
     containers = containerStateManager.getMatchingContainerIDs(containerOwner,
         xceiverClientManager.getType(), xceiverClientManager.getFactor(),
@@ -254,17 +271,21 @@ public class TestContainerStateManager {
 
     // Allocate container1 and update its state from ALLOCATED -> CREATING ->
     // OPEN -> CLOSING -> CLOSED
-    ContainerInfo container3 = scm.getClientProtocolServer().allocateContainer(
-        xceiverClientManager.getType(),
-        xceiverClientManager.getFactor(), containerOwner);
-    scmContainerMapping.updateContainerState(container3.getContainerID(),
-        HddsProtos.LifeCycleEvent.CREATE);
-    scmContainerMapping.updateContainerState(container3.getContainerID(),
-        HddsProtos.LifeCycleEvent.CREATED);
-    scmContainerMapping.updateContainerState(container3.getContainerID(),
-        HddsProtos.LifeCycleEvent.FINALIZE);
+    ContainerWithPipeline container3 = scm.getClientProtocolServer()
+        .allocateContainer(
+            xceiverClientManager.getType(),
+            xceiverClientManager.getFactor(), containerOwner);
+    scmContainerMapping
+        .updateContainerState(container3.getContainerInfo().getContainerID(),
+            HddsProtos.LifeCycleEvent.CREATE);
+    scmContainerMapping
+        .updateContainerState(container3.getContainerInfo().getContainerID(),
+            HddsProtos.LifeCycleEvent.CREATED);
+    scmContainerMapping
+        .updateContainerState(container3.getContainerInfo().getContainerID(),
+            HddsProtos.LifeCycleEvent.FINALIZE);
     scmContainerMapping
-        .updateContainerState(container3.getContainerID(),
+        .updateContainerState(container3.getContainerInfo().getContainerID(),
             HddsProtos.LifeCycleEvent.CLOSE);
     containers = containerStateManager.getMatchingContainerIDs(containerOwner,
         xceiverClientManager.getType(), xceiverClientManager.getFactor(),
@@ -274,12 +295,14 @@ public class TestContainerStateManager {
 
   @Test
   public void testUpdatingAllocatedBytes() throws Exception {
-    ContainerInfo container1 = scm.getClientProtocolServer().allocateContainer(
-        xceiverClientManager.getType(),
+    ContainerWithPipeline container1 = scm.getClientProtocolServer()
+        .allocateContainer(xceiverClientManager.getType(),
         xceiverClientManager.getFactor(), containerOwner);
-    scmContainerMapping.updateContainerState(container1.getContainerID(),
+    scmContainerMapping.updateContainerState(container1
+            .getContainerInfo().getContainerID(),
         HddsProtos.LifeCycleEvent.CREATE);
-    scmContainerMapping.updateContainerState(container1.getContainerID(),
+    scmContainerMapping.updateContainerState(container1
+            .getContainerInfo().getContainerID(),
         HddsProtos.LifeCycleEvent.CREATED);
 
     Random ran = new Random();
@@ -292,18 +315,18 @@ public class TestContainerStateManager {
           .getMatchingContainer(size, containerOwner,
               xceiverClientManager.getType(), xceiverClientManager.getFactor(),
               HddsProtos.LifeCycleState.OPEN);
-      Assert.assertEquals(container1.getContainerID(), info.getContainerID());
+      Assert.assertEquals(container1.getContainerInfo().getContainerID(),
+          info.getContainerID());
 
       ContainerMapping containerMapping =
-          (ContainerMapping)scmContainerMapping;
+          (ContainerMapping) scmContainerMapping;
       // manually trigger a flush, this will persist the allocated bytes value
       // to disk
       containerMapping.flushContainerInfo();
 
       // the persisted value should always be equal to allocated size.
-      byte[] containerBytes =
-          containerMapping.getContainerStore().get(
-              Longs.toByteArray(container1.getContainerID()));
+      byte[] containerBytes = containerMapping.getContainerStore().get(
+          Longs.toByteArray(container1.getContainerInfo().getContainerID()));
       HddsProtos.SCMContainerInfo infoProto =
           HddsProtos.SCMContainerInfo.PARSER.parseFrom(containerBytes);
       ContainerInfo currentInfo = ContainerInfo.fromProtobuf(infoProto);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/7ca4f0ce/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestContainerOperations.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestContainerOperations.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestContainerOperations.java
index d4c9d4f..129cf04 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestContainerOperations.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestContainerOperations.java
@@ -18,7 +18,7 @@
 package org.apache.hadoop.ozone;
 
 import org.apache.hadoop.hdds.conf.OzoneConfiguration;
-import org.apache.hadoop.hdds.scm.container.common.helpers.ContainerInfo;
+import org.apache.hadoop.hdds.scm.container.common.helpers.ContainerWithPipeline;
 import org.apache.hadoop.ipc.ProtobufRpcEngine;
 import org.apache.hadoop.ipc.RPC;
 import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
@@ -28,7 +28,6 @@ import org.apache.hadoop.hdds.scm.ScmConfigKeys;
 import org.apache.hadoop.hdds.scm.XceiverClientManager;
 import org.apache.hadoop.hdds.scm.client.ContainerOperationClient;
 import org.apache.hadoop.hdds.scm.client.ScmClient;
-import org.apache.hadoop.hdds.scm.container.common.helpers.Pipeline;
 import org.apache.hadoop.hdds.scm.protocolPB.StorageContainerLocationProtocolClientSideTranslatorPB;
 import org.apache.hadoop.hdds.scm.protocolPB.StorageContainerLocationProtocolPB;
 import org.junit.AfterClass;
@@ -78,12 +77,12 @@ public class TestContainerOperations {
    */
   @Test
   public void testCreate() throws Exception {
-    ContainerInfo container = storageClient.createContainer(HddsProtos
+    ContainerWithPipeline container = storageClient.createContainer(HddsProtos
         .ReplicationType.STAND_ALONE, HddsProtos.ReplicationFactor
         .ONE, "OZONE");
-    assertEquals(container.getContainerID(),
-        storageClient.getContainer(container.getContainerID()).
-            getContainerID());
+    assertEquals(container.getContainerInfo().getContainerID(), storageClient
+        .getContainer(container.getContainerInfo().getContainerID())
+        .getContainerID());
   }
 
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/7ca4f0ce/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestStorageContainerManager.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestStorageContainerManager.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestStorageContainerManager.java
index 0c1d8f2..d07097c 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestStorageContainerManager.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestStorageContainerManager.java
@@ -22,7 +22,7 @@ import java.io.IOException;
 
 import org.apache.commons.lang3.RandomUtils;
 import org.apache.hadoop.hdds.conf.OzoneConfiguration;
-import org.apache.hadoop.hdds.scm.container.common.helpers.ContainerInfo;
+import org.apache.hadoop.hdds.scm.container.common.helpers.ContainerWithPipeline;
 import org.apache.hadoop.hdds.scm.server.SCMClientProtocolServer;
 import org.apache.hadoop.hdds.scm.server.SCMStorage;
 import org.apache.hadoop.hdds.scm.node.NodeManager;
@@ -131,7 +131,7 @@ public class TestStorageContainerManager {
       }
 
       try {
-        ContainerInfo container2 = mockClientServer
+        ContainerWithPipeline container2 = mockClientServer
             .allocateContainer(xceiverClientManager.getType(),
             HddsProtos.ReplicationFactor.ONE,  "OZONE");
         if (expectPermissionDenied) {
@@ -144,7 +144,7 @@ public class TestStorageContainerManager {
       }
 
       try {
-        ContainerInfo container3 = mockClientServer
+        ContainerWithPipeline container3 = mockClientServer
             .allocateContainer(xceiverClientManager.getType(),
             HddsProtos.ReplicationFactor.ONE, "OZONE");
         if (expectPermissionDenied) {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/7ca4f0ce/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestStorageContainerManagerHelper.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestStorageContainerManagerHelper.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestStorageContainerManagerHelper.java
index c937980..4c2a904 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestStorageContainerManagerHelper.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestStorageContainerManagerHelper.java
@@ -23,7 +23,7 @@ import com.google.common.primitives.Longs;
 import org.apache.commons.lang3.RandomStringUtils;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.StorageType;
-import org.apache.hadoop.hdds.scm.container.common.helpers.ContainerInfo;
+import org.apache.hadoop.hdds.scm.container.common.helpers.ContainerWithPipeline;
 import org.apache.hadoop.hdfs.DFSUtil;
 import org.apache.hadoop.hdfs.server.datanode.ObjectStoreHandler;
 import org.apache.hadoop.hdds.protocol.DatanodeDetails;
@@ -158,9 +158,11 @@ public class TestStorageContainerManagerHelper {
 
   private MetadataStore getContainerMetadata(Long containerID)
       throws IOException {
-    ContainerInfo container = cluster.getStorageContainerManager()
-        .getClientProtocolServer().getContainer(containerID);
-    DatanodeDetails leadDN = container.getPipeline().getLeader();
+    ContainerWithPipeline containerWithPipeline = cluster
+        .getStorageContainerManager().getClientProtocolServer()
+        .getContainerWithPipeline(containerID);
+
+    DatanodeDetails leadDN = containerWithPipeline.getPipeline().getLeader();
     OzoneContainer containerServer =
         getContainerServerByDatanodeUuid(leadDN.getUuidString());
     ContainerData containerData = containerServer.getContainerManager()

http://git-wip-us.apache.org/repos/asf/hadoop/blob/7ca4f0ce/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneRpcClient.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneRpcClient.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneRpcClient.java
index cafe5db..214382e 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneRpcClient.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneRpcClient.java
@@ -390,8 +390,8 @@ public class TestOzoneRpcClient {
         keyInfo.getLatestVersionLocations().getLocationList()) {
       ContainerInfo container =
           storageContainerLocationClient.getContainer(info.getContainerID());
-      if ((container.getPipeline().getFactor() != replicationFactor) ||
-          (container.getPipeline().getType() != replicationType)) {
+      if (!container.getReplicationFactor().equals(replicationFactor) || (
+          container.getReplicationType() != replicationType)) {
         return false;
       }
     }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/7ca4f0ce/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestCloseContainerByPipeline.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestCloseContainerByPipeline.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestCloseContainerByPipeline.java
index 265c82b..3e514e7 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestCloseContainerByPipeline.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestCloseContainerByPipeline.java
@@ -23,8 +23,6 @@ import org.apache.hadoop.hdds.client.ReplicationType;
 import org.apache.hadoop.hdds.conf.OzoneConfiguration;
 import org.apache.hadoop.hdds.protocol.DatanodeDetails;
 import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
-import org.apache.hadoop.hdds.scm.container.ContainerID;
-import org.apache.hadoop.hdds.scm.container.common.helpers.ContainerInfo;
 import org.apache.hadoop.hdds.scm.container.common.helpers.StorageContainerException;
 import org.apache.hadoop.ozone.HddsDatanodeService;
 import org.apache.hadoop.ozone.MiniOzoneCluster;
@@ -35,7 +33,6 @@ import org.apache.hadoop.ozone.client.OzoneClient;
 import org.apache.hadoop.ozone.client.OzoneClientFactory;
 import org.apache.hadoop.ozone.client.io.OzoneOutputStream;
 import org.apache.hadoop.ozone.container.common.helpers.ContainerData;
-import org.apache.hadoop.ozone.container.common.statemachine.SCMConnectionManager;
 import org.apache.hadoop.ozone.container.ozoneimpl.OzoneContainer;
 import org.apache.hadoop.ozone.ksm.helpers.KsmKeyArgs;
 import org.apache.hadoop.ozone.ksm.helpers.KsmKeyLocationInfo;
@@ -112,9 +109,9 @@ public class TestCloseContainerByPipeline {
             .get(0).getBlocksLatestVersionOnly().get(0);
 
     long containerID = ksmKeyLocationInfo.getContainerID();
-    List<DatanodeDetails> datanodes =
-        cluster.getStorageContainerManager().getContainerInfo(containerID)
-            .getPipeline().getMachines();
+    List<DatanodeDetails> datanodes = cluster.getStorageContainerManager()
+        .getScmContainerManager().getContainerWithPipeline(containerID)
+        .getPipeline().getMachines();
     Assert.assertTrue(datanodes.size() == 1);
 
     DatanodeDetails datanodeDetails = datanodes.get(0);
@@ -167,9 +164,9 @@ public class TestCloseContainerByPipeline {
             .get(0).getBlocksLatestVersionOnly().get(0);
 
     long containerID = ksmKeyLocationInfo.getContainerID();
-    List<DatanodeDetails> datanodes =
-        cluster.getStorageContainerManager().getContainerInfo(containerID)
-            .getPipeline().getMachines();
+    List<DatanodeDetails> datanodes = cluster.getStorageContainerManager()
+        .getScmContainerManager().getContainerWithPipeline(containerID)
+        .getPipeline().getMachines();
     Assert.assertTrue(datanodes.size() == 1);
 
     DatanodeDetails datanodeDetails = datanodes.get(0);
@@ -220,9 +217,9 @@ public class TestCloseContainerByPipeline {
             .get(0).getBlocksLatestVersionOnly().get(0);
 
     long containerID = ksmKeyLocationInfo.getContainerID();
-    List<DatanodeDetails> datanodes =
-        cluster.getStorageContainerManager().getContainerInfo(containerID)
-            .getPipeline().getMachines();
+    List<DatanodeDetails> datanodes = cluster.getStorageContainerManager()
+        .getScmContainerManager().getContainerWithPipeline(containerID)
+        .getPipeline().getMachines();
     Assert.assertTrue(datanodes.size() == 3);
 
     GenericTestUtils.LogCapturer logCapturer =

http://git-wip-us.apache.org/repos/asf/hadoop/blob/7ca4f0ce/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/ksm/TestContainerReportWithKeys.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/ksm/TestContainerReportWithKeys.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/ksm/TestContainerReportWithKeys.java
index bafba32..1cc7ff8 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/ksm/TestContainerReportWithKeys.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/ksm/TestContainerReportWithKeys.java
@@ -22,6 +22,7 @@ import org.apache.hadoop.hdds.client.ReplicationFactor;
 import org.apache.hadoop.hdds.client.ReplicationType;
 import org.apache.hadoop.hdds.conf.OzoneConfiguration;
 import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
+import org.apache.hadoop.hdds.scm.container.common.helpers.ContainerInfo;
 import org.apache.hadoop.ozone.MiniOzoneCluster;
 import org.apache.hadoop.ozone.OzoneConfigKeys;
 import org.apache.hadoop.ozone.OzoneConsts;
@@ -32,7 +33,6 @@ import org.apache.hadoop.ozone.container.common.interfaces.ContainerManager;
 import org.apache.hadoop.ozone.ksm.helpers.KsmKeyArgs;
 import org.apache.hadoop.ozone.ksm.helpers.KsmKeyLocationInfo;
 import org.apache.hadoop.hdds.scm.server.StorageContainerManager;
-import org.apache.hadoop.hdds.scm.container.common.helpers.ContainerInfo;
 import org.apache.hadoop.hdds.scm.container.common.helpers.StorageContainerException;
 import org.junit.AfterClass;
 import org.junit.BeforeClass;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/7ca4f0ce/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestAllocateContainer.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestAllocateContainer.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestAllocateContainer.java
index b1e9d26..144c562 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestAllocateContainer.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestAllocateContainer.java
@@ -17,14 +17,12 @@
  */
 package org.apache.hadoop.ozone.scm;
 
-import org.apache.commons.lang3.RandomStringUtils;
-import org.apache.hadoop.hdds.scm.container.common.helpers.ContainerInfo;
+import org.apache.hadoop.hdds.scm.container.common.helpers.ContainerWithPipeline;
 import org.apache.hadoop.io.IOUtils;
 import org.apache.hadoop.ozone.MiniOzoneCluster;
 import org.apache.hadoop.hdds.conf.OzoneConfiguration;
 import org.apache.hadoop.hdds.scm.XceiverClientManager;
 import org.apache.hadoop.hdds.scm.protocolPB.StorageContainerLocationProtocolClientSideTranslatorPB;
-import org.apache.hadoop.hdds.scm.container.common.helpers.Pipeline;
 import org.junit.AfterClass;
 import org.junit.Assert;
 import org.junit.BeforeClass;
@@ -68,7 +66,7 @@ public class TestAllocateContainer {
 
   @Test
   public void testAllocate() throws Exception {
-    ContainerInfo container = storageContainerLocationClient.allocateContainer(
+    ContainerWithPipeline container = storageContainerLocationClient.allocateContainer(
         xceiverClientManager.getType(),
         xceiverClientManager.getFactor(),
         containerOwner);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/7ca4f0ce/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestContainerSmallFile.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestContainerSmallFile.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestContainerSmallFile.java
index ce1fe46..42bb936 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestContainerSmallFile.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestContainerSmallFile.java
@@ -19,7 +19,7 @@ package org.apache.hadoop.ozone.scm;
 
 import org.apache.hadoop.hdds.client.BlockID;
 import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos;
-import org.apache.hadoop.hdds.scm.container.common.helpers.ContainerInfo;
+import org.apache.hadoop.hdds.scm.container.common.helpers.ContainerWithPipeline;
 import org.apache.hadoop.io.IOUtils;
 import org.apache.hadoop.ozone.MiniOzoneCluster;
 import org.apache.hadoop.hdds.conf.OzoneConfiguration;
@@ -81,17 +81,18 @@ public class TestContainerSmallFile {
   @Test
   public void testAllocateWrite() throws Exception {
     String traceID = UUID.randomUUID().toString();
-    ContainerInfo container =
+    ContainerWithPipeline container =
         storageContainerLocationClient.allocateContainer(
             xceiverClientManager.getType(),
             HddsProtos.ReplicationFactor.ONE, containerOwner);
-    XceiverClientSpi client = xceiverClientManager.acquireClient(
-        container.getPipeline(), container.getContainerID());
+    XceiverClientSpi client = xceiverClientManager
+        .acquireClient(container.getPipeline(),
+            container.getContainerInfo().getContainerID());
     ContainerProtocolCalls.createContainer(client,
-        container.getContainerID(), traceID);
+        container.getContainerInfo().getContainerID(), traceID);
 
     BlockID blockID = ContainerTestHelper.getTestBlockID(
-        container.getContainerID());
+        container.getContainerInfo().getContainerID());
     ContainerProtocolCalls.writeSmallFile(client, blockID,
         "data123".getBytes(), traceID);
     ContainerProtos.GetSmallFileResponseProto response =
@@ -104,20 +105,21 @@ public class TestContainerSmallFile {
   @Test
   public void testInvalidKeyRead() throws Exception {
     String traceID = UUID.randomUUID().toString();
-    ContainerInfo container =
+    ContainerWithPipeline container =
         storageContainerLocationClient.allocateContainer(
             xceiverClientManager.getType(),
             HddsProtos.ReplicationFactor.ONE, containerOwner);
-    XceiverClientSpi client = xceiverClientManager.acquireClient(
-        container.getPipeline(), container.getContainerID());
+    XceiverClientSpi client = xceiverClientManager
+        .acquireClient(container.getPipeline(),
+            container.getContainerInfo().getContainerID());
     ContainerProtocolCalls.createContainer(client,
-        container.getContainerID(), traceID);
+        container.getContainerInfo().getContainerID(), traceID);
 
     thrown.expect(StorageContainerException.class);
     thrown.expectMessage("Unable to find the key");
 
     BlockID blockID = ContainerTestHelper.getTestBlockID(
-        container.getContainerID());
+        container.getContainerInfo().getContainerID());
     // Try to read a Key Container Name
     ContainerProtos.GetSmallFileResponseProto response =
         ContainerProtocolCalls.readSmallFile(client, blockID, traceID);
@@ -128,20 +130,20 @@ public class TestContainerSmallFile {
   public void testInvalidContainerRead() throws Exception {
     String traceID = UUID.randomUUID().toString();
     long nonExistContainerID = 8888L;
-    ContainerInfo container =
+    ContainerWithPipeline container =
         storageContainerLocationClient.allocateContainer(
             xceiverClientManager.getType(),
             HddsProtos.ReplicationFactor.ONE, containerOwner);
-    XceiverClientSpi client = xceiverClientManager.
-        acquireClient(container.getPipeline(), container.getContainerID());
+    XceiverClientSpi client = xceiverClientManager
+        .acquireClient(container.getPipeline(),
+            container.getContainerInfo().getContainerID());
     ContainerProtocolCalls.createContainer(client,
-        container.getContainerID(), traceID);
+        container.getContainerInfo().getContainerID(), traceID);
     BlockID blockID = ContainerTestHelper.getTestBlockID(
-        container.getContainerID());
+        container.getContainerInfo().getContainerID());
     ContainerProtocolCalls.writeSmallFile(client, blockID,
         "data123".getBytes(), traceID);
 
-
     thrown.expect(StorageContainerException.class);
     thrown.expectMessage("Unable to find the container");
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/7ca4f0ce/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestSCMCli.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestSCMCli.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestSCMCli.java
index 732221a..a6bb586 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestSCMCli.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestSCMCli.java
@@ -19,6 +19,7 @@ package org.apache.hadoop.ozone.scm;
 
 import com.google.common.primitives.Longs;
 import org.apache.hadoop.hdds.protocol.DatanodeDetails;
+import org.apache.hadoop.hdds.scm.container.common.helpers.ContainerWithPipeline;
 import org.apache.hadoop.hdds.scm.server.StorageContainerManager;
 import org.apache.hadoop.io.IOUtils;
 import org.apache.hadoop.ozone.MiniOzoneCluster;
@@ -136,7 +137,7 @@ public class TestSCMCli {
   private boolean containerExist(long containerID) {
     try {
       ContainerInfo container = scm.getClientProtocolServer()
-          .getContainer(containerID);
+          .getContainerWithPipeline(containerID).getContainerInfo();
       return container != null
           && containerID == container.getContainerID();
     } catch (IOException e) {
@@ -157,31 +158,34 @@ public class TestSCMCli {
     // 1. Test to delete a non-empty container.
     // ****************************************
     // Create an non-empty container
-    ContainerInfo container = containerOperationClient
+    ContainerWithPipeline container = containerOperationClient
         .createContainer(xceiverClientManager.getType(),
             HddsProtos.ReplicationFactor.ONE, containerOwner);
 
     ContainerData cdata = ContainerData
         .getFromProtBuf(containerOperationClient.readContainer(
-            container.getContainerID(), container.getPipeline()), conf);
-    KeyUtils.getDB(cdata, conf).put(Longs.toByteArray(container.getContainerID()),
-        "someKey".getBytes());
-    Assert.assertTrue(containerExist(container.getContainerID()));
+            container.getContainerInfo().getContainerID()), conf);
+    KeyUtils.getDB(cdata, conf)
+        .put(Longs.toByteArray(container.getContainerInfo().getContainerID()),
+            "someKey".getBytes());
+    Assert.assertTrue(
+        containerExist(container.getContainerInfo().getContainerID()));
 
     // Gracefully delete a container should fail because it is open.
-    delCmd = new String[] {"-container", "-delete", "-c",
-        Long.toString(container.getContainerID())};
+    delCmd = new String[]{"-container", "-delete", "-c",
+        Long.toString(container.getContainerInfo().getContainerID())};
     testErr = new ByteArrayOutputStream();
     ByteArrayOutputStream out = new ByteArrayOutputStream();
     exitCode = runCommandAndGetOutput(delCmd, out, testErr);
     assertEquals(EXECUTION_ERROR, exitCode);
     assertTrue(testErr.toString()
         .contains("Deleting an open container is not allowed."));
-    Assert.assertTrue(containerExist(container.getContainerID()));
+    Assert.assertTrue(
+        containerExist(container.getContainerInfo().getContainerID()));
 
     // Close the container
     containerOperationClient.closeContainer(
-        container.getContainerID(), container.getPipeline());
+        container.getContainerInfo().getContainerID());
 
     // Gracefully delete a container should fail because it is not empty.
     testErr = new ByteArrayOutputStream();
@@ -189,45 +193,49 @@ public class TestSCMCli {
     assertEquals(EXECUTION_ERROR, exitCode2);
     assertTrue(testErr.toString()
         .contains("Container cannot be deleted because it is not empty."));
-    Assert.assertTrue(containerExist(container.getContainerID()));
+    Assert.assertTrue(
+        containerExist(container.getContainerInfo().getContainerID()));
 
     // Try force delete again.
-    delCmd = new String[] {"-container", "-delete", "-c",
-        Long.toString(container.getContainerID()), "-f"};
+    delCmd = new String[]{"-container", "-delete", "-c",
+        Long.toString(container.getContainerInfo().getContainerID()), "-f"};
     exitCode = runCommandAndGetOutput(delCmd, out, null);
     assertEquals("Expected success, found:", ResultCode.SUCCESS, exitCode);
-    assertFalse(containerExist(container.getContainerID()));
+    assertFalse(containerExist(container.getContainerInfo().getContainerID()));
 
     // ****************************************
     // 2. Test to delete an empty container.
     // ****************************************
     // Create an empty container
-    ContainerInfo emptyContainer = containerOperationClient
+    ContainerWithPipeline emptyContainer = containerOperationClient
         .createContainer(xceiverClientManager.getType(),
             HddsProtos.ReplicationFactor.ONE, containerOwner);
-    containerOperationClient.closeContainer(emptyContainer.getContainerID(),
-        container.getPipeline());
-    Assert.assertTrue(containerExist(emptyContainer.getContainerID()));
+    containerOperationClient
+        .closeContainer(emptyContainer.getContainerInfo().getContainerID());
+    Assert.assertTrue(
+        containerExist(emptyContainer.getContainerInfo().getContainerID()));
 
     // Successfully delete an empty container.
-    delCmd = new String[] {"-container", "-delete", "-c",
-        Long.toString(emptyContainer.getContainerID())};
+    delCmd = new String[]{"-container", "-delete", "-c",
+        Long.toString(emptyContainer.getContainerInfo().getContainerID())};
     exitCode = runCommandAndGetOutput(delCmd, out, null);
     assertEquals(ResultCode.SUCCESS, exitCode);
-    assertFalse(containerExist(emptyContainer.getContainerID()));
+    assertFalse(
+        containerExist(emptyContainer.getContainerInfo().getContainerID()));
 
     // After the container is deleted,
     // another container can now be recreated.
-    ContainerInfo newContainer = containerOperationClient.
+    ContainerWithPipeline newContainer = containerOperationClient.
         createContainer(xceiverClientManager.getType(),
             HddsProtos.ReplicationFactor.ONE, containerOwner);
-    Assert.assertTrue(containerExist(newContainer.getContainerID()));
+    Assert.assertTrue(
+        containerExist(newContainer.getContainerInfo().getContainerID()));
 
     // ****************************************
     // 3. Test to delete a non-exist container.
     // ****************************************
-    long nonExistContainerID =  ContainerTestHelper.getTestContainerID();
-    delCmd = new String[] {"-container", "-delete", "-c",
+    long nonExistContainerID = ContainerTestHelper.getTestContainerID();
+    delCmd = new String[]{"-container", "-delete", "-c",
         Long.toString(nonExistContainerID)};
     testErr = new ByteArrayOutputStream();
     exitCode = runCommandAndGetOutput(delCmd, out, testErr);
@@ -250,45 +258,33 @@ public class TestSCMCli {
         "LeaderID: %s\n" +
         "Datanodes: [%s]\n";
 
-    String formatStrWithHash =
-        "Container id: %s\n" +
-        "Container State: %s\n" +
-        "Container Hash: %s\n" +
-        "Container DB Path: %s\n" +
-        "Container Path: %s\n" +
-        "Container Metadata: {%s}\n" +
-        "LeaderID: %s\n" +
-        "Datanodes: [%s]\n";
-
     // Test a non-exist container
     String containerID =
         Long.toString(ContainerTestHelper.getTestContainerID());
-    String[] info = { "-container", "-info", containerID };
+    String[] info = {"-container", "-info", containerID};
     int exitCode = runCommandAndGetOutput(info, null, null);
     assertEquals("Expected Execution Error, Did not find that.",
         EXECUTION_ERROR, exitCode);
 
     // Create an empty container.
-    ContainerInfo container = containerOperationClient
+    ContainerWithPipeline container = containerOperationClient
         .createContainer(xceiverClientManager.getType(),
             HddsProtos.ReplicationFactor.ONE, containerOwner);
-    ContainerData data = ContainerData
-        .getFromProtBuf(containerOperationClient.
-            readContainer(container.getContainerID(),
-                container.getPipeline()), conf);
+    ContainerData data = ContainerData.getFromProtBuf(containerOperationClient
+        .readContainer(container.getContainerInfo().getContainerID()), conf);
 
-    info = new String[] { "-container", "-info", "-c",
-        Long.toString(container.getContainerID()) };
+    info = new String[]{"-container", "-info", "-c",
+        Long.toString(container.getContainerInfo().getContainerID())};
     ByteArrayOutputStream out = new ByteArrayOutputStream();
     exitCode = runCommandAndGetOutput(info, out, null);
     assertEquals("Expected Success, did not find it.", ResultCode.SUCCESS,
-            exitCode);
+        exitCode);
 
     String openStatus = data.isOpen() ? "OPEN" : "CLOSED";
-    String expected =
-        String.format(formatStr, container.getContainerID(), openStatus,
-        data.getDBPath(), data.getContainerPath(), "",
-        datanodeDetails.getHostName(), datanodeDetails.getHostName());
+    String expected = String.format(formatStr, container.getContainerInfo()
+            .getContainerID(), openStatus, data.getDBPath(),
+        data.getContainerPath(), "", datanodeDetails.getHostName(),
+        datanodeDetails.getHostName());
     assertEquals(expected, out.toString());
 
     out.reset();
@@ -299,40 +295,39 @@ public class TestSCMCli {
             HddsProtos.ReplicationFactor.ONE, containerOwner);
     data = ContainerData
         .getFromProtBuf(containerOperationClient.readContainer(
-            container.getContainerID(), container.getPipeline()), conf);
+            container.getContainerInfo().getContainerID()), conf);
     KeyUtils.getDB(data, conf)
         .put(containerID.getBytes(), "someKey".getBytes());
 
-    info = new String[] { "-container", "-info", "-c",
-        Long.toString(container.getContainerID()) };
+    info = new String[]{"-container", "-info", "-c",
+        Long.toString(container.getContainerInfo().getContainerID())};
     exitCode = runCommandAndGetOutput(info, out, null);
     assertEquals(ResultCode.SUCCESS, exitCode);
 
     openStatus = data.isOpen() ? "OPEN" : "CLOSED";
-    expected = String.format(formatStr, container.getContainerID(), openStatus,
-        data.getDBPath(), data.getContainerPath(), "",
-        datanodeDetails.getHostName(), datanodeDetails.getHostName());
+    expected = String.format(formatStr, container.getContainerInfo().
+            getContainerID(), openStatus, data.getDBPath(),
+        data.getContainerPath(), "", datanodeDetails.getHostName(),
+        datanodeDetails.getHostName());
     assertEquals(expected, out.toString());
 
     out.reset();
 
-
     // Close last container and test info again.
-    containerOperationClient.closeContainer(
-        container.getContainerID(), container.getPipeline());
+    containerOperationClient
+        .closeContainer(container.getContainerInfo().getContainerID());
 
-    info = new String[] { "-container", "-info", "-c",
-        Long.toString(container.getContainerID()) };
+    info = new String[]{"-container", "-info", "-c",
+        Long.toString(container.getContainerInfo().getContainerID())};
     exitCode = runCommandAndGetOutput(info, out, null);
     assertEquals(ResultCode.SUCCESS, exitCode);
-    data = ContainerData
-        .getFromProtBuf(containerOperationClient.readContainer(
-            container.getContainerID(), container.getPipeline()), conf);
+    data = ContainerData.getFromProtBuf(containerOperationClient
+        .readContainer(container.getContainerInfo().getContainerID()), conf);
 
     openStatus = data.isOpen() ? "OPEN" : "CLOSED";
     expected = String
-        .format(formatStr, container.getContainerID(), openStatus,
-            data.getDBPath(), data.getContainerPath(), "",
+        .format(formatStr, container.getContainerInfo().getContainerID(),
+            openStatus, data.getDBPath(), data.getContainerPath(), "",
             datanodeDetails.getHostName(), datanodeDetails.getHostName());
     assertEquals(expected, out.toString());
   }
@@ -360,10 +355,10 @@ public class TestSCMCli {
     // Create 20 containers for testing.
     List<ContainerInfo> containers = new ArrayList<>();
     for (int index = 0; index < 20; index++) {
-      ContainerInfo container = containerOperationClient.createContainer(
+      ContainerWithPipeline container = containerOperationClient.createContainer(
           xceiverClientManager.getType(), HddsProtos.ReplicationFactor.ONE,
           containerOwner);
-      containers.add(container);
+      containers.add(container.getContainerInfo());
     }
 
     ByteArrayOutputStream out = new ByteArrayOutputStream();
@@ -417,11 +412,11 @@ public class TestSCMCli {
 
   @Test
   public void testCloseContainer() throws Exception {
-    long containerID = containerOperationClient
-        .createContainer(xceiverClientManager.getType(),
-            HddsProtos.ReplicationFactor.ONE, containerOwner).getContainerID();
+    long containerID = containerOperationClient.createContainer(
+        xceiverClientManager.getType(), HddsProtos.ReplicationFactor.ONE,
+        containerOwner).getContainerInfo().getContainerID();
     ContainerInfo container = scm.getClientProtocolServer()
-        .getContainer(containerID);
+        .getContainerWithPipeline(containerID).getContainerInfo();
     assertNotNull(container);
     assertEquals(containerID, container.getContainerID());
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/7ca4f0ce/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestXceiverClientManager.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestXceiverClientManager.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestXceiverClientManager.java
index 56f3c7a..a75264e 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestXceiverClientManager.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestXceiverClientManager.java
@@ -20,7 +20,7 @@ package org.apache.hadoop.ozone.scm;
 import com.google.common.cache.Cache;
 import org.apache.commons.lang3.RandomStringUtils;
 import org.apache.hadoop.hdds.scm.ScmConfigKeys;
-import org.apache.hadoop.hdds.scm.container.common.helpers.ContainerInfo;
+import org.apache.hadoop.hdds.scm.container.common.helpers.ContainerWithPipeline;
 import org.apache.hadoop.io.IOUtils;
 import org.apache.hadoop.ozone.MiniOzoneCluster;
 import org.apache.hadoop.hdds.conf.OzoneConfiguration;
@@ -98,22 +98,25 @@ public class TestXceiverClientManager {
         shouldUseGrpc);
     XceiverClientManager clientManager = new XceiverClientManager(conf);
 
-    ContainerInfo container1 = storageContainerLocationClient
+    ContainerWithPipeline container1 = storageContainerLocationClient
         .allocateContainer(clientManager.getType(), clientManager.getFactor(),
             containerOwner);
-    XceiverClientSpi client1 = clientManager.acquireClient(container1.getPipeline(),
-        container1.getContainerID());
+    XceiverClientSpi client1 = clientManager
+        .acquireClient(container1.getPipeline(),
+            container1.getContainerInfo().getContainerID());
     Assert.assertEquals(1, client1.getRefcount());
 
-    ContainerInfo container2 = storageContainerLocationClient
+    ContainerWithPipeline container2 = storageContainerLocationClient
         .allocateContainer(clientManager.getType(), clientManager.getFactor(),
             containerOwner);
-    XceiverClientSpi client2 = clientManager.acquireClient(container2.getPipeline(),
-        container2.getContainerID());
+    XceiverClientSpi client2 = clientManager
+        .acquireClient(container2.getPipeline(),
+            container2.getContainerInfo().getContainerID());
     Assert.assertEquals(1, client2.getRefcount());
 
-    XceiverClientSpi client3 = clientManager.acquireClient(container1.getPipeline(),
-        container1.getContainerID());
+    XceiverClientSpi client3 = clientManager
+        .acquireClient(container1.getPipeline(),
+            container1.getContainerInfo().getContainerID());
     Assert.assertEquals(2, client3.getRefcount());
     Assert.assertEquals(2, client1.getRefcount());
     Assert.assertEquals(client1, client3);
@@ -132,32 +135,35 @@ public class TestXceiverClientManager {
     Cache<Long, XceiverClientSpi> cache =
         clientManager.getClientCache();
 
-    ContainerInfo container1 =
+    ContainerWithPipeline container1 =
         storageContainerLocationClient.allocateContainer(
             clientManager.getType(), HddsProtos.ReplicationFactor.ONE,
             containerOwner);
-    XceiverClientSpi client1 = clientManager.acquireClient(container1.getPipeline(),
-        container1.getContainerID());
+    XceiverClientSpi client1 = clientManager
+        .acquireClient(container1.getPipeline(),
+            container1.getContainerInfo().getContainerID());
     Assert.assertEquals(1, client1.getRefcount());
     Assert.assertEquals(container1.getPipeline(),
         client1.getPipeline());
 
-    ContainerInfo container2 =
+    ContainerWithPipeline container2 =
         storageContainerLocationClient.allocateContainer(
             clientManager.getType(),
             HddsProtos.ReplicationFactor.ONE, containerOwner);
-    XceiverClientSpi client2 = clientManager.acquireClient(container2.getPipeline(),
-        container2.getContainerID());
+    XceiverClientSpi client2 = clientManager
+        .acquireClient(container2.getPipeline(),
+            container2.getContainerInfo().getContainerID());
     Assert.assertEquals(1, client2.getRefcount());
     Assert.assertNotEquals(client1, client2);
 
     // least recent container (i.e containerName1) is evicted
-    XceiverClientSpi nonExistent1 = cache.getIfPresent(container1.getContainerID());
+    XceiverClientSpi nonExistent1 = cache
+        .getIfPresent(container1.getContainerInfo().getContainerID());
     Assert.assertEquals(null, nonExistent1);
     // However container call should succeed because of refcount on the client.
     String traceID1 = "trace" + RandomStringUtils.randomNumeric(4);
     ContainerProtocolCalls.createContainer(client1,
-        container1.getContainerID(),  traceID1);
+        container1.getContainerInfo().getContainerID(), traceID1);
 
     // After releasing the client, this connection should be closed
     // and any container operations should fail
@@ -166,7 +172,7 @@ public class TestXceiverClientManager {
     String expectedMessage = "This channel is not connected.";
     try {
       ContainerProtocolCalls.createContainer(client1,
-          container1.getContainerID(), traceID1);
+          container1.getContainerInfo().getContainerID(), traceID1);
       Assert.fail("Create container should throw exception on closed"
           + "client");
     } catch (Exception e) {
@@ -186,28 +192,30 @@ public class TestXceiverClientManager {
     Cache<Long, XceiverClientSpi> cache =
         clientManager.getClientCache();
 
-    ContainerInfo container1 =
+    ContainerWithPipeline container1 =
         storageContainerLocationClient.allocateContainer(
             clientManager.getType(),
             clientManager.getFactor(), containerOwner);
-    XceiverClientSpi client1 = clientManager.acquireClient(container1.getPipeline(),
-        container1.getContainerID());
+    XceiverClientSpi client1 = clientManager
+        .acquireClient(container1.getPipeline(),
+            container1.getContainerInfo().getContainerID());
     Assert.assertEquals(1, client1.getRefcount());
 
     clientManager.releaseClient(client1);
     Assert.assertEquals(0, client1.getRefcount());
 
-    ContainerInfo container2 = storageContainerLocationClient
+    ContainerWithPipeline container2 = storageContainerLocationClient
         .allocateContainer(clientManager.getType(), clientManager.getFactor(),
             containerOwner);
-    XceiverClientSpi client2 = clientManager.acquireClient(container2.getPipeline(),
-        container2.getContainerID());
+    XceiverClientSpi client2 = clientManager
+        .acquireClient(container2.getPipeline(),
+            container2.getContainerInfo().getContainerID());
     Assert.assertEquals(1, client2.getRefcount());
     Assert.assertNotEquals(client1, client2);
 
-
     // now client 1 should be evicted
-    XceiverClientSpi nonExistent = cache.getIfPresent(container1.getContainerID());
+    XceiverClientSpi nonExistent = cache
+        .getIfPresent(container1.getContainerInfo().getContainerID());
     Assert.assertEquals(null, nonExistent);
 
     // Any container operation should now fail
@@ -215,7 +223,7 @@ public class TestXceiverClientManager {
     String expectedMessage = "This channel is not connected.";
     try {
       ContainerProtocolCalls.createContainer(client1,
-          container1.getContainerID(), traceID2);
+          container1.getContainerInfo().getContainerID(), traceID2);
       Assert.fail("Create container should throw exception on closed"
           + "client");
     } catch (Exception e) {


---------------------------------------------------------------------
To unsubscribe, e-mail: common-commits-unsubscribe@hadoop.apache.org
For additional commands, e-mail: common-commits-help@hadoop.apache.org


[41/50] [abbrv] hadoop git commit: HDDS-167. Rename KeySpaceManager to OzoneManager. Contributed by Arpit Agarwal.

Posted by vi...@apache.org.
HDDS-167. Rename KeySpaceManager to OzoneManager. Contributed by Arpit Agarwal.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/061b1685
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/061b1685
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/061b1685

Branch: refs/heads/HDFS-12090
Commit: 061b168529a9cd5d6a3a482c890bacdb49186368
Parents: e4bf38c
Author: Arpit Agarwal <ar...@apache.org>
Authored: Fri Jul 6 12:09:05 2018 -0700
Committer: Arpit Agarwal <ar...@apache.org>
Committed: Fri Jul 6 12:09:05 2018 -0700

----------------------------------------------------------------------
 .gitignore                                      |    4 +
 dev-support/bin/ozone-dist-layout-stitching     |    2 +-
 .../hadoop-common/src/main/conf/hadoop-env.sh   |    6 +-
 .../src/main/compose/ozone/docker-compose.yaml  |    6 +-
 .../src/main/compose/ozone/docker-config        |    2 +-
 .../src/main/compose/ozoneperf/README.md        |    4 +-
 .../main/compose/ozoneperf/docker-compose.yaml  |    6 +-
 .../src/main/compose/ozoneperf/docker-config    |    2 +-
 .../apache/hadoop/hdds/scm/ScmConfigKeys.java   |    2 +-
 .../org/apache/hadoop/ozone/OzoneConsts.java    |   22 +-
 .../org/apache/hadoop/ozone/common/Storage.java |    6 +-
 .../main/proto/ScmBlockLocationProtocol.proto   |   10 +-
 hadoop-hdds/common/src/main/proto/hdds.proto    |    6 +-
 .../common/src/main/resources/ozone-default.xml |  120 +-
 .../src/main/resources/webapps/static/ozone.js  |    4 +-
 .../webapps/static/templates/config.html        |    4 +-
 .../hadoop/hdds/scm/block/DeletedBlockLog.java  |    2 +-
 .../scm/container/states/ContainerStateMap.java |    6 +-
 .../hadoop/hdds/scm/node/CommandQueue.java      |    2 +-
 .../hdds/scm/server/SCMBlockProtocolServer.java |    2 +-
 .../hadoop/hdds/scm/cli/OzoneBaseCLI.java       |    2 +-
 .../src/test/acceptance/basic/basic.robot       |    6 +-
 .../test/acceptance/basic/docker-compose.yaml   |    8 +-
 .../src/test/acceptance/basic/docker-config     |    4 +-
 .../src/test/acceptance/basic/ozone-shell.robot |   18 +-
 .../src/test/acceptance/commonlib.robot         |    4 +-
 .../test/acceptance/ozonefs/docker-compose.yaml |    8 +-
 .../src/test/acceptance/ozonefs/docker-config   |    4 +-
 .../src/test/acceptance/ozonefs/ozonefs.robot   |    6 +-
 .../apache/hadoop/ozone/client/BucketArgs.java  |    4 +-
 .../hadoop/ozone/client/OzoneClientFactory.java |   86 +-
 .../apache/hadoop/ozone/client/OzoneKey.java    |    2 +-
 .../apache/hadoop/ozone/client/VolumeArgs.java  |    4 +-
 .../ozone/client/io/ChunkGroupInputStream.java  |   24 +-
 .../ozone/client/io/ChunkGroupOutputStream.java |   54 +-
 .../client/rest/DefaultRestServerSelector.java  |    2 +-
 .../hadoop/ozone/client/rest/RestClient.java    |   15 +-
 .../ozone/client/rest/RestServerSelector.java   |    2 +-
 .../hadoop/ozone/client/rpc/RpcClient.java      |  142 +-
 .../ozone/client/TestHddsClientUtils.java       |   24 +-
 hadoop-ozone/common/pom.xml                     |    2 +-
 hadoop-ozone/common/src/main/bin/ozone          |    9 +-
 hadoop-ozone/common/src/main/bin/start-ozone.sh |   16 +-
 hadoop-ozone/common/src/main/bin/stop-ozone.sh  |   16 +-
 .../java/org/apache/hadoop/ozone/KsmUtils.java  |   95 --
 .../java/org/apache/hadoop/ozone/OmUtils.java   |   94 ++
 .../apache/hadoop/ozone/freon/OzoneGetConf.java |   16 +-
 .../apache/hadoop/ozone/ksm/KSMConfigKeys.java  |   81 --
 .../hadoop/ozone/ksm/helpers/KsmBucketArgs.java |  233 ---
 .../hadoop/ozone/ksm/helpers/KsmBucketInfo.java |  235 ---
 .../hadoop/ozone/ksm/helpers/KsmKeyArgs.java    |  119 --
 .../hadoop/ozone/ksm/helpers/KsmKeyInfo.java    |  277 ----
 .../ozone/ksm/helpers/KsmKeyLocationInfo.java   |  129 --
 .../ksm/helpers/KsmKeyLocationInfoGroup.java    |  118 --
 .../ozone/ksm/helpers/KsmOzoneAclMap.java       |  110 --
 .../hadoop/ozone/ksm/helpers/KsmVolumeArgs.java |  223 ---
 .../ozone/ksm/helpers/OpenKeySession.java       |   50 -
 .../hadoop/ozone/ksm/helpers/ServiceInfo.java   |  237 ---
 .../hadoop/ozone/ksm/helpers/VolumeArgs.java    |  140 --
 .../hadoop/ozone/ksm/helpers/package-info.java  |   18 -
 .../apache/hadoop/ozone/ksm/package-info.java   |   21 -
 .../ksm/protocol/KeySpaceManagerProtocol.java   |  252 ----
 .../hadoop/ozone/ksm/protocol/package-info.java |   19 -
 ...ceManagerProtocolClientSideTranslatorPB.java |  769 ----------
 .../protocolPB/KeySpaceManagerProtocolPB.java   |   34 -
 .../ozone/ksm/protocolPB/package-info.java      |   19 -
 .../apache/hadoop/ozone/om/OMConfigKeys.java    |   81 ++
 .../hadoop/ozone/om/helpers/OmBucketArgs.java   |  233 +++
 .../hadoop/ozone/om/helpers/OmBucketInfo.java   |  235 +++
 .../hadoop/ozone/om/helpers/OmKeyArgs.java      |  119 ++
 .../hadoop/ozone/om/helpers/OmKeyInfo.java      |  277 ++++
 .../ozone/om/helpers/OmKeyLocationInfo.java     |  129 ++
 .../om/helpers/OmKeyLocationInfoGroup.java      |  118 ++
 .../hadoop/ozone/om/helpers/OmOzoneAclMap.java  |  110 ++
 .../hadoop/ozone/om/helpers/OmVolumeArgs.java   |  223 +++
 .../hadoop/ozone/om/helpers/OpenKeySession.java |   50 +
 .../hadoop/ozone/om/helpers/ServiceInfo.java    |  237 +++
 .../hadoop/ozone/om/helpers/VolumeArgs.java     |  140 ++
 .../hadoop/ozone/om/helpers/package-info.java   |   18 +
 .../apache/hadoop/ozone/om/package-info.java    |   21 +
 .../ozone/om/protocol/OzoneManagerProtocol.java |  252 ++++
 .../hadoop/ozone/om/protocol/package-info.java  |   19 +
 ...neManagerProtocolClientSideTranslatorPB.java |  769 ++++++++++
 .../om/protocolPB/OzoneManagerProtocolPB.java   |   34 +
 .../ozone/om/protocolPB/package-info.java       |   19 +
 .../hadoop/ozone/protocolPB/KSMPBHelper.java    |  113 --
 .../hadoop/ozone/protocolPB/OMPBHelper.java     |  113 ++
 .../main/proto/KeySpaceManagerProtocol.proto    |  474 ------
 .../src/main/proto/OzoneManagerProtocol.proto   |  480 +++++++
 hadoop-ozone/docs/content/GettingStarted.md     |   18 +-
 hadoop-ozone/docs/content/Metrics.md            |   10 +-
 hadoop-ozone/docs/content/_index.md             |   12 +-
 hadoop-ozone/docs/static/OzoneOverview.svg      |    2 +-
 .../apache/hadoop/ozone/MiniOzoneCluster.java   |   24 +-
 .../hadoop/ozone/MiniOzoneClusterImpl.java      |   66 +-
 .../ozone/TestOzoneConfigurationFields.java     |    4 +-
 .../ozone/TestStorageContainerManager.java      |   18 +-
 .../TestStorageContainerManagerHelper.java      |   12 +-
 .../ozone/client/rest/TestOzoneRestClient.java  |    6 +-
 .../ozone/client/rpc/TestOzoneRpcClient.java    |   18 +-
 .../commandhandler/TestBlockDeletion.java       |   45 +-
 .../TestCloseContainerByPipeline.java           |   35 +-
 .../TestCloseContainerHandler.java              |   14 +-
 .../ozone/ksm/TestContainerReportWithKeys.java  |  143 --
 .../apache/hadoop/ozone/ksm/TestKSMMetrcis.java |  306 ----
 .../apache/hadoop/ozone/ksm/TestKSMSQLCli.java  |  284 ----
 .../hadoop/ozone/ksm/TestKeySpaceManager.java   | 1350 ------------------
 .../ksm/TestKeySpaceManagerRestInterface.java   |  135 --
 .../ozone/ksm/TestKsmBlockVersioning.java       |  253 ----
 .../ksm/TestMultipleContainerReadWrite.java     |  215 ---
 .../ozone/om/TestContainerReportWithKeys.java   |  143 ++
 .../om/TestMultipleContainerReadWrite.java      |  215 +++
 .../hadoop/ozone/om/TestOmBlockVersioning.java  |  253 ++++
 .../apache/hadoop/ozone/om/TestOmMetrics.java   |  313 ++++
 .../apache/hadoop/ozone/om/TestOmSQLCli.java    |  284 ++++
 .../hadoop/ozone/om/TestOzoneManager.java       | 1349 +++++++++++++++++
 .../ozone/om/TestOzoneManagerRestInterface.java |  135 ++
 .../hadoop/ozone/ozShell/TestOzoneShell.java    |   14 +-
 .../hadoop/ozone/scm/TestContainerSQLCli.java   |    3 +-
 .../ozone/web/TestDistributedOzoneVolumes.java  |   12 +-
 .../hadoop/ozone/web/client/TestKeys.java       |   58 +-
 .../src/test/resources/webapps/ksm/.gitkeep     |   15 -
 .../resources/webapps/ozoneManager/.gitkeep     |   15 +
 .../server/datanode/ObjectStoreHandler.java     |   33 +-
 .../ozone/web/handlers/KeyProcessTemplate.java  |    4 +-
 .../web/handlers/VolumeProcessTemplate.java     |    4 +-
 .../web/storage/DistributedStorageHandler.java  |  153 +-
 .../apache/hadoop/ozone/ksm/BucketManager.java  |   79 -
 .../hadoop/ozone/ksm/BucketManagerImpl.java     |  315 ----
 .../org/apache/hadoop/ozone/ksm/KSMMXBean.java  |   31 -
 .../hadoop/ozone/ksm/KSMMetadataManager.java    |  253 ----
 .../ozone/ksm/KSMMetadataManagerImpl.java       |  526 -------
 .../org/apache/hadoop/ozone/ksm/KSMMetrics.java |  459 ------
 .../org/apache/hadoop/ozone/ksm/KSMStorage.java |   90 --
 .../hadoop/ozone/ksm/KeyDeletingService.java    |  142 --
 .../org/apache/hadoop/ozone/ksm/KeyManager.java |  175 ---
 .../apache/hadoop/ozone/ksm/KeyManagerImpl.java |  566 --------
 .../hadoop/ozone/ksm/KeySpaceManager.java       |  912 ------------
 .../ozone/ksm/KeySpaceManagerHttpServer.java    |   78 -
 .../hadoop/ozone/ksm/OpenKeyCleanupService.java |  117 --
 .../ozone/ksm/ServiceListJSONServlet.java       |  103 --
 .../apache/hadoop/ozone/ksm/VolumeManager.java  |  100 --
 .../hadoop/ozone/ksm/VolumeManagerImpl.java     |  391 -----
 .../ozone/ksm/exceptions/KSMException.java      |  118 --
 .../ozone/ksm/exceptions/package-info.java      |   19 -
 .../apache/hadoop/ozone/ksm/package-info.java   |   21 -
 .../apache/hadoop/ozone/om/BucketManager.java   |   79 +
 .../hadoop/ozone/om/BucketManagerImpl.java      |  315 ++++
 .../hadoop/ozone/om/KeyDeletingService.java     |  142 ++
 .../org/apache/hadoop/ozone/om/KeyManager.java  |  175 +++
 .../apache/hadoop/ozone/om/KeyManagerImpl.java  |  566 ++++++++
 .../org/apache/hadoop/ozone/om/OMMXBean.java    |   31 +
 .../hadoop/ozone/om/OMMetadataManager.java      |  253 ++++
 .../org/apache/hadoop/ozone/om/OMMetrics.java   |  459 ++++++
 .../org/apache/hadoop/ozone/om/OMStorage.java   |   90 ++
 .../hadoop/ozone/om/OmMetadataManagerImpl.java  |  526 +++++++
 .../hadoop/ozone/om/OpenKeyCleanupService.java  |  117 ++
 .../apache/hadoop/ozone/om/OzoneManager.java    |  911 ++++++++++++
 .../hadoop/ozone/om/OzoneManagerHttpServer.java |   78 +
 .../hadoop/ozone/om/ServiceListJSONServlet.java |  103 ++
 .../apache/hadoop/ozone/om/VolumeManager.java   |  100 ++
 .../hadoop/ozone/om/VolumeManagerImpl.java      |  390 +++++
 .../hadoop/ozone/om/exceptions/OMException.java |  118 ++
 .../ozone/om/exceptions/package-info.java       |   19 +
 .../apache/hadoop/ozone/om/package-info.java    |   21 +
 ...ceManagerProtocolServerSideTranslatorPB.java |  559 --------
 ...neManagerProtocolServerSideTranslatorPB.java |  571 ++++++++
 .../hadoop/ozone/protocolPB/package-info.java   |    2 +-
 .../src/main/webapps/ksm/index.html             |   70 -
 .../src/main/webapps/ksm/ksm-metrics.html       |   44 -
 .../ozone-manager/src/main/webapps/ksm/ksm.js   |  110 --
 .../ozone-manager/src/main/webapps/ksm/main.css |   23 -
 .../src/main/webapps/ksm/main.html              |   18 -
 .../src/main/webapps/ozoneManager/index.html    |   70 +
 .../src/main/webapps/ozoneManager/main.css      |   23 +
 .../src/main/webapps/ozoneManager/main.html     |   18 +
 .../main/webapps/ozoneManager/om-metrics.html   |   44 +
 .../main/webapps/ozoneManager/ozoneManager.js   |  110 ++
 .../hadoop/ozone/ksm/TestBucketManagerImpl.java |  395 -----
 .../hadoop/ozone/ksm/TestChunkStreams.java      |  234 ---
 .../ksm/TestKeySpaceManagerHttpServer.java      |  141 --
 .../apache/hadoop/ozone/ksm/package-info.java   |   21 -
 .../hadoop/ozone/om/TestBucketManagerImpl.java  |  394 +++++
 .../hadoop/ozone/om/TestChunkStreams.java       |  234 +++
 .../ozone/om/TestOzoneManagerHttpServer.java    |  141 ++
 .../apache/hadoop/ozone/om/package-info.java    |   21 +
 .../hadoop/fs/ozone/contract/OzoneContract.java |    4 +-
 .../org/apache/hadoop/ozone/scm/cli/SQLCLI.java |   48 +-
 188 files changed, 13252 insertions(+), 13237 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/061b1685/.gitignore
----------------------------------------------------------------------
diff --git a/.gitignore b/.gitignore
index 428950b..d555036 100644
--- a/.gitignore
+++ b/.gitignore
@@ -50,6 +50,10 @@ patchprocess/
 hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/package-lock.json
 hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/yarn-error.log
 
+# Ignore files generated by HDDS acceptance tests.
+hadoop-ozone/acceptance-test/docker-compose.log
+hadoop-ozone/acceptance-test/junit-results.xml
+
 #robotframework outputs
 log.html
 output.xml

http://git-wip-us.apache.org/repos/asf/hadoop/blob/061b1685/dev-support/bin/ozone-dist-layout-stitching
----------------------------------------------------------------------
diff --git a/dev-support/bin/ozone-dist-layout-stitching b/dev-support/bin/ozone-dist-layout-stitching
index be330d5..c30a37d 100755
--- a/dev-support/bin/ozone-dist-layout-stitching
+++ b/dev-support/bin/ozone-dist-layout-stitching
@@ -148,7 +148,7 @@ run copy "${ROOT}/hadoop-ozone/tools/target/hadoop-ozone-tools-${HDDS_VERSION}"
 mkdir -p "./share/hadoop/ozonefs"
 cp "${ROOT}/hadoop-ozone/ozonefs/target/hadoop-ozone-filesystem-${HDDS_VERSION}.jar" "./share/hadoop/ozonefs/hadoop-ozone-filesystem.jar"
 # Optional documentation, could be missing
-cp -r "${ROOT}/hadoop-ozone/docs/target/classes/webapps/docs" ./share/hadoop/ozone/webapps/ksm/
+cp -r "${ROOT}/hadoop-ozone/docs/target/classes/webapps/docs" ./share/hadoop/ozone/webapps/ozoneManager/
 cp -r "${ROOT}/hadoop-ozone/docs/target/classes/webapps/docs" ./share/hadoop/hdds/webapps/scm/
 
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/061b1685/hadoop-common-project/hadoop-common/src/main/conf/hadoop-env.sh
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/conf/hadoop-env.sh b/hadoop-common-project/hadoop-common/src/main/conf/hadoop-env.sh
index 6573a81..3826f67 100644
--- a/hadoop-common-project/hadoop-common/src/main/conf/hadoop-env.sh
+++ b/hadoop-common-project/hadoop-common/src/main/conf/hadoop-env.sh
@@ -404,13 +404,13 @@ esac
 # export HDFS_DFSROUTER_OPTS=""
 
 ###
-# HDFS Key Space Manager specific parameters
+# Ozone Manager specific parameters
 ###
-# Specify the JVM options to be used when starting the HDFS Key Space Manager.
+# Specify the JVM options to be used when starting the Ozone Manager.
 # These options will be appended to the options specified as HADOOP_OPTS
 # and therefore may override any similar flags set in HADOOP_OPTS
 #
-# export HDFS_KSM_OPTS=""
+# export HDFS_OM_OPTS=""
 
 ###
 # HDFS StorageContainerManager specific parameters

http://git-wip-us.apache.org/repos/asf/hadoop/blob/061b1685/hadoop-dist/src/main/compose/ozone/docker-compose.yaml
----------------------------------------------------------------------
diff --git a/hadoop-dist/src/main/compose/ozone/docker-compose.yaml b/hadoop-dist/src/main/compose/ozone/docker-compose.yaml
index 512c649..bb5e8dd 100644
--- a/hadoop-dist/src/main/compose/ozone/docker-compose.yaml
+++ b/hadoop-dist/src/main/compose/ozone/docker-compose.yaml
@@ -25,17 +25,17 @@ services:
       command: ["/opt/hadoop/bin/ozone","datanode"]
       env_file:
         - ./docker-config
-   ksm:
+   ozoneManager:
       image: apache/hadoop-runner
       volumes:
          - ../../ozone:/opt/hadoop
       ports:
          - 9874:9874
       environment:
-         ENSURE_KSM_INITIALIZED: /data/metadata/ksm/current/VERSION
+         ENSURE_OM_INITIALIZED: /data/metadata/ozoneManager/current/VERSION
       env_file:
           - ./docker-config
-      command: ["/opt/hadoop/bin/ozone","ksm"]
+      command: ["/opt/hadoop/bin/ozone","om"]
    scm:
       image: apache/hadoop-runner
       volumes:

http://git-wip-us.apache.org/repos/asf/hadoop/blob/061b1685/hadoop-dist/src/main/compose/ozone/docker-config
----------------------------------------------------------------------
diff --git a/hadoop-dist/src/main/compose/ozone/docker-config b/hadoop-dist/src/main/compose/ozone/docker-config
index 632f870..50abb18 100644
--- a/hadoop-dist/src/main/compose/ozone/docker-config
+++ b/hadoop-dist/src/main/compose/ozone/docker-config
@@ -14,7 +14,7 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-OZONE-SITE.XML_ozone.ksm.address=ksm
+OZONE-SITE.XML_ozone.om.address=ozoneManager
 OZONE-SITE.XML_ozone.scm.names=scm
 OZONE-SITE.XML_ozone.enabled=True
 OZONE-SITE.XML_ozone.scm.datanode.id=/data/datanode.id

http://git-wip-us.apache.org/repos/asf/hadoop/blob/061b1685/hadoop-dist/src/main/compose/ozoneperf/README.md
----------------------------------------------------------------------
diff --git a/hadoop-dist/src/main/compose/ozoneperf/README.md b/hadoop-dist/src/main/compose/ozoneperf/README.md
index a78f208..527ff41 100644
--- a/hadoop-dist/src/main/compose/ozoneperf/README.md
+++ b/hadoop-dist/src/main/compose/ozoneperf/README.md
@@ -67,7 +67,7 @@ http://localhost:9090/graph
 Example queries:
 
 ```
-Hadoop_KeySpaceManager_NumKeyCommits
-rate(Hadoop_KeySpaceManager_NumKeyCommits[10m])
+Hadoop_OzoneManager_NumKeyCommits
+rate(Hadoop_OzoneManager_NumKeyCommits[10m])
 rate(Hadoop_Ozone_BYTES_WRITTEN[10m])
 ```

http://git-wip-us.apache.org/repos/asf/hadoop/blob/061b1685/hadoop-dist/src/main/compose/ozoneperf/docker-compose.yaml
----------------------------------------------------------------------
diff --git a/hadoop-dist/src/main/compose/ozoneperf/docker-compose.yaml b/hadoop-dist/src/main/compose/ozoneperf/docker-compose.yaml
index 3233c11..6d1d9ca 100644
--- a/hadoop-dist/src/main/compose/ozoneperf/docker-compose.yaml
+++ b/hadoop-dist/src/main/compose/ozoneperf/docker-compose.yaml
@@ -26,7 +26,7 @@ services:
       command: ["/opt/hadoop/bin/ozone","datanode"]
       env_file:
         - ./docker-config
-   ksm:
+   ozoneManager:
       image: apache/hadoop-runner
       volumes:
         - ../../ozone:/opt/hadoop
@@ -34,10 +34,10 @@ services:
       ports:
          - 9874:9874
       environment:
-         ENSURE_KSM_INITIALIZED: /data/metadata/ksm/current/VERSION
+         ENSURE_OM_INITIALIZED: /data/metadata/ozoneManager/current/VERSION
       env_file:
           - ./docker-config
-      command: ["/opt/hadoop/bin/ozone","ksm"]
+      command: ["/opt/hadoop/bin/ozone","om"]
    scm:
       image: apache/hadoop-runner
       volumes:

http://git-wip-us.apache.org/repos/asf/hadoop/blob/061b1685/hadoop-dist/src/main/compose/ozoneperf/docker-config
----------------------------------------------------------------------
diff --git a/hadoop-dist/src/main/compose/ozoneperf/docker-config b/hadoop-dist/src/main/compose/ozoneperf/docker-config
index 2be22a7..2539950 100644
--- a/hadoop-dist/src/main/compose/ozoneperf/docker-config
+++ b/hadoop-dist/src/main/compose/ozoneperf/docker-config
@@ -14,7 +14,7 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-OZONE-SITE.XML_ozone.ksm.address=ksm
+OZONE-SITE.XML_ozone.om.address=ozoneManager
 OZONE-SITE.XML_ozone.scm.names=scm
 OZONE-SITE.XML_ozone.enabled=True
 OZONE-SITE.XML_ozone.scm.datanode.id=/data/datanode.id

http://git-wip-us.apache.org/repos/asf/hadoop/blob/061b1685/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/ScmConfigKeys.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/ScmConfigKeys.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/ScmConfigKeys.java
index ad326dc..4f1b1c8 100644
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/ScmConfigKeys.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/ScmConfigKeys.java
@@ -94,7 +94,7 @@ public final class ScmConfigKeys {
       "ozone.scm.datanode.port";
   public static final int OZONE_SCM_DATANODE_PORT_DEFAULT = 9861;
 
-  // OZONE_KSM_PORT_DEFAULT = 9862
+  // OZONE_OM_PORT_DEFAULT = 9862
   public static final String OZONE_SCM_BLOCK_CLIENT_PORT_KEY =
       "ozone.scm.block.client.port";
   public static final int OZONE_SCM_BLOCK_CLIENT_PORT_DEFAULT = 9863;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/061b1685/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConsts.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConsts.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConsts.java
index 08a5ffd..4fad5d8 100644
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConsts.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConsts.java
@@ -93,7 +93,7 @@ public final class OzoneConsts {
   public static final String BLOCK_DB = "block.db";
   public static final String OPEN_CONTAINERS_DB = "openContainers.db";
   public static final String DELETED_BLOCK_DB = "deletedBlock.db";
-  public static final String KSM_DB_NAME = "ksm.db";
+  public static final String OM_DB_NAME = "om.db";
 
   /**
    * Supports Bucket Versioning.
@@ -119,13 +119,13 @@ public final class OzoneConsts {
   public static final String OPEN_KEY_ID_DELIMINATOR = "#";
 
   /**
-   * KSM LevelDB prefixes.
+   * OM LevelDB prefixes.
    *
-   * KSM DB stores metadata as KV pairs with certain prefixes,
+   * OM DB stores metadata as KV pairs with certain prefixes,
    * prefix is used to improve the performance to get related
    * metadata.
    *
-   * KSM DB Schema:
+   * OM DB Schema:
    *  ----------------------------------------------------------
    *  |  KEY                                     |     VALUE   |
    *  ----------------------------------------------------------
@@ -140,13 +140,13 @@ public final class OzoneConsts {
    *  | #deleting#/volumeName/bucketName/keyName |  KeyInfo    |
    *  ----------------------------------------------------------
    */
-  public static final String KSM_VOLUME_PREFIX = "/#";
-  public static final String KSM_BUCKET_PREFIX = "/#";
-  public static final String KSM_KEY_PREFIX = "/";
-  public static final String KSM_USER_PREFIX = "$";
+  public static final String OM_VOLUME_PREFIX = "/#";
+  public static final String OM_BUCKET_PREFIX = "/#";
+  public static final String OM_KEY_PREFIX = "/";
+  public static final String OM_USER_PREFIX = "$";
 
   /**
-   * Max KSM Quota size of 1024 PB.
+   * Max OM Quota size of 1024 PB.
    */
   public static final long MAX_QUOTA_IN_BYTES = 1024L * 1024 * TB;
 
@@ -168,9 +168,9 @@ public final class OzoneConsts {
   public static final int INVALID_PORT = -1;
 
 
-  // The ServiceListJSONServlet context attribute where KeySpaceManager
+  // The ServiceListJSONServlet context attribute where OzoneManager
   // instance gets stored.
-  public static final String KSM_CONTEXT_ATTRIBUTE = "ozone.ksm";
+  public static final String OM_CONTEXT_ATTRIBUTE = "ozone.om";
 
   private OzoneConsts() {
     // Never Constructed

http://git-wip-us.apache.org/repos/asf/hadoop/blob/061b1685/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/common/Storage.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/common/Storage.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/common/Storage.java
index fb30d92..a32d559 100644
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/common/Storage.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/common/Storage.java
@@ -38,7 +38,7 @@ import java.util.Properties;
  * Local storage information is stored in a separate file VERSION.
  * It contains type of the node,
  * the storage layout version, the SCM id, and
- * the KSM/SCM state creation time.
+ * the OM/SCM state creation time.
  *
  */
 @InterfaceAudience.Private
@@ -127,7 +127,7 @@ public abstract class Storage {
   abstract protected Properties getNodeProperties();
 
   /**
-   * Sets the Node properties spaecific to KSM/SCM.
+   * Sets the Node properties spaecific to OM/SCM.
    */
   private void setNodeProperties() {
     Properties nodeProperties = getNodeProperties();
@@ -152,7 +152,7 @@ public abstract class Storage {
    * File {@code VERSION} contains the following fields:
    * <ol>
    * <li>node type</li>
-   * <li>KSM/SCM state creation time</li>
+   * <li>OM/SCM state creation time</li>
    * <li>other fields specific for this node type</li>
    * </ol>
    * The version file is always written last during storage directory updates.

http://git-wip-us.apache.org/repos/asf/hadoop/blob/061b1685/hadoop-hdds/common/src/main/proto/ScmBlockLocationProtocol.proto
----------------------------------------------------------------------
diff --git a/hadoop-hdds/common/src/main/proto/ScmBlockLocationProtocol.proto b/hadoop-hdds/common/src/main/proto/ScmBlockLocationProtocol.proto
index 7bea82a..53f408a 100644
--- a/hadoop-hdds/common/src/main/proto/ScmBlockLocationProtocol.proto
+++ b/hadoop-hdds/common/src/main/proto/ScmBlockLocationProtocol.proto
@@ -46,7 +46,7 @@ message AllocateScmBlockRequestProto {
 }
 
 /**
- * A delete key request sent by KSM to SCM, it contains
+ * A delete key request sent by OM to SCM, it contains
  * multiple number of keys (and their blocks).
  */
 message DeleteScmKeyBlocksRequestProto {
@@ -56,9 +56,9 @@ message DeleteScmKeyBlocksRequestProto {
 /**
  * A object key and all its associated blocks.
  * We need to encapsulate object key name plus the blocks in this potocol
- * because SCM needs to response KSM with the keys it has deleted.
+ * because SCM needs to response OM with the keys it has deleted.
  * If the response only contains blocks, it will be very expensive for
- * KSM to figure out what keys have been deleted.
+ * OM to figure out what keys have been deleted.
  */
 message KeyBlocks {
   required string key = 1;
@@ -66,7 +66,7 @@ message KeyBlocks {
 }
 
 /**
- * A delete key response from SCM to KSM, it contains multiple child-results.
+ * A delete key response from SCM to OM, it contains multiple child-results.
  * Each child-result represents a key deletion result, only if all blocks of
  * a key are successfully deleted, this key result is considered as succeed.
  */
@@ -111,7 +111,7 @@ message AllocateScmBlockResponseProto {
 }
 
 /**
- * Protocol used from KeySpaceManager to StorageContainerManager.
+ * Protocol used from OzoneManager to StorageContainerManager.
  * See request and response messages for details of the RPC calls.
  */
 service ScmBlockLocationProtocolService {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/061b1685/hadoop-hdds/common/src/main/proto/hdds.proto
----------------------------------------------------------------------
diff --git a/hadoop-hdds/common/src/main/proto/hdds.proto b/hadoop-hdds/common/src/main/proto/hdds.proto
index b9def2a..a5ce994 100644
--- a/hadoop-hdds/common/src/main/proto/hdds.proto
+++ b/hadoop-hdds/common/src/main/proto/hdds.proto
@@ -58,9 +58,9 @@ message KeyValue {
  * Type of the node.
  */
 enum NodeType {
-    KSM = 1;
-    SCM = 2;
-    DATANODE = 3;
+    OM = 1;         // Ozone Manager
+    SCM = 2;        // Storage Container Manager
+    DATANODE = 3;   // DataNode
 }
 
 // Should we rename NodeState to DatanodeState?

http://git-wip-us.apache.org/repos/asf/hadoop/blob/061b1685/hadoop-hdds/common/src/main/resources/ozone-default.xml
----------------------------------------------------------------------
diff --git a/hadoop-hdds/common/src/main/resources/ozone-default.xml b/hadoop-hdds/common/src/main/resources/ozone-default.xml
index 568d267..530fb09 100644
--- a/hadoop-hdds/common/src/main/resources/ozone-default.xml
+++ b/hadoop-hdds/common/src/main/resources/ozone-default.xml
@@ -21,7 +21,7 @@
 <!-- there.  If ozone-site.xml does not already exist, create it.      -->
 
 <!--Tags supported are OZONE, CBLOCK, MANAGEMENT, SECURITY, PERFORMANCE,   -->
-<!--DEBUG, CLIENT, SERVER, KSM, SCM, CRITICAL, RATIS, CONTAINER, REQUIRED, -->
+<!--DEBUG, CLIENT, SERVER, OM, SCM, CRITICAL, RATIS, CONTAINER, REQUIRED, -->
 <!--REST, STORAGE, PIPELINE, STANDALONE                                    -->
 
 <configuration>
@@ -254,122 +254,122 @@
     <description>
       Tells ozone which storage handler to use. The possible values are:
       distributed - The Ozone distributed storage handler, which speaks to
-      KSM/SCM on the backend and provides REST services to clients.
+      OM/SCM on the backend and provides REST services to clients.
       local - Local Storage handler strictly for testing - To be removed.
     </description>
   </property>
   <property>
     <name>ozone.key.deleting.limit.per.task</name>
     <value>1000</value>
-    <tag>KSM, PERFORMANCE</tag>
+    <tag>OM, PERFORMANCE</tag>
     <description>
       A maximum number of keys to be scanned by key deleting service
-      per time interval in KSM. Those keys are sent to delete metadata and
+      per time interval in OM. Those keys are sent to delete metadata and
       generate transactions in SCM for next async deletion between SCM
       and DataNode.
     </description>
   </property>
   <property>
-    <name>ozone.ksm.address</name>
+    <name>ozone.om.address</name>
     <value/>
-    <tag>KSM, REQUIRED</tag>
+    <tag>OM, REQUIRED</tag>
     <description>
-      The address of the Ozone KSM service. This allows clients to discover
-      the KSMs address.
+      The address of the Ozone OM service. This allows clients to discover
+      the address of the OM.
     </description>
   </property>
   <property>
-    <name>ozone.ksm.group.rights</name>
+    <name>ozone.om.group.rights</name>
     <value>READ_WRITE</value>
-    <tag>KSM, SECURITY</tag>
+    <tag>OM, SECURITY</tag>
     <description>
-      Default group permissions in Ozone KSM.
+      Default group permissions in Ozone OM.
     </description>
   </property>
   <property>
-    <name>ozone.ksm.handler.count.key</name>
+    <name>ozone.om.handler.count.key</name>
     <value>20</value>
-    <tag>KSM, PERFORMANCE</tag>
+    <tag>OM, PERFORMANCE</tag>
     <description>
-      The number of RPC handler threads for KSM service endpoints.
+      The number of RPC handler threads for OM service endpoints.
     </description>
   </property>
   <property>
-    <name>ozone.ksm.http-address</name>
+    <name>ozone.om.http-address</name>
     <value>0.0.0.0:9874</value>
-    <tag>KSM, MANAGEMENT</tag>
+    <tag>OM, MANAGEMENT</tag>
     <description>
-      The address and the base port where the KSM web UI will listen on.
+      The address and the base port where the OM web UI will listen on.
 
       If the port is 0, then the server will start on a free port. However, it
       is best to specify a well-known port, so it is easy to connect and see
-      the KSM management UI.
+      the OM management UI.
     </description>
   </property>
   <property>
-    <name>ozone.ksm.http-bind-host</name>
+    <name>ozone.om.http-bind-host</name>
     <value>0.0.0.0</value>
-    <tag>KSM, MANAGEMENT</tag>
+    <tag>OM, MANAGEMENT</tag>
     <description>
-      The actual address the KSM web server will bind to. If this optional
+      The actual address the OM web server will bind to. If this optional
       the address is set, it overrides only the hostname portion of
-      ozone.ksm.http-address.
+      ozone.om.http-address.
     </description>
   </property>
   <property>
-    <name>ozone.ksm.http.enabled</name>
+    <name>ozone.om.http.enabled</name>
     <value>true</value>
-    <tag>KSM, MANAGEMENT</tag>
+    <tag>OM, MANAGEMENT</tag>
     <description>
-      Property to enable or disable KSM web user interface.
+      Property to enable or disable OM web user interface.
     </description>
   </property>
   <property>
-    <name>ozone.ksm.https-address</name>
+    <name>ozone.om.https-address</name>
     <value>0.0.0.0:9875</value>
-    <tag>KSM, MANAGEMENT, SECURITY</tag>
+    <tag>OM, MANAGEMENT, SECURITY</tag>
     <description>
-      The address and the base port where the KSM web UI will listen
+      The address and the base port where the OM web UI will listen
       on using HTTPS.
       If the port is 0 then the server will start on a free port.
     </description>
   </property>
   <property>
-    <name>ozone.ksm.https-bind-host</name>
+    <name>ozone.om.https-bind-host</name>
     <value>0.0.0.0</value>
-    <tag>KSM, MANAGEMENT, SECURITY</tag>
+    <tag>OM, MANAGEMENT, SECURITY</tag>
     <description>
-      The actual address the KSM web server will bind to using HTTPS.
+      The actual address the OM web server will bind to using HTTPS.
       If this optional address is set, it overrides only the hostname portion of
-      ozone.ksm.http-address.
+      ozone.om.http-address.
     </description>
   </property>
   <property>
-    <name>ozone.ksm.keytab.file</name>
+    <name>ozone.om.keytab.file</name>
     <value/>
-    <tag>KSM, SECURITY</tag>
+    <tag>OM, SECURITY</tag>
     <description>
-      The keytab file for Kerberos authentication in KSM.
+      The keytab file for Kerberos authentication in OM.
     </description>
   </property>
   <property>
-    <name>ozone.ksm.db.cache.size.mb</name>
+    <name>ozone.om.db.cache.size.mb</name>
     <value>128</value>
-    <tag>KSM, PERFORMANCE</tag>
+    <tag>OM, PERFORMANCE</tag>
     <description>
-      The size of KSM DB cache in MB that used for caching files.
+      The size of OM DB cache in MB that used for caching files.
       This value is set to an abnormally low value in the default configuration.
       That is to make unit testing easy. Generally, this value should be set to
       something like 16GB or more, if you intend to use Ozone at scale.
 
-      A large value for this key allows a proportionally larger amount of KSM
-      metadata to be cached in memory. This makes KSM operations faster.
+      A large value for this key allows a proportionally larger amount of OM
+      metadata to be cached in memory. This makes OM operations faster.
     </description>
   </property>
   <property>
-    <name>ozone.ksm.user.max.volume</name>
+    <name>ozone.om.user.max.volume</name>
     <value>1024</value>
-    <tag>KSM, MANAGEMENT</tag>
+    <tag>OM, MANAGEMENT</tag>
     <description>
       The maximum number of volumes a user can have on a cluster.Increasing or
       decreasing this number has no real impact on ozone cluster. This is
@@ -379,11 +379,11 @@
     </description>
   </property>
   <property>
-    <name>ozone.ksm.user.rights</name>
+    <name>ozone.om.user.rights</name>
     <value>READ_WRITE</value>
-    <tag>KSM, SECURITY</tag>
+    <tag>OM, SECURITY</tag>
     <description>
-      Default user permissions used in KSM.
+      Default user permissions used in OM.
     </description>
   </property>
   <property>
@@ -393,20 +393,20 @@
     <description>
       This is used only for testing purposes. This value is used by the local
       storage handler to simulate a REST backend. This is useful only when
-      debugging the REST front end independent of KSM and SCM. To be removed.
+      debugging the REST front end independent of OM and SCM. To be removed.
     </description>
   </property>
   <property>
     <name>ozone.metadata.dirs</name>
     <value/>
-    <tag>OZONE, KSM, SCM, CONTAINER, REQUIRED, STORAGE</tag>
+    <tag>OZONE, OM, SCM, CONTAINER, REQUIRED, STORAGE</tag>
     <description>
-      Ozone metadata is shared among KSM, which acts as the namespace
+      Ozone metadata is shared among OM, which acts as the namespace
       manager for ozone, SCM which acts as the block manager and data nodes
       which maintain the name of the key(Key Name and BlockIDs). This
       replicated and distributed metadata store is maintained under the
       directory pointed by this key. Since metadata can be I/O intensive, at
-      least on KSM and SCM we recommend having SSDs. If you have the luxury
+      least on OM and SCM we recommend having SSDs. If you have the luxury
       of mapping this path to SSDs on all machines in the cluster, that will
       be excellent.
 
@@ -417,10 +417,10 @@
   <property>
     <name>ozone.metastore.impl</name>
     <value>RocksDB</value>
-    <tag>OZONE, KSM, SCM, CONTAINER, STORAGE</tag>
+    <tag>OZONE, OM, SCM, CONTAINER, STORAGE</tag>
     <description>
       Ozone metadata store implementation. Ozone metadata are well
-      distributed to multiple services such as ksm, scm. They are stored in
+      distributed to multiple services such as ozoneManager, scm. They are stored in
       some local key-value databases. This property determines which database
       library to use. Supported value is either LevelDB or RocksDB.
     </description>
@@ -429,7 +429,7 @@
   <property>
     <name>ozone.metastore.rocksdb.statistics</name>
     <value>ALL</value>
-    <tag>OZONE, KSM, SCM, STORAGE, PERFORMANCE</tag>
+    <tag>OZONE, OM, SCM, STORAGE, PERFORMANCE</tag>
     <description>
       The statistics level of the rocksdb store. If you use any value from
       org.rocksdb.StatsLevel (eg. ALL or EXCEPT_DETAILED_TIMERS), the rocksdb
@@ -672,7 +672,7 @@
       The heartbeat interval from a data node to SCM. Yes,
       it is not three but 30, since most data nodes will heart beating via Ratis
       heartbeats. If a client is not able to talk to a data node, it will notify
-      KSM/SCM eventually. So a 30 second HB seems to work. This assumes that
+      OM/SCM eventually. So a 30 second HB seems to work. This assumes that
       replication strategy used is Ratis if not, this value should be set to
       something smaller like 3 seconds.
     </description>
@@ -808,7 +808,7 @@
     <value/>
     <tag>OZONE, SECURITY</tag>
     <description>
-      The server principal used by the SCM and KSM for web UI SPNEGO
+      The server principal used by the SCM and OM for web UI SPNEGO
       authentication when Kerberos security is enabled. This is typically set to
       HTTP/_HOST@REALM.TLD The SPNEGO server principal begins with the prefix
       HTTP/ by convention.
@@ -867,9 +867,9 @@
   <property>
     <name>ozone.key.preallocation.maxsize</name>
     <value>134217728</value>
-    <tag>OZONE, KSM, PERFORMANCE</tag>
+    <tag>OZONE, OM, PERFORMANCE</tag>
     <description>
-      When a new key write request is sent to KSM, if a size is requested, at most
+      When a new key write request is sent to OM, if a size is requested, at most
       128MB of size is allocated at request time. If client needs more space for the
       write, separate block allocation requests will be made.
     </description>
@@ -938,7 +938,7 @@
   <property>
     <name>ozone.open.key.cleanup.service.interval.seconds</name>
     <value>86400</value>
-    <tag>OZONE, KSM, PERFORMANCE</tag>
+    <tag>OZONE, OM, PERFORMANCE</tag>
     <description>
       A background job periodically checks open key entries and delete the expired ones. This entry controls the
       interval of this cleanup check.
@@ -948,7 +948,7 @@
   <property>
     <name>ozone.open.key.expire.threshold</name>
     <value>86400</value>
-    <tag>OZONE, KSM, PERFORMANCE</tag>
+    <tag>OZONE, OM, PERFORMANCE</tag>
     <description>
       Controls how long an open key operation is considered active. Specifically, if a key
       has been open longer than the value of this config entry, that open key is considered as
@@ -958,12 +958,12 @@
 
   <property>
     <name>hadoop.tags.custom</name>
-    <value>OZONE,MANAGEMENT,SECURITY,PERFORMANCE,DEBUG,CLIENT,SERVER,KSM,SCM,CRITICAL,RATIS,CONTAINER,REQUIRED,REST,STORAGE,PIPELINE,STANDALONE</value>
+    <value>OZONE,MANAGEMENT,SECURITY,PERFORMANCE,DEBUG,CLIENT,SERVER,OM,SCM,CRITICAL,RATIS,CONTAINER,REQUIRED,REST,STORAGE,PIPELINE,STANDALONE</value>
   </property>
 
   <property>
     <name>ozone.tags.system</name>
-    <value>OZONE,MANAGEMENT,SECURITY,PERFORMANCE,DEBUG,CLIENT,SERVER,KSM,SCM,CRITICAL,RATIS,CONTAINER,REQUIRED,REST,STORAGE,PIPELINE,STANDALONE</value>
+    <value>OZONE,MANAGEMENT,SECURITY,PERFORMANCE,DEBUG,CLIENT,SERVER,OM,SCM,CRITICAL,RATIS,CONTAINER,REQUIRED,REST,STORAGE,PIPELINE,STANDALONE</value>
   </property>
 
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/061b1685/hadoop-hdds/framework/src/main/resources/webapps/static/ozone.js
----------------------------------------------------------------------
diff --git a/hadoop-hdds/framework/src/main/resources/webapps/static/ozone.js b/hadoop-hdds/framework/src/main/resources/webapps/static/ozone.js
index 411438a..c2ed2ad 100644
--- a/hadoop-hdds/framework/src/main/resources/webapps/static/ozone.js
+++ b/hadoop-hdds/framework/src/main/resources/webapps/static/ozone.js
@@ -270,7 +270,7 @@
       $http.get("conf?cmd=getOzoneTags")
         .then(function(response) {
           ctrl.tags = response.data;
-          var excludedTags = ['CBLOCK', 'KSM', 'SCM'];
+          var excludedTags = ['CBLOCK', 'OM', 'SCM'];
           for (var i = 0; i < excludedTags.length; i++) {
             var idx = ctrl.tags.indexOf(excludedTags[i]);
             // Remove CBLOCK related properties
@@ -302,7 +302,7 @@
       }
 
       ctrl.loadAll = function() {
-        $http.get("conf?cmd=getPropertyByTag&tags=KSM,SCM," + ctrl.tags)
+        $http.get("conf?cmd=getPropertyByTag&tags=OM,SCM," + ctrl.tags)
           .then(function(response) {
 
             ctrl.convertToArray(response.data);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/061b1685/hadoop-hdds/framework/src/main/resources/webapps/static/templates/config.html
----------------------------------------------------------------------
diff --git a/hadoop-hdds/framework/src/main/resources/webapps/static/templates/config.html b/hadoop-hdds/framework/src/main/resources/webapps/static/templates/config.html
index 6825750..b52f653 100644
--- a/hadoop-hdds/framework/src/main/resources/webapps/static/templates/config.html
+++ b/hadoop-hdds/framework/src/main/resources/webapps/static/templates/config.html
@@ -27,8 +27,8 @@
          ng-click="$ctrl.switchto('All')">All
       </a>
       <a class="btn"
-         ng-class="$ctrl.allSelected('KSM') ? 'btn-primary' :'btn-secondary'"
-         ng-click="$ctrl.switchto('KSM')">KSM</a>
+         ng-class="$ctrl.allSelected('OM') ? 'btn-primary' :'btn-secondary'"
+         ng-click="$ctrl.switchto('OM')">OM</a>
       <a class="btn"
          ng-class="$ctrl.allSelected('SCM') ? 'btn-primary' :'btn-secondary'"
          ng-click="$ctrl.switchto('SCM')">SCM</a>

http://git-wip-us.apache.org/repos/asf/hadoop/blob/061b1685/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/block/DeletedBlockLog.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/block/DeletedBlockLog.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/block/DeletedBlockLog.java
index 4f4c755..28103be 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/block/DeletedBlockLog.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/block/DeletedBlockLog.java
@@ -28,7 +28,7 @@ import java.util.Map;
 /**
  * The DeletedBlockLog is a persisted log in SCM to keep tracking
  * container blocks which are under deletion. It maintains info
- * about under-deletion container blocks that notified by KSM,
+ * about under-deletion container blocks that notified by OM,
  * and the state how it is processed.
  */
 public interface DeletedBlockLog extends Closeable {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/061b1685/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/states/ContainerStateMap.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/states/ContainerStateMap.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/states/ContainerStateMap.java
index 3ada8fe..c23b1fd 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/states/ContainerStateMap.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/states/ContainerStateMap.java
@@ -53,9 +53,9 @@ import static org.apache.hadoop.hdds.scm.exceptions.SCMException.ResultCodes
  * client to able to write to it.
  * <p>
  * 2. Owners - Each instance of Name service, for example, Namenode of HDFS or
- * Key Space Manager (KSM) of Ozone or CBlockServer --  is an owner. It is
- * possible to have many KSMs for a Ozone cluster and only one SCM. But SCM
- * keeps the data from each KSM in separate bucket, never mixing them. To
+ * Ozone Manager (OM) of Ozone or CBlockServer --  is an owner. It is
+ * possible to have many OMs for a Ozone cluster and only one SCM. But SCM
+ * keeps the data from each OM in separate bucket, never mixing them. To
  * write data, often we have to find all open containers for a specific owner.
  * <p>
  * 3. ReplicationType - The clients are allowed to specify what kind of

http://git-wip-us.apache.org/repos/asf/hadoop/blob/061b1685/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/CommandQueue.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/CommandQueue.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/CommandQueue.java
index edbcfa1..996478c 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/CommandQueue.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/CommandQueue.java
@@ -33,7 +33,7 @@ import java.util.concurrent.locks.ReentrantLock;
 /**
  * Command Queue is queue of commands for the datanode.
  * <p>
- * Node manager, container Manager and key space managers can queue commands for
+ * Node manager, container Manager and Ozone managers can queue commands for
  * datanodes into this queue. These commands will be send in the order in which
  * there where queued.
  */

http://git-wip-us.apache.org/repos/asf/hadoop/blob/061b1685/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMBlockProtocolServer.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMBlockProtocolServer.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMBlockProtocolServer.java
index 98fe9a1..3bb284e 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMBlockProtocolServer.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMBlockProtocolServer.java
@@ -152,7 +152,7 @@ public class SCMBlockProtocolServer implements ScmBlockLocationProtocol {
   @Override
   public List<DeleteBlockGroupResult> deleteKeyBlocks(
       List<BlockGroup> keyBlocksInfoList) throws IOException {
-    LOG.info("SCM is informed by KSM to delete {} blocks", keyBlocksInfoList
+    LOG.info("SCM is informed by OM to delete {} blocks", keyBlocksInfoList
         .size());
     List<DeleteBlockGroupResult> results = new ArrayList<>();
     for (BlockGroup keyBlocks : keyBlocksInfoList) {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/061b1685/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/OzoneBaseCLI.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/OzoneBaseCLI.java b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/OzoneBaseCLI.java
index 727c81a..7828445 100644
--- a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/OzoneBaseCLI.java
+++ b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/OzoneBaseCLI.java
@@ -27,7 +27,7 @@ import java.io.IOException;
 import java.net.URISyntaxException;
 
 /**
- * This class is the base CLI for scm, ksm and scmadm.
+ * This class is the base CLI for scm, om and scmadm.
  */
 public abstract class OzoneBaseCLI extends Configured implements Tool {
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/061b1685/hadoop-ozone/acceptance-test/src/test/acceptance/basic/basic.robot
----------------------------------------------------------------------
diff --git a/hadoop-ozone/acceptance-test/src/test/acceptance/basic/basic.robot b/hadoop-ozone/acceptance-test/src/test/acceptance/basic/basic.robot
index c741588..6d6fea0 100644
--- a/hadoop-ozone/acceptance-test/src/test/acceptance/basic/basic.robot
+++ b/hadoop-ozone/acceptance-test/src/test/acceptance/basic/basic.robot
@@ -39,12 +39,12 @@ Test rest interface
                     Should contain      ${result}       200 OK
 
 Check webui static resources
-    ${result} =			Execute on		scm		curl -s -I http://localhost:9876/static/bootstrap-3.3.7/js/bootstrap.min.js
+    ${result} =			Execute on		scm		            curl -s -I http://localhost:9876/static/bootstrap-3.3.7/js/bootstrap.min.js
 	 Should contain		${result}		200
-    ${result} =			Execute on		ksm		curl -s -I http://localhost:9874/static/bootstrap-3.3.7/js/bootstrap.min.js
+    ${result} =			Execute on		ozoneManager		curl -s -I http://localhost:9874/static/bootstrap-3.3.7/js/bootstrap.min.js
 	 Should contain		${result}		200
 
 Start freon testing
-    ${result} =		Execute on		ksm		ozone freon -numOfVolumes 5 -numOfBuckets 5 -numOfKeys 5 -numOfThreads 10
+    ${result} =		Execute on		ozoneManager		ozone freon -numOfVolumes 5 -numOfBuckets 5 -numOfKeys 5 -numOfThreads 10
 	 Wait Until Keyword Succeeds	3min	10sec		Should contain		${result}		Number of Keys added: 125
 	 Should Not Contain		${result}		ERROR

http://git-wip-us.apache.org/repos/asf/hadoop/blob/061b1685/hadoop-ozone/acceptance-test/src/test/acceptance/basic/docker-compose.yaml
----------------------------------------------------------------------
diff --git a/hadoop-ozone/acceptance-test/src/test/acceptance/basic/docker-compose.yaml b/hadoop-ozone/acceptance-test/src/test/acceptance/basic/docker-compose.yaml
index b50f42d..99f2831 100644
--- a/hadoop-ozone/acceptance-test/src/test/acceptance/basic/docker-compose.yaml
+++ b/hadoop-ozone/acceptance-test/src/test/acceptance/basic/docker-compose.yaml
@@ -25,18 +25,18 @@ services:
       command: ["/opt/hadoop/bin/ozone","datanode"]
       env_file:
         - ./docker-config
-   ksm:
+   ozoneManager:
       image: apache/hadoop-runner
-      hostname: ksm
+      hostname: ozoneManager
       volumes:
          - ${OZONEDIR}:/opt/hadoop
       ports:
          - 9874
       environment:
-         ENSURE_KSM_INITIALIZED: /data/metadata/ksm/current/VERSION
+         ENSURE_OM_INITIALIZED: /data/metadata/ozoneManager/current/VERSION
       env_file:
           - ./docker-config
-      command: ["/opt/hadoop/bin/ozone","ksm"]
+      command: ["/opt/hadoop/bin/ozone","om"]
    scm:
       image: apache/hadoop-runner
       volumes:

http://git-wip-us.apache.org/repos/asf/hadoop/blob/061b1685/hadoop-ozone/acceptance-test/src/test/acceptance/basic/docker-config
----------------------------------------------------------------------
diff --git a/hadoop-ozone/acceptance-test/src/test/acceptance/basic/docker-config b/hadoop-ozone/acceptance-test/src/test/acceptance/basic/docker-config
index c3ec2ef..b72085b 100644
--- a/hadoop-ozone/acceptance-test/src/test/acceptance/basic/docker-config
+++ b/hadoop-ozone/acceptance-test/src/test/acceptance/basic/docker-config
@@ -14,8 +14,8 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-OZONE-SITE.XML_ozone.ksm.address=ksm
-OZONE-SITE.XML_ozone.ksm.http-address=ksm:9874
+OZONE-SITE.XML_ozone.om.address=ozoneManager
+OZONE-SITE.XML_ozone.om.http-address=ozoneManager:9874
 OZONE-SITE.XML_ozone.scm.names=scm
 OZONE-SITE.XML_ozone.enabled=True
 OZONE-SITE.XML_ozone.scm.datanode.id=/data/datanode.id

http://git-wip-us.apache.org/repos/asf/hadoop/blob/061b1685/hadoop-ozone/acceptance-test/src/test/acceptance/basic/ozone-shell.robot
----------------------------------------------------------------------
diff --git a/hadoop-ozone/acceptance-test/src/test/acceptance/basic/ozone-shell.robot b/hadoop-ozone/acceptance-test/src/test/acceptance/basic/ozone-shell.robot
index 9521ad6..f4be3e0 100644
--- a/hadoop-ozone/acceptance-test/src/test/acceptance/basic/ozone-shell.robot
+++ b/hadoop-ozone/acceptance-test/src/test/acceptance/basic/ozone-shell.robot
@@ -28,22 +28,22 @@ ${PROJECTDIR}           ${CURDIR}/../../../../../..
 
 *** Test Cases ***
 RestClient without http port
-   Test ozone shell       http://          ksm              restwoport        True
+   Test ozone shell       http://          ozoneManager          restwoport        True
 
 RestClient with http port
-   Test ozone shell       http://          ksm:9874         restwport         True
+   Test ozone shell       http://          ozoneManager:9874     restwport         True
 
 RestClient without host name
-   Test ozone shell       http://          ${EMPTY}         restwohost        True
+   Test ozone shell       http://          ${EMPTY}              restwohost        True
 
 RpcClient with port
-   Test ozone shell       o3://            ksm:9862         rpcwoport         False
+   Test ozone shell       o3://            ozoneManager:9862     rpcwoport         False
 
 RpcClient without host
-   Test ozone shell       o3://            ${EMPTY}         rpcwport          False
+   Test ozone shell       o3://            ${EMPTY}              rpcwport          False
 
 RpcClient without scheme
-   Test ozone shell       ${EMPTY}         ${EMPTY}         rpcwoscheme       False
+   Test ozone shell       ${EMPTY}         ${EMPTY}              rpcwoscheme       False
 
 
 *** Keywords ***
@@ -52,7 +52,7 @@ Test ozone shell
     ${result} =     Execute on          datanode        ozone oz -createVolume ${protocol}${server}/${volume} -user bilbo -quota 100TB -root
                     Should not contain  ${result}       Failed
                     Should contain      ${result}       Creating Volume: ${volume}
-    ${result} =     Execute on          datanode        ozone oz -listVolume o3://ksm -user bilbo | grep -Ev 'Removed|WARN|DEBUG|ERROR|INFO|TRACE' | jq -r '.[] | select(.volumeName=="${volume}")'
+    ${result} =     Execute on          datanode        ozone oz -listVolume o3://ozoneManager -user bilbo | grep -Ev 'Removed|WARN|DEBUG|ERROR|INFO|TRACE' | jq -r '.[] | select(.volumeName=="${volume}")'
                     Should contain      ${result}       createdOn
                     Execute on          datanode        ozone oz -updateVolume ${protocol}${server}/${volume} -user bill -quota 10TB
     ${result} =     Execute on          datanode        ozone oz -infoVolume ${protocol}${server}/${volume} | grep -Ev 'Removed|WARN|DEBUG|ERROR|INFO|TRACE' | jq -r '. | select(.volumeName=="${volume}") | .owner | .name'
@@ -66,7 +66,7 @@ Test ozone shell
                     Should Be Equal     ${result}       GROUP
     ${result} =     Execute on          datanode        ozone oz -updateBucket ${protocol}${server}/${volume}/bb1 -removeAcl group:samwise:r | grep -Ev 'Removed|WARN|DEBUG|ERROR|INFO|TRACE' | jq -r '. | select(.bucketName=="bb1") | .acls | .[] | select(.name=="frodo") | .type'
                     Should Be Equal     ${result}       USER
-    ${result} =     Execute on          datanode        ozone oz -listBucket o3://ksm/${volume}/ | grep -Ev 'Removed|WARN|DEBUG|ERROR|INFO|TRACE' | jq -r '.[] | select(.bucketName=="bb1") | .volumeName'
+    ${result} =     Execute on          datanode        ozone oz -listBucket o3://ozoneManager/${volume}/ | grep -Ev 'Removed|WARN|DEBUG|ERROR|INFO|TRACE' | jq -r '.[] | select(.bucketName=="bb1") | .volumeName'
                     Should Be Equal     ${result}       ${volume}
                     Run Keyword and Return If           ${withkeytest}        Test key handling       ${protocol}       ${server}       ${volume}
                     Execute on          datanode        ozone oz -deleteBucket ${protocol}${server}/${volume}/bb1
@@ -80,6 +80,6 @@ Test key handling
                     Execute on          datanode        ls -l NOTICE.txt.1
     ${result} =     Execute on          datanode        ozone oz -infoKey ${protocol}${server}/${volume}/bb1/key1 | grep -Ev 'Removed|WARN|DEBUG|ERROR|INFO|TRACE' | jq -r '. | select(.keyName=="key1")'
                     Should contain      ${result}       createdOn
-    ${result} =     Execute on          datanode        ozone oz -listKey o3://ksm/${volume}/bb1 | grep -Ev 'Removed|WARN|DEBUG|ERROR|INFO|TRACE' | jq -r '.[] | select(.keyName=="key1") | .keyName'
+    ${result} =     Execute on          datanode        ozone oz -listKey o3://ozoneManager/${volume}/bb1 | grep -Ev 'Removed|WARN|DEBUG|ERROR|INFO|TRACE' | jq -r '.[] | select(.keyName=="key1") | .keyName'
                     Should Be Equal     ${result}       key1
                     Execute on          datanode        ozone oz -deleteKey ${protocol}${server}/${volume}/bb1/key1 -v

http://git-wip-us.apache.org/repos/asf/hadoop/blob/061b1685/hadoop-ozone/acceptance-test/src/test/acceptance/commonlib.robot
----------------------------------------------------------------------
diff --git a/hadoop-ozone/acceptance-test/src/test/acceptance/commonlib.robot b/hadoop-ozone/acceptance-test/src/test/acceptance/commonlib.robot
index a5ea30a..9235cd9 100644
--- a/hadoop-ozone/acceptance-test/src/test/acceptance/commonlib.robot
+++ b/hadoop-ozone/acceptance-test/src/test/acceptance/commonlib.robot
@@ -21,12 +21,12 @@ Startup Ozone cluster with size
                                              Run                         echo "Starting new docker-compose environment" >> docker-compose.log
     ${rc}        ${output} =                 Run docker compose          up -d
     Should Be Equal As Integers             ${rc}                       0
-    Wait Until Keyword Succeeds             1min    5sec    Is Daemon started   ksm     HTTP server of KSM is listening
+    Wait Until Keyword Succeeds             1min    5sec    Is Daemon started   ozoneManager     HTTP server of OZONEMANAGER is listening
     Daemons are running without error
     Scale datanodes up                      5
 
 Daemons are running without error
-    Is daemon running without error           ksm
+    Is daemon running without error           ozoneManager
     Is daemon running without error           scm
     Is daemon running without error           datanode
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/061b1685/hadoop-ozone/acceptance-test/src/test/acceptance/ozonefs/docker-compose.yaml
----------------------------------------------------------------------
diff --git a/hadoop-ozone/acceptance-test/src/test/acceptance/ozonefs/docker-compose.yaml b/hadoop-ozone/acceptance-test/src/test/acceptance/ozonefs/docker-compose.yaml
index 12022df..6b7b7bd 100644
--- a/hadoop-ozone/acceptance-test/src/test/acceptance/ozonefs/docker-compose.yaml
+++ b/hadoop-ozone/acceptance-test/src/test/acceptance/ozonefs/docker-compose.yaml
@@ -25,18 +25,18 @@ services:
       command: ["/opt/hadoop/bin/ozone","datanode"]
       env_file:
         - ./docker-config
-   ksm:
+   ozoneManager:
       image: apache/hadoop-runner
-      hostname: ksm
+      hostname: ozoneManager
       volumes:
          - ${OZONEDIR}:/opt/hadoop
       ports:
          - 9874
       environment:
-         ENSURE_KSM_INITIALIZED: /data/metadata/ksm/current/VERSION
+         ENSURE_OM_INITIALIZED: /data/metadata/ozoneManager/current/VERSION
       env_file:
           - ./docker-config
-      command: ["/opt/hadoop/bin/ozone","ksm"]
+      command: ["/opt/hadoop/bin/ozone","om"]
    scm:
       image: apache/hadoop-runner
       volumes:

http://git-wip-us.apache.org/repos/asf/hadoop/blob/061b1685/hadoop-ozone/acceptance-test/src/test/acceptance/ozonefs/docker-config
----------------------------------------------------------------------
diff --git a/hadoop-ozone/acceptance-test/src/test/acceptance/ozonefs/docker-config b/hadoop-ozone/acceptance-test/src/test/acceptance/ozonefs/docker-config
index e06d434..b0129bc 100644
--- a/hadoop-ozone/acceptance-test/src/test/acceptance/ozonefs/docker-config
+++ b/hadoop-ozone/acceptance-test/src/test/acceptance/ozonefs/docker-config
@@ -15,8 +15,8 @@
 # limitations under the License.
 
 CORE-SITE.XML_fs.o3.impl=org.apache.hadoop.fs.ozone.OzoneFileSystem
-OZONE-SITE.XML_ozone.ksm.address=ksm
-OZONE-SITE.XML_ozone.ksm.http-address=ksm:9874
+OZONE-SITE.XML_ozone.om.address=ozoneManager
+OZONE-SITE.XML_ozone.om.http-address=ozoneManager:9874
 OZONE-SITE.XML_ozone.scm.names=scm
 OZONE-SITE.XML_ozone.enabled=True
 OZONE-SITE.XML_ozone.scm.datanode.id=/data/datanode.id

http://git-wip-us.apache.org/repos/asf/hadoop/blob/061b1685/hadoop-ozone/acceptance-test/src/test/acceptance/ozonefs/ozonefs.robot
----------------------------------------------------------------------
diff --git a/hadoop-ozone/acceptance-test/src/test/acceptance/ozonefs/ozonefs.robot b/hadoop-ozone/acceptance-test/src/test/acceptance/ozonefs/ozonefs.robot
index 9e8a5d2..ea473c0 100644
--- a/hadoop-ozone/acceptance-test/src/test/acceptance/ozonefs/ozonefs.robot
+++ b/hadoop-ozone/acceptance-test/src/test/acceptance/ozonefs/ozonefs.robot
@@ -27,13 +27,13 @@ ${PROJECTDIR}           ${CURDIR}/../../../../../..
 
 *** Test Cases ***
 Create volume and bucket
-    Execute on          datanode        ozone oz -createVolume http://ksm/fstest -user bilbo -quota 100TB -root
-    Execute on          datanode        ozone oz -createBucket http://ksm/fstest/bucket1
+    Execute on          datanode        ozone oz -createVolume http://ozoneManager/fstest -user bilbo -quota 100TB -root
+    Execute on          datanode        ozone oz -createBucket http://ozoneManager/fstest/bucket1
 
 Check volume from ozonefs
     ${result} =         Execute on          hadooplast        hdfs dfs -ls o3://bucket1.fstest/
 
 Create directory from ozonefs
                         Execute on          hadooplast        hdfs dfs -mkdir -p o3://bucket1.fstest/testdir/deep
-    ${result} =         Execute on          ksm               ozone oz -listKey o3://ksm/fstest/bucket1 | grep -v WARN | jq -r '.[].keyName'
+    ${result} =         Execute on          ozoneManager      ozone oz -listKey o3://ozoneManager/fstest/bucket1 | grep -v WARN | jq -r '.[].keyName'
                                             Should contain    ${result}         testdir/deep

http://git-wip-us.apache.org/repos/asf/hadoop/blob/061b1685/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/BucketArgs.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/BucketArgs.java b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/BucketArgs.java
index 39b7bb8..0da52dc 100644
--- a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/BucketArgs.java
+++ b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/BucketArgs.java
@@ -81,7 +81,7 @@ public final class BucketArgs {
   }
 
   /**
-   * Returns new builder class that builds a KsmBucketInfo.
+   * Returns new builder class that builds a OmBucketInfo.
    *
    * @return Builder
    */
@@ -90,7 +90,7 @@ public final class BucketArgs {
   }
 
   /**
-   * Builder for KsmBucketInfo.
+   * Builder for OmBucketInfo.
    */
   public static class Builder {
     private Boolean versioning;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/061b1685/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/OzoneClientFactory.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/OzoneClientFactory.java b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/OzoneClientFactory.java
index 3085b0d..de0d166 100644
--- a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/OzoneClientFactory.java
+++ b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/OzoneClientFactory.java
@@ -21,7 +21,7 @@ package org.apache.hadoop.ozone.client;
 import com.google.common.base.Preconditions;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hdds.conf.OzoneConfiguration;
-import org.apache.hadoop.ozone.KsmUtils;
+import org.apache.hadoop.ozone.OmUtils;
 import org.apache.hadoop.ozone.client.protocol.ClientProtocol;
 import org.apache.hadoop.ozone.client.rest.RestClient;
 import org.apache.hadoop.ozone.client.rpc.RpcClient;
@@ -34,11 +34,9 @@ import java.lang.reflect.Constructor;
 import java.lang.reflect.InvocationTargetException;
 import java.lang.reflect.Proxy;
 
-import static org.apache.hadoop.ozone.OzoneConfigKeys
-    .OZONE_CLIENT_PROTOCOL;
-import static org.apache.hadoop.ozone.ksm.KSMConfigKeys
-    .OZONE_KSM_HTTP_ADDRESS_KEY;
-import static org.apache.hadoop.ozone.ksm.KSMConfigKeys.OZONE_KSM_ADDRESS_KEY;
+import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_CLIENT_PROTOCOL;
+import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_ADDRESS_KEY;
+import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_HTTP_ADDRESS_KEY;
 
 /**
  * Factory class to create different types of OzoneClients.
@@ -97,46 +95,46 @@ public final class OzoneClientFactory {
   /**
    * Returns an OzoneClient which will use RPC protocol.
    *
-   * @param ksmHost
-   *        hostname of KeySpaceManager to connect.
+   * @param omHost
+   *        hostname of OzoneManager to connect.
    *
    * @return OzoneClient
    *
    * @throws IOException
    */
-  public static OzoneClient getRpcClient(String ksmHost)
+  public static OzoneClient getRpcClient(String omHost)
       throws IOException {
     Configuration config = new OzoneConfiguration();
-    int port = KsmUtils.getKsmRpcPort(config);
-    return getRpcClient(ksmHost, port, config);
+    int port = OmUtils.getOmRpcPort(config);
+    return getRpcClient(omHost, port, config);
   }
 
   /**
    * Returns an OzoneClient which will use RPC protocol.
    *
-   * @param ksmHost
-   *        hostname of KeySpaceManager to connect.
+   * @param omHost
+   *        hostname of OzoneManager to connect.
    *
-   * @param ksmRpcPort
-   *        RPC port of KeySpaceManager.
+   * @param omRpcPort
+   *        RPC port of OzoneManager.
    *
    * @return OzoneClient
    *
    * @throws IOException
    */
-  public static OzoneClient getRpcClient(String ksmHost, Integer ksmRpcPort)
+  public static OzoneClient getRpcClient(String omHost, Integer omRpcPort)
       throws IOException {
-    return getRpcClient(ksmHost, ksmRpcPort, new OzoneConfiguration());
+    return getRpcClient(omHost, omRpcPort, new OzoneConfiguration());
   }
 
   /**
    * Returns an OzoneClient which will use RPC protocol.
    *
-   * @param ksmHost
-   *        hostname of KeySpaceManager to connect.
+   * @param omHost
+   *        hostname of OzoneManager to connect.
    *
-   * @param ksmRpcPort
-   *        RPC port of KeySpaceManager.
+   * @param omRpcPort
+   *        RPC port of OzoneManager.
    *
    * @param config
    *        Configuration to be used for OzoneClient creation
@@ -145,13 +143,13 @@ public final class OzoneClientFactory {
    *
    * @throws IOException
    */
-  public static OzoneClient getRpcClient(String ksmHost, Integer ksmRpcPort,
+  public static OzoneClient getRpcClient(String omHost, Integer omRpcPort,
                                          Configuration config)
       throws IOException {
-    Preconditions.checkNotNull(ksmHost);
-    Preconditions.checkNotNull(ksmRpcPort);
+    Preconditions.checkNotNull(omHost);
+    Preconditions.checkNotNull(omRpcPort);
     Preconditions.checkNotNull(config);
-    config.set(OZONE_KSM_ADDRESS_KEY, ksmHost + ":" + ksmRpcPort);
+    config.set(OZONE_OM_ADDRESS_KEY, omHost + ":" + omRpcPort);
     return getRpcClient(config);
   }
 
@@ -175,46 +173,46 @@ public final class OzoneClientFactory {
   /**
    * Returns an OzoneClient which will use REST protocol.
    *
-   * @param ksmHost
-   *        hostname of KeySpaceManager to connect.
+   * @param omHost
+   *        hostname of OzoneManager to connect.
    *
    * @return OzoneClient
    *
    * @throws IOException
    */
-  public static OzoneClient getRestClient(String ksmHost)
+  public static OzoneClient getRestClient(String omHost)
       throws IOException {
     Configuration config = new OzoneConfiguration();
-    int port = KsmUtils.getKsmRestPort(config);
-    return getRestClient(ksmHost, port, config);
+    int port = OmUtils.getOmRestPort(config);
+    return getRestClient(omHost, port, config);
   }
 
   /**
    * Returns an OzoneClient which will use REST protocol.
    *
-   * @param ksmHost
-   *        hostname of KeySpaceManager to connect.
+   * @param omHost
+   *        hostname of OzoneManager to connect.
    *
-   * @param ksmHttpPort
-   *        HTTP port of KeySpaceManager.
+   * @param omHttpPort
+   *        HTTP port of OzoneManager.
    *
    * @return OzoneClient
    *
    * @throws IOException
    */
-  public static OzoneClient getRestClient(String ksmHost, Integer ksmHttpPort)
+  public static OzoneClient getRestClient(String omHost, Integer omHttpPort)
       throws IOException {
-    return getRestClient(ksmHost, ksmHttpPort, new OzoneConfiguration());
+    return getRestClient(omHost, omHttpPort, new OzoneConfiguration());
   }
 
   /**
    * Returns an OzoneClient which will use REST protocol.
    *
-   * @param ksmHost
-   *        hostname of KeySpaceManager to connect.
+   * @param omHost
+   *        hostname of OzoneManager to connect.
    *
-   * @param ksmHttpPort
-   *        HTTP port of KeySpaceManager.
+   * @param omHttpPort
+   *        HTTP port of OzoneManager.
    *
    * @param config
    *        Configuration to be used for OzoneClient creation
@@ -223,13 +221,13 @@ public final class OzoneClientFactory {
    *
    * @throws IOException
    */
-  public static OzoneClient getRestClient(String ksmHost, Integer ksmHttpPort,
+  public static OzoneClient getRestClient(String omHost, Integer omHttpPort,
                                           Configuration config)
       throws IOException {
-    Preconditions.checkNotNull(ksmHost);
-    Preconditions.checkNotNull(ksmHttpPort);
+    Preconditions.checkNotNull(omHost);
+    Preconditions.checkNotNull(omHttpPort);
     Preconditions.checkNotNull(config);
-    config.set(OZONE_KSM_HTTP_ADDRESS_KEY, ksmHost + ":" +  ksmHttpPort);
+    config.set(OZONE_OM_HTTP_ADDRESS_KEY, omHost + ":" + omHttpPort);
     return getRestClient(config);
   }
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/061b1685/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/OzoneKey.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/OzoneKey.java b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/OzoneKey.java
index 0c723dd..7c93146 100644
--- a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/OzoneKey.java
+++ b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/OzoneKey.java
@@ -49,7 +49,7 @@ public class OzoneKey {
   private long modificationTime;
 
   /**
-   * Constructs OzoneKey from KsmKeyInfo.
+   * Constructs OzoneKey from OmKeyInfo.
    *
    */
   public OzoneKey(String volumeName, String bucketName,

http://git-wip-us.apache.org/repos/asf/hadoop/blob/061b1685/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/VolumeArgs.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/VolumeArgs.java b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/VolumeArgs.java
index f1aa031..ae1cfcc 100644
--- a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/VolumeArgs.java
+++ b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/VolumeArgs.java
@@ -77,7 +77,7 @@ public final class VolumeArgs {
     return acls;
   }
   /**
-   * Returns new builder class that builds a KsmVolumeArgs.
+   * Returns new builder class that builds a OmVolumeArgs.
    *
    * @return Builder
    */
@@ -86,7 +86,7 @@ public final class VolumeArgs {
   }
 
   /**
-   * Builder for KsmVolumeArgs.
+   * Builder for OmVolumeArgs.
    */
   public static class Builder {
     private String adminName;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/061b1685/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/ChunkGroupInputStream.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/ChunkGroupInputStream.java b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/ChunkGroupInputStream.java
index edd85aa..b3a566e 100644
--- a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/ChunkGroupInputStream.java
+++ b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/ChunkGroupInputStream.java
@@ -23,8 +23,8 @@ import org.apache.hadoop.fs.Seekable;
 import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos;
 import org.apache.hadoop.hdds.client.BlockID;
 import org.apache.hadoop.hdds.scm.container.common.helpers.ContainerWithPipeline;
-import org.apache.hadoop.ozone.ksm.helpers.KsmKeyInfo;
-import org.apache.hadoop.ozone.ksm.helpers.KsmKeyLocationInfo;
+import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;
+import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfo;
 import org.apache.hadoop.hdds.scm.XceiverClientManager;
 import org.apache.hadoop.hdds.scm.XceiverClientSpi;
 import org.apache.hadoop.hdds.scm.protocolPB.StorageContainerLocationProtocolClientSideTranslatorPB;
@@ -255,28 +255,29 @@ public class ChunkGroupInputStream extends InputStream implements Seekable {
     }
   }
 
-  public static LengthInputStream getFromKsmKeyInfo(KsmKeyInfo keyInfo,
+  public static LengthInputStream getFromOmKeyInfo(
+      OmKeyInfo keyInfo,
       XceiverClientManager xceiverClientManager,
       StorageContainerLocationProtocolClientSideTranslatorPB
-          storageContainerLocationClient, String requestId)
-      throws IOException {
+          storageContainerLocationClient,
+      String requestId) throws IOException {
     long length = 0;
     long containerKey;
     ChunkGroupInputStream groupInputStream = new ChunkGroupInputStream();
     groupInputStream.key = keyInfo.getKeyName();
-    List<KsmKeyLocationInfo> keyLocationInfos =
+    List<OmKeyLocationInfo> keyLocationInfos =
         keyInfo.getLatestVersionLocations().getBlocksLatestVersionOnly();
     groupInputStream.streamOffset = new long[keyLocationInfos.size()];
     for (int i = 0; i < keyLocationInfos.size(); i++) {
-      KsmKeyLocationInfo ksmKeyLocationInfo = keyLocationInfos.get(i);
-      BlockID blockID = ksmKeyLocationInfo.getBlockID();
+      OmKeyLocationInfo omKeyLocationInfo = keyLocationInfos.get(i);
+      BlockID blockID = omKeyLocationInfo.getBlockID();
       long containerID = blockID.getContainerID();
       ContainerWithPipeline containerWithPipeline =
           storageContainerLocationClient.getContainerWithPipeline(containerID);
       XceiverClientSpi xceiverClient = xceiverClientManager
           .acquireClient(containerWithPipeline.getPipeline(), containerID);
       boolean success = false;
-      containerKey = ksmKeyLocationInfo.getLocalID();
+      containerKey = omKeyLocationInfo.getLocalID();
       try {
         LOG.debug("get key accessing {} {}",
             containerID, containerKey);
@@ -292,11 +293,10 @@ public class ChunkGroupInputStream extends InputStream implements Seekable {
         }
         success = true;
         ChunkInputStream inputStream = new ChunkInputStream(
-            ksmKeyLocationInfo.getBlockID(), xceiverClientManager,
-            xceiverClient,
+            omKeyLocationInfo.getBlockID(), xceiverClientManager, xceiverClient,
             chunks, requestId);
         groupInputStream.addStream(inputStream,
-            ksmKeyLocationInfo.getLength());
+            omKeyLocationInfo.getLength());
       } finally {
         if (!success) {
           xceiverClientManager.releaseClient(xceiverClient);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/061b1685/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/ChunkGroupOutputStream.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/ChunkGroupOutputStream.java b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/ChunkGroupOutputStream.java
index d1a3b46..9443317 100644
--- a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/ChunkGroupOutputStream.java
+++ b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/ChunkGroupOutputStream.java
@@ -24,15 +24,15 @@ import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.Result;
 import org.apache.hadoop.hdds.scm.container.common.helpers.ContainerInfo;
 import org.apache.hadoop.hdds.client.BlockID;
 import org.apache.hadoop.hdds.scm.container.common.helpers.ContainerWithPipeline;
-import org.apache.hadoop.ozone.ksm.helpers.KsmKeyLocationInfoGroup;
+import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfoGroup;
 import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationType;
 import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor;
 import org.apache.hadoop.hdds.protocol.proto.StorageContainerLocationProtocolProtos.ObjectStageChangeRequestProto;
-import org.apache.hadoop.ozone.ksm.helpers.KsmKeyArgs;
-import org.apache.hadoop.ozone.ksm.helpers.KsmKeyInfo;
-import org.apache.hadoop.ozone.ksm.helpers.KsmKeyLocationInfo;
-import org.apache.hadoop.ozone.ksm.helpers.OpenKeySession;
-import org.apache.hadoop.ozone.ksm.protocolPB.KeySpaceManagerProtocolClientSideTranslatorPB;
+import org.apache.hadoop.ozone.om.helpers.OmKeyArgs;
+import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;
+import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfo;
+import org.apache.hadoop.ozone.om.helpers.OpenKeySession;
+import org.apache.hadoop.ozone.om.protocolPB.OzoneManagerProtocolClientSideTranslatorPB;
 import org.apache.hadoop.hdds.scm.XceiverClientManager;
 import org.apache.hadoop.hdds.scm.XceiverClientSpi;
 import org.apache.hadoop.hdds.scm.container.common.helpers
@@ -67,10 +67,10 @@ public class ChunkGroupOutputStream extends OutputStream {
   private final ArrayList<ChunkOutputStreamEntry> streamEntries;
   private int currentStreamIndex;
   private long byteOffset;
-  private final KeySpaceManagerProtocolClientSideTranslatorPB ksmClient;
+  private final OzoneManagerProtocolClientSideTranslatorPB omClient;
   private final
       StorageContainerLocationProtocolClientSideTranslatorPB scmClient;
-  private final KsmKeyArgs keyArgs;
+  private final OmKeyArgs keyArgs;
   private final int openID;
   private final XceiverClientManager xceiverClientManager;
   private final int chunkSize;
@@ -83,7 +83,7 @@ public class ChunkGroupOutputStream extends OutputStream {
   @VisibleForTesting
   public ChunkGroupOutputStream() {
     streamEntries = new ArrayList<>();
-    ksmClient = null;
+    omClient = null;
     scmClient = null;
     keyArgs = null;
     openID = -1;
@@ -113,16 +113,16 @@ public class ChunkGroupOutputStream extends OutputStream {
   public ChunkGroupOutputStream(
       OpenKeySession handler, XceiverClientManager xceiverClientManager,
       StorageContainerLocationProtocolClientSideTranslatorPB scmClient,
-      KeySpaceManagerProtocolClientSideTranslatorPB ksmClient,
+      OzoneManagerProtocolClientSideTranslatorPB omClient,
       int chunkSize, String requestId, ReplicationFactor factor,
       ReplicationType type) throws IOException {
     this.streamEntries = new ArrayList<>();
     this.currentStreamIndex = 0;
     this.byteOffset = 0;
-    this.ksmClient = ksmClient;
+    this.omClient = omClient;
     this.scmClient = scmClient;
-    KsmKeyInfo info = handler.getKeyInfo();
-    this.keyArgs = new KsmKeyArgs.Builder()
+    OmKeyInfo info = handler.getKeyInfo();
+    this.keyArgs = new OmKeyArgs.Builder()
         .setVolumeName(info.getVolumeName())
         .setBucketName(info.getBucketName())
         .setKeyName(info.getKeyName())
@@ -150,19 +150,19 @@ public class ChunkGroupOutputStream extends OutputStream {
    * @param openVersion the version corresponding to the pre-allocation.
    * @throws IOException
    */
-  public void addPreallocateBlocks(KsmKeyLocationInfoGroup version,
+  public void addPreallocateBlocks(OmKeyLocationInfoGroup version,
       long openVersion) throws IOException {
     // server may return any number of blocks, (0 to any)
     // only the blocks allocated in this open session (block createVersion
     // equals to open session version)
-    for (KsmKeyLocationInfo subKeyInfo : version.getLocationList()) {
+    for (OmKeyLocationInfo subKeyInfo : version.getLocationList()) {
       if (subKeyInfo.getCreateVersion() == openVersion) {
         checkKeyLocationInfo(subKeyInfo);
       }
     }
   }
 
-  private void checkKeyLocationInfo(KsmKeyLocationInfo subKeyInfo)
+  private void checkKeyLocationInfo(OmKeyLocationInfo subKeyInfo)
       throws IOException {
     ContainerWithPipeline containerWithPipeline = scmClient
         .getContainerWithPipeline(subKeyInfo.getContainerID());
@@ -210,7 +210,7 @@ public class ChunkGroupOutputStream extends OutputStream {
     checkNotClosed();
 
     if (streamEntries.size() <= currentStreamIndex) {
-      Preconditions.checkNotNull(ksmClient);
+      Preconditions.checkNotNull(omClient);
       // allocate a new block, if a exception happens, log an error and
       // throw exception to the caller directly, and the write fails.
       try {
@@ -258,7 +258,7 @@ public class ChunkGroupOutputStream extends OutputStream {
     int succeededAllocates = 0;
     while (len > 0) {
       if (streamEntries.size() <= currentStreamIndex) {
-        Preconditions.checkNotNull(ksmClient);
+        Preconditions.checkNotNull(omClient);
         // allocate a new block, if a exception happens, log an error and
         // throw exception to the caller directly, and the write fails.
         try {
@@ -286,7 +286,7 @@ public class ChunkGroupOutputStream extends OutputStream {
   }
 
   /**
-   * Contact KSM to get a new block. Set the new block with the index (e.g.
+   * Contact OM to get a new block. Set the new block with the index (e.g.
    * first block has index = 0, second has index = 1 etc.)
    *
    * The returned block is made to new ChunkOutputStreamEntry to write.
@@ -295,7 +295,7 @@ public class ChunkGroupOutputStream extends OutputStream {
    * @throws IOException
    */
   private void allocateNewBlock(int index) throws IOException {
-    KsmKeyLocationInfo subKeyInfo = ksmClient.allocateBlock(keyArgs, openID);
+    OmKeyLocationInfo subKeyInfo = omClient.allocateBlock(keyArgs, openID);
     checkKeyLocationInfo(subKeyInfo);
   }
 
@@ -311,7 +311,7 @@ public class ChunkGroupOutputStream extends OutputStream {
   }
 
   /**
-   * Commit the key to KSM, this will add the blocks as the new key blocks.
+   * Commit the key to OM, this will add the blocks as the new key blocks.
    *
    * @throws IOException
    */
@@ -329,7 +329,7 @@ public class ChunkGroupOutputStream extends OutputStream {
     if (keyArgs != null) {
       // in test, this could be null
       keyArgs.setDataSize(byteOffset);
-      ksmClient.commitKey(keyArgs, openID);
+      omClient.commitKey(keyArgs, openID);
     } else {
       LOG.warn("Closing ChunkGroupOutputStream, but key args is null");
     }
@@ -342,7 +342,7 @@ public class ChunkGroupOutputStream extends OutputStream {
     private OpenKeySession openHandler;
     private XceiverClientManager xceiverManager;
     private StorageContainerLocationProtocolClientSideTranslatorPB scmClient;
-    private KeySpaceManagerProtocolClientSideTranslatorPB ksmClient;
+    private OzoneManagerProtocolClientSideTranslatorPB omClient;
     private int chunkSize;
     private String requestID;
     private ReplicationType type;
@@ -364,9 +364,9 @@ public class ChunkGroupOutputStream extends OutputStream {
       return this;
     }
 
-    public Builder setKsmClient(
-        KeySpaceManagerProtocolClientSideTranslatorPB client) {
-      this.ksmClient = client;
+    public Builder setOmClient(
+        OzoneManagerProtocolClientSideTranslatorPB client) {
+      this.omClient = client;
       return this;
     }
 
@@ -392,7 +392,7 @@ public class ChunkGroupOutputStream extends OutputStream {
 
     public ChunkGroupOutputStream build() throws IOException {
       return new ChunkGroupOutputStream(openHandler, xceiverManager, scmClient,
-          ksmClient, chunkSize, requestID, factor, type);
+          omClient, chunkSize, requestID, factor, type);
     }
   }
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/061b1685/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/rest/DefaultRestServerSelector.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/rest/DefaultRestServerSelector.java b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/rest/DefaultRestServerSelector.java
index 93b3417..abdc2fb 100644
--- a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/rest/DefaultRestServerSelector.java
+++ b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/rest/DefaultRestServerSelector.java
@@ -18,7 +18,7 @@
 
 package org.apache.hadoop.ozone.client.rest;
 
-import org.apache.hadoop.ozone.ksm.helpers.ServiceInfo;
+import org.apache.hadoop.ozone.om.helpers.ServiceInfo;
 
 import java.util.List;
 import java.util.Random;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/061b1685/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/rest/RestClient.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/rest/RestClient.java b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/rest/RestClient.java
index 6e3f617..78fbe8d 100644
--- a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/rest/RestClient.java
+++ b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/rest/RestClient.java
@@ -45,10 +45,9 @@ import org.apache.hadoop.ozone.client.rest.headers.Header;
 import org.apache.hadoop.ozone.client.rest.response.BucketInfo;
 import org.apache.hadoop.ozone.client.rest.response.KeyInfo;
 import org.apache.hadoop.ozone.client.rest.response.VolumeInfo;
-import org.apache.hadoop.ozone.ksm.KSMConfigKeys;
-import org.apache.hadoop.ozone.ksm.helpers.ServiceInfo;
-import org.apache.hadoop.ozone.protocol.proto
-    .KeySpaceManagerProtocolProtos.ServicePort;
+import org.apache.hadoop.ozone.om.OMConfigKeys;
+import org.apache.hadoop.ozone.om.helpers.ServiceInfo;
+import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.ServicePort;
 import org.apache.hadoop.ozone.web.response.ListBuckets;
 import org.apache.hadoop.ozone.web.response.ListKeys;
 import org.apache.hadoop.ozone.web.response.ListVolumes;
@@ -152,8 +151,8 @@ public class RestClient implements ClientProtocol {
                   .build())
           .build();
       this.ugi = UserGroupInformation.getCurrentUser();
-      this.userRights = conf.getEnum(KSMConfigKeys.OZONE_KSM_USER_RIGHTS,
-          KSMConfigKeys.OZONE_KSM_USER_RIGHTS_DEFAULT);
+      this.userRights = conf.getEnum(OMConfigKeys.OZONE_OM_USER_RIGHTS,
+          OMConfigKeys.OZONE_OM_USER_RIGHTS_DEFAULT);
 
       // TODO: Add new configuration parameter to configure RestServerSelector.
       RestServerSelector defaultSelector = new DefaultRestServerSelector();
@@ -171,11 +170,11 @@ public class RestClient implements ClientProtocol {
 
   private InetSocketAddress getOzoneRestServerAddress(
       RestServerSelector selector) throws IOException {
-    String httpAddress = conf.get(KSMConfigKeys.OZONE_KSM_HTTP_ADDRESS_KEY);
+    String httpAddress = conf.get(OMConfigKeys.OZONE_OM_HTTP_ADDRESS_KEY);
 
     if (httpAddress == null) {
       throw new IllegalArgumentException(
-          KSMConfigKeys.OZONE_KSM_HTTP_ADDRESS_KEY + " must be defined. See" +
+          OMConfigKeys.OZONE_OM_HTTP_ADDRESS_KEY + " must be defined. See" +
               " https://wiki.apache.org/hadoop/Ozone#Configuration for" +
               " details on configuring Ozone.");
     }


---------------------------------------------------------------------
To unsubscribe, e-mail: common-commits-unsubscribe@hadoop.apache.org
For additional commands, e-mail: common-commits-help@hadoop.apache.org


[43/50] [abbrv] hadoop git commit: YARN-8302. ATS v2 should handle HBase connection issue properly. Contributed by Billie Rinaldi.

Posted by vi...@apache.org.
YARN-8302. ATS v2 should handle HBase connection issue properly. Contributed by Billie Rinaldi.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/ba683204
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/ba683204
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/ba683204

Branch: refs/heads/HDFS-12090
Commit: ba683204498c97654be4727ab9e128c433a45498
Parents: 0247cb6
Author: Rohith Sharma K S <ro...@apache.org>
Authored: Fri Jul 6 15:19:01 2018 -0700
Committer: Rohith Sharma K S <ro...@apache.org>
Committed: Fri Jul 6 15:19:01 2018 -0700

----------------------------------------------------------------------
 .../hadoop/yarn/conf/YarnConfiguration.java     |   7 +
 .../storage/TestTimelineReaderHBaseDown.java    | 220 +++++++++++++++++++
 .../storage/HBaseTimelineReaderImpl.java        |  93 ++++++++
 3 files changed, 320 insertions(+)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/ba683204/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
index 5842d64..9156c2d 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
@@ -3659,6 +3659,13 @@ public class YarnConfiguration extends Configuration {
       DEFAULT_TIMELINE_SERVICE_READER_WEBAPP_HTTPS_ADDRESS =
       DEFAULT_TIMELINE_SERVICE_WEBAPP_HTTPS_ADDRESS;
 
+  @Private
+  public static final String
+      TIMELINE_SERVICE_READER_STORAGE_MONITOR_INTERVAL_MS =
+      TIMELINE_SERVICE_READER_PREFIX + "storage-monitor.interval-ms";
+  public static final long
+      DEFAULT_TIMELINE_SERVICE_STORAGE_MONITOR_INTERVAL_MS = 60 * 1000;
+
   /**
    * Marked collector properties as Private since it run as auxillary service.
    */

http://git-wip-us.apache.org/repos/asf/hadoop/blob/ba683204/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase-tests/src/test/java/org/apache/hadoop/yarn/server/timelineservice/storage/TestTimelineReaderHBaseDown.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase-tests/src/test/java/org/apache/hadoop/yarn/server/timelineservice/storage/TestTimelineReaderHBaseDown.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase-tests/src/test/java/org/apache/hadoop/yarn/server/timelineservice/storage/TestTimelineReaderHBaseDown.java
new file mode 100644
index 0000000..786f529
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase-tests/src/test/java/org/apache/hadoop/yarn/server/timelineservice/storage/TestTimelineReaderHBaseDown.java
@@ -0,0 +1,220 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.yarn.server.timelineservice.storage;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hbase.HBaseTestingUtility;
+import org.apache.hadoop.service.Service;
+import org.apache.hadoop.test.GenericTestUtils;
+import org.apache.hadoop.yarn.api.records.timelineservice.TimelineEntity;
+import org.apache.hadoop.yarn.api.records.timelineservice.TimelineEntityType;
+import org.apache.hadoop.yarn.conf.YarnConfiguration;
+import org.apache.hadoop.yarn.server.timelineservice.reader.TimelineReaderContext;
+import org.apache.hadoop.yarn.server.timelineservice.reader.TimelineReaderServer;
+import org.junit.Assert;
+import org.junit.Test;
+
+import java.io.IOException;
+import java.util.Set;
+import java.util.concurrent.TimeoutException;
+
+import static org.apache.hadoop.yarn.conf.YarnConfiguration.TIMELINE_SERVICE_READER_STORAGE_MONITOR_INTERVAL_MS;
+import static org.apache.hadoop.yarn.server.timelineservice.storage.HBaseTimelineReaderImpl.DATA_TO_RETRIEVE;
+import static org.apache.hadoop.yarn.server.timelineservice.storage.HBaseTimelineReaderImpl.MONITOR_FILTERS;
+
+public class TestTimelineReaderHBaseDown {
+
+  @Test(timeout=300000)
+  public void testTimelineReaderHBaseUp() throws Exception {
+    HBaseTestingUtility util = new HBaseTestingUtility();
+    configure(util);
+    try {
+      util.startMiniCluster();
+      DataGeneratorForTest.createSchema(util.getConfiguration());
+      DataGeneratorForTest.loadApps(util, System.currentTimeMillis());
+
+      TimelineReaderServer server = getTimelineReaderServer();
+      server.init(util.getConfiguration());
+      HBaseTimelineReaderImpl htr = getHBaseTimelineReaderImpl(server);
+      server.start();
+      checkQuery(htr);
+    } finally {
+      util.shutdownMiniCluster();
+    }
+  }
+
+  @Test(timeout=300000)
+  public void testTimelineReaderInitWhenHBaseIsDown() throws
+      TimeoutException, InterruptedException {
+    HBaseTestingUtility util = new HBaseTestingUtility();
+    configure(util);
+    TimelineReaderServer server = getTimelineReaderServer();
+
+    // init timeline reader when hbase is not running
+    server.init(util.getConfiguration());
+    HBaseTimelineReaderImpl htr = getHBaseTimelineReaderImpl(server);
+    server.start();
+    waitForHBaseDown(htr);
+  }
+
+  @Test(timeout=300000)
+  public void testTimelineReaderDetectsHBaseDown() throws Exception {
+    HBaseTestingUtility util = new HBaseTestingUtility();
+    configure(util);
+
+    try {
+      // start minicluster
+      util.startMiniCluster();
+      DataGeneratorForTest.createSchema(util.getConfiguration());
+      DataGeneratorForTest.loadApps(util, System.currentTimeMillis());
+
+      // init timeline reader
+      TimelineReaderServer server = getTimelineReaderServer();
+      server.init(util.getConfiguration());
+      HBaseTimelineReaderImpl htr = getHBaseTimelineReaderImpl(server);
+
+      // stop hbase after timeline reader init
+      util.shutdownMiniHBaseCluster();
+
+      // start server and check that it detects hbase is down
+      server.start();
+      waitForHBaseDown(htr);
+    } finally {
+      util.shutdownMiniCluster();
+    }
+  }
+
+  @Test(timeout=300000)
+  public void testTimelineReaderDetectsZooKeeperDown() throws Exception {
+    HBaseTestingUtility util = new HBaseTestingUtility();
+    configure(util);
+
+    try {
+      // start minicluster
+      util.startMiniCluster();
+      DataGeneratorForTest.createSchema(util.getConfiguration());
+      DataGeneratorForTest.loadApps(util, System.currentTimeMillis());
+
+      // init timeline reader
+      TimelineReaderServer server = getTimelineReaderServer();
+      server.init(util.getConfiguration());
+      HBaseTimelineReaderImpl htr = getHBaseTimelineReaderImpl(server);
+
+      // stop hbase and zookeeper after timeline reader init
+      util.shutdownMiniCluster();
+
+      // start server and check that it detects hbase is down
+      server.start();
+      waitForHBaseDown(htr);
+    } finally {
+      util.shutdownMiniCluster();
+    }
+  }
+
+  @Test(timeout=300000)
+  public void testTimelineReaderRecoversAfterHBaseReturns() throws Exception {
+    HBaseTestingUtility util = new HBaseTestingUtility();
+    configure(util);
+
+    try {
+      // start minicluster
+      util.startMiniCluster();
+      DataGeneratorForTest.createSchema(util.getConfiguration());
+      DataGeneratorForTest.loadApps(util, System.currentTimeMillis());
+
+      // init timeline reader
+      TimelineReaderServer server = getTimelineReaderServer();
+      server.init(util.getConfiguration());
+      HBaseTimelineReaderImpl htr = getHBaseTimelineReaderImpl(server);
+
+      // stop hbase after timeline reader init
+      util.shutdownMiniHBaseCluster();
+
+      // start server and check that it detects hbase is down
+      server.start();
+      waitForHBaseDown(htr);
+
+      util.startMiniHBaseCluster(1, 1);
+      GenericTestUtils.waitFor(() -> !htr.isHBaseDown(), 1000, 150000);
+    } finally {
+      util.shutdownMiniCluster();
+    }
+  }
+
+  private static void waitForHBaseDown(HBaseTimelineReaderImpl htr) throws
+      TimeoutException, InterruptedException {
+    GenericTestUtils.waitFor(() -> htr.isHBaseDown(), 1000, 150000);
+    try {
+      checkQuery(htr);
+      Assert.fail("Query should fail when HBase is down");
+    } catch (IOException e) {
+      Assert.assertEquals("HBase is down", e.getMessage());
+    }
+  }
+
+  private static void checkQuery(HBaseTimelineReaderImpl htr) throws
+      IOException {
+    TimelineReaderContext context =
+        new TimelineReaderContext(YarnConfiguration.DEFAULT_RM_CLUSTER_ID,
+            null, null, null, null, TimelineEntityType
+            .YARN_FLOW_ACTIVITY.toString(), null, null);
+    Set<TimelineEntity> entities = htr.getEntities(context, MONITOR_FILTERS,
+        DATA_TO_RETRIEVE);
+  }
+
+  private static void configure(HBaseTestingUtility util) {
+    Configuration config = util.getConfiguration();
+    config.setBoolean(YarnConfiguration.TIMELINE_SERVICE_ENABLED, true);
+    config.setFloat(YarnConfiguration.TIMELINE_SERVICE_VERSION, 2.0f);
+    config.set(YarnConfiguration.TIMELINE_SERVICE_READER_WEBAPP_ADDRESS,
+        "localhost:0");
+    config.set(YarnConfiguration.RM_CLUSTER_ID, "cluster1");
+    config.set(YarnConfiguration.TIMELINE_SERVICE_READER_CLASS,
+        "org.apache.hadoop.yarn.server.timelineservice.storage."
+            + "HBaseTimelineReaderImpl");
+    config.setInt("hfile.format.version", 3);
+    config.setLong(TIMELINE_SERVICE_READER_STORAGE_MONITOR_INTERVAL_MS, 5000);
+  }
+
+  private static TimelineReaderServer getTimelineReaderServer() {
+    return new TimelineReaderServer() {
+      @Override
+      protected void addFilters(Configuration conf) {
+        // The parent code uses hadoop-common jar from this version of
+        // Hadoop, but the tests are using hadoop-common jar from
+        // ${hbase-compatible-hadoop.version}.  This version uses Jetty 9
+        // while ${hbase-compatible-hadoop.version} uses Jetty 6, and there
+        // are many differences, including classnames and packages.
+        // We do nothing here, so that we don't cause a NoSuchMethodError or
+        // NoClassDefFoundError.
+        // Once ${hbase-compatible-hadoop.version} is changed to Hadoop 3,
+        // we should be able to remove this @Override.
+      }
+    };
+  }
+
+  private static HBaseTimelineReaderImpl getHBaseTimelineReaderImpl(
+      TimelineReaderServer server) {
+    for (Service s: server.getServices()) {
+      if (s instanceof HBaseTimelineReaderImpl) {
+        return (HBaseTimelineReaderImpl) s;
+      }
+    }
+    throw new IllegalStateException("Couldn't find HBaseTimelineReaderImpl");
+  }
+}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/ba683204/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-client/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/HBaseTimelineReaderImpl.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-client/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/HBaseTimelineReaderImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-client/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/HBaseTimelineReaderImpl.java
index 1ebfab2..fadfd14 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-client/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/HBaseTimelineReaderImpl.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-client/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/HBaseTimelineReaderImpl.java
@@ -20,12 +20,18 @@ package org.apache.hadoop.yarn.server.timelineservice.storage;
 
 import java.io.IOException;
 import java.util.Set;
+import java.util.concurrent.Executors;
+import java.util.concurrent.ScheduledExecutorService;
+import java.util.concurrent.TimeUnit;
+import java.util.concurrent.atomic.AtomicBoolean;
 
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.client.Connection;
 import org.apache.hadoop.hbase.client.ConnectionFactory;
 import org.apache.hadoop.service.AbstractService;
 import org.apache.hadoop.yarn.api.records.timelineservice.TimelineEntity;
+import org.apache.hadoop.yarn.api.records.timelineservice.TimelineEntityType;
+import org.apache.hadoop.yarn.conf.YarnConfiguration;
 import org.apache.hadoop.yarn.server.timelineservice.reader.TimelineDataToRetrieve;
 import org.apache.hadoop.yarn.server.timelineservice.reader.TimelineEntityFilters;
 import org.apache.hadoop.yarn.server.timelineservice.reader.TimelineReaderContext;
@@ -47,6 +53,12 @@ public class HBaseTimelineReaderImpl
 
   private Configuration hbaseConf = null;
   private Connection conn;
+  private Configuration monitorHBaseConf = null;
+  private Connection monitorConn;
+  private ScheduledExecutorService monitorExecutorService;
+  private TimelineReaderContext monitorContext;
+  private long monitorInterval;
+  private AtomicBoolean hbaseDown = new AtomicBoolean();
 
   public HBaseTimelineReaderImpl() {
     super(HBaseTimelineReaderImpl.class.getName());
@@ -55,22 +67,72 @@ public class HBaseTimelineReaderImpl
   @Override
   public void serviceInit(Configuration conf) throws Exception {
     super.serviceInit(conf);
+
+    String clusterId = conf.get(
+        YarnConfiguration.RM_CLUSTER_ID,
+        YarnConfiguration.DEFAULT_RM_CLUSTER_ID);
+    monitorContext =
+        new TimelineReaderContext(clusterId, null, null, null, null,
+            TimelineEntityType.YARN_FLOW_ACTIVITY.toString(), null, null);
+    monitorInterval = conf.getLong(
+        YarnConfiguration.TIMELINE_SERVICE_READER_STORAGE_MONITOR_INTERVAL_MS,
+        YarnConfiguration.DEFAULT_TIMELINE_SERVICE_STORAGE_MONITOR_INTERVAL_MS);
+
+    monitorHBaseConf = HBaseTimelineStorageUtils.getTimelineServiceHBaseConf(conf);
+    monitorHBaseConf.setInt("hbase.client.retries.number", 3);
+    monitorHBaseConf.setLong("hbase.client.pause", 1000);
+    monitorHBaseConf.setLong("hbase.rpc.timeout", monitorInterval);
+    monitorHBaseConf.setLong("hbase.client.scanner.timeout.period",
+        monitorInterval);
+    monitorHBaseConf.setInt("zookeeper.recovery.retry", 1);
+    monitorConn = ConnectionFactory.createConnection(monitorHBaseConf);
+
+    monitorExecutorService = Executors.newScheduledThreadPool(1);
+
     hbaseConf = HBaseTimelineStorageUtils.getTimelineServiceHBaseConf(conf);
     conn = ConnectionFactory.createConnection(hbaseConf);
   }
 
   @Override
+  protected void serviceStart() throws Exception {
+    super.serviceStart();
+    LOG.info("Scheduling HBase liveness monitor at interval {}",
+        monitorInterval);
+    monitorExecutorService.scheduleAtFixedRate(new HBaseMonitor(), 0,
+        monitorInterval, TimeUnit.MILLISECONDS);
+  }
+
+  @Override
   protected void serviceStop() throws Exception {
     if (conn != null) {
       LOG.info("closing the hbase Connection");
       conn.close();
     }
+    if (monitorExecutorService != null) {
+      monitorExecutorService.shutdownNow();
+      if (!monitorExecutorService.awaitTermination(30, TimeUnit.SECONDS)) {
+        LOG.warn("failed to stop the monitir task in time. " +
+            "will still proceed to close the monitor.");
+      }
+    }
+    monitorConn.close();
     super.serviceStop();
   }
 
+  private void checkHBaseDown() throws IOException {
+    if (hbaseDown.get()) {
+      throw new IOException("HBase is down");
+    }
+  }
+
+  public boolean isHBaseDown() {
+    return hbaseDown.get();
+  }
+
   @Override
   public TimelineEntity getEntity(TimelineReaderContext context,
       TimelineDataToRetrieve dataToRetrieve) throws IOException {
+    checkHBaseDown();
     TimelineEntityReader reader =
         TimelineEntityReaderFactory.createSingleEntityReader(context,
             dataToRetrieve);
@@ -81,6 +143,7 @@ public class HBaseTimelineReaderImpl
   public Set<TimelineEntity> getEntities(TimelineReaderContext context,
       TimelineEntityFilters filters, TimelineDataToRetrieve dataToRetrieve)
       throws IOException {
+    checkHBaseDown();
     TimelineEntityReader reader =
         TimelineEntityReaderFactory.createMultipleEntitiesReader(context,
             filters, dataToRetrieve);
@@ -90,7 +153,37 @@ public class HBaseTimelineReaderImpl
   @Override
   public Set<String> getEntityTypes(TimelineReaderContext context)
       throws IOException {
+    checkHBaseDown();
     EntityTypeReader reader = new EntityTypeReader(context);
     return reader.readEntityTypes(hbaseConf, conn);
   }
+
+  protected static final TimelineEntityFilters MONITOR_FILTERS =
+      new TimelineEntityFilters.Builder().entityLimit(1L).build();
+  protected static final TimelineDataToRetrieve DATA_TO_RETRIEVE =
+      new TimelineDataToRetrieve(null, null, null, null, null, null);
+
+  private class HBaseMonitor implements Runnable {
+    @Override
+    public void run() {
+      try {
+        LOG.info("Running HBase liveness monitor");
+        TimelineEntityReader reader =
+            TimelineEntityReaderFactory.createMultipleEntitiesReader(
+                monitorContext, MONITOR_FILTERS, DATA_TO_RETRIEVE);
+        reader.readEntities(monitorHBaseConf, monitorConn);
+
+        // on success, reset hbase down flag
+        if (hbaseDown.getAndSet(false)) {
+          if(LOG.isDebugEnabled()) {
+            LOG.debug("HBase request succeeded, assuming HBase up");
+          }
+        }
+      } catch (Exception e) {
+        LOG.warn("Got failure attempting to read from timeline storage, " +
+            "assuming HBase down", e);
+        hbaseDown.getAndSet(true);
+      }
+    }
+  }
 }


---------------------------------------------------------------------
To unsubscribe, e-mail: common-commits-unsubscribe@hadoop.apache.org
For additional commands, e-mail: common-commits-help@hadoop.apache.org


[37/50] [abbrv] hadoop git commit: HDDS-167. Rename KeySpaceManager to OzoneManager. Contributed by Arpit Agarwal.

Posted by vi...@apache.org.
http://git-wip-us.apache.org/repos/asf/hadoop/blob/061b1685/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/protocolPB/KSMPBHelper.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/protocolPB/KSMPBHelper.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/protocolPB/KSMPBHelper.java
deleted file mode 100644
index fdc3ce7..0000000
--- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/protocolPB/KSMPBHelper.java
+++ /dev/null
@@ -1,113 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.ozone.protocolPB;
-
-import org.apache.hadoop.ozone.OzoneAcl;
-import org.apache.hadoop.ozone.protocol.proto
-    .KeySpaceManagerProtocolProtos.OzoneAclInfo;
-import org.apache.hadoop.ozone.protocol.proto
-    .KeySpaceManagerProtocolProtos.OzoneAclInfo.OzoneAclType;
-import org.apache.hadoop.ozone.protocol.proto
-    .KeySpaceManagerProtocolProtos.OzoneAclInfo.OzoneAclRights;
-
-/**
- * Utilities for converting protobuf classes.
- */
-public final class KSMPBHelper {
-
-  private KSMPBHelper() {
-    /** Hidden constructor */
-  }
-
-  /**
-   * Converts OzoneAcl into protobuf's OzoneAclInfo.
-   * @return OzoneAclInfo
-   */
-  public static OzoneAclInfo convertOzoneAcl(OzoneAcl acl) {
-    OzoneAclInfo.OzoneAclType aclType;
-    switch(acl.getType()) {
-    case USER:
-      aclType = OzoneAclType.USER;
-      break;
-    case GROUP:
-      aclType = OzoneAclType.GROUP;
-      break;
-    case WORLD:
-      aclType = OzoneAclType.WORLD;
-      break;
-    default:
-      throw new IllegalArgumentException("ACL type is not recognized");
-    }
-    OzoneAclInfo.OzoneAclRights aclRights;
-    switch(acl.getRights()) {
-    case READ:
-      aclRights = OzoneAclRights.READ;
-      break;
-    case WRITE:
-      aclRights = OzoneAclRights.WRITE;
-      break;
-    case READ_WRITE:
-      aclRights = OzoneAclRights.READ_WRITE;
-      break;
-    default:
-      throw new IllegalArgumentException("ACL right is not recognized");
-    }
-
-    return OzoneAclInfo.newBuilder().setType(aclType)
-        .setName(acl.getName())
-        .setRights(aclRights)
-        .build();
-  }
-
-  /**
-   * Converts protobuf's OzoneAclInfo into OzoneAcl.
-   * @return OzoneAcl
-   */
-  public static OzoneAcl convertOzoneAcl(OzoneAclInfo aclInfo) {
-    OzoneAcl.OzoneACLType aclType;
-    switch(aclInfo.getType()) {
-    case USER:
-      aclType = OzoneAcl.OzoneACLType.USER;
-      break;
-    case GROUP:
-      aclType = OzoneAcl.OzoneACLType.GROUP;
-      break;
-    case WORLD:
-      aclType = OzoneAcl.OzoneACLType.WORLD;
-      break;
-    default:
-      throw new IllegalArgumentException("ACL type is not recognized");
-    }
-    OzoneAcl.OzoneACLRights aclRights;
-    switch(aclInfo.getRights()) {
-    case READ:
-      aclRights = OzoneAcl.OzoneACLRights.READ;
-      break;
-    case WRITE:
-      aclRights = OzoneAcl.OzoneACLRights.WRITE;
-      break;
-    case READ_WRITE:
-      aclRights = OzoneAcl.OzoneACLRights.READ_WRITE;
-      break;
-    default:
-      throw new IllegalArgumentException("ACL right is not recognized");
-    }
-
-    return new OzoneAcl(aclType, aclInfo.getName(), aclRights);
-  }
-}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/061b1685/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/protocolPB/OMPBHelper.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/protocolPB/OMPBHelper.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/protocolPB/OMPBHelper.java
new file mode 100644
index 0000000..d57d32e
--- /dev/null
+++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/protocolPB/OMPBHelper.java
@@ -0,0 +1,113 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.ozone.protocolPB;
+
+import org.apache.hadoop.ozone.OzoneAcl;
+import org.apache.hadoop.ozone.protocol.proto
+    .OzoneManagerProtocolProtos.OzoneAclInfo;
+import org.apache.hadoop.ozone.protocol.proto
+    .OzoneManagerProtocolProtos.OzoneAclInfo.OzoneAclType;
+import org.apache.hadoop.ozone.protocol.proto
+    .OzoneManagerProtocolProtos.OzoneAclInfo.OzoneAclRights;
+
+/**
+ * Utilities for converting protobuf classes.
+ */
+public final class OMPBHelper {
+
+  private OMPBHelper() {
+    /** Hidden constructor */
+  }
+
+  /**
+   * Converts OzoneAcl into protobuf's OzoneAclInfo.
+   * @return OzoneAclInfo
+   */
+  public static OzoneAclInfo convertOzoneAcl(OzoneAcl acl) {
+    OzoneAclInfo.OzoneAclType aclType;
+    switch(acl.getType()) {
+    case USER:
+      aclType = OzoneAclType.USER;
+      break;
+    case GROUP:
+      aclType = OzoneAclType.GROUP;
+      break;
+    case WORLD:
+      aclType = OzoneAclType.WORLD;
+      break;
+    default:
+      throw new IllegalArgumentException("ACL type is not recognized");
+    }
+    OzoneAclInfo.OzoneAclRights aclRights;
+    switch(acl.getRights()) {
+    case READ:
+      aclRights = OzoneAclRights.READ;
+      break;
+    case WRITE:
+      aclRights = OzoneAclRights.WRITE;
+      break;
+    case READ_WRITE:
+      aclRights = OzoneAclRights.READ_WRITE;
+      break;
+    default:
+      throw new IllegalArgumentException("ACL right is not recognized");
+    }
+
+    return OzoneAclInfo.newBuilder().setType(aclType)
+        .setName(acl.getName())
+        .setRights(aclRights)
+        .build();
+  }
+
+  /**
+   * Converts protobuf's OzoneAclInfo into OzoneAcl.
+   * @return OzoneAcl
+   */
+  public static OzoneAcl convertOzoneAcl(OzoneAclInfo aclInfo) {
+    OzoneAcl.OzoneACLType aclType;
+    switch(aclInfo.getType()) {
+    case USER:
+      aclType = OzoneAcl.OzoneACLType.USER;
+      break;
+    case GROUP:
+      aclType = OzoneAcl.OzoneACLType.GROUP;
+      break;
+    case WORLD:
+      aclType = OzoneAcl.OzoneACLType.WORLD;
+      break;
+    default:
+      throw new IllegalArgumentException("ACL type is not recognized");
+    }
+    OzoneAcl.OzoneACLRights aclRights;
+    switch(aclInfo.getRights()) {
+    case READ:
+      aclRights = OzoneAcl.OzoneACLRights.READ;
+      break;
+    case WRITE:
+      aclRights = OzoneAcl.OzoneACLRights.WRITE;
+      break;
+    case READ_WRITE:
+      aclRights = OzoneAcl.OzoneACLRights.READ_WRITE;
+      break;
+    default:
+      throw new IllegalArgumentException("ACL right is not recognized");
+    }
+
+    return new OzoneAcl(aclType, aclInfo.getName(), aclRights);
+  }
+}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/061b1685/hadoop-ozone/common/src/main/proto/KeySpaceManagerProtocol.proto
----------------------------------------------------------------------
diff --git a/hadoop-ozone/common/src/main/proto/KeySpaceManagerProtocol.proto b/hadoop-ozone/common/src/main/proto/KeySpaceManagerProtocol.proto
deleted file mode 100644
index d3d1de6..0000000
--- a/hadoop-ozone/common/src/main/proto/KeySpaceManagerProtocol.proto
+++ /dev/null
@@ -1,474 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-/**
- * These .proto interfaces are private and unstable.
- * Please see http://wiki.apache.org/hadoop/Compatibility
- * for what changes are allowed for a *unstable* .proto interface.
- */
-
-option java_package = "org.apache.hadoop.ozone.protocol.proto";
-option java_outer_classname = "KeySpaceManagerProtocolProtos";
-option java_generic_services = true;
-option java_generate_equals_and_hash = true;
-package hadoop.ozone;
-
-/**
-This is file contains the protocol to communicate with
-Ozone key space manager. Ozone KSM manages the namespace for ozone.
-This is similar to Namenode for Ozone.
-*/
-
-import "hdfs.proto";
-import "hdds.proto";
-
-enum Status {
-    OK = 1;
-    VOLUME_NOT_UNIQUE = 2;
-    VOLUME_NOT_FOUND = 3;
-    VOLUME_NOT_EMPTY = 4;
-    VOLUME_ALREADY_EXISTS = 5;
-    USER_NOT_FOUND = 6;
-    USER_TOO_MANY_VOLUMES = 7;
-    BUCKET_NOT_FOUND = 8;
-    BUCKET_NOT_EMPTY = 9;
-    BUCKET_ALREADY_EXISTS = 10;
-    KEY_ALREADY_EXISTS = 11;
-    KEY_NOT_FOUND = 12;
-    INVALID_KEY_NAME = 13;
-    ACCESS_DENIED = 14;
-    INTERNAL_ERROR = 15;
-}
-
-
-message VolumeInfo {
-    required string adminName = 1;
-    required string ownerName = 2;
-    required string volume = 3;
-    optional uint64 quotaInBytes = 4;
-    repeated hadoop.hdds.KeyValue metadata = 5;
-    repeated OzoneAclInfo volumeAcls = 6;
-    required uint64 creationTime = 7;
-}
-
-/**
-    Creates a volume
-*/
-message CreateVolumeRequest {
-    required VolumeInfo volumeInfo = 1;
-}
-
-message CreateVolumeResponse {
-
-    required Status status = 1;
-}
-
-message VolumeList {
-    repeated string volumeNames = 1;
-}
-
-/**
-    Changes the Volume Properties -- like ownership and quota for a volume.
-*/
-message SetVolumePropertyRequest {
-    required string volumeName = 1;
-    optional string ownerName = 2;
-    optional uint64 quotaInBytes = 3;
-}
-
-message SetVolumePropertyResponse {
-    required Status status = 1;
-}
-
-/**
- * Checks if the user has specified permissions for the volume
- */
-message CheckVolumeAccessRequest {
-    required string volumeName = 1;
-    required OzoneAclInfo userAcl = 2;
-}
-
-message CheckVolumeAccessResponse {
-
-    required Status status = 1;
-}
-
-
-/**
-    Returns information about a volume.
-*/
-
-message InfoVolumeRequest {
-    required string volumeName = 1;
-}
-
-message InfoVolumeResponse {
-    required Status status = 1;
-    optional VolumeInfo volumeInfo = 2;
-
-}
-
-/**
-    Deletes an existing volume.
-*/
-message DeleteVolumeRequest {
-    required string volumeName = 1;
-}
-
-message DeleteVolumeResponse {
-    required Status status = 1;
-}
-
-
-/**
-    List Volumes -- List all volumes in the cluster or by user.
-*/
-
-message ListVolumeRequest {
-    enum Scope {
-        USER_VOLUMES = 1;   // User volumes -- called by user
-        VOLUMES_BY_USER = 2; // User volumes - called by Admin
-        VOLUMES_BY_CLUSTER = 3; // All volumes in the cluster
-    }
-    required Scope scope = 1;
-    optional string userName = 2;
-    optional string prefix = 3;
-    optional string prevKey = 4;
-    optional uint32 maxKeys = 5;
-}
-
-message ListVolumeResponse {
-    required Status status = 1;
-    repeated VolumeInfo volumeInfo = 2;
-}
-
-message BucketInfo {
-    required string volumeName = 1;
-    required string bucketName = 2;
-    repeated OzoneAclInfo acls = 3;
-    required bool isVersionEnabled = 4 [default = false];
-    required hadoop.hdfs.StorageTypeProto storageType = 5 [default = DISK];
-    required uint64 creationTime = 6;
-}
-
-message BucketArgs {
-    required string volumeName = 1;
-    required string bucketName = 2;
-    repeated OzoneAclInfo addAcls = 3;
-    repeated OzoneAclInfo removeAcls = 4;
-    optional bool isVersionEnabled = 5;
-    optional hadoop.hdfs.StorageTypeProto storageType = 6;
-}
-
-message OzoneAclInfo {
-    enum OzoneAclType {
-        USER = 1;
-        GROUP = 2;
-        WORLD = 3;
-    }
-    enum OzoneAclRights {
-        READ = 1;
-        WRITE = 2;
-        READ_WRITE = 3;
-    }
-    required OzoneAclType type = 1;
-    required string name = 2;
-    required OzoneAclRights rights = 3;
-}
-
-message CreateBucketRequest {
-    required BucketInfo bucketInfo = 1;
-}
-
-message CreateBucketResponse {
-    required Status status = 1;
-}
-
-message InfoBucketRequest {
-    required string volumeName = 1;
-    required string bucketName = 2;
-}
-
-message InfoBucketResponse {
-    required Status status = 1;
-    optional BucketInfo bucketInfo = 2;
-}
-
-message ListBucketsRequest {
-    required string volumeName = 1;
-    optional string startKey = 2;
-    optional string prefix = 3;
-    optional int32 count = 4;
-}
-
-message ListBucketsResponse {
-    required Status status = 1;
-    repeated BucketInfo bucketInfo = 2;
-}
-
-message KeyArgs {
-    required string volumeName = 1;
-    required string bucketName = 2;
-    required string keyName = 3;
-    optional uint64 dataSize = 4;
-    optional hadoop.hdds.ReplicationType type = 5;
-    optional hadoop.hdds.ReplicationFactor factor = 6;
-}
-
-message KeyLocation {
-    required hadoop.hdds.BlockID blockID = 1;
-    required bool shouldCreateContainer = 2;
-    required uint64 offset = 3;
-    required uint64 length = 4;
-    // indicated at which version this block gets created.
-    optional uint64 createVersion = 5;
-}
-
-message KeyLocationList {
-    optional uint64 version = 1;
-    repeated KeyLocation keyLocations = 2;
-}
-
-message KeyInfo {
-    required string volumeName = 1;
-    required string bucketName = 2;
-    required string keyName = 3;
-    required uint64 dataSize = 4;
-    required hadoop.hdds.ReplicationType type = 5;
-    required hadoop.hdds.ReplicationFactor factor = 6;
-    repeated KeyLocationList keyLocationList = 7;
-    required uint64 creationTime = 8;
-    required uint64 modificationTime = 9;
-    optional uint64 latestVersion = 10;
-}
-
-message LocateKeyRequest {
-    required KeyArgs keyArgs = 1;
-}
-
-message LocateKeyResponse {
-    required Status status = 1;
-    optional KeyInfo keyInfo = 2;
-    // clients' followup request may carry this ID for stateful operations (similar
-    // to a cookie).
-    optional uint32 ID = 3;
-    // TODO : allow specifiying a particular version to read.
-    optional uint64 openVersion = 4;
-}
-
-message SetBucketPropertyRequest {
-    required BucketArgs bucketArgs = 1;
-}
-
-message SetBucketPropertyResponse {
-    required Status status = 1;
-}
-
-message RenameKeyRequest{
-    required KeyArgs keyArgs = 1;
-    required string toKeyName = 2;
-}
-
-message RenameKeyResponse{
-    required Status status = 1;
-}
-
-message DeleteBucketRequest {
-    required string volumeName = 1;
-    required string bucketName = 2;
-}
-
-message DeleteBucketResponse {
-    required Status status = 1;
-}
-
-message ListKeysRequest {
-    required string volumeName = 1;
-    required string bucketName = 2;
-    optional string startKey = 3;
-    optional string prefix = 4;
-    optional int32 count = 5;
-}
-
-message ListKeysResponse {
-    required Status status = 1;
-    repeated KeyInfo keyInfo = 2;
-}
-
-message AllocateBlockRequest {
-    required KeyArgs keyArgs = 1;
-    required uint32 clientID = 2;
-}
-
-message AllocateBlockResponse {
-    required Status status = 1;
-    required KeyLocation keyLocation = 2;
-}
-
-message CommitKeyRequest {
-    required KeyArgs keyArgs = 1;
-    required uint32 clientID = 2;
-}
-
-message CommitKeyResponse {
-    required Status status = 1;
-}
-
-message ServiceListRequest {
-}
-
-message ServiceListResponse {
-    required Status status = 1;
-    repeated ServiceInfo serviceInfo = 2;
-}
-
-message ServicePort {
-    enum Type {
-        RPC = 1;
-        HTTP = 2;
-        HTTPS = 3;
-        RATIS = 4;
-    };
-    required Type type = 1;
-    required uint32 value = 2;
-}
-
-message ServiceInfo {
-    required hadoop.hdds.NodeType nodeType = 1;
-    required string hostname = 2;
-    repeated ServicePort servicePorts = 3;
-}
-
-/**
- The KSM service that takes care of Ozone namespace.
-*/
-service KeySpaceManagerService {
-
-    /**
-        Creates a Volume.
-    */
-    rpc createVolume(CreateVolumeRequest)
-        returns(CreateVolumeResponse);
-
-    /**
-        Allows modificiation of volume properties.
-    */
-    rpc setVolumeProperty(SetVolumePropertyRequest)
-        returns (SetVolumePropertyResponse);
-
-    /**
-        Checks if the specified volume is accesible by the specified user.
-    */
-    rpc checkVolumeAccess(CheckVolumeAccessRequest)
-        returns (CheckVolumeAccessResponse);
-
-    /**
-        Gets Volume information.
-    */
-    rpc infoVolume(InfoVolumeRequest)
-        returns(InfoVolumeResponse);
-    /**
-        Deletes a volume if it is empty.
-    */
-    rpc deleteVolume(DeleteVolumeRequest)
-        returns (DeleteVolumeResponse);
-
-    /**
-        Lists Volumes
-    */
-    rpc listVolumes(ListVolumeRequest)
-        returns (ListVolumeResponse);
-
-    /**
-        Creates a Bucket.
-    */
-    rpc createBucket(CreateBucketRequest)
-        returns(CreateBucketResponse);
-
-    /**
-        Get Bucket information.
-    */
-    rpc infoBucket(InfoBucketRequest)
-        returns(InfoBucketResponse);
-
-    /**
-        Sets bucket properties.
-    */
-    rpc setBucketProperty(SetBucketPropertyRequest)
-        returns(SetBucketPropertyResponse);
-
-    /**
-        Get key.
-    */
-    rpc createKey(LocateKeyRequest)
-        returns(LocateKeyResponse);
-
-    /**
-       Look up for an existing key.
-    */
-    rpc lookupKey(LocateKeyRequest)
-        returns(LocateKeyResponse);
-
-    /**
-       Rename an existing key within a bucket.
-    */
-    rpc renameKey(RenameKeyRequest)
-        returns(RenameKeyResponse);
-
-    /**
-       Delete an existing key.
-    */
-    rpc deleteKey(LocateKeyRequest)
-        returns(LocateKeyResponse);
-
-    /**
-       Deletes a bucket from volume if it is empty.
-    */
-    rpc deleteBucket(DeleteBucketRequest)
-        returns (DeleteBucketResponse);
-
-    /**
-       List Buckets.
-    */
-    rpc listBuckets(ListBucketsRequest)
-    returns(ListBucketsResponse);
-
-    /**
-       List Keys.
-    */
-    rpc listKeys(ListKeysRequest)
-    returns(ListKeysResponse);
-
-    /**
-      Commit a key.
-    */
-    rpc commitKey(CommitKeyRequest)
-    returns(CommitKeyResponse);
-
-    /**
-      Allocate a new block for a key.
-    */
-    rpc allocateBlock(AllocateBlockRequest)
-    returns(AllocateBlockResponse);
-
-    /**
-      Returns list of Ozone services with its configuration details.
-    */
-    rpc getServiceList(ServiceListRequest)
-    returns(ServiceListResponse);
-}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/061b1685/hadoop-ozone/common/src/main/proto/OzoneManagerProtocol.proto
----------------------------------------------------------------------
diff --git a/hadoop-ozone/common/src/main/proto/OzoneManagerProtocol.proto b/hadoop-ozone/common/src/main/proto/OzoneManagerProtocol.proto
new file mode 100644
index 0000000..36b1c83
--- /dev/null
+++ b/hadoop-ozone/common/src/main/proto/OzoneManagerProtocol.proto
@@ -0,0 +1,480 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/**
+ * These .proto interfaces are private and unstable.
+ * Please see http://wiki.apache.org/hadoop/Compatibility
+ * for what changes are allowed for a *unstable* .proto interface.
+ */
+
+option java_package = "org.apache.hadoop.ozone.protocol.proto";
+option java_outer_classname = "OzoneManagerProtocolProtos";
+option java_generic_services = true;
+option java_generate_equals_and_hash = true;
+package hadoop.ozone;
+
+/**
+This is file contains the protocol to communicate with
+Ozone Manager. Ozone Manager manages the namespace for ozone.
+This is similar to Namenode for Ozone.
+*/
+
+import "hdfs.proto";
+import "hdds.proto";
+
+enum Status {
+    OK = 1;
+    VOLUME_NOT_UNIQUE = 2;
+    VOLUME_NOT_FOUND = 3;
+    VOLUME_NOT_EMPTY = 4;
+    VOLUME_ALREADY_EXISTS = 5;
+    USER_NOT_FOUND = 6;
+    USER_TOO_MANY_VOLUMES = 7;
+    BUCKET_NOT_FOUND = 8;
+    BUCKET_NOT_EMPTY = 9;
+    BUCKET_ALREADY_EXISTS = 10;
+    KEY_ALREADY_EXISTS = 11;
+    KEY_NOT_FOUND = 12;
+    INVALID_KEY_NAME = 13;
+    ACCESS_DENIED = 14;
+    INTERNAL_ERROR = 15;
+    KEY_ALLOCATION_ERROR = 16;
+    KEY_DELETION_ERROR = 17;
+    KEY_RENAME_ERROR = 18;
+    METADATA_ERROR = 19;
+    OM_NOT_INITIALIZED = 20;
+    SCM_VERSION_MISMATCH_ERROR = 21;
+}
+
+
+message VolumeInfo {
+    required string adminName = 1;
+    required string ownerName = 2;
+    required string volume = 3;
+    optional uint64 quotaInBytes = 4;
+    repeated hadoop.hdds.KeyValue metadata = 5;
+    repeated OzoneAclInfo volumeAcls = 6;
+    required uint64 creationTime = 7;
+}
+
+/**
+    Creates a volume
+*/
+message CreateVolumeRequest {
+    required VolumeInfo volumeInfo = 1;
+}
+
+message CreateVolumeResponse {
+
+    required Status status = 1;
+}
+
+message VolumeList {
+    repeated string volumeNames = 1;
+}
+
+/**
+    Changes the Volume Properties -- like ownership and quota for a volume.
+*/
+message SetVolumePropertyRequest {
+    required string volumeName = 1;
+    optional string ownerName = 2;
+    optional uint64 quotaInBytes = 3;
+}
+
+message SetVolumePropertyResponse {
+    required Status status = 1;
+}
+
+/**
+ * Checks if the user has specified permissions for the volume
+ */
+message CheckVolumeAccessRequest {
+    required string volumeName = 1;
+    required OzoneAclInfo userAcl = 2;
+}
+
+message CheckVolumeAccessResponse {
+
+    required Status status = 1;
+}
+
+
+/**
+    Returns information about a volume.
+*/
+
+message InfoVolumeRequest {
+    required string volumeName = 1;
+}
+
+message InfoVolumeResponse {
+    required Status status = 1;
+    optional VolumeInfo volumeInfo = 2;
+
+}
+
+/**
+    Deletes an existing volume.
+*/
+message DeleteVolumeRequest {
+    required string volumeName = 1;
+}
+
+message DeleteVolumeResponse {
+    required Status status = 1;
+}
+
+
+/**
+    List Volumes -- List all volumes in the cluster or by user.
+*/
+
+message ListVolumeRequest {
+    enum Scope {
+        USER_VOLUMES = 1;   // User volumes -- called by user
+        VOLUMES_BY_USER = 2; // User volumes - called by Admin
+        VOLUMES_BY_CLUSTER = 3; // All volumes in the cluster
+    }
+    required Scope scope = 1;
+    optional string userName = 2;
+    optional string prefix = 3;
+    optional string prevKey = 4;
+    optional uint32 maxKeys = 5;
+}
+
+message ListVolumeResponse {
+    required Status status = 1;
+    repeated VolumeInfo volumeInfo = 2;
+}
+
+message BucketInfo {
+    required string volumeName = 1;
+    required string bucketName = 2;
+    repeated OzoneAclInfo acls = 3;
+    required bool isVersionEnabled = 4 [default = false];
+    required hadoop.hdfs.StorageTypeProto storageType = 5 [default = DISK];
+    required uint64 creationTime = 6;
+}
+
+message BucketArgs {
+    required string volumeName = 1;
+    required string bucketName = 2;
+    repeated OzoneAclInfo addAcls = 3;
+    repeated OzoneAclInfo removeAcls = 4;
+    optional bool isVersionEnabled = 5;
+    optional hadoop.hdfs.StorageTypeProto storageType = 6;
+}
+
+message OzoneAclInfo {
+    enum OzoneAclType {
+        USER = 1;
+        GROUP = 2;
+        WORLD = 3;
+    }
+    enum OzoneAclRights {
+        READ = 1;
+        WRITE = 2;
+        READ_WRITE = 3;
+    }
+    required OzoneAclType type = 1;
+    required string name = 2;
+    required OzoneAclRights rights = 3;
+}
+
+message CreateBucketRequest {
+    required BucketInfo bucketInfo = 1;
+}
+
+message CreateBucketResponse {
+    required Status status = 1;
+}
+
+message InfoBucketRequest {
+    required string volumeName = 1;
+    required string bucketName = 2;
+}
+
+message InfoBucketResponse {
+    required Status status = 1;
+    optional BucketInfo bucketInfo = 2;
+}
+
+message ListBucketsRequest {
+    required string volumeName = 1;
+    optional string startKey = 2;
+    optional string prefix = 3;
+    optional int32 count = 4;
+}
+
+message ListBucketsResponse {
+    required Status status = 1;
+    repeated BucketInfo bucketInfo = 2;
+}
+
+message KeyArgs {
+    required string volumeName = 1;
+    required string bucketName = 2;
+    required string keyName = 3;
+    optional uint64 dataSize = 4;
+    optional hadoop.hdds.ReplicationType type = 5;
+    optional hadoop.hdds.ReplicationFactor factor = 6;
+}
+
+message KeyLocation {
+    required hadoop.hdds.BlockID blockID = 1;
+    required bool shouldCreateContainer = 2;
+    required uint64 offset = 3;
+    required uint64 length = 4;
+    // indicated at which version this block gets created.
+    optional uint64 createVersion = 5;
+}
+
+message KeyLocationList {
+    optional uint64 version = 1;
+    repeated KeyLocation keyLocations = 2;
+}
+
+message KeyInfo {
+    required string volumeName = 1;
+    required string bucketName = 2;
+    required string keyName = 3;
+    required uint64 dataSize = 4;
+    required hadoop.hdds.ReplicationType type = 5;
+    required hadoop.hdds.ReplicationFactor factor = 6;
+    repeated KeyLocationList keyLocationList = 7;
+    required uint64 creationTime = 8;
+    required uint64 modificationTime = 9;
+    optional uint64 latestVersion = 10;
+}
+
+message LocateKeyRequest {
+    required KeyArgs keyArgs = 1;
+}
+
+message LocateKeyResponse {
+    required Status status = 1;
+    optional KeyInfo keyInfo = 2;
+    // clients' followup request may carry this ID for stateful operations (similar
+    // to a cookie).
+    optional uint32 ID = 3;
+    // TODO : allow specifiying a particular version to read.
+    optional uint64 openVersion = 4;
+}
+
+message SetBucketPropertyRequest {
+    required BucketArgs bucketArgs = 1;
+}
+
+message SetBucketPropertyResponse {
+    required Status status = 1;
+}
+
+message RenameKeyRequest{
+    required KeyArgs keyArgs = 1;
+    required string toKeyName = 2;
+}
+
+message RenameKeyResponse{
+    required Status status = 1;
+}
+
+message DeleteBucketRequest {
+    required string volumeName = 1;
+    required string bucketName = 2;
+}
+
+message DeleteBucketResponse {
+    required Status status = 1;
+}
+
+message ListKeysRequest {
+    required string volumeName = 1;
+    required string bucketName = 2;
+    optional string startKey = 3;
+    optional string prefix = 4;
+    optional int32 count = 5;
+}
+
+message ListKeysResponse {
+    required Status status = 1;
+    repeated KeyInfo keyInfo = 2;
+}
+
+message AllocateBlockRequest {
+    required KeyArgs keyArgs = 1;
+    required uint32 clientID = 2;
+}
+
+message AllocateBlockResponse {
+    required Status status = 1;
+    required KeyLocation keyLocation = 2;
+}
+
+message CommitKeyRequest {
+    required KeyArgs keyArgs = 1;
+    required uint32 clientID = 2;
+}
+
+message CommitKeyResponse {
+    required Status status = 1;
+}
+
+message ServiceListRequest {
+}
+
+message ServiceListResponse {
+    required Status status = 1;
+    repeated ServiceInfo serviceInfo = 2;
+}
+
+message ServicePort {
+    enum Type {
+        RPC = 1;
+        HTTP = 2;
+        HTTPS = 3;
+        RATIS = 4;
+    };
+    required Type type = 1;
+    required uint32 value = 2;
+}
+
+message ServiceInfo {
+    required hadoop.hdds.NodeType nodeType = 1;
+    required string hostname = 2;
+    repeated ServicePort servicePorts = 3;
+}
+
+/**
+ The OM service that takes care of Ozone namespace.
+*/
+service OzoneManagerService {
+
+    /**
+        Creates a Volume.
+    */
+    rpc createVolume(CreateVolumeRequest)
+        returns(CreateVolumeResponse);
+
+    /**
+        Allows modificiation of volume properties.
+    */
+    rpc setVolumeProperty(SetVolumePropertyRequest)
+        returns (SetVolumePropertyResponse);
+
+    /**
+        Checks if the specified volume is accesible by the specified user.
+    */
+    rpc checkVolumeAccess(CheckVolumeAccessRequest)
+        returns (CheckVolumeAccessResponse);
+
+    /**
+        Gets Volume information.
+    */
+    rpc infoVolume(InfoVolumeRequest)
+        returns(InfoVolumeResponse);
+    /**
+        Deletes a volume if it is empty.
+    */
+    rpc deleteVolume(DeleteVolumeRequest)
+        returns (DeleteVolumeResponse);
+
+    /**
+        Lists Volumes
+    */
+    rpc listVolumes(ListVolumeRequest)
+        returns (ListVolumeResponse);
+
+    /**
+        Creates a Bucket.
+    */
+    rpc createBucket(CreateBucketRequest)
+        returns(CreateBucketResponse);
+
+    /**
+        Get Bucket information.
+    */
+    rpc infoBucket(InfoBucketRequest)
+        returns(InfoBucketResponse);
+
+    /**
+        Sets bucket properties.
+    */
+    rpc setBucketProperty(SetBucketPropertyRequest)
+        returns(SetBucketPropertyResponse);
+
+    /**
+        Get key.
+    */
+    rpc createKey(LocateKeyRequest)
+        returns(LocateKeyResponse);
+
+    /**
+       Look up for an existing key.
+    */
+    rpc lookupKey(LocateKeyRequest)
+        returns(LocateKeyResponse);
+
+    /**
+       Rename an existing key within a bucket.
+    */
+    rpc renameKey(RenameKeyRequest)
+        returns(RenameKeyResponse);
+
+    /**
+       Delete an existing key.
+    */
+    rpc deleteKey(LocateKeyRequest)
+        returns(LocateKeyResponse);
+
+    /**
+       Deletes a bucket from volume if it is empty.
+    */
+    rpc deleteBucket(DeleteBucketRequest)
+        returns (DeleteBucketResponse);
+
+    /**
+       List Buckets.
+    */
+    rpc listBuckets(ListBucketsRequest)
+    returns(ListBucketsResponse);
+
+    /**
+       List Keys.
+    */
+    rpc listKeys(ListKeysRequest)
+    returns(ListKeysResponse);
+
+    /**
+      Commit a key.
+    */
+    rpc commitKey(CommitKeyRequest)
+    returns(CommitKeyResponse);
+
+    /**
+      Allocate a new block for a key.
+    */
+    rpc allocateBlock(AllocateBlockRequest)
+    returns(AllocateBlockResponse);
+
+    /**
+      Returns list of Ozone services with its configuration details.
+    */
+    rpc getServiceList(ServiceListRequest)
+    returns(ServiceListResponse);
+}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/061b1685/hadoop-ozone/docs/content/GettingStarted.md
----------------------------------------------------------------------
diff --git a/hadoop-ozone/docs/content/GettingStarted.md b/hadoop-ozone/docs/content/GettingStarted.md
index 531d192..117a307 100644
--- a/hadoop-ozone/docs/content/GettingStarted.md
+++ b/hadoop-ozone/docs/content/GettingStarted.md
@@ -194,12 +194,12 @@ This path will be created by datanodes if it doesn't exist already. Here is an
     </property>
     ```
 
-1. **ozone.ksm.address** OM server address. This is used by OzoneClient and
+1. **ozone.om.address** OM server address. This is used by OzoneClient and
 Ozone File System.
     ```
     <property>
-       <name>ozone.ksm.address</name>
-       <value>ksm.hadoop.apache.org</value>
+       <name>ozone.om.address</name>
+       <value>om.hadoop.apache.org</value>
     </property>
     ```
 
@@ -210,10 +210,10 @@ Ozone File System.
 | ozone.enabled                  | True                         | This enables SCM and  containers in HDFS cluster.                |
 | ozone.metadata.dirs            | file path                    | The metadata will be stored here.                                |
 | ozone.scm.names                | SCM server name              | Hostname:port or or IP:port address of SCM.                      |
-| ozone.scm.block.client.address | SCM server name and port     | Used by services like OM                                        |
+| ozone.scm.block.client.address | SCM server name and port     | Used by services like OM                                         |
 | ozone.scm.client.address       | SCM server name and port     | Used by client side                                              |
 | ozone.scm.datanode.address     | SCM server name and port     | Used by datanode to talk to SCM                                  |
-| ozone.ksm.address              | OM server name              | Used by Ozone handler and Ozone file system.                     |
+| ozone.om.address               | OM server name               | Used by Ozone handler and Ozone file system.                     |
 
 
 #### Sample ozone-site.xml
@@ -253,7 +253,7 @@ Ozone File System.
      </property>
 
      <property>
-       <name>ozone.ksm.address</name>
+       <name>ozone.om.address</name>
        <value>127.0.0.1:9874</value>
      </property>
 </configuration>
@@ -286,12 +286,12 @@ ozone --daemon start scm
 
 Once SCM gets started, OM must be initialized.
 ```
-ozone ksm -createObjectStore
+ozone om -createObjectStore
 ```
 
 Start OM.
 ```
-ozone --daemon start ksm
+ozone --daemon start om
 ```
 
 If you would like to start HDFS and Ozone together, you can do that by running
@@ -349,7 +349,7 @@ log4j.additivity.org.apache.hadoop.ozone=false
 ```
 
 On the SCM/OM side, you will be able to see
-1. `hadoop-hdfs-ksm-hostname.log`
+1. `hadoop-hdfs-om-hostname.log`
 1. `hadoop-hdfs-scm-hostname.log`
 
 ## Reporting Bugs

http://git-wip-us.apache.org/repos/asf/hadoop/blob/061b1685/hadoop-ozone/docs/content/Metrics.md
----------------------------------------------------------------------
diff --git a/hadoop-ozone/docs/content/Metrics.md b/hadoop-ozone/docs/content/Metrics.md
index dc58460..64a481f 100644
--- a/hadoop-ozone/docs/content/Metrics.md
+++ b/hadoop-ozone/docs/content/Metrics.md
@@ -131,10 +131,10 @@ Following are the counters for containers:
 
 ### Key Space Metrics
 
-The metrics for various key space manager operations in HDFS Ozone.
+The metrics for various Ozone Manager operations in HDFS Ozone.
 
-key space manager (KSM) is a service that similar to the Namenode in HDFS.
-In the current design of KSM, it maintains metadata of all volumes, buckets and keys.
+The Ozone Manager (OM) is a service that similar to the Namenode in HDFS.
+In the current design of OM, it maintains metadata of all volumes, buckets and keys.
 These metrics are only available when ozone is enabled.
 
 Following is the set of counters maintained for each key space operation.
@@ -142,12 +142,12 @@ Following is the set of counters maintained for each key space operation.
 *Total number of operation* - We maintain an array which counts how
 many times a specific operation has been performed.
 Eg.`NumVolumeCreate` tells us how many times create volume has been
-invoked in KSM.
+invoked in OM.
 
 *Total number of failed operation* - This type operation is opposite to the above
 operation.
 Eg.`NumVolumeCreateFails` tells us how many times create volume has been invoked
-failed in KSM.
+failed in OM.
 
 Following are the counters for each of key space operations.
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/061b1685/hadoop-ozone/docs/content/_index.md
----------------------------------------------------------------------
diff --git a/hadoop-ozone/docs/content/_index.md b/hadoop-ozone/docs/content/_index.md
index ab7eabe..383b2e0 100644
--- a/hadoop-ozone/docs/content/_index.md
+++ b/hadoop-ozone/docs/content/_index.md
@@ -56,14 +56,14 @@ This is like DFSClient in HDFS. This acts as the standard client to talk to
 Ozone. All other components that we have discussed so far rely on Ozone client
 (TODO: Add Ozone client documentation).

 
-## Key Space Manager

+## Ozone Manager
 
-Key Space Manager(KSM) takes care of the Ozone's namespace.
-All ozone entities like volumes, buckets and keys are managed by KSM
-(TODO: Add KSM documentation). In Short, KSM is the metadata manager for Ozone.
-KSM talks to blockManager(SCM) to get blocks and passes it on to the Ozone
+Ozone Manager (OM) takes care of the Ozone's namespace.
+All ozone entities like volumes, buckets and keys are managed by OM
+(TODO: Add OM documentation). In short, OM is the metadata manager for Ozone.
+OM talks to blockManager(SCM) to get blocks and passes it on to the Ozone
 client.  Ozone client writes data to these blocks.
-KSM will eventually be replicated via Apache Ratis for High Availability.

+OM will eventually be replicated via Apache Ratis for High Availability.

 
 ## Storage Container Manager
 Storage Container Manager (SCM) is the block and cluster manager for Ozone.

http://git-wip-us.apache.org/repos/asf/hadoop/blob/061b1685/hadoop-ozone/docs/static/OzoneOverview.svg
----------------------------------------------------------------------
diff --git a/hadoop-ozone/docs/static/OzoneOverview.svg b/hadoop-ozone/docs/static/OzoneOverview.svg
index 2e14d3f..0120a5c 100644
--- a/hadoop-ozone/docs/static/OzoneOverview.svg
+++ b/hadoop-ozone/docs/static/OzoneOverview.svg
@@ -166,7 +166,7 @@
             <path d="M307.5,148.5 L433.5,148.5" id="Line" stroke="#000000" fill="#000000" stroke-linecap="square"></path>
             <path id="Line-decoration-1" d="M433.5,148.5 L422.7,145.5 L422.7,151.5 L433.5,148.5 Z" stroke="#000000" fill="#000000" stroke-linecap="square"></path>
             <path d="M4,232 L699,232" id="Line" stroke="#000000" stroke-width="2" stroke-linecap="square" stroke-dasharray="5,2,5"></path>
-            <g id="KSM" transform="translate(432.000000, 132.000000)">
+            <g id="OM" transform="translate(432.000000, 132.000000)">
                 <g id="Rectangle-3">
                     <use fill="#C6D4F9" fill-rule="evenodd" xlink:href="#path-19"></use>
                     <rect stroke="#000000" stroke-width="1" x="0.5" y="0.5" width="225" height="35" rx="8"></rect>

http://git-wip-us.apache.org/repos/asf/hadoop/blob/061b1685/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/MiniOzoneCluster.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/MiniOzoneCluster.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/MiniOzoneCluster.java
index 091d771..b568672 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/MiniOzoneCluster.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/MiniOzoneCluster.java
@@ -21,7 +21,7 @@ import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hdds.conf.OzoneConfiguration;
 import org.apache.hadoop.hdds.scm.server.StorageContainerManager;
 import org.apache.hadoop.ozone.client.OzoneClient;
-import org.apache.hadoop.ozone.ksm.KeySpaceManager;
+import org.apache.hadoop.ozone.om.OzoneManager;
 import org.apache.hadoop.hdds.scm.protocolPB
     .StorageContainerLocationProtocolClientSideTranslatorPB;
 import org.apache.hadoop.test.GenericTestUtils;
@@ -82,12 +82,12 @@ public interface MiniOzoneCluster {
   StorageContainerManager getStorageContainerManager();
 
   /**
-   * Returns {@link KeySpaceManager} associated with this
+   * Returns {@link OzoneManager} associated with this
    * {@link MiniOzoneCluster} instance.
    *
-   * @return {@link KeySpaceManager} instance
+   * @return {@link OzoneManager} instance
    */
-  KeySpaceManager getKeySpaceManager();
+  OzoneManager getOzoneManager();
 
   /**
    * Returns the list of {@link HddsDatanodeService} which are part of this
@@ -141,11 +141,11 @@ public interface MiniOzoneCluster {
   void restartStorageContainerManager() throws IOException;
 
   /**
-   * Restarts KeySpaceManager instance.
+   * Restarts OzoneManager instance.
    *
    * @throws IOException
    */
-  void restartKeySpaceManager() throws IOException;
+  void restartOzoneManager() throws IOException;
 
   /**
    * Restart a particular HddsDatanode.
@@ -184,13 +184,13 @@ public interface MiniOzoneCluster {
     protected Optional<Integer> hbInterval = Optional.empty();
     protected Optional<Integer> hbProcessorInterval = Optional.empty();
     protected Optional<String> scmId = Optional.empty();
-    protected Optional<String> ksmId = Optional.empty();
+    protected Optional<String> omId = Optional.empty();
 
     protected Boolean ozoneEnabled = true;
     protected Boolean randomContainerPort = true;
 
     // Use relative smaller number of handlers for testing
-    protected int numOfKsmHandlers = 20;
+    protected int numOfOmHandlers = 20;
     protected int numOfScmHandlers = 20;
     protected int numOfDatanodes = 1;
 
@@ -226,14 +226,14 @@ public interface MiniOzoneCluster {
     }
 
     /**
-     * Sets the KSM id.
+     * Sets the OM id.
      *
-     * @param id KSM Id
+     * @param id OM Id
      *
      * @return MiniOzoneCluster.Builder
      */
-    public Builder setKsmId(String id) {
-      ksmId = Optional.of(id);
+    public Builder setOmId(String id) {
+      omId = Optional.of(id);
       return this;
     }
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/061b1685/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/MiniOzoneClusterImpl.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/MiniOzoneClusterImpl.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/MiniOzoneClusterImpl.java
index f0bfef1..b3137bf 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/MiniOzoneClusterImpl.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/MiniOzoneClusterImpl.java
@@ -34,10 +34,10 @@ import org.apache.hadoop.net.NetUtils;
 import org.apache.hadoop.ozone.client.OzoneClient;
 import org.apache.hadoop.ozone.client.OzoneClientFactory;
 import org.apache.hadoop.ozone.client.rest.OzoneException;
-import org.apache.hadoop.ozone.ksm.KSMConfigKeys;
-import org.apache.hadoop.ozone.ksm.KeySpaceManager;
+import org.apache.hadoop.ozone.om.OMConfigKeys;
+import org.apache.hadoop.ozone.om.OzoneManager;
 import org.apache.hadoop.hdds.scm.server.SCMStorage;
-import org.apache.hadoop.ozone.ksm.KSMStorage;
+import org.apache.hadoop.ozone.om.OMStorage;
 import org.apache.hadoop.hdds.scm.ScmConfigKeys;
 import org.apache.hadoop.hdds.scm.protocolPB
     .StorageContainerLocationProtocolClientSideTranslatorPB;
@@ -73,7 +73,7 @@ import static org.apache.hadoop.ozone.OzoneConfigKeys
 
 /**
  * MiniOzoneCluster creates a complete in-process Ozone cluster suitable for
- * running tests.  The cluster consists of a KeySpaceManager,
+ * running tests.  The cluster consists of a OzoneManager,
  * StorageContainerManager and multiple DataNodes.
  */
 @InterfaceAudience.Private
@@ -84,7 +84,7 @@ public final class MiniOzoneClusterImpl implements MiniOzoneCluster {
 
   private final OzoneConfiguration conf;
   private final StorageContainerManager scm;
-  private final KeySpaceManager ksm;
+  private final OzoneManager ozoneManager;
   private final List<HddsDatanodeService> hddsDatanodes;
 
   /**
@@ -93,11 +93,11 @@ public final class MiniOzoneClusterImpl implements MiniOzoneCluster {
    * @throws IOException if there is an I/O error
    */
   private MiniOzoneClusterImpl(OzoneConfiguration conf,
-                               KeySpaceManager ksm,
+                               OzoneManager ozoneManager,
                                StorageContainerManager scm,
                                List<HddsDatanodeService> hddsDatanodes) {
     this.conf = conf;
-    this.ksm = ksm;
+    this.ozoneManager = ozoneManager;
     this.scm = scm;
     this.hddsDatanodes = hddsDatanodes;
   }
@@ -147,8 +147,8 @@ public final class MiniOzoneClusterImpl implements MiniOzoneCluster {
   }
 
   @Override
-  public KeySpaceManager getKeySpaceManager() {
-    return this.ksm;
+  public OzoneManager getOzoneManager() {
+    return this.ozoneManager;
   }
 
   @Override
@@ -209,9 +209,9 @@ public final class MiniOzoneClusterImpl implements MiniOzoneCluster {
   }
 
   @Override
-  public void restartKeySpaceManager() throws IOException {
-    ksm.stop();
-    ksm.start();
+  public void restartOzoneManager() throws IOException {
+    ozoneManager.stop();
+    ozoneManager.start();
   }
 
   @Override
@@ -247,10 +247,10 @@ public final class MiniOzoneClusterImpl implements MiniOzoneCluster {
               scm.getClientProtocolServer().getScmInfo().getClusterId()));
       FileUtils.deleteDirectory(baseDir);
 
-      if (ksm != null) {
-        LOG.info("Shutting down the keySpaceManager");
-        ksm.stop();
-        ksm.join();
+      if (ozoneManager != null) {
+        LOG.info("Shutting down the OzoneManager");
+        ozoneManager.stop();
+        ozoneManager.join();
       }
 
       if (scm != null) {
@@ -291,11 +291,11 @@ public final class MiniOzoneClusterImpl implements MiniOzoneCluster {
       initializeConfiguration();
       StorageContainerManager scm = createSCM();
       scm.start();
-      KeySpaceManager ksm = createKSM();
-      ksm.start();
+      OzoneManager om = createOM();
+      om.start();
       List<HddsDatanodeService> hddsDatanodes = createHddsDatanodes(scm);
       hddsDatanodes.forEach((datanode) -> datanode.start(null));
-      return new MiniOzoneClusterImpl(conf, ksm, scm, hddsDatanodes);
+      return new MiniOzoneClusterImpl(conf, om, scm, hddsDatanodes);
     }
 
     /**
@@ -331,20 +331,20 @@ public final class MiniOzoneClusterImpl implements MiniOzoneCluster {
     }
 
     /**
-     * Creates a new KeySpaceManager instance.
+     * Creates a new OzoneManager instance.
      *
-     * @return {@link KeySpaceManager}
+     * @return {@link OzoneManager}
      *
      * @throws IOException
      */
-    private KeySpaceManager createKSM() throws IOException {
-      configureKSM();
-      KSMStorage ksmStore = new KSMStorage(conf);
-      ksmStore.setClusterId(clusterId);
-      ksmStore.setScmId(scmId.get());
-      ksmStore.setKsmId(ksmId.orElse(UUID.randomUUID().toString()));
-      ksmStore.initialize();
-      return KeySpaceManager.createKSM(null, conf);
+    private OzoneManager createOM() throws IOException {
+      configureOM();
+      OMStorage omStore = new OMStorage(conf);
+      omStore.setClusterId(clusterId);
+      omStore.setScmId(scmId.get());
+      omStore.setOmId(omId.orElse(UUID.randomUUID().toString()));
+      omStore.initialize();
+      return OzoneManager.createOm(null, conf);
     }
 
     /**
@@ -415,10 +415,10 @@ public final class MiniOzoneClusterImpl implements MiniOzoneCluster {
     }
 
 
-    private void configureKSM() {
-      conf.set(KSMConfigKeys.OZONE_KSM_ADDRESS_KEY, "127.0.0.1:0");
-      conf.set(KSMConfigKeys.OZONE_KSM_HTTP_ADDRESS_KEY, "127.0.0.1:0");
-      conf.setInt(KSMConfigKeys.OZONE_KSM_HANDLER_COUNT_KEY, numOfKsmHandlers);
+    private void configureOM() {
+      conf.set(OMConfigKeys.OZONE_OM_ADDRESS_KEY, "127.0.0.1:0");
+      conf.set(OMConfigKeys.OZONE_OM_HTTP_ADDRESS_KEY, "127.0.0.1:0");
+      conf.setInt(OMConfigKeys.OZONE_OM_HANDLER_COUNT_KEY, numOfOmHandlers);
     }
 
     private void configureHddsDatanodes() {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/061b1685/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestOzoneConfigurationFields.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestOzoneConfigurationFields.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestOzoneConfigurationFields.java
index 4898a1b..717bb68 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestOzoneConfigurationFields.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestOzoneConfigurationFields.java
@@ -18,7 +18,7 @@
 package org.apache.hadoop.ozone;
 
 import org.apache.hadoop.conf.TestConfigurationFieldsBase;
-import org.apache.hadoop.ozone.ksm.KSMConfigKeys;
+import org.apache.hadoop.ozone.om.OMConfigKeys;
 import org.apache.hadoop.hdds.scm.ScmConfigKeys;
 
 /**
@@ -31,7 +31,7 @@ public class TestOzoneConfigurationFields extends TestConfigurationFieldsBase {
     xmlFilename = new String("ozone-default.xml");
     configurationClasses =
         new Class[] {OzoneConfigKeys.class, ScmConfigKeys.class,
-            KSMConfigKeys.class};
+            OMConfigKeys.class};
     errorIfMissingConfigProps = true;
     errorIfMissingXmlProps = true;
     xmlPropsToSkipCompare.add("hadoop.tags.custom");

http://git-wip-us.apache.org/repos/asf/hadoop/blob/061b1685/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestStorageContainerManager.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestStorageContainerManager.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestStorageContainerManager.java
index dd1a8de..cc367b3 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestStorageContainerManager.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestStorageContainerManager.java
@@ -27,6 +27,7 @@ import org.apache.hadoop.hdds.scm.server.SCMClientProtocolServer;
 import org.apache.hadoop.hdds.scm.server.SCMStorage;
 import org.apache.hadoop.hdds.scm.node.NodeManager;
 import org.apache.hadoop.ozone.container.ContainerTestHelper;
+import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;
 import org.apache.hadoop.ozone.protocol.commands.DeleteBlocksCommand;
 import org.apache.hadoop.ozone.protocol.commands.SCMCommand;
 import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
@@ -60,8 +61,7 @@ import java.util.concurrent.TimeUnit;
 
 import com.google.common.collect.Lists;
 import com.google.common.collect.Maps;
-import org.apache.hadoop.ozone.ksm.helpers.KsmKeyInfo;
-import org.apache.hadoop.ozone.ksm.helpers.KsmKeyLocationInfo;
+import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfo;
 import org.apache.hadoop.hdds.scm.ScmConfigKeys;
 
 import org.junit.rules.Timeout;
@@ -211,7 +211,7 @@ public class TestStorageContainerManager {
       // Create {numKeys} random names keys.
       TestStorageContainerManagerHelper helper =
           new TestStorageContainerManagerHelper(cluster, conf);
-      Map<String, KsmKeyInfo> keyLocations = helper.createKeys(numKeys, 4096);
+      Map<String, OmKeyInfo> keyLocations = helper.createKeys(numKeys, 4096);
 
       Map<Long, List<Long>> containerBlocks = createDeleteTXLog(delLog,
           keyLocations, helper);
@@ -293,7 +293,7 @@ public class TestStorageContainerManager {
     // Create {numKeys} random names keys.
     TestStorageContainerManagerHelper helper =
         new TestStorageContainerManagerHelper(cluster, conf);
-    Map<String, KsmKeyInfo> keyLocations = helper.createKeys(numKeys, 4096);
+    Map<String, OmKeyInfo> keyLocations = helper.createKeys(numKeys, 4096);
 
     createDeleteTXLog(delLog, keyLocations, helper);
     // Verify a few TX gets created in the TX log.
@@ -320,13 +320,13 @@ public class TestStorageContainerManager {
   }
 
   private Map<Long, List<Long>> createDeleteTXLog(DeletedBlockLog delLog,
-      Map<String, KsmKeyInfo> keyLocations,
+      Map<String, OmKeyInfo> keyLocations,
       TestStorageContainerManagerHelper helper) throws IOException {
     // These keys will be written into a bunch of containers,
     // gets a set of container names, verify container containerBlocks
     // on datanodes.
     Set<Long> containerNames = new HashSet<>();
-    for (Map.Entry<String, KsmKeyInfo> entry : keyLocations.entrySet()) {
+    for (Map.Entry<String, OmKeyInfo> entry : keyLocations.entrySet()) {
       entry.getValue().getLatestVersionLocations().getLocationList()
           .forEach(loc -> containerNames.add(loc.getContainerID()));
     }
@@ -334,7 +334,7 @@ public class TestStorageContainerManager {
     // Total number of containerBlocks of these containers should be equal to
     // total number of containerBlocks via creation call.
     int totalCreatedBlocks = 0;
-    for (KsmKeyInfo info : keyLocations.values()) {
+    for (OmKeyInfo info : keyLocations.values()) {
       totalCreatedBlocks += info.getKeyLocationVersions().size();
     }
     Assert.assertTrue(totalCreatedBlocks > 0);
@@ -343,8 +343,8 @@ public class TestStorageContainerManager {
 
     // Create a deletion TX for each key.
     Map<Long, List<Long>> containerBlocks = Maps.newHashMap();
-    for (KsmKeyInfo info : keyLocations.values()) {
-      List<KsmKeyLocationInfo> list =
+    for (OmKeyInfo info : keyLocations.values()) {
+      List<OmKeyLocationInfo> list =
           info.getLatestVersionLocations().getLocationList();
       list.forEach(location -> {
         if (containerBlocks.containsKey(location.getContainerID())) {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/061b1685/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestStorageContainerManagerHelper.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestStorageContainerManagerHelper.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestStorageContainerManagerHelper.java
index 4c2a904..a30c6f4 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestStorageContainerManagerHelper.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestStorageContainerManagerHelper.java
@@ -30,8 +30,8 @@ import org.apache.hadoop.hdds.protocol.DatanodeDetails;
 import org.apache.hadoop.ozone.container.common.helpers.ContainerData;
 import org.apache.hadoop.ozone.container.common.helpers.KeyUtils;
 import org.apache.hadoop.ozone.container.ozoneimpl.OzoneContainer;
-import org.apache.hadoop.ozone.ksm.helpers.KsmKeyArgs;
-import org.apache.hadoop.ozone.ksm.helpers.KsmKeyInfo;
+import org.apache.hadoop.ozone.om.helpers.OmKeyArgs;
+import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;
 import org.apache.hadoop.ozone.web.handlers.BucketArgs;
 import org.apache.hadoop.ozone.web.handlers.KeyArgs;
 import org.apache.hadoop.ozone.web.handlers.UserArgs;
@@ -67,9 +67,9 @@ public class TestStorageContainerManagerHelper {
     storageHandler = new ObjectStoreHandler(conf).getStorageHandler();
   }
 
-  public Map<String, KsmKeyInfo> createKeys(int numOfKeys, int keySize)
+  public Map<String, OmKeyInfo> createKeys(int numOfKeys, int keySize)
       throws Exception {
-    Map<String, KsmKeyInfo> keyLocationMap = Maps.newHashMap();
+    Map<String, OmKeyInfo> keyLocationMap = Maps.newHashMap();
     String volume = "volume" + RandomStringUtils.randomNumeric(5);
     String bucket = "bucket" + RandomStringUtils.randomNumeric(5);
     String userName = "user" + RandomStringUtils.randomNumeric(5);
@@ -104,12 +104,12 @@ public class TestStorageContainerManagerHelper {
     }
 
     for (String key : keyNames) {
-      KsmKeyArgs arg = new KsmKeyArgs.Builder()
+      OmKeyArgs arg = new OmKeyArgs.Builder()
           .setVolumeName(volume)
           .setBucketName(bucket)
           .setKeyName(key)
           .build();
-      KsmKeyInfo location = cluster.getKeySpaceManager()
+      OmKeyInfo location = cluster.getOzoneManager()
           .lookupKey(arg);
       keyLocationMap.put(key, location);
     }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/061b1685/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rest/TestOzoneRestClient.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rest/TestOzoneRestClient.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rest/TestOzoneRestClient.java
index 9918d63..0dc0399 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rest/TestOzoneRestClient.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rest/TestOzoneRestClient.java
@@ -77,10 +77,10 @@ public class TestOzoneRestClient {
         OzoneConsts.OZONE_HANDLER_DISTRIBUTED);
     cluster = MiniOzoneCluster.newBuilder(conf).setNumDatanodes(1).build();
     cluster.waitForClusterToBeReady();
-    InetSocketAddress ksmHttpAddress = cluster.getKeySpaceManager()
+    InetSocketAddress omHttpAddress = cluster.getOzoneManager()
         .getHttpServer().getHttpAddress();
-    ozClient = OzoneClientFactory.getRestClient(ksmHttpAddress.getHostName(),
-        ksmHttpAddress.getPort(), conf);
+    ozClient = OzoneClientFactory.getRestClient(omHttpAddress.getHostName(),
+        omHttpAddress.getPort(), conf);
     store = ozClient.getObjectStore();
   }
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/061b1685/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneRpcClient.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneRpcClient.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneRpcClient.java
index 214382e..2fbab36 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneRpcClient.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneRpcClient.java
@@ -39,10 +39,10 @@ import org.apache.hadoop.hdds.client.ReplicationType;
 import org.apache.hadoop.ozone.client.VolumeArgs;
 import org.apache.hadoop.ozone.client.io.OzoneInputStream;
 import org.apache.hadoop.ozone.client.io.OzoneOutputStream;
-import org.apache.hadoop.ozone.ksm.KeySpaceManager;
-import org.apache.hadoop.ozone.ksm.helpers.KsmKeyArgs;
-import org.apache.hadoop.ozone.ksm.helpers.KsmKeyInfo;
-import org.apache.hadoop.ozone.ksm.helpers.KsmKeyLocationInfo;
+import org.apache.hadoop.ozone.om.OzoneManager;
+import org.apache.hadoop.ozone.om.helpers.OmKeyArgs;
+import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;
+import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfo;
 import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
 import org.apache.hadoop.ozone.client.rest.OzoneException;
 import org.apache.hadoop.hdds.scm.ScmConfigKeys;
@@ -73,7 +73,7 @@ public class TestOzoneRpcClient {
   private static MiniOzoneCluster cluster = null;
   private static OzoneClient ozClient = null;
   private static ObjectStore store = null;
-  private static KeySpaceManager keySpaceManager;
+  private static OzoneManager ozoneManager;
   private static StorageContainerLocationProtocolClientSideTranslatorPB
       storageContainerLocationClient;
 
@@ -97,7 +97,7 @@ public class TestOzoneRpcClient {
     store = ozClient.getObjectStore();
     storageContainerLocationClient =
         cluster.getStorageContainerLocationClient();
-    keySpaceManager = cluster.getKeySpaceManager();
+    ozoneManager = cluster.getOzoneManager();
   }
 
   @Test
@@ -376,7 +376,7 @@ public class TestOzoneRpcClient {
   private boolean verifyRatisReplication(String volumeName, String bucketName,
       String keyName, ReplicationType type, ReplicationFactor factor)
       throws IOException {
-    KsmKeyArgs keyArgs = new KsmKeyArgs.Builder()
+    OmKeyArgs keyArgs = new OmKeyArgs.Builder()
         .setVolumeName(volumeName)
         .setBucketName(bucketName)
         .setKeyName(keyName)
@@ -385,8 +385,8 @@ public class TestOzoneRpcClient {
         HddsProtos.ReplicationType.valueOf(type.toString());
     HddsProtos.ReplicationFactor replicationFactor =
         HddsProtos.ReplicationFactor.valueOf(factor.getValue());
-    KsmKeyInfo keyInfo = keySpaceManager.lookupKey(keyArgs);
-    for (KsmKeyLocationInfo info:
+    OmKeyInfo keyInfo = ozoneManager.lookupKey(keyArgs);
+    for (OmKeyLocationInfo info:
         keyInfo.getLatestVersionLocations().getLocationList()) {
       ContainerInfo container =
           storageContainerLocationClient.getContainer(info.getContainerID());

http://git-wip-us.apache.org/repos/asf/hadoop/blob/061b1685/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestBlockDeletion.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestBlockDeletion.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestBlockDeletion.java
index 43e3f50..62059ec 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestBlockDeletion.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestBlockDeletion.java
@@ -37,10 +37,10 @@ import org.apache.hadoop.ozone.client.io.OzoneOutputStream;
 import org.apache.hadoop.ozone.container.common.helpers.ContainerData;
 import org.apache.hadoop.ozone.container.common.helpers.KeyUtils;
 import org.apache.hadoop.ozone.container.common.impl.ContainerManagerImpl;
-import org.apache.hadoop.ozone.ksm.KeySpaceManager;
-import org.apache.hadoop.ozone.ksm.helpers.KsmKeyArgs;
-import org.apache.hadoop.ozone.ksm.helpers.KsmKeyLocationInfo;
-import org.apache.hadoop.ozone.ksm.helpers.KsmKeyLocationInfoGroup;
+import org.apache.hadoop.ozone.om.OzoneManager;
+import org.apache.hadoop.ozone.om.helpers.OmKeyArgs;
+import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfo;
+import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfoGroup;
 import org.apache.hadoop.ozone.ozShell.TestOzoneShell;
 import org.apache.hadoop.test.GenericTestUtils;
 import org.apache.hadoop.utils.MetadataStore;
@@ -61,7 +61,7 @@ public class TestBlockDeletion {
   private static ObjectStore store;
   private static ContainerManagerImpl dnContainerManager = null;
   private static StorageContainerManager scm = null;
-  private static KeySpaceManager ksm = null;
+  private static OzoneManager om = null;
   private static Set<Long> containerIdsWithDeletedBlocks;
 
   @BeforeClass
@@ -88,7 +88,7 @@ public class TestBlockDeletion {
     dnContainerManager =
         (ContainerManagerImpl) cluster.getHddsDatanodes().get(0)
             .getDatanodeStateMachine().getContainer().getContainerManager();
-    ksm = cluster.getKeySpaceManager();
+    om = cluster.getOzoneManager();
     scm = cluster.getStorageContainerManager();
     containerIdsWithDeletedBlocks = new HashSet<>();
   }
@@ -112,23 +112,23 @@ public class TestBlockDeletion {
     out.write(value.getBytes());
     out.close();
 
-    KsmKeyArgs keyArgs = new KsmKeyArgs.Builder().setVolumeName(volumeName)
+    OmKeyArgs keyArgs = new OmKeyArgs.Builder().setVolumeName(volumeName)
         .setBucketName(bucketName).setKeyName(keyName).setDataSize(0)
         .setType(HddsProtos.ReplicationType.STAND_ALONE)
         .setFactor(HddsProtos.ReplicationFactor.ONE).build();
-    List<KsmKeyLocationInfoGroup> ksmKeyLocationInfoGroupList =
-        ksm.lookupKey(keyArgs).getKeyLocationVersions();
+    List<OmKeyLocationInfoGroup> omKeyLocationInfoGroupList =
+        om.lookupKey(keyArgs).getKeyLocationVersions();
 
     // verify key blocks were created in DN.
-    Assert.assertTrue(verifyBlocksCreated(ksmKeyLocationInfoGroupList));
+    Assert.assertTrue(verifyBlocksCreated(omKeyLocationInfoGroupList));
     // No containers with deleted blocks
     Assert.assertTrue(containerIdsWithDeletedBlocks.isEmpty());
     // Delete transactionIds for the containers should be 0
     matchContainerTransactionIds();
-    ksm.deleteKey(keyArgs);
+    om.deleteKey(keyArgs);
     Thread.sleep(5000);
     // The blocks should be deleted in the DN.
-    Assert.assertTrue(verifyBlocksDeleted(ksmKeyLocationInfoGroupList));
+    Assert.assertTrue(verifyBlocksDeleted(omKeyLocationInfoGroupList));
 
     // Few containers with deleted blocks
     Assert.assertTrue(!containerIdsWithDeletedBlocks.isEmpty());
@@ -155,7 +155,7 @@ public class TestBlockDeletion {
   }
 
   private boolean verifyBlocksCreated(
-      List<KsmKeyLocationInfoGroup> ksmKeyLocationInfoGroups)
+      List<OmKeyLocationInfoGroup> omKeyLocationInfoGroups)
       throws IOException {
     return performOperationOnKeyContainers((blockID) -> {
       try {
@@ -166,11 +166,11 @@ public class TestBlockDeletion {
       } catch (IOException e) {
         e.printStackTrace();
       }
-    }, ksmKeyLocationInfoGroups);
+    }, omKeyLocationInfoGroups);
   }
 
   private boolean verifyBlocksDeleted(
-      List<KsmKeyLocationInfoGroup> ksmKeyLocationInfoGroups)
+      List<OmKeyLocationInfoGroup> omKeyLocationInfoGroups)
       throws IOException {
     return performOperationOnKeyContainers((blockID) -> {
       try {
@@ -186,19 +186,20 @@ public class TestBlockDeletion {
       } catch (IOException e) {
         e.printStackTrace();
       }
-    }, ksmKeyLocationInfoGroups);
+    }, omKeyLocationInfoGroups);
   }
 
   private boolean performOperationOnKeyContainers(Consumer<BlockID> consumer,
-      List<KsmKeyLocationInfoGroup> ksmKeyLocationInfoGroups)
+      List<OmKeyLocationInfoGroup> omKeyLocationInfoGroups)
       throws IOException {
 
     try {
-      for (KsmKeyLocationInfoGroup ksmKeyLocationInfoGroup : ksmKeyLocationInfoGroups) {
-        List<KsmKeyLocationInfo> ksmKeyLocationInfos =
-            ksmKeyLocationInfoGroup.getLocationList();
-        for (KsmKeyLocationInfo ksmKeyLocationInfo : ksmKeyLocationInfos) {
-          BlockID blockID = ksmKeyLocationInfo.getBlockID();
+      for (OmKeyLocationInfoGroup omKeyLocationInfoGroup :
+          omKeyLocationInfoGroups) {
+        List<OmKeyLocationInfo> omKeyLocationInfos =
+            omKeyLocationInfoGroup.getLocationList();
+        for (OmKeyLocationInfo omKeyLocationInfo : omKeyLocationInfos) {
+          BlockID blockID = omKeyLocationInfo.getBlockID();
           consumer.accept(blockID);
         }
       }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/061b1685/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestCloseContainerByPipeline.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestCloseContainerByPipeline.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestCloseContainerByPipeline.java
index 3e514e7..58b831b 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestCloseContainerByPipeline.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestCloseContainerByPipeline.java
@@ -34,8 +34,8 @@ import org.apache.hadoop.ozone.client.OzoneClientFactory;
 import org.apache.hadoop.ozone.client.io.OzoneOutputStream;
 import org.apache.hadoop.ozone.container.common.helpers.ContainerData;
 import org.apache.hadoop.ozone.container.ozoneimpl.OzoneContainer;
-import org.apache.hadoop.ozone.ksm.helpers.KsmKeyArgs;
-import org.apache.hadoop.ozone.ksm.helpers.KsmKeyLocationInfo;
+import org.apache.hadoop.ozone.om.helpers.OmKeyArgs;
+import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfo;
 import org.apache.hadoop.ozone.protocol.commands.CloseContainerCommand;
 import org.apache.hadoop.test.GenericTestUtils;
 import org.junit.AfterClass;
@@ -45,7 +45,6 @@ import org.junit.Test;
 
 import java.io.IOException;
 import java.util.List;
-import java.util.Random;
 import java.util.concurrent.TimeoutException;
 
 public class TestCloseContainerByPipeline {
@@ -98,17 +97,17 @@ public class TestCloseContainerByPipeline {
     key.close();
 
     //get the name of a valid container
-    KsmKeyArgs keyArgs =
-        new KsmKeyArgs.Builder().setVolumeName("test").setBucketName("test")
+    OmKeyArgs keyArgs =
+        new OmKeyArgs.Builder().setVolumeName("test").setBucketName("test")
             .setType(HddsProtos.ReplicationType.STAND_ALONE)
             .setFactor(HddsProtos.ReplicationFactor.ONE).setDataSize(1024)
             .setKeyName("testCloseContainer").build();
 
-    KsmKeyLocationInfo ksmKeyLocationInfo =
-        cluster.getKeySpaceManager().lookupKey(keyArgs).getKeyLocationVersions()
+    OmKeyLocationInfo omKeyLocationInfo =
+        cluster.getOzoneManager().lookupKey(keyArgs).getKeyLocationVersions()
             .get(0).getBlocksLatestVersionOnly().get(0);
 
-    long containerID = ksmKeyLocationInfo.getContainerID();
+    long containerID = omKeyLocationInfo.getContainerID();
     List<DatanodeDetails> datanodes = cluster.getStorageContainerManager()
         .getScmContainerManager().getContainerWithPipeline(containerID)
         .getPipeline().getMachines();
@@ -153,17 +152,17 @@ public class TestCloseContainerByPipeline {
     key.close();
 
     //get the name of a valid container
-    KsmKeyArgs keyArgs =
-        new KsmKeyArgs.Builder().setVolumeName("test").setBucketName("test")
+    OmKeyArgs keyArgs =
+        new OmKeyArgs.Builder().setVolumeName("test").setBucketName("test")
             .setType(HddsProtos.ReplicationType.STAND_ALONE)
             .setFactor(HddsProtos.ReplicationFactor.ONE).setDataSize(1024)
             .setKeyName("standalone").build();
 
-    KsmKeyLocationInfo ksmKeyLocationInfo =
-        cluster.getKeySpaceManager().lookupKey(keyArgs).getKeyLocationVersions()
+    OmKeyLocationInfo omKeyLocationInfo =
+        cluster.getOzoneManager().lookupKey(keyArgs).getKeyLocationVersions()
             .get(0).getBlocksLatestVersionOnly().get(0);
 
-    long containerID = ksmKeyLocationInfo.getContainerID();
+    long containerID = omKeyLocationInfo.getContainerID();
     List<DatanodeDetails> datanodes = cluster.getStorageContainerManager()
         .getScmContainerManager().getContainerWithPipeline(containerID)
         .getPipeline().getMachines();
@@ -207,16 +206,16 @@ public class TestCloseContainerByPipeline {
     key.close();
 
     //get the name of a valid container
-    KsmKeyArgs keyArgs = new KsmKeyArgs.Builder().setVolumeName("test").
+    OmKeyArgs keyArgs = new OmKeyArgs.Builder().setVolumeName("test").
         setBucketName("test").setType(HddsProtos.ReplicationType.RATIS)
         .setFactor(HddsProtos.ReplicationFactor.THREE).setDataSize(1024)
         .setKeyName("ratis").build();
 
-    KsmKeyLocationInfo ksmKeyLocationInfo =
-        cluster.getKeySpaceManager().lookupKey(keyArgs).getKeyLocationVersions()
+    OmKeyLocationInfo omKeyLocationInfo =
+        cluster.getOzoneManager().lookupKey(keyArgs).getKeyLocationVersions()
             .get(0).getBlocksLatestVersionOnly().get(0);
 
-    long containerID = ksmKeyLocationInfo.getContainerID();
+    long containerID = omKeyLocationInfo.getContainerID();
     List<DatanodeDetails> datanodes = cluster.getStorageContainerManager()
         .getScmContainerManager().getContainerWithPipeline(containerID)
         .getPipeline().getMachines();
@@ -232,7 +231,7 @@ public class TestCloseContainerByPipeline {
           .addDatanodeCommand(details.getUuid(),
               new CloseContainerCommand(containerID,
                   HddsProtos.ReplicationType.RATIS));
-  }
+    }
 
     for (DatanodeDetails datanodeDetails : datanodes) {
       GenericTestUtils.waitFor(

http://git-wip-us.apache.org/repos/asf/hadoop/blob/061b1685/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestCloseContainerHandler.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestCloseContainerHandler.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestCloseContainerHandler.java
index efb7344..58a5154 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestCloseContainerHandler.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestCloseContainerHandler.java
@@ -28,8 +28,8 @@ import org.apache.hadoop.hdds.client.ReplicationType;
 import org.apache.hadoop.ozone.client.io.OzoneOutputStream;
 import org.apache.hadoop.ozone.client.rest.OzoneException;
 import org.apache.hadoop.ozone.container.common.helpers.ContainerData;
-import org.apache.hadoop.ozone.ksm.helpers.KsmKeyArgs;
-import org.apache.hadoop.ozone.ksm.helpers.KsmKeyLocationInfo;
+import org.apache.hadoop.ozone.om.helpers.OmKeyArgs;
+import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfo;
 import org.apache.hadoop.ozone.protocol.commands.CloseContainerCommand;
 import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
 import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_CONTAINER_SIZE_GB;
@@ -69,17 +69,17 @@ public class TestCloseContainerHandler {
     key.close();
 
     //get the name of a valid container
-    KsmKeyArgs keyArgs =
-        new KsmKeyArgs.Builder().setVolumeName("test").setBucketName("test")
+    OmKeyArgs keyArgs =
+        new OmKeyArgs.Builder().setVolumeName("test").setBucketName("test")
             .setType(HddsProtos.ReplicationType.STAND_ALONE)
             .setFactor(HddsProtos.ReplicationFactor.ONE).setDataSize(1024)
             .setKeyName("test").build();
 
-    KsmKeyLocationInfo ksmKeyLocationInfo =
-        cluster.getKeySpaceManager().lookupKey(keyArgs).getKeyLocationVersions()
+    OmKeyLocationInfo omKeyLocationInfo =
+        cluster.getOzoneManager().lookupKey(keyArgs).getKeyLocationVersions()
             .get(0).getBlocksLatestVersionOnly().get(0);
 
-    long containerID = ksmKeyLocationInfo.getContainerID();
+    long containerID = omKeyLocationInfo.getContainerID();
 
     Assert.assertFalse(isContainerClosed(cluster, containerID));
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/061b1685/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/ksm/TestContainerReportWithKeys.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/ksm/TestContainerReportWithKeys.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/ksm/TestContainerReportWithKeys.java
deleted file mode 100644
index 1cc7ff8..0000000
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/ksm/TestContainerReportWithKeys.java
+++ /dev/null
@@ -1,143 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with this
- * work for additional information regarding copyright ownership.  The ASF
- * licenses this file to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations under
- * the License.
- */
-package org.apache.hadoop.ozone.ksm;
-
-import org.apache.commons.lang3.RandomStringUtils;
-
-import org.apache.hadoop.hdds.client.ReplicationFactor;
-import org.apache.hadoop.hdds.client.ReplicationType;
-import org.apache.hadoop.hdds.conf.OzoneConfiguration;
-import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
-import org.apache.hadoop.hdds.scm.container.common.helpers.ContainerInfo;
-import org.apache.hadoop.ozone.MiniOzoneCluster;
-import org.apache.hadoop.ozone.OzoneConfigKeys;
-import org.apache.hadoop.ozone.OzoneConsts;
-import org.apache.hadoop.ozone.client.*;
-import org.apache.hadoop.ozone.client.io.OzoneOutputStream;
-import org.apache.hadoop.ozone.container.common.helpers.ContainerData;
-import org.apache.hadoop.ozone.container.common.interfaces.ContainerManager;
-import org.apache.hadoop.ozone.ksm.helpers.KsmKeyArgs;
-import org.apache.hadoop.ozone.ksm.helpers.KsmKeyLocationInfo;
-import org.apache.hadoop.hdds.scm.server.StorageContainerManager;
-import org.apache.hadoop.hdds.scm.container.common.helpers.StorageContainerException;
-import org.junit.AfterClass;
-import org.junit.BeforeClass;
-import org.junit.Rule;
-import org.junit.Test;
-import org.junit.rules.ExpectedException;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import java.io.IOException;
-
-/**
- * This class tests container report with DN container state info.
- */
-public class TestContainerReportWithKeys {
-  private static final Logger LOG = LoggerFactory.getLogger(
-      TestContainerReportWithKeys.class);
-  private static MiniOzoneCluster cluster = null;
-  private static OzoneConfiguration conf;
-  private static StorageContainerManager scm;
-
-  @Rule
-  public ExpectedException exception = ExpectedException.none();
-
-  /**
-   * Create a MiniDFSCluster for testing.
-   * <p>
-   * Ozone is made active by setting OZONE_ENABLED = true and
-   * OZONE_HANDLER_TYPE_KEY = "distributed"
-   *
-   * @throws IOException
-   */
-  @BeforeClass
-  public static void init() throws Exception {
-    conf = new OzoneConfiguration();
-    conf.set(OzoneConfigKeys.OZONE_HANDLER_TYPE_KEY,
-        OzoneConsts.OZONE_HANDLER_DISTRIBUTED);
-    cluster = MiniOzoneCluster.newBuilder(conf).build();
-    cluster.waitForClusterToBeReady();
-    scm = cluster.getStorageContainerManager();
-  }
-
-  /**
-   * Shutdown MiniDFSCluster.
-   */
-  @AfterClass
-  public static void shutdown() {
-    if (cluster != null) {
-      cluster.shutdown();
-    }
-  }
-
-  @Test
-  public void testContainerReportKeyWrite() throws Exception {
-    final String volumeName = "volume" + RandomStringUtils.randomNumeric(5);
-    final String bucketName = "bucket" + RandomStringUtils.randomNumeric(5);
-    final String keyName = "key" + RandomStringUtils.randomNumeric(5);
-    final int keySize = 100;
-
-    OzoneClient client = OzoneClientFactory.getClient(conf);
-    ObjectStore objectStore = client.getObjectStore();
-    objectStore.createVolume(volumeName);
-    objectStore.getVolume(volumeName).createBucket(bucketName);
-    OzoneOutputStream key =
-        objectStore.getVolume(volumeName).getBucket(bucketName)
-            .createKey(keyName, keySize, ReplicationType.STAND_ALONE,
-                ReplicationFactor.ONE);
-    String dataString = RandomStringUtils.randomAlphabetic(keySize);
-    key.write(dataString.getBytes());
-    key.close();
-
-    KsmKeyArgs keyArgs = new KsmKeyArgs.Builder()
-        .setVolumeName(volumeName)
-        .setBucketName(bucketName)
-        .setKeyName(keyName)
-        .setType(HddsProtos.ReplicationType.STAND_ALONE)
-        .setFactor(HddsProtos.ReplicationFactor.ONE).setDataSize(keySize)
-        .build();
-
-
-    KsmKeyLocationInfo keyInfo =
-        cluster.getKeySpaceManager().lookupKey(keyArgs).getKeyLocationVersions()
-            .get(0).getBlocksLatestVersionOnly().get(0);
-
-    ContainerData cd = getContainerData(keyInfo.getContainerID());
-
-    LOG.info("DN Container Data:  keyCount: {} used: {} ",
-        cd.getKeyCount(), cd.getBytesUsed());
-
-    ContainerInfo cinfo = scm.getContainerInfo(keyInfo.getContainerID());
-
-    LOG.info("SCM Container Info keyCount: {} usedBytes: {}",
-        cinfo.getNumberOfKeys(), cinfo.getUsedBytes());
-  }
-
-
-  private static ContainerData getContainerData(long containerID) {
-    ContainerData containerData;
-    try {
-      ContainerManager containerManager = cluster.getHddsDatanodes().get(0)
-          .getDatanodeStateMachine().getContainer().getContainerManager();
-      containerData = containerManager.readContainer(containerID);
-    } catch (StorageContainerException e) {
-      throw new AssertionError(e);
-    }
-    return containerData;
-  }
-}
\ No newline at end of file


---------------------------------------------------------------------
To unsubscribe, e-mail: common-commits-unsubscribe@hadoop.apache.org
For additional commands, e-mail: common-commits-help@hadoop.apache.org


[33/50] [abbrv] hadoop git commit: HDDS-167. Rename KeySpaceManager to OzoneManager. Contributed by Arpit Agarwal.

Posted by vi...@apache.org.
http://git-wip-us.apache.org/repos/asf/hadoop/blob/061b1685/hadoop-ozone/objectstore-service/src/main/java/org/apache/hadoop/ozone/web/storage/DistributedStorageHandler.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/objectstore-service/src/main/java/org/apache/hadoop/ozone/web/storage/DistributedStorageHandler.java b/hadoop-ozone/objectstore-service/src/main/java/org/apache/hadoop/ozone/web/storage/DistributedStorageHandler.java
index fedc0f0..ec33990 100644
--- a/hadoop-ozone/objectstore-service/src/main/java/org/apache/hadoop/ozone/web/storage/DistributedStorageHandler.java
+++ b/hadoop-ozone/objectstore-service/src/main/java/org/apache/hadoop/ozone/web/storage/DistributedStorageHandler.java
@@ -22,14 +22,13 @@ import com.google.common.base.Strings;
 import org.apache.hadoop.hdds.scm.client.HddsClientUtils;
 import org.apache.hadoop.io.IOUtils;
 import org.apache.hadoop.ozone.client.io.LengthInputStream;
-import org.apache.hadoop.ozone.ksm.helpers.KsmBucketArgs;
-import org.apache.hadoop.ozone.ksm.helpers.KsmBucketInfo;
-import org.apache.hadoop.ozone.ksm.helpers.KsmKeyArgs;
-import org.apache.hadoop.ozone.ksm.helpers.KsmKeyInfo;
-import org.apache.hadoop.ozone.ksm.helpers.KsmVolumeArgs;
-import org.apache.hadoop.ozone.ksm.helpers.OpenKeySession;
-import org.apache.hadoop.ozone.ksm.protocolPB
-    .KeySpaceManagerProtocolClientSideTranslatorPB;
+import org.apache.hadoop.ozone.om.helpers.OmBucketArgs;
+import org.apache.hadoop.ozone.om.helpers.OmBucketInfo;
+import org.apache.hadoop.ozone.om.helpers.OmKeyArgs;
+import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;
+import org.apache.hadoop.ozone.om.helpers.OmVolumeArgs;
+import org.apache.hadoop.ozone.om.helpers.OpenKeySession;
+import org.apache.hadoop.ozone.om.protocolPB.OzoneManagerProtocolClientSideTranslatorPB;
 import org.apache.hadoop.hdds.conf.OzoneConfiguration;
 import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
 import org.apache.hadoop.ozone.OzoneConsts;
@@ -37,9 +36,9 @@ import org.apache.hadoop.ozone.OzoneConsts.Versioning;
 import org.apache.hadoop.ozone.client.io.ChunkGroupInputStream;
 import org.apache.hadoop.ozone.client.io.ChunkGroupOutputStream;
 import org.apache.hadoop.ozone.client.io.OzoneOutputStream;
-import org.apache.hadoop.ozone.protocol.proto.KeySpaceManagerProtocolProtos;
-import org.apache.hadoop.ozone.protocolPB.KSMPBHelper;
-import org.apache.hadoop.ozone.ksm.KSMConfigKeys;
+import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos;
+import org.apache.hadoop.ozone.protocolPB.OMPBHelper;
+import org.apache.hadoop.ozone.om.OMConfigKeys;
 import org.apache.hadoop.ozone.OzoneAcl;
 import org.apache.hadoop.ozone.web.request.OzoneQuota;
 import org.apache.hadoop.hdds.scm.ScmConfigKeys;
@@ -77,8 +76,8 @@ public final class DistributedStorageHandler implements StorageHandler {
 
   private final StorageContainerLocationProtocolClientSideTranslatorPB
       storageContainerLocationClient;
-  private final KeySpaceManagerProtocolClientSideTranslatorPB
-      keySpaceManagerClient;
+  private final OzoneManagerProtocolClientSideTranslatorPB
+      ozoneManagerClient;
   private final XceiverClientManager xceiverClientManager;
   private final OzoneAcl.OzoneACLRights userRights;
   private final OzoneAcl.OzoneACLRights groupRights;
@@ -92,14 +91,14 @@ public final class DistributedStorageHandler implements StorageHandler {
    *
    * @param conf configuration
    * @param storageContainerLocation StorageContainerLocationProtocol proxy
-   * @param keySpaceManagerClient KeySpaceManager proxy
+   * @param ozoneManagerClient OzoneManager proxy
    */
   public DistributedStorageHandler(OzoneConfiguration conf,
       StorageContainerLocationProtocolClientSideTranslatorPB
           storageContainerLocation,
-      KeySpaceManagerProtocolClientSideTranslatorPB
-          keySpaceManagerClient) {
-    this.keySpaceManagerClient = keySpaceManagerClient;
+      OzoneManagerProtocolClientSideTranslatorPB
+                                       ozoneManagerClient) {
+    this.ozoneManagerClient = ozoneManagerClient;
     this.storageContainerLocationClient = storageContainerLocation;
     this.xceiverClientManager = new XceiverClientManager(conf);
     this.useRatis = conf.getBoolean(
@@ -116,10 +115,10 @@ public final class DistributedStorageHandler implements StorageHandler {
 
     chunkSize = conf.getInt(ScmConfigKeys.OZONE_SCM_CHUNK_SIZE_KEY,
         ScmConfigKeys.OZONE_SCM_CHUNK_SIZE_DEFAULT);
-    userRights = conf.getEnum(KSMConfigKeys.OZONE_KSM_USER_RIGHTS,
-        KSMConfigKeys.OZONE_KSM_USER_RIGHTS_DEFAULT);
-    groupRights = conf.getEnum(KSMConfigKeys.OZONE_KSM_GROUP_RIGHTS,
-        KSMConfigKeys.OZONE_KSM_GROUP_RIGHTS_DEFAULT);
+    userRights = conf.getEnum(OMConfigKeys.OZONE_OM_USER_RIGHTS,
+        OMConfigKeys.OZONE_OM_USER_RIGHTS_DEFAULT);
+    groupRights = conf.getEnum(OMConfigKeys.OZONE_OM_GROUP_RIGHTS,
+        OMConfigKeys.OZONE_OM_GROUP_RIGHTS_DEFAULT);
     if(chunkSize > ScmConfigKeys.OZONE_SCM_CHUNK_MAX_SIZE) {
       LOG.warn("The chunk size ({}) is not allowed to be more than"
               + " the maximum size ({}),"
@@ -136,26 +135,26 @@ public final class DistributedStorageHandler implements StorageHandler {
     OzoneAcl userAcl =
         new OzoneAcl(OzoneAcl.OzoneACLType.USER,
             args.getUserName(), userRights);
-    KsmVolumeArgs.Builder builder = KsmVolumeArgs.newBuilder();
+    OmVolumeArgs.Builder builder = OmVolumeArgs.newBuilder();
     builder.setAdminName(args.getAdminName())
         .setOwnerName(args.getUserName())
         .setVolume(args.getVolumeName())
         .setQuotaInBytes(quota)
-        .addOzoneAcls(KSMPBHelper.convertOzoneAcl(userAcl));
+        .addOzoneAcls(OMPBHelper.convertOzoneAcl(userAcl));
     if (args.getGroups() != null) {
       for (String group : args.getGroups()) {
         OzoneAcl groupAcl =
             new OzoneAcl(OzoneAcl.OzoneACLType.GROUP, group, groupRights);
-        builder.addOzoneAcls(KSMPBHelper.convertOzoneAcl(groupAcl));
+        builder.addOzoneAcls(OMPBHelper.convertOzoneAcl(groupAcl));
       }
     }
-    keySpaceManagerClient.createVolume(builder.build());
+    ozoneManagerClient.createVolume(builder.build());
   }
 
   @Override
   public void setVolumeOwner(VolumeArgs args) throws
       IOException, OzoneException {
-    keySpaceManagerClient.setOwner(args.getVolumeName(), args.getUserName());
+    ozoneManagerClient.setOwner(args.getVolumeName(), args.getUserName());
   }
 
   @Override
@@ -163,14 +162,14 @@ public final class DistributedStorageHandler implements StorageHandler {
       throws IOException, OzoneException {
     long quota = remove ? OzoneConsts.MAX_QUOTA_IN_BYTES :
         args.getQuota().sizeInBytes();
-    keySpaceManagerClient.setQuota(args.getVolumeName(), quota);
+    ozoneManagerClient.setQuota(args.getVolumeName(), quota);
   }
 
   @Override
   public boolean checkVolumeAccess(String volume, OzoneAcl acl)
       throws IOException, OzoneException {
-    return keySpaceManagerClient
-        .checkVolumeAccess(volume, KSMPBHelper.convertOzoneAcl(acl));
+    return ozoneManagerClient
+        .checkVolumeAccess(volume, OMPBHelper.convertOzoneAcl(acl));
   }
 
   @Override
@@ -185,9 +184,9 @@ public final class DistributedStorageHandler implements StorageHandler {
               OzoneConsts.MAX_LISTVOLUMES_SIZE, maxNumOfKeys));
     }
 
-    List<KsmVolumeArgs> listResult;
+    List<OmVolumeArgs> listResult;
     if (args.isRootScan()) {
-      listResult = keySpaceManagerClient.listAllVolumes(args.getPrefix(),
+      listResult = ozoneManagerClient.listAllVolumes(args.getPrefix(),
           args.getPrevKey(), args.getMaxKeys());
     } else {
       UserArgs userArgs = args.getArgs();
@@ -195,16 +194,16 @@ public final class DistributedStorageHandler implements StorageHandler {
         throw new IllegalArgumentException("Illegal argument,"
             + " missing user argument.");
       }
-      listResult = keySpaceManagerClient.listVolumeByUser(
+      listResult = ozoneManagerClient.listVolumeByUser(
           args.getArgs().getUserName(), args.getPrefix(), args.getPrevKey(),
           args.getMaxKeys());
     }
 
     // TODO Add missing fields createdBy, bucketCount and bytesUsed
     ListVolumes result = new ListVolumes();
-    for (KsmVolumeArgs volumeArgs : listResult) {
+    for (OmVolumeArgs volumeArgs : listResult) {
       VolumeInfo info = new VolumeInfo();
-      KeySpaceManagerProtocolProtos.VolumeInfo
+      OzoneManagerProtocolProtos.VolumeInfo
           infoProto = volumeArgs.getProtobuf();
       info.setOwner(new VolumeOwner(infoProto.getOwnerName()));
       info.setQuota(OzoneQuota.getOzoneQuota(infoProto.getQuotaInBytes()));
@@ -220,14 +219,14 @@ public final class DistributedStorageHandler implements StorageHandler {
   @Override
   public void deleteVolume(VolumeArgs args)
       throws IOException, OzoneException {
-    keySpaceManagerClient.deleteVolume(args.getVolumeName());
+    ozoneManagerClient.deleteVolume(args.getVolumeName());
   }
 
   @Override
   public VolumeInfo getVolumeInfo(VolumeArgs args)
       throws IOException, OzoneException {
-    KsmVolumeArgs volumeArgs =
-        keySpaceManagerClient.getVolumeInfo(args.getVolumeName());
+    OmVolumeArgs volumeArgs =
+        ozoneManagerClient.getVolumeInfo(args.getVolumeName());
     //TODO: add support for createdOn and other fields in getVolumeInfo
     VolumeInfo volInfo =
         new VolumeInfo(volumeArgs.getVolume(), null,
@@ -242,7 +241,7 @@ public final class DistributedStorageHandler implements StorageHandler {
   @Override
   public void createBucket(final BucketArgs args)
       throws IOException, OzoneException {
-    KsmBucketInfo.Builder builder = KsmBucketInfo.newBuilder();
+    OmBucketInfo.Builder builder = OmBucketInfo.newBuilder();
     builder.setVolumeName(args.getVolumeName())
         .setBucketName(args.getBucketName());
     if(args.getAddAcls() != null) {
@@ -255,7 +254,7 @@ public final class DistributedStorageHandler implements StorageHandler {
       builder.setIsVersionEnabled(getBucketVersioningProtobuf(
           args.getVersioning()));
     }
-    keySpaceManagerClient.createBucket(builder.build());
+    ozoneManagerClient.createBucket(builder.build());
   }
 
   /**
@@ -285,7 +284,7 @@ public final class DistributedStorageHandler implements StorageHandler {
     List<OzoneAcl> removeAcls = args.getRemoveAcls();
     List<OzoneAcl> addAcls = args.getAddAcls();
     if(removeAcls != null || addAcls != null) {
-      KsmBucketArgs.Builder builder = KsmBucketArgs.newBuilder();
+      OmBucketArgs.Builder builder = OmBucketArgs.newBuilder();
       builder.setVolumeName(args.getVolumeName())
           .setBucketName(args.getBucketName());
       if(removeAcls != null && !removeAcls.isEmpty()) {
@@ -294,35 +293,35 @@ public final class DistributedStorageHandler implements StorageHandler {
       if(addAcls != null && !addAcls.isEmpty()) {
         builder.setAddAcls(args.getAddAcls());
       }
-      keySpaceManagerClient.setBucketProperty(builder.build());
+      ozoneManagerClient.setBucketProperty(builder.build());
     }
   }
 
   @Override
   public void setBucketVersioning(BucketArgs args)
       throws IOException, OzoneException {
-    KsmBucketArgs.Builder builder = KsmBucketArgs.newBuilder();
+    OmBucketArgs.Builder builder = OmBucketArgs.newBuilder();
     builder.setVolumeName(args.getVolumeName())
         .setBucketName(args.getBucketName())
         .setIsVersionEnabled(getBucketVersioningProtobuf(
             args.getVersioning()));
-    keySpaceManagerClient.setBucketProperty(builder.build());
+    ozoneManagerClient.setBucketProperty(builder.build());
   }
 
   @Override
   public void setBucketStorageClass(BucketArgs args)
       throws IOException, OzoneException {
-    KsmBucketArgs.Builder builder = KsmBucketArgs.newBuilder();
+    OmBucketArgs.Builder builder = OmBucketArgs.newBuilder();
     builder.setVolumeName(args.getVolumeName())
         .setBucketName(args.getBucketName())
         .setStorageType(args.getStorageType());
-    keySpaceManagerClient.setBucketProperty(builder.build());
+    ozoneManagerClient.setBucketProperty(builder.build());
   }
 
   @Override
   public void deleteBucket(BucketArgs args)
       throws IOException, OzoneException {
-    keySpaceManagerClient.deleteBucket(args.getVolumeName(),
+    ozoneManagerClient.deleteBucket(args.getVolumeName(),
         args.getBucketName());
   }
 
@@ -354,12 +353,12 @@ public final class DistributedStorageHandler implements StorageHandler {
                 OzoneConsts.MAX_LISTBUCKETS_SIZE, maxNumOfKeys));
       }
 
-      List<KsmBucketInfo> buckets =
-          keySpaceManagerClient.listBuckets(va.getVolumeName(),
+      List<OmBucketInfo> buckets =
+          ozoneManagerClient.listBuckets(va.getVolumeName(),
               args.getPrevKey(), args.getPrefix(), args.getMaxKeys());
 
       // Convert the result for the web layer.
-      for (KsmBucketInfo bucketInfo : buckets) {
+      for (OmBucketInfo bucketInfo : buckets) {
         BucketInfo bk = new BucketInfo();
         bk.setVolumeName(bucketInfo.getVolumeName());
         bk.setBucketName(bucketInfo.getBucketName());
@@ -382,26 +381,26 @@ public final class DistributedStorageHandler implements StorageHandler {
       throws IOException {
     String volumeName = args.getVolumeName();
     String bucketName = args.getBucketName();
-    KsmBucketInfo ksmBucketInfo = keySpaceManagerClient.getBucketInfo(
+    OmBucketInfo omBucketInfo = ozoneManagerClient.getBucketInfo(
         volumeName, bucketName);
-    BucketInfo bucketInfo = new BucketInfo(ksmBucketInfo.getVolumeName(),
-        ksmBucketInfo.getBucketName());
-    if(ksmBucketInfo.getIsVersionEnabled()) {
+    BucketInfo bucketInfo = new BucketInfo(omBucketInfo.getVolumeName(),
+        omBucketInfo.getBucketName());
+    if(omBucketInfo.getIsVersionEnabled()) {
       bucketInfo.setVersioning(Versioning.ENABLED);
     } else {
       bucketInfo.setVersioning(Versioning.DISABLED);
     }
-    bucketInfo.setStorageType(ksmBucketInfo.getStorageType());
-    bucketInfo.setAcls(ksmBucketInfo.getAcls());
+    bucketInfo.setStorageType(omBucketInfo.getStorageType());
+    bucketInfo.setAcls(omBucketInfo.getAcls());
     bucketInfo.setCreatedOn(
-        HddsClientUtils.formatDateTime(ksmBucketInfo.getCreationTime()));
+        HddsClientUtils.formatDateTime(omBucketInfo.getCreationTime()));
     return bucketInfo;
   }
 
   @Override
   public OutputStream newKeyWriter(KeyArgs args) throws IOException,
       OzoneException {
-    KsmKeyArgs keyArgs = new KsmKeyArgs.Builder()
+    OmKeyArgs keyArgs = new OmKeyArgs.Builder()
         .setVolumeName(args.getVolumeName())
         .setBucketName(args.getBucketName())
         .setKeyName(args.getKeyName())
@@ -409,14 +408,14 @@ public final class DistributedStorageHandler implements StorageHandler {
         .setType(xceiverClientManager.getType())
         .setFactor(xceiverClientManager.getFactor())
         .build();
-    // contact KSM to allocate a block for key.
-    OpenKeySession openKey = keySpaceManagerClient.openKey(keyArgs);
+    // contact OM to allocate a block for key.
+    OpenKeySession openKey = ozoneManagerClient.openKey(keyArgs);
     ChunkGroupOutputStream groupOutputStream =
         new ChunkGroupOutputStream.Builder()
             .setHandler(openKey)
             .setXceiverClientManager(xceiverClientManager)
             .setScmClient(storageContainerLocationClient)
-            .setKsmClient(keySpaceManagerClient)
+            .setOmClient(ozoneManagerClient)
             .setChunkSize(chunkSize)
             .setRequestID(args.getRequestID())
             .setType(xceiverClientManager.getType())
@@ -437,56 +436,56 @@ public final class DistributedStorageHandler implements StorageHandler {
   @Override
   public LengthInputStream newKeyReader(KeyArgs args) throws IOException,
       OzoneException {
-    KsmKeyArgs keyArgs = new KsmKeyArgs.Builder()
+    OmKeyArgs keyArgs = new OmKeyArgs.Builder()
         .setVolumeName(args.getVolumeName())
         .setBucketName(args.getBucketName())
         .setKeyName(args.getKeyName())
         .setDataSize(args.getSize())
         .build();
-    KsmKeyInfo keyInfo = keySpaceManagerClient.lookupKey(keyArgs);
-    return ChunkGroupInputStream.getFromKsmKeyInfo(
+    OmKeyInfo keyInfo = ozoneManagerClient.lookupKey(keyArgs);
+    return ChunkGroupInputStream.getFromOmKeyInfo(
         keyInfo, xceiverClientManager, storageContainerLocationClient,
         args.getRequestID());
   }
 
   @Override
   public void deleteKey(KeyArgs args) throws IOException, OzoneException {
-    KsmKeyArgs keyArgs = new KsmKeyArgs.Builder()
+    OmKeyArgs keyArgs = new OmKeyArgs.Builder()
         .setVolumeName(args.getVolumeName())
         .setBucketName(args.getBucketName())
         .setKeyName(args.getKeyName())
         .build();
-    keySpaceManagerClient.deleteKey(keyArgs);
+    ozoneManagerClient.deleteKey(keyArgs);
   }
 
   @Override
   public void renameKey(KeyArgs args, String toKeyName)
       throws IOException, OzoneException {
-    KsmKeyArgs keyArgs = new KsmKeyArgs.Builder()
+    OmKeyArgs keyArgs = new OmKeyArgs.Builder()
         .setVolumeName(args.getVolumeName())
         .setBucketName(args.getBucketName())
         .setKeyName(args.getKeyName())
         .build();
-    keySpaceManagerClient.renameKey(keyArgs, toKeyName);
+    ozoneManagerClient.renameKey(keyArgs, toKeyName);
   }
 
   @Override
   public KeyInfo getKeyInfo(KeyArgs args) throws IOException, OzoneException {
-    KsmKeyArgs keyArgs = new KsmKeyArgs.Builder()
+    OmKeyArgs keyArgs = new OmKeyArgs.Builder()
         .setVolumeName(args.getVolumeName())
         .setBucketName(args.getBucketName())
         .setKeyName(args.getKeyName())
         .build();
 
-    KsmKeyInfo ksmKeyInfo = keySpaceManagerClient.lookupKey(keyArgs);
+    OmKeyInfo omKeyInfo = ozoneManagerClient.lookupKey(keyArgs);
     KeyInfo keyInfo = new KeyInfo();
     keyInfo.setVersion(0);
-    keyInfo.setKeyName(ksmKeyInfo.getKeyName());
-    keyInfo.setSize(ksmKeyInfo.getDataSize());
+    keyInfo.setKeyName(omKeyInfo.getKeyName());
+    keyInfo.setSize(omKeyInfo.getDataSize());
     keyInfo.setCreatedOn(
-        HddsClientUtils.formatDateTime(ksmKeyInfo.getCreationTime()));
+        HddsClientUtils.formatDateTime(omKeyInfo.getCreationTime()));
     keyInfo.setModifiedOn(
-        HddsClientUtils.formatDateTime(ksmKeyInfo.getModificationTime()));
+        HddsClientUtils.formatDateTime(omKeyInfo.getModificationTime()));
     return keyInfo;
   }
 
@@ -515,13 +514,13 @@ public final class DistributedStorageHandler implements StorageHandler {
                 OzoneConsts.MAX_LISTKEYS_SIZE, maxNumOfKeys));
       }
 
-      List<KsmKeyInfo> keys=
-          keySpaceManagerClient.listKeys(bucketArgs.getVolumeName(),
+      List<OmKeyInfo> keys=
+          ozoneManagerClient.listKeys(bucketArgs.getVolumeName(),
               bucketArgs.getBucketName(),
               args.getPrevKey(), args.getPrefix(), args.getMaxKeys());
 
       // Convert the result for the web layer.
-      for (KsmKeyInfo info : keys) {
+      for (OmKeyInfo info : keys) {
         KeyInfo tempInfo = new KeyInfo();
         tempInfo.setVersion(0);
         tempInfo.setKeyName(info.getKeyName());
@@ -547,7 +546,7 @@ public final class DistributedStorageHandler implements StorageHandler {
   @Override
   public void close() {
     IOUtils.cleanupWithLogger(LOG, xceiverClientManager);
-    IOUtils.cleanupWithLogger(LOG, keySpaceManagerClient);
+    IOUtils.cleanupWithLogger(LOG, ozoneManagerClient);
     IOUtils.cleanupWithLogger(LOG, storageContainerLocationClient);
   }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/061b1685/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/ksm/BucketManager.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/ksm/BucketManager.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/ksm/BucketManager.java
deleted file mode 100644
index 6c75691..0000000
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/ksm/BucketManager.java
+++ /dev/null
@@ -1,79 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with this
- * work for additional information regarding copyright ownership.  The ASF
- * licenses this file to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations under
- * the License.
- */
-package org.apache.hadoop.ozone.ksm;
-
-import org.apache.hadoop.ozone.ksm.helpers.KsmBucketArgs;
-import org.apache.hadoop.ozone.ksm.helpers.KsmBucketInfo;
-
-import java.io.IOException;
-import java.util.List;
-
-/**
- * BucketManager handles all the bucket level operations.
- */
-public interface BucketManager {
-  /**
-   * Creates a bucket.
-   * @param bucketInfo - KsmBucketInfo for creating bucket.
-   */
-  void createBucket(KsmBucketInfo bucketInfo) throws IOException;
-  /**
-   * Returns Bucket Information.
-   * @param volumeName - Name of the Volume.
-   * @param bucketName - Name of the Bucket.
-   */
-  KsmBucketInfo getBucketInfo(String volumeName, String bucketName)
-      throws IOException;
-
-  /**
-   * Sets bucket property from args.
-   * @param args - BucketArgs.
-   * @throws IOException
-   */
-  void setBucketProperty(KsmBucketArgs args) throws IOException;
-
-  /**
-   * Deletes an existing empty bucket from volume.
-   * @param volumeName - Name of the volume.
-   * @param bucketName - Name of the bucket.
-   * @throws IOException
-   */
-  void deleteBucket(String volumeName, String bucketName) throws IOException;
-
-  /**
-   * Returns a list of buckets represented by {@link KsmBucketInfo}
-   * in the given volume.
-   *
-   * @param volumeName
-   *   Required parameter volume name determines buckets in which volume
-   *   to return.
-   * @param startBucket
-   *   Optional start bucket name parameter indicating where to start
-   *   the bucket listing from, this key is excluded from the result.
-   * @param bucketPrefix
-   *   Optional start key parameter, restricting the response to buckets
-   *   that begin with the specified name.
-   * @param maxNumOfBuckets
-   *   The maximum number of buckets to return. It ensures
-   *   the size of the result will not exceed this limit.
-   * @return a list of buckets.
-   * @throws IOException
-   */
-  List<KsmBucketInfo> listBuckets(String volumeName,
-      String startBucket, String bucketPrefix, int maxNumOfBuckets)
-      throws IOException;
-}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/061b1685/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/ksm/BucketManagerImpl.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/ksm/BucketManagerImpl.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/ksm/BucketManagerImpl.java
deleted file mode 100644
index 957a6d9..0000000
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/ksm/BucketManagerImpl.java
+++ /dev/null
@@ -1,315 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with this
- * work for additional information regarding copyright ownership.  The ASF
- * licenses this file to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations under
- * the License.
- */
-package org.apache.hadoop.ozone.ksm;
-
-import com.google.common.base.Preconditions;
-import org.apache.hadoop.fs.StorageType;
-import org.apache.hadoop.ozone.ksm.helpers.KsmBucketArgs;
-import org.apache.hadoop.ozone.ksm.helpers.KsmBucketInfo;
-import org.apache.hadoop.ozone.ksm.exceptions.KSMException;
-import org.apache.hadoop.ozone.protocol.proto
-    .KeySpaceManagerProtocolProtos.BucketInfo;
-import org.apache.hadoop.ozone.OzoneAcl;
-import org.apache.hadoop.util.Time;
-import org.iq80.leveldb.DBException;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import java.io.IOException;
-import java.util.List;
-
-/**
- * KSM bucket manager.
- */
-public class BucketManagerImpl implements BucketManager {
-  private static final Logger LOG =
-      LoggerFactory.getLogger(BucketManagerImpl.class);
-
-  /**
-   * KSMMetadataManager is used for accessing KSM MetadataDB and ReadWriteLock.
-   */
-  private final KSMMetadataManager metadataManager;
-
-  /**
-   * Constructs BucketManager.
-   * @param metadataManager
-   */
-  public BucketManagerImpl(KSMMetadataManager metadataManager){
-    this.metadataManager = metadataManager;
-  }
-
-  /**
-   * MetadataDB is maintained in MetadataManager and shared between
-   * BucketManager and VolumeManager. (and also by KeyManager)
-   *
-   * BucketManager uses MetadataDB to store bucket level information.
-   *
-   * Keys used in BucketManager for storing data into MetadataDB
-   * for BucketInfo:
-   * {volume/bucket} -> bucketInfo
-   *
-   * Work flow of create bucket:
-   *
-   * -> Check if the Volume exists in metadataDB, if not throw
-   * VolumeNotFoundException.
-   * -> Else check if the Bucket exists in metadataDB, if so throw
-   * BucketExistException
-   * -> Else update MetadataDB with VolumeInfo.
-   */
-
-  /**
-   * Creates a bucket.
-   * @param bucketInfo - KsmBucketInfo.
-   */
-  @Override
-  public void createBucket(KsmBucketInfo bucketInfo) throws IOException {
-    Preconditions.checkNotNull(bucketInfo);
-    metadataManager.writeLock().lock();
-    String volumeName = bucketInfo.getVolumeName();
-    String bucketName = bucketInfo.getBucketName();
-    try {
-      byte[] volumeKey = metadataManager.getVolumeKey(volumeName);
-      byte[] bucketKey = metadataManager.getBucketKey(volumeName, bucketName);
-
-      //Check if the volume exists
-      if (metadataManager.get(volumeKey) == null) {
-        LOG.debug("volume: {} not found ", volumeName);
-        throw new KSMException("Volume doesn't exist",
-            KSMException.ResultCodes.FAILED_VOLUME_NOT_FOUND);
-      }
-      //Check if bucket already exists
-      if (metadataManager.get(bucketKey) != null) {
-        LOG.debug("bucket: {} already exists ", bucketName);
-        throw new KSMException("Bucket already exist",
-            KSMException.ResultCodes.FAILED_BUCKET_ALREADY_EXISTS);
-      }
-
-      KsmBucketInfo ksmBucketInfo = KsmBucketInfo.newBuilder()
-          .setVolumeName(bucketInfo.getVolumeName())
-          .setBucketName(bucketInfo.getBucketName())
-          .setAcls(bucketInfo.getAcls())
-          .setStorageType(bucketInfo.getStorageType())
-          .setIsVersionEnabled(bucketInfo.getIsVersionEnabled())
-          .setCreationTime(Time.now())
-          .build();
-      metadataManager.put(bucketKey, ksmBucketInfo.getProtobuf().toByteArray());
-
-      LOG.debug("created bucket: {} in volume: {}", bucketName, volumeName);
-    } catch (IOException | DBException ex) {
-      if (!(ex instanceof KSMException)) {
-        LOG.error("Bucket creation failed for bucket:{} in volume:{}",
-            bucketName, volumeName, ex);
-      }
-      throw ex;
-    } finally {
-      metadataManager.writeLock().unlock();
-    }
-  }
-
-  /**
-   * Returns Bucket Information.
-   *
-   * @param volumeName - Name of the Volume.
-   * @param bucketName - Name of the Bucket.
-   */
-  @Override
-  public KsmBucketInfo getBucketInfo(String volumeName, String bucketName)
-      throws IOException {
-    Preconditions.checkNotNull(volumeName);
-    Preconditions.checkNotNull(bucketName);
-    metadataManager.readLock().lock();
-    try {
-      byte[] bucketKey = metadataManager.getBucketKey(volumeName, bucketName);
-      byte[] value = metadataManager.get(bucketKey);
-      if (value == null) {
-        LOG.debug("bucket: {} not found in volume: {}.", bucketName,
-            volumeName);
-        throw new KSMException("Bucket not found",
-            KSMException.ResultCodes.FAILED_BUCKET_NOT_FOUND);
-      }
-      return KsmBucketInfo.getFromProtobuf(BucketInfo.parseFrom(value));
-    } catch (IOException | DBException ex) {
-      if (!(ex instanceof KSMException)) {
-        LOG.error("Exception while getting bucket info for bucket: {}",
-            bucketName, ex);
-      }
-      throw ex;
-    } finally {
-      metadataManager.readLock().unlock();
-    }
-  }
-
-  /**
-   * Sets bucket property from args.
-   * @param args - BucketArgs.
-   * @throws IOException
-   */
-  @Override
-  public void setBucketProperty(KsmBucketArgs args) throws IOException {
-    Preconditions.checkNotNull(args);
-    metadataManager.writeLock().lock();
-    String volumeName = args.getVolumeName();
-    String bucketName = args.getBucketName();
-    try {
-      byte[] bucketKey = metadataManager.getBucketKey(volumeName, bucketName);
-      //Check if volume exists
-      if(metadataManager.get(metadataManager.getVolumeKey(volumeName)) ==
-          null) {
-        LOG.debug("volume: {} not found ", volumeName);
-        throw new KSMException("Volume doesn't exist",
-            KSMException.ResultCodes.FAILED_VOLUME_NOT_FOUND);
-      }
-      byte[] value = metadataManager.get(bucketKey);
-      //Check if bucket exist
-      if(value == null) {
-        LOG.debug("bucket: {} not found ", bucketName);
-        throw new KSMException("Bucket doesn't exist",
-            KSMException.ResultCodes.FAILED_BUCKET_NOT_FOUND);
-      }
-      KsmBucketInfo oldBucketInfo = KsmBucketInfo.getFromProtobuf(
-          BucketInfo.parseFrom(value));
-      KsmBucketInfo.Builder bucketInfoBuilder = KsmBucketInfo.newBuilder();
-      bucketInfoBuilder.setVolumeName(oldBucketInfo.getVolumeName())
-          .setBucketName(oldBucketInfo.getBucketName());
-
-      //Check ACLs to update
-      if(args.getAddAcls() != null || args.getRemoveAcls() != null) {
-        bucketInfoBuilder.setAcls(getUpdatedAclList(oldBucketInfo.getAcls(),
-            args.getRemoveAcls(), args.getAddAcls()));
-        LOG.debug("Updating ACLs for bucket: {} in volume: {}",
-            bucketName, volumeName);
-      } else {
-        bucketInfoBuilder.setAcls(oldBucketInfo.getAcls());
-      }
-
-      //Check StorageType to update
-      StorageType storageType = args.getStorageType();
-      if (storageType != null) {
-        bucketInfoBuilder.setStorageType(storageType);
-        LOG.debug("Updating bucket storage type for bucket: {} in volume: {}",
-            bucketName, volumeName);
-      } else {
-        bucketInfoBuilder.setStorageType(oldBucketInfo.getStorageType());
-      }
-
-      //Check Versioning to update
-      Boolean versioning = args.getIsVersionEnabled();
-      if (versioning != null) {
-        bucketInfoBuilder.setIsVersionEnabled(versioning);
-        LOG.debug("Updating bucket versioning for bucket: {} in volume: {}",
-            bucketName, volumeName);
-      } else {
-        bucketInfoBuilder
-            .setIsVersionEnabled(oldBucketInfo.getIsVersionEnabled());
-      }
-      bucketInfoBuilder.setCreationTime(oldBucketInfo.getCreationTime());
-
-      metadataManager.put(bucketKey,
-          bucketInfoBuilder.build().getProtobuf().toByteArray());
-    } catch (IOException | DBException ex) {
-      if (!(ex instanceof KSMException)) {
-        LOG.error("Setting bucket property failed for bucket:{} in volume:{}",
-            bucketName, volumeName, ex);
-      }
-      throw ex;
-    } finally {
-      metadataManager.writeLock().unlock();
-    }
-  }
-
-  /**
-   * Updates the existing ACL list with remove and add ACLs that are passed.
-   * Remove is done before Add.
-   *
-   * @param existingAcls - old ACL list.
-   * @param removeAcls - ACLs to be removed.
-   * @param addAcls - ACLs to be added.
-   * @return updated ACL list.
-   */
-  private List<OzoneAcl> getUpdatedAclList(List<OzoneAcl> existingAcls,
-      List<OzoneAcl> removeAcls, List<OzoneAcl> addAcls) {
-    if(removeAcls != null && !removeAcls.isEmpty()) {
-      existingAcls.removeAll(removeAcls);
-    }
-    if(addAcls != null && !addAcls.isEmpty()) {
-      addAcls.stream().filter(acl -> !existingAcls.contains(acl)).forEach(
-          existingAcls::add);
-    }
-    return existingAcls;
-  }
-
-  /**
-   * Deletes an existing empty bucket from volume.
-   * @param volumeName - Name of the volume.
-   * @param bucketName - Name of the bucket.
-   * @throws IOException
-   */
-  public void deleteBucket(String volumeName, String bucketName)
-      throws IOException {
-    Preconditions.checkNotNull(volumeName);
-    Preconditions.checkNotNull(bucketName);
-    metadataManager.writeLock().lock();
-    try {
-      byte[] bucketKey = metadataManager.getBucketKey(volumeName, bucketName);
-      //Check if volume exists
-      if (metadataManager.get(metadataManager.getVolumeKey(volumeName))
-          == null) {
-        LOG.debug("volume: {} not found ", volumeName);
-        throw new KSMException("Volume doesn't exist",
-            KSMException.ResultCodes.FAILED_VOLUME_NOT_FOUND);
-      }
-      //Check if bucket exist
-      if (metadataManager.get(bucketKey) == null) {
-        LOG.debug("bucket: {} not found ", bucketName);
-        throw new KSMException("Bucket doesn't exist",
-            KSMException.ResultCodes.FAILED_BUCKET_NOT_FOUND);
-      }
-      //Check if bucket is empty
-      if (!metadataManager.isBucketEmpty(volumeName, bucketName)) {
-        LOG.debug("bucket: {} is not empty ", bucketName);
-        throw new KSMException("Bucket is not empty",
-            KSMException.ResultCodes.FAILED_BUCKET_NOT_EMPTY);
-      }
-      metadataManager.delete(bucketKey);
-    } catch (IOException ex) {
-      if (!(ex instanceof KSMException)) {
-        LOG.error("Delete bucket failed for bucket:{} in volume:{}", bucketName,
-            volumeName, ex);
-      }
-      throw ex;
-    } finally {
-      metadataManager.writeLock().unlock();
-    }
-  }
-
-  /**
-   * {@inheritDoc}
-   */
-  @Override
-  public List<KsmBucketInfo> listBuckets(String volumeName,
-      String startBucket, String bucketPrefix, int maxNumOfBuckets)
-      throws IOException {
-    Preconditions.checkNotNull(volumeName);
-    metadataManager.readLock().lock();
-    try {
-      return metadataManager.listBuckets(
-          volumeName, startBucket, bucketPrefix, maxNumOfBuckets);
-    } finally {
-      metadataManager.readLock().unlock();
-    }
-  }
-}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/061b1685/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/ksm/KSMMXBean.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/ksm/KSMMXBean.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/ksm/KSMMXBean.java
deleted file mode 100644
index bf22332..0000000
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/ksm/KSMMXBean.java
+++ /dev/null
@@ -1,31 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.ozone.ksm;
-
-import org.apache.hadoop.classification.InterfaceAudience;
-import org.apache.hadoop.hdds.server.ServiceRuntimeInfo;
-
-/**
- * This is the JMX management interface for ksm information.
- */
-@InterfaceAudience.Private
-public interface KSMMXBean extends ServiceRuntimeInfo {
-
-  String getRpcPort();
-}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/061b1685/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/ksm/KSMMetadataManager.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/ksm/KSMMetadataManager.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/ksm/KSMMetadataManager.java
deleted file mode 100644
index f5a2d5b..0000000
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/ksm/KSMMetadataManager.java
+++ /dev/null
@@ -1,253 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with this
- * work for additional information regarding copyright ownership.  The ASF
- * licenses this file to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations under
- * the License.
- */
-package org.apache.hadoop.ozone.ksm;
-
-import com.google.common.annotations.VisibleForTesting;
-import org.apache.hadoop.ozone.ksm.helpers.KsmBucketInfo;
-import org.apache.hadoop.ozone.common.BlockGroup;
-import org.apache.hadoop.ozone.ksm.helpers.KsmKeyInfo;
-import org.apache.hadoop.ozone.ksm.helpers.KsmVolumeArgs;
-import org.apache.hadoop.utils.BatchOperation;
-import org.apache.hadoop.utils.MetadataStore;
-
-import java.io.IOException;
-import java.util.List;
-import java.util.concurrent.locks.Lock;
-
-/**
- * KSM metadata manager interface.
- */
-public interface KSMMetadataManager {
-  /**
-   * Start metadata manager.
-   */
-  void start();
-
-  /**
-   * Stop metadata manager.
-   */
-  void stop() throws IOException;
-
-  /**
-   * Get metadata store.
-   * @return metadata store.
-   */
-  @VisibleForTesting
-  MetadataStore getStore();
-
-  /**
-   * Returns the read lock used on Metadata DB.
-   * @return readLock
-   */
-  Lock readLock();
-
-  /**
-   * Returns the write lock used on Metadata DB.
-   * @return writeLock
-   */
-  Lock writeLock();
-
-  /**
-   * Returns the value associated with this key.
-   * @param key - key
-   * @return value
-   */
-  byte[] get(byte[] key) throws IOException;
-
-  /**
-   * Puts a Key into Metadata DB.
-   * @param key   - key
-   * @param value - value
-   */
-  void put(byte[] key, byte[] value) throws IOException;
-
-  /**
-   * Deletes a Key from Metadata DB.
-   * @param key   - key
-   */
-  void delete(byte[] key) throws IOException;
-
-  /**
-   * Atomic write a batch of operations.
-   * @param batch
-   * @throws IOException
-   */
-  void writeBatch(BatchOperation batch) throws IOException;
-
-  /**
-   * Given a volume return the corresponding DB key.
-   * @param volume - Volume name
-   */
-  byte[] getVolumeKey(String volume);
-
-  /**
-   * Given a user return the corresponding DB key.
-   * @param user - User name
-   */
-  byte[] getUserKey(String user);
-
-  /**
-   * Given a volume and bucket, return the corresponding DB key.
-   * @param volume - User name
-   * @param bucket - Bucket name
-   */
-  byte[] getBucketKey(String volume, String bucket);
-
-  /**
-   * Given a volume, bucket and a key, return the corresponding DB key.
-   * @param volume - volume name
-   * @param bucket - bucket name
-   * @param key - key name
-   * @return bytes of DB key.
-   */
-  byte[] getDBKeyBytes(String volume, String bucket, String key);
-
-  /**
-   * Returns the DB key name of a deleted key in KSM metadata store.
-   * The name for a deleted key has prefix #deleting# followed by
-   * the actual key name.
-   * @param keyName - key name
-   * @return bytes of DB key.
-   */
-  byte[] getDeletedKeyName(byte[] keyName);
-
-  /**
-   * Returns the DB key name of a open key in KSM metadata store.
-   * Should be #open# prefix followed by actual key name.
-   * @param keyName - key name
-   * @param id - the id for this open
-   * @return bytes of DB key.
-   */
-  byte[] getOpenKeyNameBytes(String keyName, int id);
-
-  /**
-   * Returns the full name of a key given volume name, bucket name and key name.
-   * Generally done by padding certain delimiters.
-   *
-   * @param volumeName - volume name
-   * @param bucketName - bucket name
-   * @param keyName - key name
-   * @return the full key name.
-   */
-  String getKeyWithDBPrefix(String volumeName, String bucketName,
-      String keyName);
-
-  /**
-   * Given a volume, check if it is empty,
-   * i.e there are no buckets inside it.
-   * @param volume - Volume name
-   */
-  boolean isVolumeEmpty(String volume) throws IOException;
-
-  /**
-   * Given a volume/bucket, check if it is empty,
-   * i.e there are no keys inside it.
-   * @param volume - Volume name
-   * @param  bucket - Bucket name
-   * @return true if the bucket is empty
-   */
-  boolean isBucketEmpty(String volume, String bucket) throws IOException;
-
-  /**
-   * Returns a list of buckets represented by {@link KsmBucketInfo}
-   * in the given volume.
-   *
-   * @param volumeName
-   *   the name of the volume. This argument is required,
-   *   this method returns buckets in this given volume.
-   * @param startBucket
-   *   the start bucket name. Only the buckets whose name is
-   *   after this value will be included in the result.
-   *   This key is excluded from the result.
-   * @param bucketPrefix
-   *   bucket name prefix. Only the buckets whose name has
-   *   this prefix will be included in the result.
-   * @param maxNumOfBuckets
-   *   the maximum number of buckets to return. It ensures
-   *   the size of the result will not exceed this limit.
-   * @return a list of buckets.
-   * @throws IOException
-   */
-  List<KsmBucketInfo> listBuckets(String volumeName, String startBucket,
-      String bucketPrefix, int maxNumOfBuckets) throws IOException;
-
-  /**
-   * Returns a list of keys represented by {@link KsmKeyInfo}
-   * in the given bucket.
-   *
-   * @param volumeName
-   *   the name of the volume.
-   * @param bucketName
-   *   the name of the bucket.
-   * @param startKey
-   *   the start key name, only the keys whose name is
-   *   after this value will be included in the result.
-   *   This key is excluded from the result.
-   * @param keyPrefix
-   *   key name prefix, only the keys whose name has
-   *   this prefix will be included in the result.
-   * @param maxKeys
-   *   the maximum number of keys to return. It ensures
-   *   the size of the result will not exceed this limit.
-   * @return a list of keys.
-   * @throws IOException
-   */
-  List<KsmKeyInfo> listKeys(String volumeName,
-      String bucketName, String startKey, String keyPrefix, int maxKeys)
-      throws IOException;
-
-  /**
-   * Returns a list of volumes owned by a given user; if user is null,
-   * returns all volumes.
-   *
-   * @param userName
-   *   volume owner
-   * @param prefix
-   *   the volume prefix used to filter the listing result.
-   * @param startKey
-   *   the start volume name determines where to start listing from,
-   *   this key is excluded from the result.
-   * @param maxKeys
-   *   the maximum number of volumes to return.
-   * @return a list of {@link KsmVolumeArgs}
-   * @throws IOException
-   */
-  List<KsmVolumeArgs> listVolumes(String userName, String prefix,
-      String startKey, int maxKeys) throws IOException;
-
-  /**
-   * Returns a list of pending deletion key info that ups to the given count.
-   * Each entry is a {@link BlockGroup}, which contains the info about the
-   * key name and all its associated block IDs. A pending deletion key is
-   * stored with #deleting# prefix in KSM DB.
-   *
-   * @param count max number of keys to return.
-   * @return a list of {@link BlockGroup} represent keys and blocks.
-   * @throws IOException
-   */
-  List<BlockGroup> getPendingDeletionKeys(int count) throws IOException;
-
-  /**
-   * Returns a list of all still open key info. Which contains the info about
-   * the key name and all its associated block IDs. A pending open key has
-   * prefix #open# in KSM DB.
-   *
-   * @return a list of {@link BlockGroup} representing keys and blocks.
-   * @throws IOException
-   */
-  List<BlockGroup> getExpiredOpenKeys() throws IOException;
-}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/061b1685/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/ksm/KSMMetadataManagerImpl.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/ksm/KSMMetadataManagerImpl.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/ksm/KSMMetadataManagerImpl.java
deleted file mode 100644
index 6664a32..0000000
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/ksm/KSMMetadataManagerImpl.java
+++ /dev/null
@@ -1,526 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with this
- * work for additional information regarding copyright ownership.  The ASF
- * licenses this file to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations under
- * the License.
- */
-package org.apache.hadoop.ozone.ksm;
-
-import com.google.common.annotations.VisibleForTesting;
-import com.google.common.base.Strings;
-import com.google.common.collect.Lists;
-import org.apache.commons.lang3.tuple.ImmutablePair;
-import org.apache.hadoop.hdfs.DFSUtil;
-import org.apache.hadoop.hdds.client.BlockID;
-import org.apache.hadoop.ozone.ksm.helpers.KsmKeyInfo;
-import org.apache.hadoop.ozone.ksm.helpers.KsmBucketInfo;
-import org.apache.hadoop.ozone.ksm.helpers.KsmKeyLocationInfoGroup;
-import org.apache.hadoop.ozone.ksm.helpers.KsmVolumeArgs;
-import org.apache.hadoop.ozone.common.BlockGroup;
-import org.apache.hadoop.hdds.conf.OzoneConfiguration;
-import org.apache.hadoop.ozone.OzoneConsts;
-import org.apache.hadoop.ozone.ksm.exceptions.KSMException;
-import org.apache.hadoop.ozone.ksm.exceptions.KSMException.ResultCodes;
-import org.apache.hadoop.ozone.protocol.proto.KeySpaceManagerProtocolProtos.BucketInfo;
-import org.apache.hadoop.ozone.protocol.proto.KeySpaceManagerProtocolProtos.KeyInfo;
-import org.apache.hadoop.ozone.protocol.proto.KeySpaceManagerProtocolProtos.VolumeInfo;
-import org.apache.hadoop.ozone.protocol.proto.KeySpaceManagerProtocolProtos.VolumeList;
-
-import org.apache.hadoop.util.Time;
-import org.apache.hadoop.utils.BatchOperation;
-import org.apache.hadoop.utils.MetadataKeyFilters;
-import org.apache.hadoop.utils.MetadataKeyFilters.KeyPrefixFilter;
-import org.apache.hadoop.utils.MetadataKeyFilters.MetadataKeyFilter;
-import org.apache.hadoop.utils.MetadataStore;
-import org.apache.hadoop.utils.MetadataStoreBuilder;
-
-import java.io.File;
-import java.io.IOException;
-import java.util.Collections;
-import java.util.List;
-import java.util.ArrayList;
-import java.util.Map;
-import java.util.concurrent.locks.Lock;
-import java.util.concurrent.locks.ReadWriteLock;
-import java.util.concurrent.locks.ReentrantReadWriteLock;
-import java.util.stream.Collectors;
-
-import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_OPEN_KEY_EXPIRE_THRESHOLD_SECONDS;
-import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_OPEN_KEY_EXPIRE_THRESHOLD_SECONDS_DEFAULT;
-import static org.apache.hadoop.ozone.OzoneConsts.DELETING_KEY_PREFIX;
-import static org.apache.hadoop.ozone.OzoneConsts.KSM_DB_NAME;
-import static org.apache.hadoop.ozone.OzoneConsts.OPEN_KEY_ID_DELIMINATOR;
-import static org.apache.hadoop.ozone.OzoneConsts.OPEN_KEY_PREFIX;
-import static org.apache.hadoop.ozone.ksm.KSMConfigKeys
-    .OZONE_KSM_DB_CACHE_SIZE_DEFAULT;
-import static org.apache.hadoop.ozone.ksm.KSMConfigKeys
-    .OZONE_KSM_DB_CACHE_SIZE_MB;
-import static org.apache.hadoop.hdds.server.ServerUtils.getOzoneMetaDirPath;
-
-/**
- * KSM metadata manager interface.
- */
-public class KSMMetadataManagerImpl implements KSMMetadataManager {
-
-  private final MetadataStore store;
-  private final ReadWriteLock lock;
-  private final long openKeyExpireThresholdMS;
-
-  public KSMMetadataManagerImpl(OzoneConfiguration conf) throws IOException {
-    File metaDir = getOzoneMetaDirPath(conf);
-    final int cacheSize = conf.getInt(OZONE_KSM_DB_CACHE_SIZE_MB,
-        OZONE_KSM_DB_CACHE_SIZE_DEFAULT);
-    File ksmDBFile = new File(metaDir.getPath(), KSM_DB_NAME);
-    this.store = MetadataStoreBuilder.newBuilder()
-        .setConf(conf)
-        .setDbFile(ksmDBFile)
-        .setCacheSize(cacheSize * OzoneConsts.MB)
-        .build();
-    this.lock = new ReentrantReadWriteLock();
-    this.openKeyExpireThresholdMS = 1000 * conf.getInt(
-        OZONE_OPEN_KEY_EXPIRE_THRESHOLD_SECONDS,
-        OZONE_OPEN_KEY_EXPIRE_THRESHOLD_SECONDS_DEFAULT);
-  }
-
-  /**
-   * Start metadata manager.
-   */
-  @Override
-  public void start() {
-
-  }
-
-  /**
-   * Stop metadata manager.
-   */
-  @Override
-  public void stop() throws IOException {
-    if (store != null) {
-      store.close();
-    }
-  }
-
-  /**
-   * Get metadata store.
-   * @return store - metadata store.
-   */
-  @VisibleForTesting
-  @Override
-  public MetadataStore getStore() {
-    return store;
-  }
-
-  /**
-   * Given a volume return the corresponding DB key.
-   * @param volume - Volume name
-   */
-  public byte[] getVolumeKey(String volume) {
-    String dbVolumeName = OzoneConsts.KSM_VOLUME_PREFIX + volume;
-    return DFSUtil.string2Bytes(dbVolumeName);
-  }
-
-  /**
-   * Given a user return the corresponding DB key.
-   * @param user - User name
-   */
-  public byte[] getUserKey(String user) {
-    String dbUserName = OzoneConsts.KSM_USER_PREFIX + user;
-    return DFSUtil.string2Bytes(dbUserName);
-  }
-
-  /**
-   * Given a volume and bucket, return the corresponding DB key.
-   * @param volume - User name
-   * @param bucket - Bucket name
-   */
-  public byte[] getBucketKey(String volume, String bucket) {
-    String bucketKeyString = OzoneConsts.KSM_VOLUME_PREFIX + volume
-        + OzoneConsts.KSM_BUCKET_PREFIX + bucket;
-    return DFSUtil.string2Bytes(bucketKeyString);
-  }
-
-  /**
-   * @param volume
-   * @param bucket
-   * @return
-   */
-  private String getBucketWithDBPrefix(String volume, String bucket) {
-    StringBuffer sb = new StringBuffer();
-    sb.append(OzoneConsts.KSM_VOLUME_PREFIX)
-        .append(volume)
-        .append(OzoneConsts.KSM_BUCKET_PREFIX);
-    if (!Strings.isNullOrEmpty(bucket)) {
-      sb.append(bucket);
-    }
-    return sb.toString();
-  }
-
-  @Override
-  public String getKeyWithDBPrefix(String volume, String bucket, String key) {
-    String keyVB = OzoneConsts.KSM_KEY_PREFIX + volume
-        + OzoneConsts.KSM_KEY_PREFIX + bucket
-        + OzoneConsts.KSM_KEY_PREFIX;
-    return Strings.isNullOrEmpty(key) ? keyVB : keyVB + key;
-  }
-
-  @Override
-  public byte[] getDBKeyBytes(String volume, String bucket, String key) {
-    return DFSUtil.string2Bytes(getKeyWithDBPrefix(volume, bucket, key));
-  }
-
-  @Override
-  public byte[] getDeletedKeyName(byte[] keyName) {
-    return DFSUtil.string2Bytes(
-        DELETING_KEY_PREFIX + DFSUtil.bytes2String(keyName));
-  }
-
-  @Override
-  public byte[] getOpenKeyNameBytes(String keyName, int id) {
-    return DFSUtil.string2Bytes(OPEN_KEY_PREFIX + id +
-        OPEN_KEY_ID_DELIMINATOR + keyName);
-  }
-
-  /**
-   * Returns the read lock used on Metadata DB.
-   * @return readLock
-   */
-  @Override
-  public Lock readLock() {
-    return lock.readLock();
-  }
-
-  /**
-   * Returns the write lock used on Metadata DB.
-   * @return writeLock
-   */
-  @Override
-  public Lock writeLock() {
-    return lock.writeLock();
-  }
-
-  /**
-   * Returns the value associated with this key.
-   * @param key - key
-   * @return value
-   */
-  @Override
-  public byte[] get(byte[] key) throws IOException {
-    return store.get(key);
-  }
-
-  /**
-   * Puts a Key into Metadata DB.
-   * @param key   - key
-   * @param value - value
-   */
-  @Override
-  public void put(byte[] key, byte[] value) throws IOException {
-    store.put(key, value);
-  }
-
-  /**
-   * Deletes a Key from Metadata DB.
-   * @param key   - key
-   */
-  public void delete(byte[] key) throws IOException {
-    store.delete(key);
-  }
-
-  @Override
-  public void writeBatch(BatchOperation batch) throws IOException {
-    this.store.writeBatch(batch);
-  }
-
-  /**
-   * Given a volume, check if it is empty, i.e there are no buckets inside it.
-   * @param volume - Volume name
-   * @return true if the volume is empty
-   */
-  public boolean isVolumeEmpty(String volume) throws IOException {
-    String dbVolumeRootName = OzoneConsts.KSM_VOLUME_PREFIX + volume
-        + OzoneConsts.KSM_BUCKET_PREFIX;
-    byte[] dbVolumeRootKey = DFSUtil.string2Bytes(dbVolumeRootName);
-    ImmutablePair<byte[], byte[]> volumeRoot =
-        store.peekAround(0, dbVolumeRootKey);
-    if (volumeRoot != null) {
-      return !DFSUtil.bytes2String(volumeRoot.getKey())
-          .startsWith(dbVolumeRootName);
-    }
-    return true;
-  }
-
-  /**
-   * Given a volume/bucket, check if it is empty,
-   * i.e there are no keys inside it.
-   * @param volume - Volume name
-   * @param bucket - Bucket name
-   * @return true if the bucket is empty
-   */
-  public boolean isBucketEmpty(String volume, String bucket)
-      throws IOException {
-    String keyRootName = getKeyWithDBPrefix(volume, bucket, null);
-    byte[] keyRoot = DFSUtil.string2Bytes(keyRootName);
-    ImmutablePair<byte[], byte[]> firstKey = store.peekAround(0, keyRoot);
-    if (firstKey != null) {
-      return !DFSUtil.bytes2String(firstKey.getKey())
-          .startsWith(keyRootName);
-    }
-    return true;
-  }
-
-  /**
-   * {@inheritDoc}
-   */
-  @Override
-  public List<KsmBucketInfo> listBuckets(final String volumeName,
-      final String startBucket, final String bucketPrefix,
-      final int maxNumOfBuckets) throws IOException {
-    List<KsmBucketInfo> result = new ArrayList<>();
-    if (Strings.isNullOrEmpty(volumeName)) {
-      throw new KSMException("Volume name is required.",
-          ResultCodes.FAILED_VOLUME_NOT_FOUND);
-    }
-
-    byte[] volumeNameBytes = getVolumeKey(volumeName);
-    if (store.get(volumeNameBytes) == null) {
-      throw new KSMException("Volume " + volumeName + " not found.",
-          ResultCodes.FAILED_VOLUME_NOT_FOUND);
-    }
-
-
-    // A bucket starts with /#volume/#bucket_prefix
-    MetadataKeyFilter filter = (preKey, currentKey, nextKey) -> {
-      if (currentKey != null) {
-        String bucketNamePrefix =
-                getBucketWithDBPrefix(volumeName, bucketPrefix);
-        String bucket = DFSUtil.bytes2String(currentKey);
-        return bucket.startsWith(bucketNamePrefix);
-      }
-      return false;
-    };
-
-    List<Map.Entry<byte[], byte[]>> rangeResult;
-    if (!Strings.isNullOrEmpty(startBucket)) {
-      // Since we are excluding start key from the result,
-      // the maxNumOfBuckets is incremented.
-      rangeResult = store.getSequentialRangeKVs(
-          getBucketKey(volumeName, startBucket),
-          maxNumOfBuckets + 1, filter);
-      if (!rangeResult.isEmpty()) {
-        //Remove start key from result.
-        rangeResult.remove(0);
-      }
-    } else {
-      rangeResult = store.getSequentialRangeKVs(null, maxNumOfBuckets, filter);
-    }
-
-    for (Map.Entry<byte[], byte[]> entry : rangeResult) {
-      KsmBucketInfo info = KsmBucketInfo.getFromProtobuf(
-          BucketInfo.parseFrom(entry.getValue()));
-      result.add(info);
-    }
-    return result;
-  }
-
-  @Override
-  public List<KsmKeyInfo> listKeys(String volumeName, String bucketName,
-      String startKey, String keyPrefix, int maxKeys) throws IOException {
-    List<KsmKeyInfo> result = new ArrayList<>();
-    if (Strings.isNullOrEmpty(volumeName)) {
-      throw new KSMException("Volume name is required.",
-          ResultCodes.FAILED_VOLUME_NOT_FOUND);
-    }
-
-    if (Strings.isNullOrEmpty(bucketName)) {
-      throw new KSMException("Bucket name is required.",
-          ResultCodes.FAILED_BUCKET_NOT_FOUND);
-    }
-
-    byte[] bucketNameBytes = getBucketKey(volumeName, bucketName);
-    if (store.get(bucketNameBytes) == null) {
-      throw new KSMException("Bucket " + bucketName + " not found.",
-          ResultCodes.FAILED_BUCKET_NOT_FOUND);
-    }
-
-    MetadataKeyFilter filter = new KeyPrefixFilter()
-        .addFilter(getKeyWithDBPrefix(volumeName, bucketName, keyPrefix));
-
-    List<Map.Entry<byte[], byte[]>> rangeResult;
-    if (!Strings.isNullOrEmpty(startKey)) {
-      //Since we are excluding start key from the result,
-      // the maxNumOfBuckets is incremented.
-      rangeResult = store.getSequentialRangeKVs(
-          getDBKeyBytes(volumeName, bucketName, startKey),
-          maxKeys + 1, filter);
-      if (!rangeResult.isEmpty()) {
-        //Remove start key from result.
-        rangeResult.remove(0);
-      }
-    } else {
-      rangeResult = store.getSequentialRangeKVs(null, maxKeys, filter);
-    }
-
-    for (Map.Entry<byte[], byte[]> entry : rangeResult) {
-      KsmKeyInfo info = KsmKeyInfo.getFromProtobuf(
-          KeyInfo.parseFrom(entry.getValue()));
-      result.add(info);
-    }
-    return result;
-  }
-
-  @Override
-  public List<KsmVolumeArgs> listVolumes(String userName,
-      String prefix, String startKey, int maxKeys) throws IOException {
-    List<KsmVolumeArgs> result = Lists.newArrayList();
-    VolumeList volumes;
-    if (Strings.isNullOrEmpty(userName)) {
-      volumes = getAllVolumes();
-    } else {
-      volumes = getVolumesByUser(userName);
-    }
-
-    if (volumes == null || volumes.getVolumeNamesCount() == 0) {
-      return result;
-    }
-
-    boolean startKeyFound = Strings.isNullOrEmpty(startKey);
-    for (String volumeName : volumes.getVolumeNamesList()) {
-      if (!Strings.isNullOrEmpty(prefix)) {
-        if (!volumeName.startsWith(prefix)) {
-          continue;
-        }
-      }
-
-      if (!startKeyFound && volumeName.equals(startKey)) {
-        startKeyFound = true;
-        continue;
-      }
-      if (startKeyFound && result.size() < maxKeys) {
-        byte[] volumeInfo = store.get(this.getVolumeKey(volumeName));
-        if (volumeInfo == null) {
-          // Could not get volume info by given volume name,
-          // since the volume name is loaded from db,
-          // this probably means ksm db is corrupted or some entries are
-          // accidentally removed.
-          throw new KSMException("Volume info not found for " + volumeName,
-              ResultCodes.FAILED_VOLUME_NOT_FOUND);
-        }
-        VolumeInfo info = VolumeInfo.parseFrom(volumeInfo);
-        KsmVolumeArgs volumeArgs = KsmVolumeArgs.getFromProtobuf(info);
-        result.add(volumeArgs);
-      }
-    }
-
-    return result;
-  }
-
-  private VolumeList getVolumesByUser(String userName)
-      throws KSMException {
-    return getVolumesByUser(getUserKey(userName));
-  }
-
-  private VolumeList getVolumesByUser(byte[] userNameKey)
-      throws KSMException {
-    VolumeList volumes = null;
-    try {
-      byte[] volumesInBytes = store.get(userNameKey);
-      if (volumesInBytes == null) {
-        // No volume found for this user, return an empty list
-        return VolumeList.newBuilder().build();
-      }
-      volumes = VolumeList.parseFrom(volumesInBytes);
-    } catch (IOException e) {
-      throw new KSMException("Unable to get volumes info by the given user, "
-          + "metadata might be corrupted", e,
-          ResultCodes.FAILED_METADATA_ERROR);
-    }
-    return volumes;
-  }
-
-  private VolumeList getAllVolumes() throws IOException {
-    // Scan all users in database
-    KeyPrefixFilter filter =
-        new KeyPrefixFilter().addFilter(OzoneConsts.KSM_USER_PREFIX);
-    // We are not expecting a huge number of users per cluster,
-    // it should be fine to scan all users in db and return us a
-    // list of volume names in string per user.
-    List<Map.Entry<byte[], byte[]>> rangeKVs = store
-        .getSequentialRangeKVs(null, Integer.MAX_VALUE, filter);
-
-    VolumeList.Builder builder = VolumeList.newBuilder();
-    for (Map.Entry<byte[], byte[]> entry : rangeKVs) {
-      VolumeList volumes = this.getVolumesByUser(entry.getKey());
-      builder.addAllVolumeNames(volumes.getVolumeNamesList());
-    }
-
-    return builder.build();
-  }
-
-  @Override
-  public List<BlockGroup> getPendingDeletionKeys(final int count)
-      throws IOException {
-    List<BlockGroup> keyBlocksList = Lists.newArrayList();
-    List<Map.Entry<byte[], byte[]>> rangeResult =
-        store.getRangeKVs(null, count,
-            MetadataKeyFilters.getDeletingKeyFilter());
-    for (Map.Entry<byte[], byte[]> entry : rangeResult) {
-      KsmKeyInfo info =
-          KsmKeyInfo.getFromProtobuf(KeyInfo.parseFrom(entry.getValue()));
-      // Get block keys as a list.
-      KsmKeyLocationInfoGroup latest = info.getLatestVersionLocations();
-      if (latest == null) {
-        return Collections.emptyList();
-      }
-      List<BlockID> item = latest.getLocationList().stream()
-          .map(b->new BlockID(b.getContainerID(), b.getLocalID()))
-          .collect(Collectors.toList());
-      BlockGroup keyBlocks = BlockGroup.newBuilder()
-          .setKeyName(DFSUtil.bytes2String(entry.getKey()))
-          .addAllBlockIDs(item)
-          .build();
-      keyBlocksList.add(keyBlocks);
-    }
-    return keyBlocksList;
-  }
-
-  @Override
-  public List<BlockGroup> getExpiredOpenKeys() throws IOException {
-    List<BlockGroup> keyBlocksList = Lists.newArrayList();
-    long now = Time.now();
-    final MetadataKeyFilter openKeyFilter =
-        new KeyPrefixFilter().addFilter(OPEN_KEY_PREFIX);
-    List<Map.Entry<byte[], byte[]>> rangeResult =
-        store.getSequentialRangeKVs(null, Integer.MAX_VALUE,
-            openKeyFilter);
-    for (Map.Entry<byte[], byte[]> entry : rangeResult) {
-      KsmKeyInfo info =
-          KsmKeyInfo.getFromProtobuf(KeyInfo.parseFrom(entry.getValue()));
-      long lastModify = info.getModificationTime();
-      if (now - lastModify < this.openKeyExpireThresholdMS) {
-        // consider as may still be active, not hanging.
-        continue;
-      }
-      // Get block keys as a list.
-      List<BlockID> item = info.getLatestVersionLocations()
-          .getBlocksLatestVersionOnly().stream()
-          .map(b->new BlockID(b.getContainerID(), b.getLocalID()))
-          .collect(Collectors.toList());
-      BlockGroup keyBlocks = BlockGroup.newBuilder()
-          .setKeyName(DFSUtil.bytes2String(entry.getKey()))
-          .addAllBlockIDs(item)
-          .build();
-      keyBlocksList.add(keyBlocks);
-    }
-    return keyBlocksList;
-  }
-}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/061b1685/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/ksm/KSMMetrics.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/ksm/KSMMetrics.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/ksm/KSMMetrics.java
deleted file mode 100644
index 8ee67c3..0000000
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/ksm/KSMMetrics.java
+++ /dev/null
@@ -1,459 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.ozone.ksm;
-
-import com.google.common.annotations.VisibleForTesting;
-
-import org.apache.hadoop.classification.InterfaceAudience;
-import org.apache.hadoop.metrics2.MetricsSystem;
-import org.apache.hadoop.metrics2.annotation.Metric;
-import org.apache.hadoop.metrics2.annotation.Metrics;
-import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem;
-import org.apache.hadoop.metrics2.lib.MutableCounterLong;
-
-/**
- * This class is for maintaining KeySpaceManager statistics.
- */
-@InterfaceAudience.Private
-@Metrics(about="Key Space Manager Metrics", context="dfs")
-public class KSMMetrics {
-  private static final String SOURCE_NAME =
-      KSMMetrics.class.getSimpleName();
-
-  // KSM request type op metrics
-  private @Metric MutableCounterLong numVolumeOps;
-  private @Metric MutableCounterLong numBucketOps;
-  private @Metric MutableCounterLong numKeyOps;
-
-  // KSM op metrics
-  private @Metric MutableCounterLong numVolumeCreates;
-  private @Metric MutableCounterLong numVolumeUpdates;
-  private @Metric MutableCounterLong numVolumeInfos;
-  private @Metric MutableCounterLong numVolumeCheckAccesses;
-  private @Metric MutableCounterLong numBucketCreates;
-  private @Metric MutableCounterLong numVolumeDeletes;
-  private @Metric MutableCounterLong numBucketInfos;
-  private @Metric MutableCounterLong numBucketUpdates;
-  private @Metric MutableCounterLong numBucketDeletes;
-  private @Metric MutableCounterLong numKeyAllocate;
-  private @Metric MutableCounterLong numKeyLookup;
-  private @Metric MutableCounterLong numKeyRenames;
-  private @Metric MutableCounterLong numKeyDeletes;
-  private @Metric MutableCounterLong numBucketLists;
-  private @Metric MutableCounterLong numKeyLists;
-  private @Metric MutableCounterLong numVolumeLists;
-  private @Metric MutableCounterLong numKeyCommits;
-  private @Metric MutableCounterLong numAllocateBlockCalls;
-  private @Metric MutableCounterLong numGetServiceLists;
-
-  // Failure Metrics
-  private @Metric MutableCounterLong numVolumeCreateFails;
-  private @Metric MutableCounterLong numVolumeUpdateFails;
-  private @Metric MutableCounterLong numVolumeInfoFails;
-  private @Metric MutableCounterLong numVolumeDeleteFails;
-  private @Metric MutableCounterLong numBucketCreateFails;
-  private @Metric MutableCounterLong numVolumeCheckAccessFails;
-  private @Metric MutableCounterLong numBucketInfoFails;
-  private @Metric MutableCounterLong numBucketUpdateFails;
-  private @Metric MutableCounterLong numBucketDeleteFails;
-  private @Metric MutableCounterLong numKeyAllocateFails;
-  private @Metric MutableCounterLong numKeyLookupFails;
-  private @Metric MutableCounterLong numKeyRenameFails;
-  private @Metric MutableCounterLong numKeyDeleteFails;
-  private @Metric MutableCounterLong numBucketListFails;
-  private @Metric MutableCounterLong numKeyListFails;
-  private @Metric MutableCounterLong numVolumeListFails;
-  private @Metric MutableCounterLong numKeyCommitFails;
-  private @Metric MutableCounterLong numBlockAllocateCallFails;
-  private @Metric MutableCounterLong numGetServiceListFails;
-
-  public KSMMetrics() {
-  }
-
-  public static KSMMetrics create() {
-    MetricsSystem ms = DefaultMetricsSystem.instance();
-    return ms.register(SOURCE_NAME,
-        "Key Space Manager Metrics",
-        new KSMMetrics());
-  }
-
-  public void incNumVolumeCreates() {
-    numVolumeOps.incr();
-    numVolumeCreates.incr();
-  }
-
-  public void incNumVolumeUpdates() {
-    numVolumeOps.incr();
-    numVolumeUpdates.incr();
-  }
-
-  public void incNumVolumeInfos() {
-    numVolumeOps.incr();
-    numVolumeInfos.incr();
-  }
-
-  public void incNumVolumeDeletes() {
-    numVolumeOps.incr();
-    numVolumeDeletes.incr();
-  }
-
-  public void incNumVolumeCheckAccesses() {
-    numVolumeOps.incr();
-    numVolumeCheckAccesses.incr();
-  }
-
-  public void incNumBucketCreates() {
-    numBucketOps.incr();
-    numBucketCreates.incr();
-  }
-
-  public void incNumBucketInfos() {
-    numBucketOps.incr();
-    numBucketInfos.incr();
-  }
-
-  public void incNumBucketUpdates() {
-    numBucketOps.incr();
-    numBucketUpdates.incr();
-  }
-
-  public void incNumBucketDeletes() {
-    numBucketOps.incr();
-    numBucketDeletes.incr();
-  }
-
-  public void incNumBucketLists() {
-    numBucketOps.incr();
-    numBucketLists.incr();
-  }
-
-  public void incNumKeyLists() {
-    numKeyOps.incr();
-    numKeyLists.incr();
-  }
-
-  public void incNumVolumeLists() {
-    numVolumeOps.incr();
-    numVolumeLists.incr();
-  }
-
-  public void incNumGetServiceLists() {
-    numGetServiceLists.incr();
-  }
-
-  public void incNumVolumeCreateFails() {
-    numVolumeCreateFails.incr();
-  }
-
-  public void incNumVolumeUpdateFails() {
-    numVolumeUpdateFails.incr();
-  }
-
-  public void incNumVolumeInfoFails() {
-    numVolumeInfoFails.incr();
-  }
-
-  public void incNumVolumeDeleteFails() {
-    numVolumeDeleteFails.incr();
-  }
-
-  public void incNumVolumeCheckAccessFails() {
-    numVolumeCheckAccessFails.incr();
-  }
-
-  public void incNumBucketCreateFails() {
-    numBucketCreateFails.incr();
-  }
-
-  public void incNumBucketInfoFails() {
-    numBucketInfoFails.incr();
-  }
-
-  public void incNumBucketUpdateFails() {
-    numBucketUpdateFails.incr();
-  }
-
-  public void incNumBucketDeleteFails() {
-    numBucketDeleteFails.incr();
-  }
-
-  public void incNumKeyAllocates() {
-    numKeyOps.incr();
-    numKeyAllocate.incr();
-  }
-
-  public void incNumKeyAllocateFails() {
-    numKeyAllocateFails.incr();
-  }
-
-  public void incNumKeyLookups() {
-    numKeyOps.incr();
-    numKeyLookup.incr();
-  }
-
-  public void incNumKeyLookupFails() {
-    numKeyLookupFails.incr();
-  }
-
-  public void incNumKeyRenames() {
-    numKeyOps.incr();
-    numKeyRenames.incr();
-  }
-
-  public void incNumKeyRenameFails() {
-    numKeyOps.incr();
-    numKeyRenameFails.incr();
-  }
-
-  public void incNumKeyDeleteFails() {
-    numKeyDeleteFails.incr();
-  }
-
-  public void incNumKeyDeletes() {
-    numKeyOps.incr();
-    numKeyDeletes.incr();
-  }
-
-  public void incNumKeyCommits() {
-    numKeyOps.incr();
-    numKeyCommits.incr();
-  }
-
-  public void incNumKeyCommitFails() {
-    numKeyCommitFails.incr();
-  }
-
-  public void incNumBlockAllocateCalls() {
-    numAllocateBlockCalls.incr();
-  }
-
-  public void incNumBlockAllocateCallFails() {
-    numBlockAllocateCallFails.incr();
-  }
-
-  public void incNumBucketListFails() {
-    numBucketListFails.incr();
-  }
-
-  public void incNumKeyListFails() {
-    numKeyListFails.incr();
-  }
-
-  public void incNumVolumeListFails() {
-    numVolumeListFails.incr();
-  }
-
-  public void incNumGetServiceListFails() {
-    numGetServiceListFails.incr();
-  }
-
-  @VisibleForTesting
-  public long getNumVolumeCreates() {
-    return numVolumeCreates.value();
-  }
-
-  @VisibleForTesting
-  public long getNumVolumeUpdates() {
-    return numVolumeUpdates.value();
-  }
-
-  @VisibleForTesting
-  public long getNumVolumeInfos() {
-    return numVolumeInfos.value();
-  }
-
-  @VisibleForTesting
-  public long getNumVolumeDeletes() {
-    return numVolumeDeletes.value();
-  }
-
-  @VisibleForTesting
-  public long getNumVolumeCheckAccesses() {
-    return numVolumeCheckAccesses.value();
-  }
-
-  @VisibleForTesting
-  public long getNumBucketCreates() {
-    return numBucketCreates.value();
-  }
-
-  @VisibleForTesting
-  public long getNumBucketInfos() {
-    return numBucketInfos.value();
-  }
-
-  @VisibleForTesting
-  public long getNumBucketUpdates() {
-    return numBucketUpdates.value();
-  }
-
-  @VisibleForTesting
-  public long getNumBucketDeletes() {
-    return numBucketDeletes.value();
-  }
-
-  @VisibleForTesting
-  public long getNumBucketLists() {
-    return numBucketLists.value();
-  }
-
-  @VisibleForTesting
-  public long getNumVolumeLists() {
-    return numVolumeLists.value();
-  }
-
-  @VisibleForTesting
-  public long getNumKeyLists() {
-    return numKeyLists.value();
-  }
-
-  @VisibleForTesting
-  public long getNumGetServiceLists() {
-    return numGetServiceLists.value();
-  }
-
-  @VisibleForTesting
-  public long getNumVolumeCreateFails() {
-    return numVolumeCreateFails.value();
-  }
-
-  @VisibleForTesting
-  public long getNumVolumeUpdateFails() {
-    return numVolumeUpdateFails.value();
-  }
-
-  @VisibleForTesting
-  public long getNumVolumeInfoFails() {
-    return numVolumeInfoFails.value();
-  }
-
-  @VisibleForTesting
-  public long getNumVolumeDeleteFails() {
-    return numVolumeDeleteFails.value();
-  }
-
-  @VisibleForTesting
-  public long getNumVolumeCheckAccessFails() {
-    return numVolumeCheckAccessFails.value();
-  }
-
-  @VisibleForTesting
-  public long getNumBucketCreateFails() {
-    return numBucketCreateFails.value();
-  }
-
-  @VisibleForTesting
-  public long getNumBucketInfoFails() {
-    return numBucketInfoFails.value();
-  }
-
-  @VisibleForTesting
-  public long getNumBucketUpdateFails() {
-    return numBucketUpdateFails.value();
-  }
-
-  @VisibleForTesting
-  public long getNumBucketDeleteFails() {
-    return numBucketDeleteFails.value();
-  }
-
-  @VisibleForTesting
-  public long getNumKeyAllocates() {
-    return numKeyAllocate.value();
-  }
-
-  @VisibleForTesting
-  public long getNumKeyAllocateFails() {
-    return numKeyAllocateFails.value();
-  }
-
-  @VisibleForTesting
-  public long getNumKeyLookups() {
-    return numKeyLookup.value();
-  }
-
-  @VisibleForTesting
-  public long getNumKeyLookupFails() {
-    return numKeyLookupFails.value();
-  }
-
-  @VisibleForTesting
-  public long getNumKeyRenames() {
-    return numKeyRenames.value();
-  }
-
-  @VisibleForTesting
-  public long getNumKeyRenameFails() {
-    return numKeyRenameFails.value();
-  }
-
-  @VisibleForTesting
-  public long getNumKeyDeletes() {
-    return numKeyDeletes.value();
-  }
-
-  @VisibleForTesting
-  public long getNumKeyDeletesFails() {
-    return numKeyDeleteFails.value();
-  }
-
-  @VisibleForTesting
-  public long getNumBucketListFails() {
-    return numBucketListFails.value();
-  }
-
-  @VisibleForTesting
-  public long getNumKeyListFails() {
-    return numKeyListFails.value();
-  }
-
-  @VisibleForTesting
-  public long getNumVolumeListFails() {
-    return numVolumeListFails.value();
-  }
-
-  @VisibleForTesting
-  public long getNumKeyCommits() {
-    return numKeyCommits.value();
-  }
-
-  @VisibleForTesting
-  public long getNumKeyCommitFails() {
-    return numKeyCommitFails.value();
-  }
-
-  @VisibleForTesting
-  public long getNumBlockAllocates() {
-    return numAllocateBlockCalls.value();
-  }
-
-  @VisibleForTesting
-  public long getNumBlockAllocateFails() {
-    return numBlockAllocateCallFails.value();
-  }
-
-  @VisibleForTesting
-  public long getNumGetServiceListFails() {
-    return numGetServiceListFails.value();
-  }
-
-  public void unRegister() {
-    MetricsSystem ms = DefaultMetricsSystem.instance();
-    ms.unregisterSource(SOURCE_NAME);
-  }
-}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/061b1685/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/ksm/KSMStorage.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/ksm/KSMStorage.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/ksm/KSMStorage.java
deleted file mode 100644
index 015bed6..0000000
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/ksm/KSMStorage.java
+++ /dev/null
@@ -1,90 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.ozone.ksm;
-
-import java.io.IOException;
-import java.util.Properties;
-import java.util.UUID;
-
-import org.apache.hadoop.hdds.conf.OzoneConfiguration;
-import org.apache.hadoop.ozone.common.Storage;
-import org.apache.hadoop.hdds.protocol.proto.HddsProtos.NodeType;
-
-import static org.apache.hadoop.ozone.OzoneConsts.SCM_ID;
-import static org.apache.hadoop.hdds.server.ServerUtils.getOzoneMetaDirPath;
-
-/**
- * KSMStorage is responsible for management of the StorageDirectories used by
- * the KSM.
- */
-public class KSMStorage extends Storage {
-
-  public static final String STORAGE_DIR = "ksm";
-  public static final String KSM_ID = "ksmUuid";
-
-  /**
-   * Construct KSMStorage.
-   * @throws IOException if any directories are inaccessible.
-   */
-  public KSMStorage(OzoneConfiguration conf) throws IOException {
-    super(NodeType.KSM, getOzoneMetaDirPath(conf), STORAGE_DIR);
-  }
-
-  public void setScmId(String scmId) throws IOException {
-    if (getState() == StorageState.INITIALIZED) {
-      throw new IOException("KSM is already initialized.");
-    } else {
-      getStorageInfo().setProperty(SCM_ID, scmId);
-    }
-  }
-
-  public void setKsmId(String ksmId) throws IOException {
-    if (getState() == StorageState.INITIALIZED) {
-      throw new IOException("KSM is already initialized.");
-    } else {
-      getStorageInfo().setProperty(KSM_ID, ksmId);
-    }
-  }
-
-  /**
-   * Retrieves the SCM ID from the version file.
-   * @return SCM_ID
-   */
-  public String getScmId() {
-    return getStorageInfo().getProperty(SCM_ID);
-  }
-
-  /**
-   * Retrieves the KSM ID from the version file.
-   * @return KSM_ID
-   */
-  public String getKsmId() {
-    return getStorageInfo().getProperty(KSM_ID);
-  }
-
-  @Override
-  protected Properties getNodeProperties() {
-    String ksmId = getKsmId();
-    if (ksmId == null) {
-      ksmId = UUID.randomUUID().toString();
-    }
-    Properties ksmProperties = new Properties();
-    ksmProperties.setProperty(KSM_ID, ksmId);
-    return ksmProperties;
-  }
-}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hadoop/blob/061b1685/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/ksm/KeyDeletingService.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/ksm/KeyDeletingService.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/ksm/KeyDeletingService.java
deleted file mode 100644
index e51ab28..0000000
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/ksm/KeyDeletingService.java
+++ /dev/null
@@ -1,142 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.ozone.ksm;
-
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.ozone.common.DeleteBlockGroupResult;
-import org.apache.hadoop.ozone.common.BlockGroup;
-import org.apache.hadoop.hdds.scm.protocol.ScmBlockLocationProtocol;
-import org.apache.hadoop.util.StringUtils;
-import org.apache.hadoop.util.Time;
-import org.apache.hadoop.utils.BackgroundService;
-import org.apache.hadoop.utils.BackgroundTask;
-import org.apache.hadoop.utils.BackgroundTaskQueue;
-import org.apache.hadoop.utils.BackgroundTaskResult;
-import org.apache.hadoop.utils.BackgroundTaskResult.EmptyTaskResult;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import java.io.IOException;
-import java.util.List;
-import java.util.concurrent.TimeUnit;
-
-import static org.apache.hadoop.ozone.ksm.KSMConfigKeys.OZONE_KEY_DELETING_LIMIT_PER_TASK;
-import static org.apache.hadoop.ozone.ksm.KSMConfigKeys.OZONE_KEY_DELETING_LIMIT_PER_TASK_DEFAULT;
-
-/**
- * This is the background service to delete keys.
- * Scan the metadata of ksm periodically to get
- * the keys with prefix "#deleting" and ask scm to
- * delete metadata accordingly, if scm returns
- * success for keys, then clean up those keys.
- */
-public class KeyDeletingService extends BackgroundService {
-
-  private static final Logger LOG =
-      LoggerFactory.getLogger(KeyDeletingService.class);
-
-  // The thread pool size for key deleting service.
-  private final static int KEY_DELETING_CORE_POOL_SIZE = 2;
-
-  private final ScmBlockLocationProtocol scmClient;
-  private final KeyManager manager;
-  private final int keyLimitPerTask;
-
-  public KeyDeletingService(ScmBlockLocationProtocol scmClient,
-      KeyManager manager, long serviceInterval,
-      long serviceTimeout, Configuration conf) {
-    super("KeyDeletingService", serviceInterval, TimeUnit.MILLISECONDS,
-        KEY_DELETING_CORE_POOL_SIZE, serviceTimeout);
-    this.scmClient = scmClient;
-    this.manager = manager;
-    this.keyLimitPerTask = conf.getInt(OZONE_KEY_DELETING_LIMIT_PER_TASK,
-        OZONE_KEY_DELETING_LIMIT_PER_TASK_DEFAULT);
-  }
-
-  @Override
-  public BackgroundTaskQueue getTasks() {
-    BackgroundTaskQueue queue = new BackgroundTaskQueue();
-    queue.add(new KeyDeletingTask());
-    return queue;
-  }
-
-  /**
-   * A key deleting task scans KSM DB and looking for a certain number
-   * of pending-deletion keys, sends these keys along with their associated
-   * blocks to SCM for deletion. Once SCM confirms keys are deleted (once
-   * SCM persisted the blocks info in its deletedBlockLog), it removes
-   * these keys from the DB.
-   */
-  private class KeyDeletingTask implements
-      BackgroundTask<BackgroundTaskResult> {
-
-    @Override
-    public int getPriority() {
-      return 0;
-    }
-
-    @Override
-    public BackgroundTaskResult call() throws Exception {
-      try {
-        long startTime = Time.monotonicNow();
-        List<BlockGroup> keyBlocksList = manager
-            .getPendingDeletionKeys(keyLimitPerTask);
-        if (keyBlocksList.size() > 0) {
-          LOG.info("Found {} to-delete keys in KSM", keyBlocksList.size());
-          List<DeleteBlockGroupResult> results =
-              scmClient.deleteKeyBlocks(keyBlocksList);
-          for (DeleteBlockGroupResult result : results) {
-            if (result.isSuccess()) {
-              try {
-                // Purge key from KSM DB.
-                manager.deletePendingDeletionKey(result.getObjectKey());
-                LOG.debug("Key {} deleted from KSM DB", result.getObjectKey());
-              } catch (IOException e) {
-                // if a pending deletion key is failed to delete,
-                // print a warning here and retain it in this state,
-                // so that it can be attempt to delete next time.
-                LOG.warn("Failed to delete pending-deletion key {}",
-                    result.getObjectKey(), e);
-              }
-            } else {
-              // Key deletion failed, retry in next interval.
-              LOG.warn("Key {} deletion failed because some of the blocks"
-                  + " were failed to delete, failed blocks: {}",
-                  result.getObjectKey(),
-                  StringUtils.join(",", result.getFailedBlocks()));
-            }
-          }
-
-          if (!results.isEmpty()) {
-            LOG.info("Number of key deleted from KSM DB: {},"
-                + " task elapsed time: {}ms",
-                results.size(), Time.monotonicNow() - startTime);
-          }
-
-          return results::size;
-        } else {
-          LOG.debug("No pending deletion key found in KSM");
-        }
-      } catch (IOException e) {
-        LOG.error("Unable to get pending deletion keys, retry in"
-            + " next interval", e);
-      }
-      return EmptyTaskResult.newResult();
-    }
-  }
-}


---------------------------------------------------------------------
To unsubscribe, e-mail: common-commits-unsubscribe@hadoop.apache.org
For additional commands, e-mail: common-commits-help@hadoop.apache.org


[10/50] [abbrv] hadoop git commit: HDDS-175. Refactor ContainerInfo to remove Pipeline object from it. Contributed by Ajay Kumar.

Posted by vi...@apache.org.
http://git-wip-us.apache.org/repos/asf/hadoop/blob/7ca4f0ce/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestXceiverClientMetrics.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestXceiverClientMetrics.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestXceiverClientMetrics.java
index d6f5d32..a9781b1 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestXceiverClientMetrics.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestXceiverClientMetrics.java
@@ -32,7 +32,7 @@ import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos
     .ContainerCommandRequestProto;
 import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos
     .ContainerCommandResponseProto;
-import org.apache.hadoop.hdds.scm.container.common.helpers.ContainerInfo;
+import org.apache.hadoop.hdds.scm.container.common.helpers.ContainerWithPipeline;
 import org.apache.hadoop.metrics2.MetricsRecordBuilder;
 import org.apache.hadoop.ozone.MiniOzoneCluster;
 import org.apache.hadoop.hdds.conf.OzoneConfiguration;
@@ -79,14 +79,16 @@ public class TestXceiverClientMetrics {
     OzoneConfiguration conf = new OzoneConfiguration();
     XceiverClientManager clientManager = new XceiverClientManager(conf);
 
-    ContainerInfo container = storageContainerLocationClient
+    ContainerWithPipeline container = storageContainerLocationClient
         .allocateContainer(clientManager.getType(), clientManager.getFactor(),
             containerOwner);
-    XceiverClientSpi client = clientManager.acquireClient(
-        container.getPipeline(), container.getContainerID());
+    XceiverClientSpi client = clientManager
+        .acquireClient(container.getPipeline(),
+            container.getContainerInfo().getContainerID());
 
     ContainerCommandRequestProto request = ContainerTestHelper
-        .getCreateContainerRequest(container.getContainerID(),
+        .getCreateContainerRequest(
+            container.getContainerInfo().getContainerID(),
             container.getPipeline());
     client.sendCommand(request);
 
@@ -112,7 +114,7 @@ public class TestXceiverClientMetrics {
           // use async interface for testing pending metrics
           for (int i = 0; i < numRequest; i++) {
             BlockID blockID = ContainerTestHelper.
-                getTestBlockID(container.getContainerID());
+                getTestBlockID(container.getContainerInfo().getContainerID());
             ContainerProtos.ContainerCommandRequestProto smallFileRequest;
 
             smallFileRequest = ContainerTestHelper.getWriteSmallFileRequest(

http://git-wip-us.apache.org/repos/asf/hadoop/blob/7ca4f0ce/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/genesis/BenchMarkContainerStateMap.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/genesis/BenchMarkContainerStateMap.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/genesis/BenchMarkContainerStateMap.java
index 375450c..c344bbe 100644
--- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/genesis/BenchMarkContainerStateMap.java
+++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/genesis/BenchMarkContainerStateMap.java
@@ -60,7 +60,9 @@ public class BenchMarkContainerStateMap {
       try {
         ContainerInfo containerInfo = new ContainerInfo.Builder()
             .setState(CLOSED)
-            .setPipeline(pipeline)
+            .setPipelineName(pipeline.getPipelineName())
+            .setReplicationType(pipeline.getType())
+            .setReplicationFactor(pipeline.getFactor())
             // This is bytes allocated for blocks inside container, not the
             // container size
             .setAllocatedBytes(0)
@@ -81,7 +83,9 @@ public class BenchMarkContainerStateMap {
       try {
         ContainerInfo containerInfo = new ContainerInfo.Builder()
             .setState(OPEN)
-            .setPipeline(pipeline)
+            .setPipelineName(pipeline.getPipelineName())
+            .setReplicationType(pipeline.getType())
+            .setReplicationFactor(pipeline.getFactor())
             // This is bytes allocated for blocks inside container, not the
             // container size
             .setAllocatedBytes(0)
@@ -101,7 +105,9 @@ public class BenchMarkContainerStateMap {
     try {
       ContainerInfo containerInfo = new ContainerInfo.Builder()
           .setState(OPEN)
-          .setPipeline(pipeline)
+          .setPipelineName(pipeline.getPipelineName())
+          .setReplicationType(pipeline.getType())
+          .setReplicationFactor(pipeline.getFactor())
           // This is bytes allocated for blocks inside container, not the
           // container size
           .setAllocatedBytes(0)
@@ -166,7 +172,9 @@ public class BenchMarkContainerStateMap {
     int cid = state.containerID.incrementAndGet();
     ContainerInfo containerInfo = new ContainerInfo.Builder()
         .setState(CLOSED)
-        .setPipeline(pipeline)
+        .setPipelineName(pipeline.getPipelineName())
+        .setReplicationType(pipeline.getType())
+        .setReplicationFactor(pipeline.getFactor())
         // This is bytes allocated for blocks inside container, not the
         // container size
         .setAllocatedBytes(0)

http://git-wip-us.apache.org/repos/asf/hadoop/blob/7ca4f0ce/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/scm/cli/SQLCLI.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/scm/cli/SQLCLI.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/scm/cli/SQLCLI.java
index edc0d7b..26776c5 100644
--- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/scm/cli/SQLCLI.java
+++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/scm/cli/SQLCLI.java
@@ -19,6 +19,7 @@ package org.apache.hadoop.ozone.scm.cli;
 
 import com.google.common.base.Preconditions;
 import com.google.common.primitives.Longs;
+import com.google.protobuf.ByteString;
 import org.apache.commons.cli.BasicParser;
 import org.apache.commons.cli.CommandLine;
 import org.apache.commons.cli.Option;
@@ -37,7 +38,6 @@ import org.apache.hadoop.ozone.protocol.proto.KeySpaceManagerProtocolProtos.KeyI
 import org.apache.hadoop.ozone.protocol.proto.KeySpaceManagerProtocolProtos.VolumeInfo;
 import org.apache.hadoop.ozone.protocol.proto.KeySpaceManagerProtocolProtos.VolumeList;
 import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
-import org.apache.hadoop.hdds.protocol.proto.HddsProtos.Pipeline;
 import org.apache.hadoop.hdds.scm.container.common.helpers.ContainerInfo;
 import org.apache.hadoop.util.Tool;
 import org.apache.hadoop.util.ToolRunner;
@@ -86,12 +86,12 @@ public class SQLCLI  extends Configured implements Tool {
   private static final String CREATE_CONTAINER_INFO =
       "CREATE TABLE containerInfo (" +
           "containerID LONG PRIMARY KEY NOT NULL, " +
-          "leaderUUID TEXT NOT NULL)";
-  private static final String CREATE_CONTAINER_MEMBERS =
-      "CREATE TABLE containerMembers (" +
-          "containerName TEXT NOT NULL, " +
-          "datanodeUUID TEXT NOT NULL," +
-          "PRIMARY KEY(containerName, datanodeUUID));";
+          "replicationType TEXT NOT NULL," +
+          "replicationFactor TEXT NOT NULL," +
+          "usedBytes LONG NOT NULL," +
+          "allocatedBytes LONG NOT NULL," +
+          "owner TEXT," +
+          "numberOfKeys LONG)";
   private static final String CREATE_DATANODE_INFO =
       "CREATE TABLE datanodeInfo (" +
           "hostName TEXT NOT NULL, " +
@@ -99,8 +99,10 @@ public class SQLCLI  extends Configured implements Tool {
           "ipAddress TEXT, " +
           "containerPort INTEGER NOT NULL);";
   private static final String INSERT_CONTAINER_INFO =
-      "INSERT INTO containerInfo (containerID, leaderUUID) " +
-          "VALUES (\"%d\", \"%s\")";
+      "INSERT INTO containerInfo (containerID, replicationType, "
+          + "replicationFactor, usedBytes, allocatedBytes, owner, "
+          + "numberOfKeys) VALUES (\"%d\", \"%s\", \"%s\", \"%d\", \"%d\", "
+          + "\"%s\", \"%d\")";
   private static final String INSERT_DATANODE_INFO =
       "INSERT INTO datanodeInfo (hostname, datanodeUUid, ipAddress, " +
           "containerPort) " +
@@ -469,10 +471,7 @@ public class SQLCLI  extends Configured implements Tool {
         .setConf(conf).setDbFile(dbFile).build();
         Connection conn = connectDB(outPath.toString())) {
       executeSQL(conn, CREATE_CONTAINER_INFO);
-      executeSQL(conn, CREATE_CONTAINER_MEMBERS);
-      executeSQL(conn, CREATE_DATANODE_INFO);
 
-      HashSet<String> uuidChecked = new HashSet<>();
       dbStore.iterate(null, (key, value) -> {
         long containerID = Longs.fromByteArray(key);
         ContainerInfo containerInfo = null;
@@ -481,8 +480,7 @@ public class SQLCLI  extends Configured implements Tool {
         Preconditions.checkNotNull(containerInfo);
         try {
           //TODO: include container state to sqllite schema
-          insertContainerDB(conn, containerID,
-              containerInfo.getPipeline().getProtobufMessage(), uuidChecked);
+          insertContainerDB(conn, containerInfo, containerID);
           return true;
         } catch (SQLException e) {
           throw new IOException(e);
@@ -494,38 +492,23 @@ public class SQLCLI  extends Configured implements Tool {
   /**
    * Insert into the sqlite DB of container.db.
    * @param conn the connection to the sqlite DB.
-   * @param containerID the id of the container.
-   * @param pipeline the actual container pipeline object.
-   * @param uuidChecked the uuid that has been already inserted.
+   * @param containerInfo
+   * @param containerID
    * @throws SQLException throws exception.
    */
-  private void insertContainerDB(Connection conn, long containerID,
-      Pipeline pipeline, Set<String> uuidChecked) throws SQLException {
+  private void insertContainerDB(Connection conn, ContainerInfo containerInfo,
+      long containerID) throws SQLException {
     LOG.info("Insert to sql container db, for container {}", containerID);
     String insertContainerInfo = String.format(
         INSERT_CONTAINER_INFO, containerID,
-        pipeline.getLeaderID());
-    executeSQL(conn, insertContainerInfo);
+        containerInfo.getReplicationType(),
+        containerInfo.getReplicationFactor(),
+        containerInfo.getUsedBytes(),
+        containerInfo.getAllocatedBytes(),
+        containerInfo.getOwner(),
+        containerInfo.getNumberOfKeys());
 
-    for (HddsProtos.DatanodeDetailsProto dd :
-        pipeline.getMembersList()) {
-      String uuid = dd.getUuid();
-      if (!uuidChecked.contains(uuid)) {
-        // we may also not use this checked set, but catch exception instead
-        // but this seems a bit cleaner.
-        String ipAddr = dd.getIpAddress();
-        String hostName = dd.getHostName();
-        int containerPort = DatanodeDetails.getFromProtoBuf(dd)
-            .getPort(DatanodeDetails.Port.Name.STANDALONE).getValue();
-        String insertMachineInfo = String.format(
-            INSERT_DATANODE_INFO, hostName, uuid, ipAddr, containerPort);
-        executeSQL(conn, insertMachineInfo);
-        uuidChecked.add(uuid);
-      }
-      String insertContainerMembers = String.format(
-          INSERT_CONTAINER_MEMBERS, containerID, uuid);
-      executeSQL(conn, insertContainerMembers);
-    }
+    executeSQL(conn, insertContainerInfo);
     LOG.info("Insertion completed.");
   }
 


---------------------------------------------------------------------
To unsubscribe, e-mail: common-commits-unsubscribe@hadoop.apache.org
For additional commands, e-mail: common-commits-help@hadoop.apache.org


[03/50] [abbrv] hadoop git commit: HDFS-13714. Fix TestNameNodePrunesMissingStorages test failures on Windows. Contributed by Lukas Majercak.

Posted by vi...@apache.org.
HDFS-13714. Fix TestNameNodePrunesMissingStorages test failures on Windows. Contributed by Lukas Majercak.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/7296b644
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/7296b644
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/7296b644

Branch: refs/heads/HDFS-12090
Commit: 7296b644f7c44400f03995668573a4cfd2ee552c
Parents: fa9ef15
Author: Inigo Goiri <in...@apache.org>
Authored: Mon Jul 2 16:36:25 2018 -0700
Committer: Inigo Goiri <in...@apache.org>
Committed: Mon Jul 2 16:36:25 2018 -0700

----------------------------------------------------------------------
 .../blockmanagement/TestNameNodePrunesMissingStorages.java      | 5 +++--
 1 file changed, 3 insertions(+), 2 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/7296b644/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestNameNodePrunesMissingStorages.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestNameNodePrunesMissingStorages.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestNameNodePrunesMissingStorages.java
index 96d227d..05b6d30 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestNameNodePrunesMissingStorages.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestNameNodePrunesMissingStorages.java
@@ -294,8 +294,9 @@ public class TestNameNodePrunesMissingStorages {
       in = null;
       out.close();
       out = null;
-      newVersionFile.renameTo(versionFile);
-      success = true;
+      // Delete old version file
+      success = versionFile.delete();
+      success &= newVersionFile.renameTo(versionFile);
     } finally {
       if (in != null) {
         in.close();


---------------------------------------------------------------------
To unsubscribe, e-mail: common-commits-unsubscribe@hadoop.apache.org
For additional commands, e-mail: common-commits-help@hadoop.apache.org


[20/50] [abbrv] hadoop git commit: Merge branch 'trunk' of https://git-wip-us.apache.org/repos/asf/hadoop into trunk

Posted by vi...@apache.org.
Merge branch 'trunk' of https://git-wip-us.apache.org/repos/asf/hadoop into trunk


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/c163d179
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/c163d179
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/c163d179

Branch: refs/heads/HDFS-12090
Commit: c163d1797ade0f47d35b4a44381b8ef1dfec5b60
Parents: 0d9804d 99febe7
Author: Giovanni Matteo Fumarola <gi...@apache.com>
Authored: Thu Jul 5 10:55:05 2018 -0700
Committer: Giovanni Matteo Fumarola <gi...@apache.com>
Committed: Thu Jul 5 10:55:05 2018 -0700

----------------------------------------------------------------------
 .../dev-support/findbugs-exclude.xml            |  17 +-
 .../hadoop/yarn/api/records/Resource.java       |  13 +
 .../api/records/impl/LightWeightResource.java   |  23 +-
 .../scheduler/fair/ConfigurableResource.java    |  69 ++++-
 .../fair/FairSchedulerConfiguration.java        | 174 ++++++++++--
 .../allocation/AllocationFileQueueParser.java   |   2 +-
 .../resourcemanager/webapp/dao/AppInfo.java     |   2 +-
 .../webapp/dao/SchedulerInfo.java               |   8 +-
 .../fair/TestFairSchedulerConfiguration.java    | 160 ++++++++---
 .../webapp/TestRMWebServices.java               |  31 ++-
 .../webapp/TestRMWebServicesApps.java           |  14 +-
 ...estRMWebServicesAppsCustomResourceTypes.java | 242 +++++++++++++++++
 .../webapp/TestRMWebServicesCapacitySched.java  |  30 +-
 .../TestRMWebServicesConfigurationMutation.java |   5 +
 .../webapp/TestRMWebServicesFairScheduler.java  |  95 +++----
 .../TestRMWebServicesSchedulerActivities.java   |   2 +-
 ...ustomResourceTypesConfigurationProvider.java | 138 ++++++++++
 .../FairSchedulerJsonVerifications.java         | 139 ++++++++++
 .../FairSchedulerXmlVerifications.java          | 153 +++++++++++
 ...ervicesFairSchedulerCustomResourceTypes.java | 271 +++++++++++++++++++
 .../webapp/helper/AppInfoJsonVerifications.java | 123 +++++++++
 .../webapp/helper/AppInfoXmlVerifications.java  | 132 +++++++++
 .../webapp/helper/BufferedClientResponse.java   |  57 ++++
 .../helper/JsonCustomResourceTypeTestcase.java  |  77 ++++++
 .../ResourceRequestsJsonVerifications.java      | 252 +++++++++++++++++
 .../ResourceRequestsXmlVerifications.java       | 215 +++++++++++++++
 .../helper/XmlCustomResourceTypeTestCase.java   | 112 ++++++++
 .../src/site/markdown/FairScheduler.md          |   6 +-
 28 files changed, 2405 insertions(+), 157 deletions(-)
----------------------------------------------------------------------



---------------------------------------------------------------------
To unsubscribe, e-mail: common-commits-unsubscribe@hadoop.apache.org
For additional commands, e-mail: common-commits-help@hadoop.apache.org


[46/50] [abbrv] hadoop git commit: HDDS-235. Fix TestOzoneAuditLogger#verifyDefaultLogLevel. Contributed by Xiaoyu Yao.

Posted by vi...@apache.org.
HDDS-235. Fix TestOzoneAuditLogger#verifyDefaultLogLevel.
Contributed by Xiaoyu Yao.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/790c5635
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/790c5635
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/790c5635

Branch: refs/heads/HDFS-12090
Commit: 790c563511161c901b7b667e787baca8725f9249
Parents: 2f51cd6
Author: Anu Engineer <ae...@apache.org>
Authored: Sun Jul 8 11:27:54 2018 -0700
Committer: Anu Engineer <ae...@apache.org>
Committed: Sun Jul 8 11:27:54 2018 -0700

----------------------------------------------------------------------
 .../ozone/audit/TestOzoneAuditLogger.java       | 46 +++++++++++++-------
 1 file changed, 31 insertions(+), 15 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/790c5635/hadoop-hdds/common/src/test/java/org/apache/hadoop/ozone/audit/TestOzoneAuditLogger.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/common/src/test/java/org/apache/hadoop/ozone/audit/TestOzoneAuditLogger.java b/hadoop-hdds/common/src/test/java/org/apache/hadoop/ozone/audit/TestOzoneAuditLogger.java
index d3cc9e4..57a7d9e 100644
--- a/hadoop-hdds/common/src/test/java/org/apache/hadoop/ozone/audit/TestOzoneAuditLogger.java
+++ b/hadoop-hdds/common/src/test/java/org/apache/hadoop/ozone/audit/TestOzoneAuditLogger.java
@@ -100,7 +100,7 @@ public class TestOzoneAuditLogger {
     AUDIT.logReadFailure(DummyAction.READ_VOLUME, auditableObj.toAuditMap(), Level.ERROR);
     AUDIT.logReadFailure(DummyAction.READ_VOLUME, auditableObj.toAuditMap(), Level.ERROR,
         new Exception("test"));
-    verifyLog(null);
+    verifyNoLog();
   }
 
   /**
@@ -110,22 +110,38 @@ public class TestOzoneAuditLogger {
   public void notLogDebugEvents() throws IOException {
     AUDIT.logWriteSuccess(DummyAction.CREATE_VOLUME, auditableObj.toAuditMap(), Level.DEBUG);
     AUDIT.logReadSuccess(DummyAction.READ_VOLUME, auditableObj.toAuditMap(), Level.DEBUG);
-    verifyLog(null);
+    verifyNoLog();
   }
 
-  public void verifyLog(String expected) throws IOException {
-      File file = new File("audit.log");
-      List<String> lines = FileUtils.readLines(file, (String)null);
-      if(expected == null){
-        // When no log entry is expected, the log file must be empty
-        assertTrue(lines.size() == 0);
-      } else {
-        // When log entry is expected, the log file will contain one line and
-        // that must be equal to the expected string
-        assertTrue(expected.equalsIgnoreCase(lines.get(0)));
-        //empty the file
-        lines.remove(0);
-        FileUtils.writeLines(file, lines, false);
+  private void verifyLog(String expected) throws IOException {
+    File file = new File("audit.log");
+    List<String> lines = FileUtils.readLines(file, (String)null);
+      final int retry = 5;
+      int i = 0;
+      while (lines.isEmpty() && i < retry) {
+        lines = FileUtils.readLines(file, (String)null);
+        try {
+          Thread.sleep( 500 * (i + 1));
+        } catch(InterruptedException ie) {
+          Thread.currentThread().interrupt();
+          break;
+        }
+        i++;
       }
+
+      // When log entry is expected, the log file will contain one line and
+      // that must be equal to the expected string
+      assertTrue(lines.size() != 0);
+      assertTrue(expected.equalsIgnoreCase(lines.get(0)));
+      //empty the file
+      lines.remove(0);
+      FileUtils.writeLines(file, lines, false);
+  }
+
+  private void verifyNoLog() throws IOException {
+    File file = new File("audit.log");
+    List<String> lines = FileUtils.readLines(file, (String)null);
+    // When no log entry is expected, the log file must be empty
+    assertTrue(lines.size() == 0);
   }
 }


---------------------------------------------------------------------
To unsubscribe, e-mail: common-commits-unsubscribe@hadoop.apache.org
For additional commands, e-mail: common-commits-help@hadoop.apache.org


[16/50] [abbrv] hadoop git commit: YARN-7556. Fair scheduler configuration should allow resource types in the minResources and maxResources properties. (Daniel Templeton and Szilard Nemeth via Haibo Chen)

Posted by vi...@apache.org.
YARN-7556. Fair scheduler configuration should allow resource types in the minResources and maxResources properties. (Daniel Templeton and Szilard Nemeth via Haibo Chen)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/17262470
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/17262470
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/17262470

Branch: refs/heads/HDFS-12090
Commit: 17262470246232d0f0651d627a4961e55b1efe6a
Parents: 71df8c2
Author: Haibo Chen <ha...@apache.org>
Authored: Thu Jul 5 10:42:39 2018 -0700
Committer: Haibo Chen <ha...@apache.org>
Committed: Thu Jul 5 10:42:39 2018 -0700

----------------------------------------------------------------------
 .../dev-support/findbugs-exclude.xml            |  17 +-
 .../hadoop/yarn/api/records/Resource.java       |  13 ++
 .../api/records/impl/LightWeightResource.java   |  23 ++-
 .../scheduler/fair/ConfigurableResource.java    |  69 +++++++-
 .../fair/FairSchedulerConfiguration.java        | 174 ++++++++++++++++---
 .../allocation/AllocationFileQueueParser.java   |   2 +-
 .../fair/TestFairSchedulerConfiguration.java    | 151 ++++++++++++----
 .../src/site/markdown/FairScheduler.md          |   6 +-
 8 files changed, 385 insertions(+), 70 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/17262470/hadoop-yarn-project/hadoop-yarn/dev-support/findbugs-exclude.xml
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/dev-support/findbugs-exclude.xml b/hadoop-yarn-project/hadoop-yarn/dev-support/findbugs-exclude.xml
index 5841361..5cc81e5 100644
--- a/hadoop-yarn-project/hadoop-yarn/dev-support/findbugs-exclude.xml
+++ b/hadoop-yarn-project/hadoop-yarn/dev-support/findbugs-exclude.xml
@@ -67,11 +67,6 @@
   </Match>
   <Match>
     <Class name="org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.RMAppAttemptMetrics" />
-    <Method name="getLocalityStatistics" />
-    <Bug pattern="EI_EXPOSE_REP" />
-  </Match>
-  <Match>
-    <Class name="org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.RMAppAttemptMetrics" />
     <Method name="incNumAllocatedContainers"/>
     <Bug pattern="VO_VOLATILE_INCREMENT" />
   </Match>
@@ -118,6 +113,18 @@
     <Bug pattern="BC_UNCONFIRMED_CAST" />
   </Match>
 
+  <!-- Ignore exposed internal representations -->
+  <Match>
+    <Class name="org.apache.hadoop.yarn.api.records.Resource" />
+    <Method name="getResources" />
+    <Bug pattern="EI_EXPOSE_REP" />
+  </Match>
+  <Match>
+    <Class name="org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.RMAppAttemptMetrics" />
+    <Method name="getLocalityStatistics" />
+    <Bug pattern="EI_EXPOSE_REP" />
+  </Match>
+
   <!-- Object cast is based on the event type -->
   <Match>
     <Class name="org.apache.hadoop.yarn.server.nodemanager.timelineservice.NMTimelinePublisher" />

http://git-wip-us.apache.org/repos/asf/hadoop/blob/17262470/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/Resource.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/Resource.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/Resource.java
index 71a6b54..173d4c9 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/Resource.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/Resource.java
@@ -30,6 +30,7 @@ import org.apache.hadoop.classification.InterfaceAudience.Public;
 import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.classification.InterfaceStability.Evolving;
 import org.apache.hadoop.classification.InterfaceStability.Stable;
+import org.apache.hadoop.classification.InterfaceStability.Unstable;
 import org.apache.hadoop.yarn.api.ApplicationMasterProtocol;
 import org.apache.hadoop.yarn.api.protocolrecords.ResourceTypes;
 import org.apache.hadoop.yarn.api.records.impl.LightWeightResource;
@@ -75,6 +76,18 @@ public abstract class Resource implements Comparable<Resource> {
   @Private
   public static final int VCORES_INDEX = 1;
 
+  /**
+   * Return a new {@link Resource} instance with all resource values
+   * initialized to {@code value}.
+   * @param value the value to use for all resources
+   * @return a new {@link Resource} instance
+   */
+  @Private
+  @Unstable
+  public static Resource newInstance(long value) {
+    return new LightWeightResource(value);
+  }
+
   @Public
   @Stable
   public static Resource newInstance(int memory, int vCores) {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/17262470/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/impl/LightWeightResource.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/impl/LightWeightResource.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/impl/LightWeightResource.java
index a6e6432..77f77f3 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/impl/LightWeightResource.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/impl/LightWeightResource.java
@@ -18,9 +18,8 @@
 
 package org.apache.hadoop.yarn.api.records.impl;
 
-import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceAudience.Private;
 import org.apache.hadoop.classification.InterfaceStability.Unstable;
-import org.apache.hadoop.yarn.api.protocolrecords.ResourceTypes;
 import org.apache.hadoop.yarn.api.records.Resource;
 import org.apache.hadoop.yarn.api.records.ResourceInformation;
 import org.apache.hadoop.yarn.util.resource.ResourceUtils;
@@ -58,13 +57,29 @@ import static org.apache.hadoop.yarn.api.records.ResourceInformation.*;
  *
  * @see Resource
  */
-@InterfaceAudience.Private
+@Private
 @Unstable
 public class LightWeightResource extends Resource {
 
   private ResourceInformation memoryResInfo;
   private ResourceInformation vcoresResInfo;
 
+  /**
+   * Create a new {@link LightWeightResource} instance with all resource values
+   * initialized to {@code value}.
+   * @param value the value to use for all resources
+   */
+  public LightWeightResource(long value) {
+    ResourceInformation[] types = ResourceUtils.getResourceTypesArray();
+    initResourceInformations(value, value, types.length);
+
+    for (int i = 2; i < types.length; i++) {
+      resources[i] = new ResourceInformation();
+      ResourceInformation.copy(types[i], resources[i]);
+      resources[i].setValue(value);
+    }
+  }
+
   public LightWeightResource(long memory, int vcores) {
     int numberOfKnownResourceTypes = ResourceUtils
         .getNumberOfKnownResourceTypes();
@@ -91,7 +106,7 @@ public class LightWeightResource extends Resource {
     }
   }
 
-  private void initResourceInformations(long memory, int vcores,
+  private void initResourceInformations(long memory, long vcores,
       int numberOfKnownResourceTypes) {
     this.memoryResInfo = newDefaultInformation(MEMORY_URI, MEMORY_MB.getUnits(),
         memory);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/17262470/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/ConfigurableResource.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/ConfigurableResource.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/ConfigurableResource.java
index ecdd011..0c3b0dd 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/ConfigurableResource.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/ConfigurableResource.java
@@ -18,9 +18,13 @@
 
 package org.apache.hadoop.yarn.server.resourcemanager.scheduler.fair;
 
+import java.util.Arrays;
 import org.apache.hadoop.classification.InterfaceAudience.Private;
 import org.apache.hadoop.classification.InterfaceStability.Unstable;
 import org.apache.hadoop.yarn.api.records.Resource;
+import org.apache.hadoop.yarn.api.records.ResourceInformation;
+import org.apache.hadoop.yarn.exceptions.ResourceNotFoundException;
+import org.apache.hadoop.yarn.util.resource.ResourceUtils;
 
 /**
  * A {@code ConfigurableResource} object represents an entity that is used to
@@ -33,29 +37,53 @@ public class ConfigurableResource {
   private final Resource resource;
   private final double[] percentages;
 
-  public ConfigurableResource(double[] percentages) {
+  ConfigurableResource() {
+    this(getOneHundredPercentArray());
+  }
+
+  ConfigurableResource(double[] percentages) {
     this.percentages = percentages.clone();
     this.resource = null;
   }
 
+  ConfigurableResource(long value) {
+    this(Resource.newInstance(value));
+  }
+
   public ConfigurableResource(Resource resource) {
     this.percentages = null;
     this.resource = resource;
   }
 
+  private static double[] getOneHundredPercentArray() {
+    double[] resourcePercentages =
+        new double[ResourceUtils.getNumberOfKnownResourceTypes()];
+    Arrays.fill(resourcePercentages, 1.0);
+
+    return resourcePercentages;
+  }
+
   /**
    * Get resource by multiplying the cluster resource and the percentage of
    * each resource respectively. Return the absolute resource if either
    * {@code percentages} or {@code clusterResource} is null.
    *
    * @param clusterResource the cluster resource
-   * @return resource
+   * @return resource the resulting resource
    */
   public Resource getResource(Resource clusterResource) {
     if (percentages != null && clusterResource != null) {
       long memory = (long) (clusterResource.getMemorySize() * percentages[0]);
       int vcore = (int) (clusterResource.getVirtualCores() * percentages[1]);
-      return Resource.newInstance(memory, vcore);
+      Resource res = Resource.newInstance(memory, vcore);
+      ResourceInformation[] clusterInfo = clusterResource.getResources();
+
+      for (int i = 2; i < clusterInfo.length; i++) {
+        res.setResourceValue(i,
+            (long)(clusterInfo[i].getValue() * percentages[i]));
+      }
+
+      return res;
     } else {
       return resource;
     }
@@ -69,4 +97,39 @@ public class ConfigurableResource {
   public Resource getResource() {
     return resource;
   }
+
+  /**
+   * Set the value of the wrapped resource if this object isn't setup to use
+   * percentages. If this object is set to use percentages, this method has
+   * no effect.
+   *
+   * @param name the name of the resource
+   * @param value the value
+   */
+  void setValue(String name, long value) {
+    if (resource != null) {
+      resource.setResourceValue(name, value);
+    }
+  }
+
+  /**
+   * Set the percentage of the resource if this object is setup to use
+   * percentages. If this object is set to use percentages, this method has
+   * no effect.
+   *
+   * @param name the name of the resource
+   * @param value the percentage
+   */
+  void setPercentage(String name, double value) {
+    if (percentages != null) {
+      Integer index = ResourceUtils.getResourceTypeIndex().get(name);
+
+      if (index != null) {
+        percentages[index] = value;
+      } else {
+        throw new ResourceNotFoundException("The requested resource, \""
+            + name + "\", could not be found.");
+      }
+    }
+  }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/17262470/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FairSchedulerConfiguration.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FairSchedulerConfiguration.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FairSchedulerConfiguration.java
index b50e4bb..8c4932b 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FairSchedulerConfiguration.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FairSchedulerConfiguration.java
@@ -33,6 +33,7 @@ import org.apache.hadoop.util.StringUtils;
 import org.apache.hadoop.yarn.api.records.Resource;
 import org.apache.hadoop.yarn.api.records.ResourceInformation;
 import org.apache.hadoop.yarn.conf.YarnConfiguration;
+import org.apache.hadoop.yarn.exceptions.ResourceNotFoundException;
 import org.apache.hadoop.yarn.server.utils.BuilderUtils;
 import org.apache.hadoop.yarn.util.UnitsConversionUtil;
 import org.apache.hadoop.yarn.util.resource.ResourceUtils;
@@ -213,6 +214,9 @@ public class FairSchedulerConfiguration extends Configuration {
           CONF_PREFIX + "reservable-nodes";
   public static final float RESERVABLE_NODES_DEFAULT = 0.05f;
 
+  private static final String INVALID_RESOURCE_DEFINITION_PREFIX =
+          "Error reading resource config--invalid resource definition: ";
+
   public FairSchedulerConfiguration() {
     super();
   }
@@ -407,54 +411,167 @@ public class FairSchedulerConfiguration extends Configuration {
   }
 
   /**
-   * Parses a resource config value of a form like "1024", "1024 mb",
-   * or "1024 mb, 3 vcores". If no units are given, megabytes are assumed.
-   * 
-   * @throws AllocationConfigurationException
+   * Parses a resource config value in one of three forms:
+   * <ol>
+   * <li>Percentage: &quot;50%&quot; or &quot;40% memory, 60% cpu&quot;</li>
+   * <li>New style resources: &quot;vcores=10, memory-mb=1024&quot;
+   * or &quot;vcores=60%, memory-mb=40%&quot;</li>
+   * <li>Old style resources: &quot;1024 mb, 10 vcores&quot;</li>
+   * </ol>
+   * In new style resources, any resource that is not specified will be
+   * set to {@link Long#MAX_VALUE} or 100%, as appropriate. Also, in the new
+   * style resources, units are not allowed. Units are assumed from the resource
+   * manager's settings for the resources when the value isn't a percentage.
+   *
+   * @param value the resource definition to parse
+   * @return a {@link ConfigurableResource} that represents the parsed value
+   * @throws AllocationConfigurationException if the raw value is not a valid
+   * resource definition
    */
-  public static ConfigurableResource parseResourceConfigValue(String val)
+  public static ConfigurableResource parseResourceConfigValue(String value)
       throws AllocationConfigurationException {
+    return parseResourceConfigValue(value, Long.MAX_VALUE);
+  }
+
+  /**
+   * Parses a resource config value in one of three forms:
+   * <ol>
+   * <li>Percentage: &quot;50%&quot; or &quot;40% memory, 60% cpu&quot;</li>
+   * <li>New style resources: &quot;vcores=10, memory-mb=1024&quot;
+   * or &quot;vcores=60%, memory-mb=40%&quot;</li>
+   * <li>Old style resources: &quot;1024 mb, 10 vcores&quot;</li>
+   * </ol>
+   * In new style resources, any resource that is not specified will be
+   * set to {@code missing} or 0%, as appropriate. Also, in the new style
+   * resources, units are not allowed. Units are assumed from the resource
+   * manager's settings for the resources when the value isn't a percentage.
+   *
+   * The {@code missing} parameter is only used in the case of new style
+   * resources without percentages. With new style resources with percentages,
+   * any missing resources will be assumed to be 100% because percentages are
+   * only used with maximum resource limits.
+   *
+   * @param value the resource definition to parse
+   * @param missing the value to use for any unspecified resources
+   * @return a {@link ConfigurableResource} that represents the parsed value
+   * @throws AllocationConfigurationException if the raw value is not a valid
+   * resource definition
+   */
+  public static ConfigurableResource parseResourceConfigValue(String value,
+      long missing) throws AllocationConfigurationException {
     ConfigurableResource configurableResource;
+
+    if (value.trim().isEmpty()) {
+      throw new AllocationConfigurationException("Error reading resource "
+          + "config--the resource string is empty.");
+    }
+
     try {
-      val = StringUtils.toLowerCase(val);
-      if (val.contains("%")) {
-        configurableResource = new ConfigurableResource(
-            getResourcePercentage(val));
+      if (value.contains("=")) {
+        configurableResource = parseNewStyleResource(value, missing);
+      } else if (value.contains("%")) {
+        configurableResource = parseOldStyleResourceAsPercentage(value);
       } else {
-        int memory = findResource(val, "mb");
-        int vcores = findResource(val, "vcores");
-        configurableResource = new ConfigurableResource(
-            BuilderUtils.newResource(memory, vcores));
+        configurableResource = parseOldStyleResource(value);
       }
-    } catch (AllocationConfigurationException ex) {
-      throw ex;
-    } catch (Exception ex) {
+    } catch (RuntimeException ex) {
       throw new AllocationConfigurationException(
           "Error reading resource config", ex);
     }
+
+    return configurableResource;
+  }
+
+  private static ConfigurableResource parseNewStyleResource(String value,
+          long missing) throws AllocationConfigurationException {
+
+    final ConfigurableResource configurableResource;
+    boolean asPercent = value.contains("%");
+    if (asPercent) {
+      configurableResource = new ConfigurableResource();
+    } else {
+      configurableResource = new ConfigurableResource(missing);
+    }
+
+    String[] resources = value.split(",");
+    for (String resource : resources) {
+      String[] parts = resource.split("=");
+
+      if (parts.length != 2) {
+        throw createConfigException(value,
+                        "Every resource must be of the form: name=value.");
+      }
+
+      String resourceName = parts[0].trim();
+      String resourceValue = parts[1].trim();
+      try {
+        if (asPercent) {
+          configurableResource.setPercentage(resourceName,
+              findPercentage(resourceValue, ""));
+        } else {
+          configurableResource.setValue(resourceName,
+              Long.parseLong(resourceValue));
+        }
+      } catch (ResourceNotFoundException ex) {
+        throw createConfigException(value, "The "
+            + "resource name, \"" + resourceName + "\" was not "
+            + "recognized. Please check the value of "
+            + YarnConfiguration.RESOURCE_TYPES + " in the Resource "
+            + "Manager's configuration files.", ex);
+      } catch (NumberFormatException ex) {
+        // This only comes from Long.parseLong()
+        throw createConfigException(value, "The "
+            + "resource values must all be integers. \"" + resourceValue
+            + "\" is not an integer.", ex);
+      } catch (AllocationConfigurationException ex) {
+        // This only comes from findPercentage()
+        throw createConfigException(value, "The "
+            + "resource values must all be percentages. \""
+            + resourceValue + "\" is either not a number or does not "
+            + "include the '%' symbol.", ex);
+      }
+    }
     return configurableResource;
   }
 
+  private static ConfigurableResource parseOldStyleResourceAsPercentage(
+          String value) throws AllocationConfigurationException {
+    return new ConfigurableResource(
+            getResourcePercentage(StringUtils.toLowerCase(value)));
+  }
+
+  private static ConfigurableResource parseOldStyleResource(String value)
+          throws AllocationConfigurationException {
+    final String lCaseValue = StringUtils.toLowerCase(value);
+    int memory = findResource(lCaseValue, "mb");
+    int vcores = findResource(lCaseValue, "vcores");
+
+    return new ConfigurableResource(
+            BuilderUtils.newResource(memory, vcores));
+  }
+
   private static double[] getResourcePercentage(
       String val) throws AllocationConfigurationException {
     int numberOfKnownResourceTypes = ResourceUtils
         .getNumberOfKnownResourceTypes();
     double[] resourcePercentage = new double[numberOfKnownResourceTypes];
     String[] strings = val.split(",");
+
     if (strings.length == 1) {
       double percentage = findPercentage(strings[0], "");
       for (int i = 0; i < numberOfKnownResourceTypes; i++) {
-        resourcePercentage[i] = percentage/100;
+        resourcePercentage[i] = percentage;
       }
     } else {
-      resourcePercentage[0] = findPercentage(val, "memory")/100;
-      resourcePercentage[1] = findPercentage(val, "cpu")/100;
+      resourcePercentage[0] = findPercentage(val, "memory");
+      resourcePercentage[1] = findPercentage(val, "cpu");
     }
+
     return resourcePercentage;
   }
 
   private static double findPercentage(String val, String units)
-    throws AllocationConfigurationException {
+      throws AllocationConfigurationException {
     final Pattern pattern =
         Pattern.compile("((\\d+)(\\.\\d*)?)\\s*%\\s*" + units);
     Matcher matcher = pattern.matcher(val);
@@ -467,7 +584,22 @@ public class FairSchedulerConfiguration extends Configuration {
             units);
       }
     }
-    return Double.parseDouble(matcher.group(1));
+    return Double.parseDouble(matcher.group(1)) / 100.0;
+  }
+
+  private static AllocationConfigurationException createConfigException(
+          String value, String message) {
+    return createConfigException(value, message, null);
+  }
+
+  private static AllocationConfigurationException createConfigException(
+      String value, String message, Throwable t) {
+    String msg = INVALID_RESOURCE_DEFINITION_PREFIX + value + ". " + message;
+    if (t != null) {
+      return new AllocationConfigurationException(msg, t);
+    } else {
+      return new AllocationConfigurationException(msg);
+    }
   }
 
   public long getUpdateInterval() {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/17262470/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/allocation/AllocationFileQueueParser.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/allocation/AllocationFileQueueParser.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/allocation/AllocationFileQueueParser.java
index d5a436e..441c34a 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/allocation/AllocationFileQueueParser.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/allocation/AllocationFileQueueParser.java
@@ -134,7 +134,7 @@ public class AllocationFileQueueParser {
       if (MIN_RESOURCES.equals(field.getTagName())) {
         String text = getTrimmedTextData(field);
         ConfigurableResource val =
-            FairSchedulerConfiguration.parseResourceConfigValue(text);
+            FairSchedulerConfiguration.parseResourceConfigValue(text, 0L);
         builder.minQueueResources(queueName, val.getResource());
       } else if (MAX_RESOURCES.equals(field.getTagName())) {
         String text = getTrimmedTextData(field);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/17262470/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestFairSchedulerConfiguration.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestFairSchedulerConfiguration.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestFairSchedulerConfiguration.java
index 481645b..76a5af5 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestFairSchedulerConfiguration.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestFairSchedulerConfiguration.java
@@ -102,60 +102,145 @@ public class TestFairSchedulerConfiguration {
 
   @Test
   public void testParseResourceConfigValue() throws Exception {
-    assertEquals(BuilderUtils.newResource(1024, 2),
-        parseResourceConfigValue("2 vcores, 1024 mb").getResource());
-    assertEquals(BuilderUtils.newResource(1024, 2),
-        parseResourceConfigValue("1024 mb, 2 vcores").getResource());
-    assertEquals(BuilderUtils.newResource(1024, 2),
-        parseResourceConfigValue("2vcores,1024mb").getResource());
-    assertEquals(BuilderUtils.newResource(1024, 2),
-        parseResourceConfigValue("1024mb,2vcores").getResource());
-    assertEquals(BuilderUtils.newResource(1024, 2),
-        parseResourceConfigValue("1024   mb, 2    vcores").getResource());
-    assertEquals(BuilderUtils.newResource(1024, 2),
-        parseResourceConfigValue("1024 Mb, 2 vCores").getResource());
-    assertEquals(BuilderUtils.newResource(1024, 2),
-        parseResourceConfigValue("  1024 mb, 2 vcores  ").getResource());
-    assertEquals(BuilderUtils.newResource(1024, 2),
-        parseResourceConfigValue("  1024.3 mb, 2.35 vcores  ").getResource());
-    assertEquals(BuilderUtils.newResource(1024, 2),
-        parseResourceConfigValue("  1024. mb, 2. vcores  ").getResource());
-
-    Resource clusterResource = BuilderUtils.newResource(2048, 4);
-    assertEquals(BuilderUtils.newResource(1024, 2),
+    Resource expected = BuilderUtils.newResource(5 * 1024, 2);
+    Resource clusterResource = BuilderUtils.newResource(10 * 1024, 4);
+
+    assertEquals(expected,
+        parseResourceConfigValue("2 vcores, 5120 mb").getResource());
+    assertEquals(expected,
+        parseResourceConfigValue("5120 mb, 2 vcores").getResource());
+    assertEquals(expected,
+        parseResourceConfigValue("2vcores,5120mb").getResource());
+    assertEquals(expected,
+        parseResourceConfigValue("5120mb,2vcores").getResource());
+    assertEquals(expected,
+        parseResourceConfigValue("5120mb   mb, 2    vcores").getResource());
+    assertEquals(expected,
+        parseResourceConfigValue("5120 Mb, 2 vCores").getResource());
+    assertEquals(expected,
+        parseResourceConfigValue("  5120 mb, 2 vcores  ").getResource());
+    assertEquals(expected,
+        parseResourceConfigValue("  5120.3 mb, 2.35 vcores  ").getResource());
+    assertEquals(expected,
+        parseResourceConfigValue("  5120. mb, 2. vcores  ").getResource());
+
+    assertEquals(expected,
         parseResourceConfigValue("50% memory, 50% cpu").
             getResource(clusterResource));
-    assertEquals(BuilderUtils.newResource(1024, 2),
+    assertEquals(expected,
         parseResourceConfigValue("50% Memory, 50% CpU").
             getResource(clusterResource));
-    assertEquals(BuilderUtils.newResource(1024, 2),
-        parseResourceConfigValue("50%").getResource(clusterResource));
-    assertEquals(BuilderUtils.newResource(1024, 4),
+    assertEquals(BuilderUtils.newResource(5 * 1024, 4),
         parseResourceConfigValue("50% memory, 100% cpu").
         getResource(clusterResource));
-    assertEquals(BuilderUtils.newResource(1024, 4),
+    assertEquals(BuilderUtils.newResource(5 * 1024, 4),
         parseResourceConfigValue(" 100% cpu, 50% memory").
         getResource(clusterResource));
-    assertEquals(BuilderUtils.newResource(1024, 0),
+    assertEquals(BuilderUtils.newResource(5 * 1024, 0),
         parseResourceConfigValue("50% memory, 0% cpu").
             getResource(clusterResource));
-    assertEquals(BuilderUtils.newResource(1024, 2),
+    assertEquals(expected,
         parseResourceConfigValue("50 % memory, 50 % cpu").
             getResource(clusterResource));
-    assertEquals(BuilderUtils.newResource(1024, 2),
+    assertEquals(expected,
         parseResourceConfigValue("50%memory,50%cpu").
             getResource(clusterResource));
-    assertEquals(BuilderUtils.newResource(1024, 2),
+    assertEquals(expected,
         parseResourceConfigValue("  50  %  memory,  50  %  cpu  ").
             getResource(clusterResource));
-    assertEquals(BuilderUtils.newResource(1024, 2),
+    assertEquals(expected,
         parseResourceConfigValue("50.% memory, 50.% cpu").
             getResource(clusterResource));
-
-    clusterResource =  BuilderUtils.newResource(1024 * 10, 4);
     assertEquals(BuilderUtils.newResource((int)(1024 * 10 * 0.109), 2),
         parseResourceConfigValue("10.9% memory, 50.6% cpu").
             getResource(clusterResource));
+    assertEquals(expected,
+        parseResourceConfigValue("50%").getResource(clusterResource));
+
+    Configuration conf = new Configuration();
+
+    conf.set(YarnConfiguration.RESOURCE_TYPES, "test1");
+    ResourceUtils.resetResourceTypes(conf);
+
+    clusterResource = BuilderUtils.newResource(10 * 1024, 4);
+    expected = BuilderUtils.newResource(5 * 1024, 2);
+    expected.setResourceValue("test1", Long.MAX_VALUE);
+
+    assertEquals(expected,
+        parseResourceConfigValue("vcores=2, memory-mb=5120").getResource());
+    assertEquals(expected,
+        parseResourceConfigValue("memory-mb=5120, vcores=2").getResource());
+    assertEquals(expected,
+        parseResourceConfigValue("vcores=2,memory-mb=5120").getResource());
+    assertEquals(expected, parseResourceConfigValue(" vcores = 2 , "
+            + "memory-mb = 5120 ").getResource());
+
+    expected.setResourceValue("test1", 0L);
+
+    assertEquals(expected,
+        parseResourceConfigValue("vcores=2, memory-mb=5120", 0L).getResource());
+    assertEquals(expected,
+        parseResourceConfigValue("memory-mb=5120, vcores=2", 0L).getResource());
+    assertEquals(expected,
+        parseResourceConfigValue("vcores=2,memory-mb=5120", 0L).getResource());
+    assertEquals(expected,
+        parseResourceConfigValue(" vcores = 2 , memory-mb = 5120 ",
+            0L).getResource());
+
+    clusterResource.setResourceValue("test1", 8L);
+    expected.setResourceValue("test1", 4L);
+
+    assertEquals(expected,
+        parseResourceConfigValue("50%").getResource(clusterResource));
+    assertEquals(expected,
+        parseResourceConfigValue("vcores=2, memory-mb=5120, "
+            + "test1=4").getResource());
+    assertEquals(expected,
+        parseResourceConfigValue("test1=4, vcores=2, "
+            + "memory-mb=5120").getResource());
+    assertEquals(expected,
+        parseResourceConfigValue("memory-mb=5120, test1=4, "
+            + "vcores=2").getResource());
+    assertEquals(expected,
+        parseResourceConfigValue("vcores=2,memory-mb=5120,"
+            + "test1=4").getResource());
+    assertEquals(expected,
+        parseResourceConfigValue(" vcores = 2 , memory-mb = 5120 , "
+            + "test1 = 4 ").getResource());
+
+    expected = BuilderUtils.newResource(4 * 1024, 3);
+    expected.setResourceValue("test1", 8L);
+
+    assertEquals(expected,
+        parseResourceConfigValue("vcores=75%, "
+            + "memory-mb=40%").getResource(clusterResource));
+    assertEquals(expected,
+        parseResourceConfigValue("memory-mb=40%, "
+            + "vcores=75%").getResource(clusterResource));
+    assertEquals(expected,
+        parseResourceConfigValue("vcores=75%,"
+            + "memory-mb=40%").getResource(clusterResource));
+    assertEquals(expected,
+        parseResourceConfigValue(" vcores = 75 % , "
+            + "memory-mb = 40 % ").getResource(clusterResource));
+
+    expected.setResourceValue("test1", 4L);
+
+    assertEquals(expected,
+        parseResourceConfigValue("vcores=75%, memory-mb=40%, "
+            + "test1=50%").getResource(clusterResource));
+    assertEquals(expected,
+        parseResourceConfigValue("test1=50%, vcores=75%, "
+            + "memory-mb=40%").getResource(clusterResource));
+    assertEquals(expected,
+        parseResourceConfigValue("memory-mb=40%, test1=50%, "
+            + "vcores=75%").getResource(clusterResource));
+    assertEquals(expected,
+        parseResourceConfigValue("vcores=75%,memory-mb=40%,"
+            + "test1=50%").getResource(clusterResource));
+    assertEquals(expected,
+        parseResourceConfigValue(" vcores = 75 % , memory-mb = 40 % , "
+            + "test1 = 50 % ").getResource(clusterResource));
   }
   
   @Test(expected = AllocationConfigurationException.class)

http://git-wip-us.apache.org/repos/asf/hadoop/blob/17262470/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/FairScheduler.md
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/FairScheduler.md b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/FairScheduler.md
index 269f5b4..b5bcbf5 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/FairScheduler.md
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/FairScheduler.md
@@ -86,11 +86,11 @@ The allocation file must be in XML format. The format contains five types of ele
 
 * **Queue elements**: which represent queues. Queue elements can take an optional attribute 'type', which when set to 'parent' makes it a parent queue. This is useful when we want to create a parent queue without configuring any leaf queues. Each queue element may contain the following properties:
 
-    * **minResources**: minimum resources the queue is entitled to, in the form "X mb, Y vcores". For the single-resource fairness policy, the vcores value is ignored. If a queue's minimum share is not satisfied, it will be offered available resources before any other queue under the same parent. Under the single-resource fairness policy, a queue is considered unsatisfied if its memory usage is below its minimum memory share. Under dominant resource fairness, a queue is considered unsatisfied if its usage for its dominant resource with respect to the cluster capacity is below its minimum share for that resource. If multiple queues are unsatisfied in this situation, resources go to the queue with the smallest ratio between relevant resource usage and minimum. Note that it is possible that a queue that is below its minimum may not immediately get up to its minimum when it submits an application, because already-running jobs may be using those resources.
+    * **minResources**: minimum resources the queue is entitled to, in the form of "X mb, Y vcores" or "vcores=X, memory-mb=Y". The latter form is required when specifying resources other than memory and CPU. For the single-resource fairness policy, the vcores value is ignored. If a queue's minimum share is not satisfied, it will be offered available resources before any other queue under the same parent. Under the single-resource fairness policy, a queue is considered unsatisfied if its memory usage is below its minimum memory share. Under dominant resource fairness, a queue is considered unsatisfied if its usage for its dominant resource with respect to the cluster capacity is below its minimum share for that resource. If multiple queues are unsatisfied in this situation, resources go to the queue with the smallest ratio between relevant resource usage and its minimum. Note that it is possible for a queue that is below its minimum to not immediately get up to its minimum when an a
 pplication is submitted to the queue, because already-running jobs may be using those resources.
 
-    * **maxResources**: maximum resources a queue is allocated, expressed either in absolute values (X mb, Y vcores) or as a percentage of the cluster resources (X% memory, Y% cpu). A queue will not be assigned a container that would put its aggregate usage over this limit.
+    * **maxResources**: maximum resources a queue will allocated, expressed in the form of "X%", "X% cpu, Y% memory", "X mb, Y vcores", or "vcores=X, memory-mb=Y". The last form is required when specifying resources other than memory and CPU. In the last form, X and Y can either be a percentage or an integer resource value without units. In the latter case the units will be inferred from the default units configured for that resource. A queue will not be assigned a container that would put its aggregate usage over this limit.
 
-    * **maxChildResources**: maximum resources an ad hoc child queue is allocated, expressed either in absolute values (X mb, Y vcores) or as a percentage of the cluster resources (X% memory, Y% cpu). An ad hoc child queue will not be assigned a container that would put its aggregate usage over this limit.
+    * **maxChildResources**: maximum resources an ad hoc child queue will allocated, expressed in the form of "X%", "X% cpu, Y% memory", "X mb, Y vcores", or "vcores=X, memory-mb=Y". The last form is required when specifying resources other than memory and CPU. In the last form, X and Y can either be a percentage or an integer resource value without units. In the latter case the units will be inferred from the default units configured for that resource. An ad hoc child queue will not be assigned a container that would put its aggregate usage over this limit.
 
     * **maxRunningApps**: limit the number of apps from the queue to run at once
 


---------------------------------------------------------------------
To unsubscribe, e-mail: common-commits-unsubscribe@hadoop.apache.org
For additional commands, e-mail: common-commits-help@hadoop.apache.org


[12/50] [abbrv] hadoop git commit: HDDS-175. Refactor ContainerInfo to remove Pipeline object from it. Contributed by Ajay Kumar.

Posted by vi...@apache.org.
HDDS-175. Refactor ContainerInfo to remove Pipeline object from it.
Contributed by Ajay Kumar.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/7ca4f0ce
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/7ca4f0ce
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/7ca4f0ce

Branch: refs/heads/HDFS-12090
Commit: 7ca4f0cefa220c752920822c8d16469ab3b09b37
Parents: 93ac01c
Author: Anu Engineer <ae...@apache.org>
Authored: Tue Jul 3 13:30:19 2018 -0700
Committer: Anu Engineer <ae...@apache.org>
Committed: Tue Jul 3 14:11:52 2018 -0700

----------------------------------------------------------------------
 .../scm/client/ContainerOperationClient.java    | 109 +++++++++---
 .../hadoop/hdds/scm/client/ScmClient.java       |  38 ++++-
 .../container/common/helpers/ContainerInfo.java | 167 +++++++++++++------
 .../common/helpers/ContainerWithPipeline.java   | 131 +++++++++++++++
 .../StorageContainerLocationProtocol.java       |  13 +-
 ...rLocationProtocolClientSideTranslatorPB.java |  26 ++-
 ...rLocationProtocolServerSideTranslatorPB.java |  25 ++-
 .../StorageContainerLocationProtocol.proto      |  15 +-
 hadoop-hdds/common/src/main/proto/hdds.proto    |   9 +-
 .../hadoop/hdds/scm/block/BlockManagerImpl.java |  80 +++++----
 .../block/DatanodeDeletedBlockTransactions.java |  11 +-
 .../container/CloseContainerEventHandler.java   |  26 ++-
 .../hdds/scm/container/ContainerMapping.java    | 128 +++++++++++---
 .../scm/container/ContainerStateManager.java    |  30 +++-
 .../hadoop/hdds/scm/container/Mapping.java      |  26 ++-
 .../scm/container/closer/ContainerCloser.java   |  15 +-
 .../scm/container/states/ContainerStateMap.java |   7 +-
 .../hdds/scm/pipelines/PipelineManager.java     |  27 ++-
 .../hdds/scm/pipelines/PipelineSelector.java    |  16 ++
 .../scm/pipelines/ratis/RatisManagerImpl.java   |   1 +
 .../standalone/StandaloneManagerImpl.java       |   1 +
 .../scm/server/SCMClientProtocolServer.java     |  14 +-
 .../hdds/scm/block/TestDeletedBlockLog.java     |  15 +-
 .../TestCloseContainerEventHandler.java         |  31 ++--
 .../scm/container/TestContainerMapping.java     |  27 +--
 .../container/closer/TestContainerCloser.java   |  18 +-
 .../hdds/scm/node/TestContainerPlacement.java   |   6 +-
 .../cli/container/CloseContainerHandler.java    |  10 +-
 .../cli/container/DeleteContainerHandler.java   |   9 +-
 .../scm/cli/container/InfoContainerHandler.java |  11 +-
 .../ozone/client/io/ChunkGroupInputStream.java  |  15 +-
 .../ozone/client/io/ChunkGroupOutputStream.java |   9 +-
 .../hadoop/ozone/protocolPB/OzonePBHelper.java  |  30 ++++
 .../container/TestContainerStateManager.java    | 161 ++++++++++--------
 .../hadoop/ozone/TestContainerOperations.java   |  11 +-
 .../ozone/TestStorageContainerManager.java      |   6 +-
 .../TestStorageContainerManagerHelper.java      |  10 +-
 .../ozone/client/rpc/TestOzoneRpcClient.java    |   4 +-
 .../TestCloseContainerByPipeline.java           |  21 +--
 .../ozone/ksm/TestContainerReportWithKeys.java  |   2 +-
 .../hadoop/ozone/scm/TestAllocateContainer.java |   6 +-
 .../ozone/scm/TestContainerSmallFile.java       |  36 ++--
 .../org/apache/hadoop/ozone/scm/TestSCMCli.java | 135 ++++++++-------
 .../ozone/scm/TestXceiverClientManager.java     |  62 ++++---
 .../ozone/scm/TestXceiverClientMetrics.java     |  14 +-
 .../genesis/BenchMarkContainerStateMap.java     |  16 +-
 .../org/apache/hadoop/ozone/scm/cli/SQLCLI.java |  63 +++----
 47 files changed, 1139 insertions(+), 504 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/7ca4f0ce/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/client/ContainerOperationClient.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/client/ContainerOperationClient.java b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/client/ContainerOperationClient.java
index 07f6cec..b04f8c4 100644
--- a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/client/ContainerOperationClient.java
+++ b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/client/ContainerOperationClient.java
@@ -20,6 +20,7 @@ package org.apache.hadoop.hdds.scm.client;
 import com.google.common.base.Preconditions;
 import org.apache.hadoop.hdds.scm.XceiverClientManager;
 import org.apache.hadoop.hdds.scm.XceiverClientSpi;
+import org.apache.hadoop.hdds.scm.container.common.helpers.ContainerWithPipeline;
 import org.apache.hadoop.hdds.scm.container.common.helpers.ContainerInfo;
 import org.apache.hadoop.hdds.scm.container.common.helpers.Pipeline;
 import org.apache.hadoop.hdds.scm.protocolPB
@@ -87,16 +88,17 @@ public class ContainerOperationClient implements ScmClient {
    * @inheritDoc
    */
   @Override
-  public ContainerInfo createContainer(String owner)
+  public ContainerWithPipeline createContainer(String owner)
       throws IOException {
     XceiverClientSpi client = null;
     try {
-      ContainerInfo container =
+      ContainerWithPipeline containerWithPipeline =
           storageContainerLocationClient.allocateContainer(
               xceiverClientManager.getType(),
               xceiverClientManager.getFactor(), owner);
-      Pipeline pipeline = container.getPipeline();
-      client = xceiverClientManager.acquireClient(pipeline, container.getContainerID());
+      Pipeline pipeline = containerWithPipeline.getPipeline();
+      client = xceiverClientManager.acquireClient(pipeline,
+          containerWithPipeline.getContainerInfo().getContainerID());
 
       // Allocated State means that SCM has allocated this pipeline in its
       // namespace. The client needs to create the pipeline on the machines
@@ -106,8 +108,9 @@ public class ContainerOperationClient implements ScmClient {
       if (pipeline.getLifeCycleState() == ALLOCATED) {
         createPipeline(client, pipeline);
       }
-      createContainer(client, container.getContainerID());
-      return container;
+      createContainer(client,
+          containerWithPipeline.getContainerInfo().getContainerID());
+      return containerWithPipeline;
     } finally {
       if (client != null) {
         xceiverClientManager.releaseClient(client);
@@ -197,17 +200,17 @@ public class ContainerOperationClient implements ScmClient {
    * @inheritDoc
    */
   @Override
-  public ContainerInfo createContainer(HddsProtos.ReplicationType type,
+  public ContainerWithPipeline createContainer(HddsProtos.ReplicationType type,
       HddsProtos.ReplicationFactor factor, String owner) throws IOException {
     XceiverClientSpi client = null;
     try {
       // allocate container on SCM.
-      ContainerInfo container =
+      ContainerWithPipeline containerWithPipeline =
           storageContainerLocationClient.allocateContainer(type, factor,
               owner);
-      Pipeline pipeline = container.getPipeline();
+      Pipeline pipeline = containerWithPipeline.getPipeline();
       client = xceiverClientManager.acquireClient(pipeline,
-          container.getContainerID());
+          containerWithPipeline.getContainerInfo().getContainerID());
 
       // Allocated State means that SCM has allocated this pipeline in its
       // namespace. The client needs to create the pipeline on the machines
@@ -217,9 +220,10 @@ public class ContainerOperationClient implements ScmClient {
       }
       // connect to pipeline leader and allocate container on leader datanode.
       client = xceiverClientManager.acquireClient(pipeline,
-          container.getContainerID());
-      createContainer(client, container.getContainerID());
-      return container;
+          containerWithPipeline.getContainerInfo().getContainerID());
+      createContainer(client,
+          containerWithPipeline.getContainerInfo().getContainerID());
+      return containerWithPipeline;
     } finally {
       if (client != null) {
         xceiverClientManager.releaseClient(client);
@@ -256,24 +260,27 @@ public class ContainerOperationClient implements ScmClient {
   }
 
   /**
-   * Delete the container, this will release any resource it uses.
-   * @param pipeline - Pipeline that represents the container.
-   * @param force - True to forcibly delete the container.
+   * Deletes an existing container.
+   *
+   * @param containerId - ID of the container.
+   * @param pipeline    - Pipeline that represents the container.
+   * @param force       - true to forcibly delete the container.
    * @throws IOException
    */
   @Override
-  public void deleteContainer(long containerID, Pipeline pipeline, boolean force)
-      throws IOException {
+  public void deleteContainer(long containerId, Pipeline pipeline,
+      boolean force) throws IOException {
     XceiverClientSpi client = null;
     try {
-      client = xceiverClientManager.acquireClient(pipeline, containerID);
+      client = xceiverClientManager.acquireClient(pipeline, containerId);
       String traceID = UUID.randomUUID().toString();
-      ContainerProtocolCalls.deleteContainer(client, containerID, force, traceID);
+      ContainerProtocolCalls
+          .deleteContainer(client, containerId, force, traceID);
       storageContainerLocationClient
-          .deleteContainer(containerID);
+          .deleteContainer(containerId);
       if (LOG.isDebugEnabled()) {
         LOG.debug("Deleted container {}, leader: {}, machines: {} ",
-            containerID,
+            containerId,
             pipeline.getLeader(),
             pipeline.getMachines());
       }
@@ -285,6 +292,19 @@ public class ContainerOperationClient implements ScmClient {
   }
 
   /**
+   * Delete the container, this will release any resource it uses.
+   * @param containerID - containerID.
+   * @param force - True to forcibly delete the container.
+   * @throws IOException
+   */
+  @Override
+  public void deleteContainer(long containerID, boolean force)
+      throws IOException {
+    ContainerWithPipeline info = getContainerWithPipeline(containerID);
+    deleteContainer(containerID, info.getPipeline(), force);
+  }
+
+  /**
    * {@inheritDoc}
    */
   @Override
@@ -297,9 +317,9 @@ public class ContainerOperationClient implements ScmClient {
   /**
    * Get meta data from an existing container.
    *
-   * @param pipeline - pipeline that represents the container.
-   * @return ContainerInfo - a message of protobuf which has basic info
-   * of a container.
+   * @param containerID - ID of the container.
+   * @param pipeline    - Pipeline where the container is located.
+   * @return ContainerInfo
    * @throws IOException
    */
   @Override
@@ -326,6 +346,19 @@ public class ContainerOperationClient implements ScmClient {
   }
 
   /**
+   * Get meta data from an existing container.
+   * @param containerID - ID of the container.
+   * @return ContainerInfo - a message of protobuf which has basic info
+   * of a container.
+   * @throws IOException
+   */
+  @Override
+  public ContainerData readContainer(long containerID) throws IOException {
+    ContainerWithPipeline info = getContainerWithPipeline(containerID);
+    return readContainer(containerID, info.getPipeline());
+  }
+
+  /**
    * Given an id, return the pipeline associated with the container.
    * @param containerId - String Container ID
    * @return Pipeline of the existing container, corresponding to the given id.
@@ -338,6 +371,19 @@ public class ContainerOperationClient implements ScmClient {
   }
 
   /**
+   * Gets a container by Name -- Throws if the container does not exist.
+   *
+   * @param containerId - Container ID
+   * @return ContainerWithPipeline
+   * @throws IOException
+   */
+  @Override
+  public ContainerWithPipeline getContainerWithPipeline(long containerId)
+      throws IOException {
+    return storageContainerLocationClient.getContainerWithPipeline(containerId);
+  }
+
+  /**
    * Close a container.
    *
    * @param pipeline the container to be closed.
@@ -392,6 +438,19 @@ public class ContainerOperationClient implements ScmClient {
   }
 
   /**
+   * Close a container.
+   *
+   * @throws IOException
+   */
+  @Override
+  public void closeContainer(long containerId)
+      throws IOException {
+    ContainerWithPipeline info = getContainerWithPipeline(containerId);
+    Pipeline pipeline = info.getPipeline();
+    closeContainer(containerId, pipeline);
+  }
+
+  /**
    * Get the the current usage information.
    * @param containerID - ID of the container.
    * @return the size of the given container.

http://git-wip-us.apache.org/repos/asf/hadoop/blob/7ca4f0ce/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/client/ScmClient.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/client/ScmClient.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/client/ScmClient.java
index b52819a..ecb2173 100644
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/client/ScmClient.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/client/ScmClient.java
@@ -18,6 +18,7 @@
 package org.apache.hadoop.hdds.scm.client;
 
 import org.apache.hadoop.classification.InterfaceStability;
+import org.apache.hadoop.hdds.scm.container.common.helpers.ContainerWithPipeline;
 import org.apache.hadoop.hdds.scm.container.common.helpers.ContainerInfo;
 import org.apache.hadoop.hdds.scm.container.common.helpers.Pipeline;
 import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos
@@ -45,7 +46,7 @@ public interface ScmClient {
    * @return ContainerInfo
    * @throws IOException
    */
-  ContainerInfo createContainer(String owner) throws IOException;
+  ContainerWithPipeline createContainer(String owner) throws IOException;
 
   /**
    * Gets a container by Name -- Throws if the container does not exist.
@@ -56,6 +57,14 @@ public interface ScmClient {
   ContainerInfo getContainer(long containerId) throws IOException;
 
   /**
+   * Gets a container by Name -- Throws if the container does not exist.
+   * @param containerId - Container ID
+   * @return ContainerWithPipeline
+   * @throws IOException
+   */
+  ContainerWithPipeline getContainerWithPipeline(long containerId) throws IOException;
+
+  /**
    * Close a container.
    *
    * @param containerId - ID of the container.
@@ -65,6 +74,14 @@ public interface ScmClient {
   void closeContainer(long containerId, Pipeline pipeline) throws IOException;
 
   /**
+   * Close a container.
+   *
+   * @param containerId - ID of the container.
+   * @throws IOException
+   */
+  void closeContainer(long containerId) throws IOException;
+
+  /**
    * Deletes an existing container.
    * @param containerId - ID of the container.
    * @param pipeline - Pipeline that represents the container.
@@ -74,6 +91,14 @@ public interface ScmClient {
   void deleteContainer(long containerId, Pipeline pipeline, boolean force) throws IOException;
 
   /**
+   * Deletes an existing container.
+   * @param containerId - ID of the container.
+   * @param force - true to forcibly delete the container.
+   * @throws IOException
+   */
+  void deleteContainer(long containerId, boolean force) throws IOException;
+
+  /**
    * Lists a range of containers and get their info.
    *
    * @param startContainerID start containerID.
@@ -96,6 +121,15 @@ public interface ScmClient {
       throws IOException;
 
   /**
+   * Read meta data from an existing container.
+   * @param containerID - ID of the container.
+   * @return ContainerInfo
+   * @throws IOException
+   */
+  ContainerData readContainer(long containerID)
+      throws IOException;
+
+  /**
    * Gets the container size -- Computed by SCM from Container Reports.
    * @param containerID - ID of the container.
    * @return number of bytes used by this container.
@@ -110,7 +144,7 @@ public interface ScmClient {
    * @return ContainerInfo
    * @throws IOException - in case of error.
    */
-  ContainerInfo createContainer(HddsProtos.ReplicationType type,
+  ContainerWithPipeline createContainer(HddsProtos.ReplicationType type,
       HddsProtos.ReplicationFactor replicationFactor,
       String owner) throws IOException;
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/7ca4f0ce/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/container/common/helpers/ContainerInfo.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/container/common/helpers/ContainerInfo.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/container/common/helpers/ContainerInfo.java
index ee05c87..9593717 100644
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/container/common/helpers/ContainerInfo.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/container/common/helpers/ContainerInfo.java
@@ -15,34 +15,39 @@
  *  See the License for the specific language governing permissions and
  *  limitations under the License.
  */
-
 package org.apache.hadoop.hdds.scm.container.common.helpers;
 
+import static java.lang.Math.max;
+
 import com.fasterxml.jackson.annotation.JsonAutoDetect;
 import com.fasterxml.jackson.annotation.JsonIgnore;
 import com.fasterxml.jackson.annotation.PropertyAccessor;
 import com.fasterxml.jackson.databind.ObjectMapper;
 import com.fasterxml.jackson.databind.ObjectWriter;
 import com.google.common.base.Preconditions;
+import java.io.Externalizable;
+import java.io.IOException;
+import java.io.ObjectInput;
+import java.io.ObjectOutput;
+import java.util.Arrays;
+import java.util.Comparator;
 import org.apache.commons.lang3.builder.EqualsBuilder;
 import org.apache.commons.lang3.builder.HashCodeBuilder;
-import org.apache.hadoop.hdds.scm.container.ContainerID;
 import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
+import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor;
+import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationType;
+import org.apache.hadoop.hdds.scm.container.ContainerID;
 import org.apache.hadoop.util.Time;
 
-import java.io.IOException;
-import java.util.Arrays;
-import java.util.Comparator;
-
-import static java.lang.Math.max;
-
 /**
  * Class wraps ozone container info.
  */
-public class ContainerInfo
-    implements Comparator<ContainerInfo>, Comparable<ContainerInfo> {
+public class ContainerInfo implements Comparator<ContainerInfo>,
+    Comparable<ContainerInfo>, Externalizable {
 
   private static final ObjectWriter WRITER;
+  private static final String SERIALIZATION_ERROR_MSG = "Java serialization not"
+      + " supported. Use protobuf instead.";
 
   static {
     ObjectMapper mapper = new ObjectMapper();
@@ -53,7 +58,9 @@ public class ContainerInfo
   }
 
   private HddsProtos.LifeCycleState state;
-  private Pipeline pipeline;
+  private String pipelineName;
+  private ReplicationFactor replicationFactor;
+  private ReplicationType replicationType;
   // Bytes allocated by SCM for clients.
   private long allocatedBytes;
   // Actual container usage, updated through heartbeat.
@@ -75,15 +82,17 @@ public class ContainerInfo
   ContainerInfo(
       long containerID,
       HddsProtos.LifeCycleState state,
-      Pipeline pipeline,
+      String pipelineName,
       long allocatedBytes,
       long usedBytes,
       long numberOfKeys,
       long stateEnterTime,
       String owner,
-      long deleteTransactionId) {
+      long deleteTransactionId,
+      ReplicationFactor replicationFactor,
+      ReplicationType repType) {
     this.containerID = containerID;
-    this.pipeline = pipeline;
+    this.pipelineName = pipelineName;
     this.allocatedBytes = allocatedBytes;
     this.usedBytes = usedBytes;
     this.numberOfKeys = numberOfKeys;
@@ -92,6 +101,8 @@ public class ContainerInfo
     this.stateEnterTime = stateEnterTime;
     this.owner = owner;
     this.deleteTransactionId = deleteTransactionId;
+    this.replicationFactor = replicationFactor;
+    this.replicationType = repType;
   }
 
   /**
@@ -102,16 +113,18 @@ public class ContainerInfo
 
   public static ContainerInfo fromProtobuf(HddsProtos.SCMContainerInfo info) {
     ContainerInfo.Builder builder = new ContainerInfo.Builder();
-    builder.setPipeline(Pipeline.getFromProtoBuf(info.getPipeline()));
-    builder.setAllocatedBytes(info.getAllocatedBytes());
-    builder.setUsedBytes(info.getUsedBytes());
-    builder.setNumberOfKeys(info.getNumberOfKeys());
-    builder.setState(info.getState());
-    builder.setStateEnterTime(info.getStateEnterTime());
-    builder.setOwner(info.getOwner());
-    builder.setContainerID(info.getContainerID());
-    builder.setDeleteTransactionId(info.getDeleteTransactionId());
-    return builder.build();
+    return builder.setPipelineName(info.getPipelineName())
+        .setAllocatedBytes(info.getAllocatedBytes())
+        .setUsedBytes(info.getUsedBytes())
+        .setNumberOfKeys(info.getNumberOfKeys())
+        .setState(info.getState())
+        .setStateEnterTime(info.getStateEnterTime())
+        .setOwner(info.getOwner())
+        .setContainerID(info.getContainerID())
+        .setDeleteTransactionId(info.getDeleteTransactionId())
+        .setReplicationFactor(info.getReplicationFactor())
+        .setReplicationType(info.getReplicationType())
+        .build();
   }
 
   public long getContainerID() {
@@ -130,8 +143,12 @@ public class ContainerInfo
     return stateEnterTime;
   }
 
-  public Pipeline getPipeline() {
-    return pipeline;
+  public ReplicationFactor getReplicationFactor() {
+    return replicationFactor;
+  }
+
+  public String getPipelineName() {
+    return pipelineName;
   }
 
   public long getAllocatedBytes() {
@@ -177,6 +194,10 @@ public class ContainerInfo
     return lastUsed;
   }
 
+  public ReplicationType getReplicationType() {
+    return replicationType;
+  }
+
   public void updateLastUsedTime() {
     lastUsed = Time.monotonicNow();
   }
@@ -190,19 +211,17 @@ public class ContainerInfo
   public HddsProtos.SCMContainerInfo getProtobuf() {
     HddsProtos.SCMContainerInfo.Builder builder =
         HddsProtos.SCMContainerInfo.newBuilder();
-    builder.setPipeline(getPipeline().getProtobufMessage());
-    builder.setAllocatedBytes(getAllocatedBytes());
-    builder.setUsedBytes(getUsedBytes());
-    builder.setNumberOfKeys(getNumberOfKeys());
-    builder.setState(state);
-    builder.setStateEnterTime(stateEnterTime);
-    builder.setContainerID(getContainerID());
-    builder.setDeleteTransactionId(deleteTransactionId);
-
-    if (getOwner() != null) {
-      builder.setOwner(getOwner());
-    }
-    return builder.build();
+    return builder.setAllocatedBytes(getAllocatedBytes())
+        .setContainerID(getContainerID())
+        .setUsedBytes(getUsedBytes())
+        .setNumberOfKeys(getNumberOfKeys()).setState(getState())
+        .setStateEnterTime(getStateEnterTime()).setContainerID(getContainerID())
+        .setDeleteTransactionId(getDeleteTransactionId())
+        .setPipelineName(getPipelineName())
+        .setReplicationFactor(getReplicationFactor())
+        .setReplicationType(getReplicationType())
+        .setOwner(getOwner())
+        .build();
   }
 
   public String getOwner() {
@@ -217,7 +236,7 @@ public class ContainerInfo
   public String toString() {
     return "ContainerInfo{"
         + "state=" + state
-        + ", pipeline=" + pipeline
+        + ", pipelineName=" + pipelineName
         + ", stateEnterTime=" + stateEnterTime
         + ", owner=" + owner
         + '}';
@@ -252,9 +271,7 @@ public class ContainerInfo
   public int hashCode() {
     return new HashCodeBuilder(11, 811)
         .append(getContainerID())
-        .append(pipeline.getFactor())
-        .append(pipeline.getType())
-        .append(owner)
+        .append(getOwner())
         .toHashCode();
   }
 
@@ -327,12 +344,44 @@ public class ContainerInfo
       this.data = Arrays.copyOf(data, data.length);
     }
   }
+
+  /**
+   * Throws IOException as default java serialization is not supported. Use
+   * serialization via protobuf instead.
+   *
+   * @param out the stream to write the object to
+   * @throws IOException Includes any I/O exceptions that may occur
+   * @serialData Overriding methods should use this tag to describe
+   * the data layout of this Externalizable object.
+   * List the sequence of element types and, if possible,
+   * relate the element to a public/protected field and/or
+   * method of this Externalizable class.
+   */
+  @Override
+  public void writeExternal(ObjectOutput out) throws IOException {
+    throw new IOException(SERIALIZATION_ERROR_MSG);
+  }
+
+  /**
+   * Throws IOException as default java serialization is not supported. Use
+   * serialization via protobuf instead.
+   *
+   * @param in the stream to read data from in order to restore the object
+   * @throws IOException            if I/O errors occur
+   * @throws ClassNotFoundException If the class for an object being
+   *                                restored cannot be found.
+   */
+  @Override
+  public void readExternal(ObjectInput in)
+      throws IOException, ClassNotFoundException {
+    throw new IOException(SERIALIZATION_ERROR_MSG);
+  }
+
   /**
    * Builder class for ContainerInfo.
    */
   public static class Builder {
     private HddsProtos.LifeCycleState state;
-    private Pipeline pipeline;
     private long allocated;
     private long used;
     private long keys;
@@ -340,6 +389,25 @@ public class ContainerInfo
     private String owner;
     private long containerID;
     private long deleteTransactionId;
+    private String pipelineName;
+    private ReplicationFactor replicationFactor;
+    private ReplicationType replicationType;
+
+    public Builder setReplicationType(
+        ReplicationType replicationType) {
+      this.replicationType = replicationType;
+      return this;
+    }
+
+    public Builder setPipelineName(String pipelineName) {
+      this.pipelineName = pipelineName;
+      return this;
+    }
+
+    public Builder setReplicationFactor(ReplicationFactor repFactor) {
+      this.replicationFactor = repFactor;
+      return this;
+    }
 
     public Builder setContainerID(long id) {
       Preconditions.checkState(id >= 0);
@@ -352,11 +420,6 @@ public class ContainerInfo
       return this;
     }
 
-    public Builder setPipeline(Pipeline containerPipeline) {
-      this.pipeline = containerPipeline;
-      return this;
-    }
-
     public Builder setAllocatedBytes(long bytesAllocated) {
       this.allocated = bytesAllocated;
       return this;
@@ -388,9 +451,9 @@ public class ContainerInfo
     }
 
     public ContainerInfo build() {
-      return new
-          ContainerInfo(containerID, state, pipeline, allocated,
-              used, keys, stateEnterTime, owner, deleteTransactionId);
+      return new ContainerInfo(containerID, state, pipelineName, allocated,
+              used, keys, stateEnterTime, owner, deleteTransactionId,
+          replicationFactor, replicationType);
     }
   }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/7ca4f0ce/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/container/common/helpers/ContainerWithPipeline.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/container/common/helpers/ContainerWithPipeline.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/container/common/helpers/ContainerWithPipeline.java
new file mode 100644
index 0000000..e71d429
--- /dev/null
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/container/common/helpers/ContainerWithPipeline.java
@@ -0,0 +1,131 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ *  with the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ *  Unless required by applicable law or agreed to in writing, software
+ *  distributed under the License is distributed on an "AS IS" BASIS,
+ *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ *  See the License for the specific language governing permissions and
+ *  limitations under the License.
+ */
+
+package org.apache.hadoop.hdds.scm.container.common.helpers;
+
+import java.util.Comparator;
+import org.apache.commons.lang3.builder.EqualsBuilder;
+import org.apache.commons.lang3.builder.HashCodeBuilder;
+import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
+
+/**
+ * Class wraps ozone container info.
+ */
+public class ContainerWithPipeline
+    implements Comparator<ContainerWithPipeline>, Comparable<ContainerWithPipeline> {
+
+  private final ContainerInfo containerInfo;
+  private final Pipeline pipeline;
+
+  public ContainerWithPipeline(ContainerInfo containerInfo, Pipeline pipeline) {
+    this.containerInfo = containerInfo;
+    this.pipeline = pipeline;
+  }
+
+  public ContainerInfo getContainerInfo() {
+    return containerInfo;
+  }
+
+  public Pipeline getPipeline() {
+    return pipeline;
+  }
+
+  public static ContainerWithPipeline fromProtobuf(HddsProtos.ContainerWithPipeline allocatedContainer) {
+    return new ContainerWithPipeline(
+        ContainerInfo.fromProtobuf(allocatedContainer.getContainerInfo()),
+        Pipeline.getFromProtoBuf(allocatedContainer.getPipeline()));
+  }
+
+  public HddsProtos.ContainerWithPipeline getProtobuf() {
+    HddsProtos.ContainerWithPipeline.Builder builder =
+        HddsProtos.ContainerWithPipeline.newBuilder();
+    builder.setContainerInfo(getContainerInfo().getProtobuf())
+        .setPipeline(getPipeline().getProtobufMessage());
+
+    return builder.build();
+  }
+
+
+  @Override
+  public String toString() {
+    return containerInfo.toString() + " | " + pipeline.toString();
+  }
+
+  @Override
+  public boolean equals(Object o) {
+    if (this == o) {
+      return true;
+    }
+
+    if (o == null || getClass() != o.getClass()) {
+      return false;
+    }
+
+    ContainerWithPipeline that = (ContainerWithPipeline) o;
+
+    return new EqualsBuilder()
+        .append(getContainerInfo(), that.getContainerInfo())
+        .append(getPipeline(), that.getPipeline())
+        .isEquals();
+  }
+
+  @Override
+  public int hashCode() {
+    return new HashCodeBuilder(11, 811)
+        .append(getContainerInfo())
+        .append(getPipeline())
+        .toHashCode();
+  }
+
+  /**
+   * Compares its two arguments for order.  Returns a negative integer, zero, or
+   * a positive integer as the first argument is less than, equal to, or greater
+   * than the second.<p>
+   *
+   * @param o1 the first object to be compared.
+   * @param o2 the second object to be compared.
+   * @return a negative integer, zero, or a positive integer as the first
+   * argument is less than, equal to, or greater than the second.
+   * @throws NullPointerException if an argument is null and this comparator
+   *                              does not permit null arguments
+   * @throws ClassCastException   if the arguments' types prevent them from
+   *                              being compared by this comparator.
+   */
+  @Override
+  public int compare(ContainerWithPipeline o1, ContainerWithPipeline o2) {
+    return o1.getContainerInfo().compareTo(o2.getContainerInfo());
+  }
+
+  /**
+   * Compares this object with the specified object for order.  Returns a
+   * negative integer, zero, or a positive integer as this object is less than,
+   * equal to, or greater than the specified object.
+   *
+   * @param o the object to be compared.
+   * @return a negative integer, zero, or a positive integer as this object is
+   * less than, equal to, or greater than the specified object.
+   * @throws NullPointerException if the specified object is null
+   * @throws ClassCastException   if the specified object's type prevents it
+   *                              from being compared to this object.
+   */
+  @Override
+  public int compareTo(ContainerWithPipeline o) {
+    return this.compare(this, o);
+  }
+
+}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/7ca4f0ce/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/protocol/StorageContainerLocationProtocol.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/protocol/StorageContainerLocationProtocol.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/protocol/StorageContainerLocationProtocol.java
index e8d85e0..b787409 100644
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/protocol/StorageContainerLocationProtocol.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/protocol/StorageContainerLocationProtocol.java
@@ -18,6 +18,7 @@
 package org.apache.hadoop.hdds.scm.protocol;
 
 import org.apache.hadoop.hdds.scm.ScmInfo;
+import org.apache.hadoop.hdds.scm.container.common.helpers.ContainerWithPipeline;
 import org.apache.hadoop.hdds.scm.container.common.helpers.ContainerInfo;
 import org.apache.hadoop.hdds.scm.container.common.helpers.Pipeline;
 import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
@@ -38,7 +39,7 @@ public interface StorageContainerLocationProtocol {
    * set of datanodes that should be used creating this container.
    *
    */
-  ContainerInfo allocateContainer(HddsProtos.ReplicationType replicationType,
+  ContainerWithPipeline allocateContainer(HddsProtos.ReplicationType replicationType,
       HddsProtos.ReplicationFactor factor, String owner)
       throws IOException;
 
@@ -54,6 +55,16 @@ public interface StorageContainerLocationProtocol {
   ContainerInfo getContainer(long containerID) throws IOException;
 
   /**
+   * Ask SCM the location of the container. SCM responds with a group of
+   * nodes where this container and its replicas are located.
+   *
+   * @param containerID - ID of the container.
+   * @return ContainerWithPipeline - the container info with the pipeline.
+   * @throws IOException
+   */
+  ContainerWithPipeline getContainerWithPipeline(long containerID) throws IOException;
+
+  /**
    * Ask SCM a list of containers with a range of container names
    * and the limit of count.
    * Search container names between start name(exclusive), and

http://git-wip-us.apache.org/repos/asf/hadoop/blob/7ca4f0ce/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/protocolPB/StorageContainerLocationProtocolClientSideTranslatorPB.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/protocolPB/StorageContainerLocationProtocolClientSideTranslatorPB.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/protocolPB/StorageContainerLocationProtocolClientSideTranslatorPB.java
index bba4e17..4b03d12 100644
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/protocolPB/StorageContainerLocationProtocolClientSideTranslatorPB.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/protocolPB/StorageContainerLocationProtocolClientSideTranslatorPB.java
@@ -20,7 +20,10 @@ import com.google.common.base.Preconditions;
 import com.google.protobuf.RpcController;
 import com.google.protobuf.ServiceException;
 import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.hdds.protocol.proto.StorageContainerLocationProtocolProtos.GetContainerWithPipelineRequestProto;
+import org.apache.hadoop.hdds.protocol.proto.StorageContainerLocationProtocolProtos.GetContainerWithPipelineResponseProto;
 import org.apache.hadoop.hdds.scm.ScmInfo;
+import org.apache.hadoop.hdds.scm.container.common.helpers.ContainerWithPipeline;
 import org.apache.hadoop.hdds.scm.container.common.helpers.ContainerInfo;
 import org.apache.hadoop.hdds.scm.container.common.helpers.Pipeline;
 import org.apache.hadoop.hdds.scm.protocol.StorageContainerLocationProtocol;
@@ -95,7 +98,7 @@ public final class StorageContainerLocationProtocolClientSideTranslatorPB
    * @throws IOException
    */
   @Override
-  public ContainerInfo allocateContainer(HddsProtos.ReplicationType type,
+  public ContainerWithPipeline allocateContainer(HddsProtos.ReplicationType type,
       HddsProtos.ReplicationFactor factor, String owner) throws IOException {
 
     ContainerRequestProto request = ContainerRequestProto.newBuilder()
@@ -114,7 +117,7 @@ public final class StorageContainerLocationProtocolClientSideTranslatorPB
       throw new IOException(response.hasErrorMessage() ?
           response.getErrorMessage() : "Allocate container failed.");
     }
-    return ContainerInfo.fromProtobuf(response.getContainerInfo());
+    return ContainerWithPipeline.fromProtobuf(response.getContainerWithPipeline());
   }
 
   public ContainerInfo getContainer(long containerID) throws IOException {
@@ -136,6 +139,25 @@ public final class StorageContainerLocationProtocolClientSideTranslatorPB
   /**
    * {@inheritDoc}
    */
+  public ContainerWithPipeline getContainerWithPipeline(long containerID) throws IOException {
+    Preconditions.checkState(containerID >= 0,
+        "Container ID cannot be negative");
+    GetContainerWithPipelineRequestProto request = GetContainerWithPipelineRequestProto
+        .newBuilder()
+        .setContainerID(containerID)
+        .build();
+    try {
+      GetContainerWithPipelineResponseProto response =
+          rpcProxy.getContainerWithPipeline(NULL_RPC_CONTROLLER, request);
+      return ContainerWithPipeline.fromProtobuf(response.getContainerWithPipeline());
+    } catch (ServiceException e) {
+      throw ProtobufHelper.getRemoteException(e);
+    }
+  }
+
+  /**
+   * {@inheritDoc}
+   */
   @Override
   public List<ContainerInfo> listContainer(long startContainerID, int count)
       throws IOException {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/7ca4f0ce/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/protocolPB/StorageContainerLocationProtocolServerSideTranslatorPB.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/protocolPB/StorageContainerLocationProtocolServerSideTranslatorPB.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/protocolPB/StorageContainerLocationProtocolServerSideTranslatorPB.java
index 70a0e8a..d66919f 100644
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/protocolPB/StorageContainerLocationProtocolServerSideTranslatorPB.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/protocolPB/StorageContainerLocationProtocolServerSideTranslatorPB.java
@@ -21,7 +21,10 @@ package org.apache.hadoop.ozone.protocolPB;
 import com.google.protobuf.RpcController;
 import com.google.protobuf.ServiceException;
 import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.hdds.protocol.proto.StorageContainerLocationProtocolProtos.GetContainerWithPipelineRequestProto;
+import org.apache.hadoop.hdds.protocol.proto.StorageContainerLocationProtocolProtos.GetContainerWithPipelineResponseProto;
 import org.apache.hadoop.hdds.scm.ScmInfo;
+import org.apache.hadoop.hdds.scm.container.common.helpers.ContainerWithPipeline;
 import org.apache.hadoop.hdds.scm.container.common.helpers.ContainerInfo;
 import org.apache.hadoop.hdds.scm.protocol.StorageContainerLocationProtocol;
 import org.apache.hadoop.hdds.scm.protocolPB.StorageContainerLocationProtocolPB;
@@ -82,10 +85,11 @@ public final class StorageContainerLocationProtocolServerSideTranslatorPB
   public ContainerResponseProto allocateContainer(RpcController unused,
       ContainerRequestProto request) throws ServiceException {
     try {
-      ContainerInfo container = impl.allocateContainer(request.getReplicationType(),
-          request.getReplicationFactor(), request.getOwner());
+      ContainerWithPipeline containerWithPipeline = impl
+          .allocateContainer(request.getReplicationType(),
+              request.getReplicationFactor(), request.getOwner());
       return ContainerResponseProto.newBuilder()
-          .setContainerInfo(container.getProtobuf())
+          .setContainerWithPipeline(containerWithPipeline.getProtobuf())
           .setErrorCode(ContainerResponseProto.Error.success)
           .build();
 
@@ -109,6 +113,21 @@ public final class StorageContainerLocationProtocolServerSideTranslatorPB
   }
 
   @Override
+  public GetContainerWithPipelineResponseProto getContainerWithPipeline(
+      RpcController controller, GetContainerWithPipelineRequestProto request)
+      throws ServiceException {
+    try {
+      ContainerWithPipeline container = impl
+          .getContainerWithPipeline(request.getContainerID());
+      return GetContainerWithPipelineResponseProto.newBuilder()
+          .setContainerWithPipeline(container.getProtobuf())
+          .build();
+    } catch (IOException e) {
+      throw new ServiceException(e);
+    }
+  }
+
+  @Override
   public SCMListContainerResponseProto listContainer(RpcController controller,
       SCMListContainerRequestProto request) throws ServiceException {
     try {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/7ca4f0ce/hadoop-hdds/common/src/main/proto/StorageContainerLocationProtocol.proto
----------------------------------------------------------------------
diff --git a/hadoop-hdds/common/src/main/proto/StorageContainerLocationProtocol.proto b/hadoop-hdds/common/src/main/proto/StorageContainerLocationProtocol.proto
index 090e6eb..143c2ae 100644
--- a/hadoop-hdds/common/src/main/proto/StorageContainerLocationProtocol.proto
+++ b/hadoop-hdds/common/src/main/proto/StorageContainerLocationProtocol.proto
@@ -52,7 +52,7 @@ message ContainerResponseProto {
     errorContainerMissing = 3;
   }
   required Error errorCode = 1;
-  required SCMContainerInfo containerInfo = 2;
+  required ContainerWithPipeline containerWithPipeline = 2;
   optional string errorMessage = 3;
 }
 
@@ -64,6 +64,14 @@ message GetContainerResponseProto {
   required SCMContainerInfo containerInfo = 1;
 }
 
+message GetContainerWithPipelineRequestProto {
+  required int64 containerID = 1;
+}
+
+message GetContainerWithPipelineResponseProto {
+  required ContainerWithPipeline containerWithPipeline = 1;
+}
+
 message SCMListContainerRequestProto {
   required uint32 count = 1;
   optional uint64 startContainerID = 2;
@@ -171,6 +179,11 @@ service StorageContainerLocationProtocolService {
    */
   rpc getContainer(GetContainerRequestProto) returns (GetContainerResponseProto);
 
+  /**
+   * Returns the pipeline for a given container.
+   */
+  rpc getContainerWithPipeline(GetContainerWithPipelineRequestProto) returns (GetContainerWithPipelineResponseProto);
+
   rpc listContainer(SCMListContainerRequestProto) returns (SCMListContainerResponseProto);
 
   /**

http://git-wip-us.apache.org/repos/asf/hadoop/blob/7ca4f0ce/hadoop-hdds/common/src/main/proto/hdds.proto
----------------------------------------------------------------------
diff --git a/hadoop-hdds/common/src/main/proto/hdds.proto b/hadoop-hdds/common/src/main/proto/hdds.proto
index 816efa7..1c9ee19 100644
--- a/hadoop-hdds/common/src/main/proto/hdds.proto
+++ b/hadoop-hdds/common/src/main/proto/hdds.proto
@@ -132,7 +132,7 @@ enum LifeCycleEvent {
 message SCMContainerInfo {
     required int64 containerID = 1;
     required LifeCycleState state = 2;
-    required Pipeline pipeline = 3;
+    optional string pipelineName = 3;
     // This is not total size of container, but space allocated by SCM for
     // clients to write blocks
     required uint64 allocatedBytes = 4;
@@ -141,6 +141,13 @@ message SCMContainerInfo {
     optional int64 stateEnterTime = 7;
     required string owner = 8;
     optional int64 deleteTransactionId = 9;
+    required ReplicationFactor replicationFactor  = 10;
+    required ReplicationType replicationType  = 11;
+}
+
+message ContainerWithPipeline {
+  required SCMContainerInfo containerInfo = 1;
+  required Pipeline pipeline = 2;
 }
 
 message GetScmInfoRequestProto {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/7ca4f0ce/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/block/BlockManagerImpl.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/block/BlockManagerImpl.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/block/BlockManagerImpl.java
index 7cfbdab..953f71e 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/block/BlockManagerImpl.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/block/BlockManagerImpl.java
@@ -16,10 +16,12 @@
  */
 package org.apache.hadoop.hdds.scm.block;
 
+import java.util.UUID;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hdds.scm.ScmConfigKeys;
 import org.apache.hadoop.hdds.scm.container.Mapping;
 import org.apache.hadoop.hdds.scm.container.common.helpers.AllocatedBlock;
+import org.apache.hadoop.hdds.scm.container.common.helpers.ContainerWithPipeline;
 import org.apache.hadoop.hdds.scm.container.common.helpers.ContainerInfo;
 import org.apache.hadoop.hdds.scm.exceptions.SCMException;
 import org.apache.hadoop.hdds.scm.node.NodeManager;
@@ -156,13 +158,13 @@ public class BlockManagerImpl implements BlockManager, BlockmanagerMXBean {
     lock.lock();
     try {
       for (int i = 0; i < count; i++) {
-        ContainerInfo containerInfo = null;
+        ContainerWithPipeline containerWithPipeline = null;
         try {
           // TODO: Fix this later when Ratis is made the Default.
-          containerInfo = containerManager.allocateContainer(type, factor,
+          containerWithPipeline = containerManager.allocateContainer(type, factor,
               owner);
 
-          if (containerInfo == null) {
+          if (containerWithPipeline == null) {
             LOG.warn("Unable to allocate container.");
             continue;
           }
@@ -231,30 +233,27 @@ public class BlockManagerImpl implements BlockManager, BlockmanagerMXBean {
                can use different kind of policies.
       */
 
-      ContainerInfo containerInfo;
+      ContainerWithPipeline containerWithPipeline;
 
       // Look for ALLOCATED container that matches all other parameters.
-      containerInfo =
-          containerManager
-              .getStateManager()
-              .getMatchingContainer(
-                  size, owner, type, factor, HddsProtos.LifeCycleState
-                      .ALLOCATED);
-      if (containerInfo != null) {
-        containerManager.updateContainerState(containerInfo.getContainerID(),
+      containerWithPipeline = containerManager
+          .getMatchingContainerWithPipeline(size, owner, type, factor,
+              HddsProtos.LifeCycleState.ALLOCATED);
+      if (containerWithPipeline != null) {
+        containerManager.updateContainerState(
+            containerWithPipeline.getContainerInfo().getContainerID(),
             HddsProtos.LifeCycleEvent.CREATE);
-        return newBlock(containerInfo, HddsProtos.LifeCycleState.ALLOCATED);
+        return newBlock(containerWithPipeline,
+            HddsProtos.LifeCycleState.ALLOCATED);
       }
 
       // Since we found no allocated containers that match our criteria, let us
       // look for OPEN containers that match the criteria.
-      containerInfo =
-          containerManager
-              .getStateManager()
-              .getMatchingContainer(size, owner, type, factor, HddsProtos
-                  .LifeCycleState.OPEN);
-      if (containerInfo != null) {
-        return newBlock(containerInfo, HddsProtos.LifeCycleState.OPEN);
+      containerWithPipeline = containerManager
+          .getMatchingContainerWithPipeline(size, owner, type, factor,
+              HddsProtos.LifeCycleState.OPEN);
+      if (containerWithPipeline != null) {
+        return newBlock(containerWithPipeline, HddsProtos.LifeCycleState.OPEN);
       }
 
       // We found neither ALLOCATED or OPEN Containers. This generally means
@@ -264,16 +263,15 @@ public class BlockManagerImpl implements BlockManager, BlockmanagerMXBean {
       preAllocateContainers(containerProvisionBatchSize, type, factor, owner);
 
       // Since we just allocated a set of containers this should work
-      containerInfo =
-          containerManager
-              .getStateManager()
-              .getMatchingContainer(
-                  size, owner, type, factor, HddsProtos.LifeCycleState
-                      .ALLOCATED);
-      if (containerInfo != null) {
-        containerManager.updateContainerState(containerInfo.getContainerID(),
+      containerWithPipeline = containerManager
+          .getMatchingContainerWithPipeline(size, owner, type, factor,
+              HddsProtos.LifeCycleState.ALLOCATED);
+      if (containerWithPipeline != null) {
+        containerManager.updateContainerState(
+            containerWithPipeline.getContainerInfo().getContainerID(),
             HddsProtos.LifeCycleEvent.CREATE);
-        return newBlock(containerInfo, HddsProtos.LifeCycleState.ALLOCATED);
+        return newBlock(containerWithPipeline,
+            HddsProtos.LifeCycleState.ALLOCATED);
       }
 
       // we have tried all strategies we know and but somehow we are not able
@@ -290,18 +288,28 @@ public class BlockManagerImpl implements BlockManager, BlockmanagerMXBean {
     }
   }
 
+  private String getChannelName(ReplicationType type) {
+    switch (type) {
+      case RATIS:
+        return "RA" + UUID.randomUUID().toString().substring(3);
+      case STAND_ALONE:
+        return "SA" + UUID.randomUUID().toString().substring(3);
+      default:
+        return "RA" + UUID.randomUUID().toString().substring(3);
+    }
+  }
+
   /**
    * newBlock - returns a new block assigned to a container.
    *
-   * @param containerInfo - Container Info.
+   * @param containerWithPipeline - Container Info.
    * @param state - Current state of the container.
    * @return AllocatedBlock
    */
-  private AllocatedBlock newBlock(
-      ContainerInfo containerInfo, HddsProtos.LifeCycleState state)
-      throws IOException {
-
-    if (containerInfo.getPipeline().getMachines().size() == 0) {
+  private AllocatedBlock newBlock(ContainerWithPipeline containerWithPipeline,
+      HddsProtos.LifeCycleState state) throws IOException {
+    ContainerInfo containerInfo = containerWithPipeline.getContainerInfo();
+    if (containerWithPipeline.getPipeline().getDatanodes().size() == 0) {
       LOG.error("Pipeline Machine count is zero.");
       return null;
     }
@@ -317,7 +325,7 @@ public class BlockManagerImpl implements BlockManager, BlockmanagerMXBean {
     AllocatedBlock.Builder abb =
         new AllocatedBlock.Builder()
             .setBlockID(new BlockID(containerID, localID))
-            .setPipeline(containerInfo.getPipeline())
+            .setPipeline(containerWithPipeline.getPipeline())
             .setShouldCreateContainer(createContainer);
     LOG.trace("New block allocated : {} Container ID: {}", localID,
         containerID);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/7ca4f0ce/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/block/DatanodeDeletedBlockTransactions.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/block/DatanodeDeletedBlockTransactions.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/block/DatanodeDeletedBlockTransactions.java
index 32290cc..d71e7b0 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/block/DatanodeDeletedBlockTransactions.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/block/DatanodeDeletedBlockTransactions.java
@@ -18,7 +18,6 @@ package org.apache.hadoop.hdds.scm.block;
 
 import com.google.common.collect.ArrayListMultimap;
 import org.apache.hadoop.hdds.scm.container.Mapping;
-import org.apache.hadoop.hdds.scm.container.common.helpers.ContainerInfo;
 import org.apache.hadoop.hdds.protocol.DatanodeDetails;
 import org.apache.hadoop.hdds.protocol.proto
     .StorageContainerDatanodeProtocolProtos.DeletedBlocksTransaction;
@@ -29,6 +28,7 @@ import java.util.List;
 import java.util.Set;
 import java.util.UUID;
 import java.util.stream.Collectors;
+import org.apache.hadoop.hdds.scm.container.common.helpers.Pipeline;
 
 /**
  * A wrapper class to hold info about datanode and all deleted block
@@ -54,21 +54,22 @@ public class DatanodeDeletedBlockTransactions {
   }
 
   public void addTransaction(DeletedBlocksTransaction tx) throws IOException {
-    ContainerInfo info = null;
+    Pipeline pipeline = null;
     try {
-      info = mappingService.getContainer(tx.getContainerID());
+      pipeline = mappingService.getContainerWithPipeline(tx.getContainerID())
+          .getPipeline();
     } catch (IOException e) {
       SCMBlockDeletingService.LOG.warn("Got container info error.", e);
     }
 
-    if (info == null) {
+    if (pipeline == null) {
       SCMBlockDeletingService.LOG.warn(
           "Container {} not found, continue to process next",
           tx.getContainerID());
       return;
     }
 
-    for (DatanodeDetails dd : info.getPipeline().getMachines()) {
+    for (DatanodeDetails dd : pipeline.getMachines()) {
       UUID dnID = dd.getUuid();
       if (transactions.containsKey(dnID)) {
         List<DeletedBlocksTransaction> txs = transactions.get(dnID);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/7ca4f0ce/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/CloseContainerEventHandler.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/CloseContainerEventHandler.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/CloseContainerEventHandler.java
index 16e84a3..7b24538 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/CloseContainerEventHandler.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/CloseContainerEventHandler.java
@@ -16,9 +16,11 @@
  */
 package org.apache.hadoop.hdds.scm.container;
 
+import java.io.IOException;
 import org.apache.hadoop.hdds.protocol.DatanodeDetails;
 import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
 import org.apache.hadoop.hdds.scm.container.common.helpers.ContainerInfo;
+import org.apache.hadoop.hdds.scm.container.common.helpers.ContainerWithPipeline;
 import org.apache.hadoop.hdds.scm.exceptions.SCMException;
 import org.apache.hadoop.hdds.server.events.EventHandler;
 import org.apache.hadoop.hdds.server.events.EventPublisher;
@@ -54,22 +56,32 @@ public class CloseContainerEventHandler implements EventHandler<ContainerID> {
 
     LOG.info("Close container Event triggered for container : {}",
         containerID.getId());
-    ContainerStateManager stateManager = containerManager.getStateManager();
-    ContainerInfo info = stateManager.getContainer(containerID);
-    if (info == null) {
-      LOG.info("Container with id : {} does not exist", containerID.getId());
+    ContainerWithPipeline containerWithPipeline = null;
+    ContainerInfo info;
+    try {
+      containerWithPipeline = containerManager.getContainerWithPipeline(containerID.getId());
+      info = containerWithPipeline.getContainerInfo();
+      if (info == null) {
+        LOG.info("Failed to update the container state. Container with id : {} "
+            + "does not exist", containerID.getId());
+        return;
+      }
+    } catch (IOException e) {
+      LOG.info("Failed to update the container state. Container with id : {} "
+          + "does not exist", containerID.getId());
       return;
     }
+
     if (info.getState() == HddsProtos.LifeCycleState.OPEN) {
-      for (DatanodeDetails datanode : info.getPipeline().getMachines()) {
+      for (DatanodeDetails datanode : containerWithPipeline.getPipeline().getMachines()) {
         containerManager.getNodeManager().addDatanodeCommand(datanode.getUuid(),
             new CloseContainerCommand(containerID.getId(),
-                info.getPipeline().getType()));
+                info.getReplicationType()));
       }
       try {
         // Finalize event will make sure the state of the container transitions
         // from OPEN to CLOSING in containerStateManager.
-        stateManager
+        containerManager.getStateManager()
             .updateContainerState(info, HddsProtos.LifeCycleEvent.FINALIZE);
       } catch (SCMException ex) {
         LOG.error("Failed to update the container state for container : {}"

http://git-wip-us.apache.org/repos/asf/hadoop/blob/7ca4f0ce/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerMapping.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerMapping.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerMapping.java
index 9fd30f2..e25c5b4 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerMapping.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerMapping.java
@@ -21,6 +21,10 @@ import com.google.common.base.Preconditions;
 import com.google.common.primitives.Longs;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hdds.protocol.DatanodeDetails;
+import org.apache.hadoop.hdds.protocol.proto.HddsProtos.LifeCycleState;
+import org.apache.hadoop.hdds.protocol.proto.HddsProtos.SCMContainerInfo;
+import org.apache.hadoop.hdds.scm.container.common.helpers.ContainerWithPipeline;
+import org.apache.hadoop.hdds.scm.container.common.helpers.Pipeline;
 import org.apache.hadoop.hdds.scm.ScmConfigKeys;
 import org.apache.hadoop.hdds.scm.container.closer.ContainerCloser;
 import org.apache.hadoop.hdds.scm.container.common.helpers.ContainerInfo;
@@ -167,6 +171,44 @@ public class ContainerMapping implements Mapping {
   }
 
   /**
+   * Returns the ContainerInfo from the container ID.
+   *
+   * @param containerID - ID of container.
+   * @return - ContainerWithPipeline such as creation state and the pipeline.
+   * @throws IOException
+   */
+  @Override
+  public ContainerWithPipeline getContainerWithPipeline(long containerID)
+      throws IOException {
+    ContainerInfo contInfo;
+    lock.lock();
+    try {
+      byte[] containerBytes = containerStore.get(
+          Longs.toByteArray(containerID));
+      if (containerBytes == null) {
+        throw new SCMException(
+            "Specified key does not exist. key : " + containerID,
+            SCMException.ResultCodes.FAILED_TO_FIND_CONTAINER);
+      }
+      HddsProtos.SCMContainerInfo temp = HddsProtos.SCMContainerInfo.PARSER
+          .parseFrom(containerBytes);
+      contInfo = ContainerInfo.fromProtobuf(temp);
+      Pipeline pipeline = pipelineSelector
+          .getPipeline(contInfo.getPipelineName(),
+              contInfo.getReplicationType());
+
+      if(pipeline == null) {
+        pipeline = pipelineSelector
+            .getReplicationPipeline(contInfo.getReplicationType(),
+                contInfo.getReplicationFactor());
+      }
+      return new ContainerWithPipeline(contInfo, pipeline);
+    } finally {
+      lock.unlock();
+    }
+  }
+
+  /**
    * {@inheritDoc}
    */
   @Override
@@ -208,13 +250,15 @@ public class ContainerMapping implements Mapping {
    * @throws IOException - Exception
    */
   @Override
-  public ContainerInfo allocateContainer(
+  public ContainerWithPipeline allocateContainer(
       ReplicationType type,
       ReplicationFactor replicationFactor,
       String owner)
       throws IOException {
 
     ContainerInfo containerInfo;
+    ContainerWithPipeline containerWithPipeline;
+
     if (!nodeManager.isOutOfChillMode()) {
       throw new SCMException(
           "Unable to create container while in chill mode",
@@ -223,9 +267,9 @@ public class ContainerMapping implements Mapping {
 
     lock.lock();
     try {
-      containerInfo =
-          containerStateManager.allocateContainer(
+      containerWithPipeline = containerStateManager.allocateContainer(
               pipelineSelector, type, replicationFactor, owner);
+      containerInfo = containerWithPipeline.getContainerInfo();
 
       byte[] containerIDBytes = Longs.toByteArray(
           containerInfo.getContainerID());
@@ -234,7 +278,7 @@ public class ContainerMapping implements Mapping {
     } finally {
       lock.unlock();
     }
-    return containerInfo;
+    return containerWithPipeline;
   }
 
   /**
@@ -381,6 +425,35 @@ public class ContainerMapping implements Mapping {
   }
 
   /**
+   * Return a container matching the attributes specified.
+   *
+   * @param size - Space needed in the Container.
+   * @param owner - Owner of the container - A specific nameservice.
+   * @param type - Replication Type {StandAlone, Ratis}
+   * @param factor - Replication Factor {ONE, THREE}
+   * @param state - State of the Container-- {Open, Allocated etc.}
+   * @return ContainerInfo, null if there is no match found.
+   */
+  public ContainerWithPipeline getMatchingContainerWithPipeline(final long size,
+      String owner, ReplicationType type, ReplicationFactor factor,
+      LifeCycleState state) throws IOException {
+    ContainerInfo containerInfo = getStateManager()
+        .getMatchingContainer(size, owner, type, factor, state);
+    if (containerInfo == null) {
+      return null;
+    }
+    Pipeline pipeline = pipelineSelector
+        .getPipeline(containerInfo.getPipelineName(),
+            containerInfo.getReplicationType());
+    if (pipeline == null) {
+      pipelineSelector
+          .getReplicationPipeline(containerInfo.getReplicationType(),
+              containerInfo.getReplicationFactor());
+    }
+    return new ContainerWithPipeline(containerInfo, pipeline);
+  }
+
+  /**
    * Process container report from Datanode.
    * <p>
    * Processing follows a very simple logic for time being.
@@ -415,7 +488,7 @@ public class ContainerMapping implements Mapping {
               HddsProtos.SCMContainerInfo.PARSER.parseFrom(containerBytes);
 
           HddsProtos.SCMContainerInfo newState =
-              reconcileState(datanodeState, knownState);
+              reconcileState(datanodeState, knownState, datanodeDetails);
 
           // FIX ME: This can be optimized, we write twice to memory, where a
           // single write would work well.
@@ -425,8 +498,14 @@ public class ContainerMapping implements Mapping {
           containerStore.put(dbKey, newState.toByteArray());
 
           // If the container is closed, then state is already written to SCM
+          Pipeline pipeline = pipelineSelector.getPipeline(newState.getPipelineName(), newState.getReplicationType());
+          if(pipeline == null) {
+            pipeline = pipelineSelector
+                .getReplicationPipeline(newState.getReplicationType(),
+                    newState.getReplicationFactor());
+          }
           // DB.TODO: So can we can write only once to DB.
-          if (closeContainerIfNeeded(newState)) {
+          if (closeContainerIfNeeded(newState, pipeline)) {
             LOG.info("Closing the Container: {}", newState.getContainerID());
           }
         } else {
@@ -447,15 +526,22 @@ public class ContainerMapping implements Mapping {
    *
    * @param datanodeState - State from the Datanode.
    * @param knownState - State inside SCM.
+   * @param dnDetails
    * @return new SCM State for this container.
    */
   private HddsProtos.SCMContainerInfo reconcileState(
       StorageContainerDatanodeProtocolProtos.ContainerInfo datanodeState,
-      HddsProtos.SCMContainerInfo knownState) {
+      SCMContainerInfo knownState, DatanodeDetails dnDetails) {
     HddsProtos.SCMContainerInfo.Builder builder =
         HddsProtos.SCMContainerInfo.newBuilder();
-    builder.setContainerID(knownState.getContainerID());
-    builder.setPipeline(knownState.getPipeline());
+    builder.setContainerID(knownState.getContainerID())
+        .setPipelineName(knownState.getPipelineName())
+        .setReplicationType(knownState.getReplicationType())
+        .setReplicationFactor(knownState.getReplicationFactor());
+
+    // TODO: If current state doesn't have this DN in list of DataNodes with replica
+    // then add it in list of replicas.
+
     // If used size is greater than allocated size, we will be updating
     // allocated size with used size. This update is done as a fallback
     // mechanism in case SCM crashes without properly updating allocated
@@ -464,13 +550,13 @@ public class ContainerMapping implements Mapping {
     long usedSize = datanodeState.getUsed();
     long allocated = knownState.getAllocatedBytes() > usedSize ?
         knownState.getAllocatedBytes() : usedSize;
-    builder.setAllocatedBytes(allocated);
-    builder.setUsedBytes(usedSize);
-    builder.setNumberOfKeys(datanodeState.getKeyCount());
-    builder.setState(knownState.getState());
-    builder.setStateEnterTime(knownState.getStateEnterTime());
-    builder.setContainerID(knownState.getContainerID());
-    builder.setDeleteTransactionId(knownState.getDeleteTransactionId());
+    builder.setAllocatedBytes(allocated)
+        .setUsedBytes(usedSize)
+        .setNumberOfKeys(datanodeState.getKeyCount())
+        .setState(knownState.getState())
+        .setStateEnterTime(knownState.getStateEnterTime())
+        .setContainerID(knownState.getContainerID())
+        .setDeleteTransactionId(knownState.getDeleteTransactionId());
     if (knownState.getOwner() != null) {
       builder.setOwner(knownState.getOwner());
     }
@@ -485,9 +571,11 @@ public class ContainerMapping implements Mapping {
    * one protobuf in one file and another definition in another file.
    *
    * @param newState - This is the state we maintain in SCM.
+   * @param pipeline
    * @throws IOException
    */
-  private boolean closeContainerIfNeeded(HddsProtos.SCMContainerInfo newState)
+  private boolean closeContainerIfNeeded(SCMContainerInfo newState,
+      Pipeline pipeline)
       throws IOException {
     float containerUsedPercentage = 1.0f *
         newState.getUsedBytes() / this.size;
@@ -498,7 +586,7 @@ public class ContainerMapping implements Mapping {
       // We will call closer till get to the closed state.
       // That is SCM will make this call repeatedly until we reach the closed
       // state.
-      closer.close(newState);
+      closer.close(newState, pipeline);
 
       if (shouldClose(scmInfo)) {
         // This event moves the Container from Open to Closing State, this is
@@ -598,10 +686,12 @@ public class ContainerMapping implements Mapping {
               .setAllocatedBytes(info.getAllocatedBytes())
               .setNumberOfKeys(oldInfo.getNumberOfKeys())
               .setOwner(oldInfo.getOwner())
-              .setPipeline(oldInfo.getPipeline())
+              .setPipelineName(oldInfo.getPipelineName())
               .setState(oldInfo.getState())
               .setUsedBytes(oldInfo.getUsedBytes())
               .setDeleteTransactionId(oldInfo.getDeleteTransactionId())
+              .setReplicationFactor(oldInfo.getReplicationFactor())
+              .setReplicationType(oldInfo.getReplicationType())
               .build();
           containerStore.put(dbKey, newInfo.getProtobuf().toByteArray());
         } else {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/7ca4f0ce/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerStateManager.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerStateManager.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerStateManager.java
index 08733bd..870ab1d 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerStateManager.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerStateManager.java
@@ -20,6 +20,7 @@ package org.apache.hadoop.hdds.scm.container;
 import com.google.common.base.Preconditions;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hdds.scm.ScmConfigKeys;
+import org.apache.hadoop.hdds.scm.container.common.helpers.ContainerWithPipeline;
 import org.apache.hadoop.hdds.scm.container.common.helpers.ContainerInfo;
 import org.apache.hadoop.hdds.scm.container.common.helpers.Pipeline;
 import org.apache.hadoop.hdds.scm.container.states.ContainerState;
@@ -279,10 +280,10 @@ public class ContainerStateManager implements Closeable {
    * @param selector -- Pipeline selector class.
    * @param type -- Replication type.
    * @param replicationFactor - Replication replicationFactor.
-   * @return Container Info.
+   * @return ContainerWithPipeline
    * @throws IOException  on Failure.
    */
-  public ContainerInfo allocateContainer(PipelineSelector selector, HddsProtos
+  public ContainerWithPipeline allocateContainer(PipelineSelector selector, HddsProtos
       .ReplicationType type, HddsProtos.ReplicationFactor replicationFactor,
       String owner) throws IOException {
 
@@ -295,7 +296,7 @@ public class ContainerStateManager implements Closeable {
 
     ContainerInfo containerInfo = new ContainerInfo.Builder()
         .setState(HddsProtos.LifeCycleState.ALLOCATED)
-        .setPipeline(pipeline)
+        .setPipelineName(pipeline.getPipelineName())
         // This is bytes allocated for blocks inside container, not the
         // container size
         .setAllocatedBytes(0)
@@ -305,11 +306,13 @@ public class ContainerStateManager implements Closeable {
         .setOwner(owner)
         .setContainerID(containerCount.incrementAndGet())
         .setDeleteTransactionId(0)
+        .setReplicationFactor(replicationFactor)
+        .setReplicationType(pipeline.getType())
         .build();
     Preconditions.checkNotNull(containerInfo);
     containers.addContainer(containerInfo);
     LOG.trace("New container allocated: {}", containerInfo);
-    return containerInfo;
+    return new ContainerWithPipeline(containerInfo, pipeline);
   }
 
   /**
@@ -432,8 +435,8 @@ public class ContainerStateManager implements Closeable {
         containerInfo.updateLastUsedTime();
 
         ContainerState key = new ContainerState(owner,
-            containerInfo.getPipeline().getType(),
-            containerInfo.getPipeline().getFactor());
+            containerInfo.getReplicationType(),
+            containerInfo.getReplicationFactor());
         lastUsedMap.put(key, containerInfo.containerID());
         return containerInfo;
       }
@@ -458,6 +461,20 @@ public class ContainerStateManager implements Closeable {
   }
 
   /**
+   * Returns the containerInfo with pipeline for the given container id.
+   * @param selector -- Pipeline selector class.
+   * @param containerID id of the container
+   * @return ContainerInfo containerInfo
+   * @throws IOException
+   */
+  public ContainerWithPipeline getContainer(PipelineSelector selector,
+      ContainerID containerID) throws IOException {
+    ContainerInfo info = containers.getContainerInfo(containerID.getId());
+    Pipeline pipeline = selector.getPipeline(info.getPipelineName(), info.getReplicationType());
+    return new ContainerWithPipeline(info, pipeline);
+  }
+
+  /**
    * Returns the containerInfo for the given container id.
    * @param containerID id of the container
    * @return ContainerInfo containerInfo
@@ -466,6 +483,7 @@ public class ContainerStateManager implements Closeable {
   public ContainerInfo getContainer(ContainerID containerID) {
     return containers.getContainerInfo(containerID.getId());
   }
+
   @Override
   public void close() throws IOException {
   }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/7ca4f0ce/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/Mapping.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/Mapping.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/Mapping.java
index e77a4b6..f52eb05 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/Mapping.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/Mapping.java
@@ -17,6 +17,10 @@
 package org.apache.hadoop.hdds.scm.container;
 
 import org.apache.hadoop.hdds.protocol.DatanodeDetails;
+import org.apache.hadoop.hdds.protocol.proto.HddsProtos.LifeCycleState;
+import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor;
+import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationType;
+import org.apache.hadoop.hdds.scm.container.common.helpers.ContainerWithPipeline;
 import org.apache.hadoop.hdds.scm.container.common.helpers.ContainerInfo;
 import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
 import org.apache.hadoop.hdds.protocol.proto
@@ -43,6 +47,16 @@ public interface Mapping extends Closeable {
   ContainerInfo getContainer(long containerID) throws IOException;
 
   /**
+   * Returns the ContainerInfo from the container ID.
+   *
+   * @param containerID - ID of container.
+   * @return - ContainerWithPipeline such as creation state and the pipeline.
+   * @throws IOException
+   */
+  ContainerWithPipeline getContainerWithPipeline(long containerID)
+      throws IOException;
+
+  /**
    * Returns containers under certain conditions.
    * Search container IDs from start ID(exclusive),
    * The max size of the searching range cannot exceed the
@@ -65,10 +79,10 @@ public interface Mapping extends Closeable {
    *
    * @param replicationFactor - replication factor of the container.
    * @param owner
-   * @return - Container Info.
+   * @return - ContainerWithPipeline.
    * @throws IOException
    */
-  ContainerInfo allocateContainer(HddsProtos.ReplicationType type,
+  ContainerWithPipeline allocateContainer(HddsProtos.ReplicationType type,
       HddsProtos.ReplicationFactor replicationFactor, String owner)
       throws IOException;
 
@@ -120,4 +134,12 @@ public interface Mapping extends Closeable {
    * @return NodeManager
    */
   NodeManager getNodeManager();
+
+  /**
+   * Returns the ContainerWithPipeline.
+   * @return NodeManager
+   */
+  public ContainerWithPipeline getMatchingContainerWithPipeline(final long size,
+      String owner, ReplicationType type, ReplicationFactor factor,
+      LifeCycleState state) throws IOException;
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/7ca4f0ce/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/closer/ContainerCloser.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/closer/ContainerCloser.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/closer/ContainerCloser.java
index cbb2ba7..3ca8ba9 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/closer/ContainerCloser.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/closer/ContainerCloser.java
@@ -22,6 +22,8 @@ import com.google.common.annotations.VisibleForTesting;
 import com.google.common.base.Preconditions;
 import com.google.common.util.concurrent.ThreadFactoryBuilder;
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hdds.protocol.proto.HddsProtos.SCMContainerInfo;
+import org.apache.hadoop.hdds.scm.container.common.helpers.Pipeline;
 import org.apache.hadoop.hdds.scm.node.NodeManager;
 import org.apache.hadoop.hdds.protocol.DatanodeDetails;
 import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
@@ -90,8 +92,10 @@ public class ContainerCloser {
    * lives.
    *
    * @param info - ContainerInfo.
+   * @param pipeline
    */
-  public void close(HddsProtos.SCMContainerInfo info) {
+  public void close(SCMContainerInfo info,
+      Pipeline pipeline) {
 
     if (commandIssued.containsKey(info.getContainerID())) {
       // We check if we issued a close command in last 3 * reportInterval secs.
@@ -126,13 +130,10 @@ public class ContainerCloser {
     // this queue can be emptied by a datanode after a close report is send
     // to SCM. In that case also, data node will ignore this command.
 
-    HddsProtos.Pipeline pipeline = info.getPipeline();
-    for (HddsProtos.DatanodeDetailsProto datanodeDetails :
-        pipeline.getMembersList()) {
-      nodeManager.addDatanodeCommand(
-          DatanodeDetails.getFromProtoBuf(datanodeDetails).getUuid(),
+    for (DatanodeDetails datanodeDetails : pipeline.getMachines()) {
+      nodeManager.addDatanodeCommand(datanodeDetails.getUuid(),
           new CloseContainerCommand(info.getContainerID(),
-              pipeline.getType()));
+              info.getReplicationType()));
     }
     if (!commandIssued.containsKey(info.getContainerID())) {
       commandIssued.put(info.getContainerID(),

http://git-wip-us.apache.org/repos/asf/hadoop/blob/7ca4f0ce/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/states/ContainerStateMap.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/states/ContainerStateMap.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/states/ContainerStateMap.java
index 48c6423..3ada8fe 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/states/ContainerStateMap.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/states/ContainerStateMap.java
@@ -116,7 +116,8 @@ public class ContainerStateMap {
   public void addContainer(ContainerInfo info)
       throws SCMException {
     Preconditions.checkNotNull(info, "Container Info cannot be null");
-    Preconditions.checkNotNull(info.getPipeline(), "Pipeline cannot be null");
+    Preconditions.checkArgument(info.getReplicationFactor().getNumber() > 0,
+        "ExpectedReplicaCount should be greater than 0");
 
     try (AutoCloseableLock lock = autoLock.acquire()) {
       ContainerID id = ContainerID.valueof(info.getContainerID());
@@ -129,8 +130,8 @@ public class ContainerStateMap {
 
       lifeCycleStateMap.insert(info.getState(), id);
       ownerMap.insert(info.getOwner(), id);
-      factorMap.insert(info.getPipeline().getFactor(), id);
-      typeMap.insert(info.getPipeline().getType(), id);
+      factorMap.insert(info.getReplicationFactor(), id);
+      typeMap.insert(info.getReplicationType(), id);
       LOG.trace("Created container with {} successfully.", id);
     }
   }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/7ca4f0ce/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipelines/PipelineManager.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipelines/PipelineManager.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipelines/PipelineManager.java
index 48affa4..a1fbce6 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipelines/PipelineManager.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipelines/PipelineManager.java
@@ -16,6 +16,9 @@
  */
 package org.apache.hadoop.hdds.scm.pipelines;
 
+import java.util.LinkedList;
+import java.util.Map;
+import java.util.WeakHashMap;
 import org.apache.hadoop.hdds.scm.container.common.helpers.Pipeline;
 import org.apache.hadoop.hdds.protocol.DatanodeDetails;
 import org.apache.hadoop.hdds.protocol.proto.HddsProtos.LifeCycleState;
@@ -25,7 +28,6 @@ import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
 import java.io.IOException;
-import java.util.LinkedList;
 import java.util.List;
 import java.util.concurrent.atomic.AtomicInteger;
 
@@ -36,11 +38,13 @@ public abstract class PipelineManager {
   private static final Logger LOG =
       LoggerFactory.getLogger(PipelineManager.class);
   private final List<Pipeline> activePipelines;
+  private final Map<String, Pipeline> activePipelineMap;
   private final AtomicInteger pipelineIndex;
 
   public PipelineManager() {
     activePipelines = new LinkedList<>();
     pipelineIndex = new AtomicInteger(0);
+    activePipelineMap = new WeakHashMap<>();
   }
 
   /**
@@ -76,6 +80,7 @@ public abstract class PipelineManager {
               "replicationType:{} replicationFactor:{}",
           pipeline.getPipelineName(), replicationType, replicationFactor);
       activePipelines.add(pipeline);
+      activePipelineMap.put(pipeline.getPipelineName(), pipeline);
     } else {
       pipeline =
           findOpenPipeline(replicationType, replicationFactor);
@@ -94,6 +99,26 @@ public abstract class PipelineManager {
     }
   }
 
+  /**
+   * This function to get pipeline with given pipeline name.
+   *
+   * @param pipelineName
+   * @return a Pipeline.
+   */
+  public synchronized final Pipeline getPipeline(String pipelineName) {
+    Pipeline pipeline = null;
+
+    // 1. Check if pipeline channel already exists
+    if (activePipelineMap.containsKey(pipelineName)) {
+      pipeline = activePipelineMap.get(pipelineName);
+      LOG.debug("Returning pipeline for pipelineName:{}", pipelineName);
+      return pipeline;
+    } else {
+      LOG.debug("Unable to find pipeline for pipelineName:{}", pipelineName);
+    }
+    return pipeline;
+  }
+
   protected int getReplicationCount(ReplicationFactor factor) {
     switch (factor) {
     case ONE:

http://git-wip-us.apache.org/repos/asf/hadoop/blob/7ca4f0ce/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipelines/PipelineSelector.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipelines/PipelineSelector.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipelines/PipelineSelector.java
index 508ca9b..3846a84 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipelines/PipelineSelector.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipelines/PipelineSelector.java
@@ -19,6 +19,7 @@ package org.apache.hadoop.hdds.scm.pipelines;
 import com.google.common.base.Preconditions;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hdds.scm.ScmConfigKeys;
+import org.apache.hadoop.hdds.scm.container.common.helpers.ContainerInfo;
 import org.apache.hadoop.hdds.scm.container.common.helpers.Pipeline;
 import org.apache.hadoop.hdds.scm.container.placement.algorithms
     .ContainerPlacementPolicy;
@@ -177,6 +178,21 @@ public class PipelineSelector {
   }
 
   /**
+   * This function to return pipeline for given pipeline name and replication
+   * type.
+   */
+  public Pipeline getPipeline(String pipelineName,
+      ReplicationType replicationType) throws IOException {
+    if (pipelineName == null) {
+      return null;
+    }
+    PipelineManager manager = getPipelineManager(replicationType);
+    Preconditions.checkNotNull(manager, "Found invalid pipeline manager");
+    LOG.debug("Getting replication pipeline forReplicationType {} :" +
+        " pipelineName:{}", replicationType, pipelineName);
+    return manager.getPipeline(pipelineName);
+  }
+  /**
    * Creates a pipeline from a specified set of Nodes.
    */
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/7ca4f0ce/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipelines/ratis/RatisManagerImpl.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipelines/ratis/RatisManagerImpl.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipelines/ratis/RatisManagerImpl.java
index ace8758..189060e 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipelines/ratis/RatisManagerImpl.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipelines/ratis/RatisManagerImpl.java
@@ -19,6 +19,7 @@ package org.apache.hadoop.hdds.scm.pipelines.ratis;
 import com.google.common.base.Preconditions;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hdds.scm.XceiverClientRatis;
+import org.apache.hadoop.hdds.scm.container.common.helpers.ContainerInfo;
 import org.apache.hadoop.hdds.scm.container.common.helpers.Pipeline;
 import org.apache.hadoop.hdds.scm.container.placement.algorithms
     .ContainerPlacementPolicy;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/7ca4f0ce/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipelines/standalone/StandaloneManagerImpl.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipelines/standalone/StandaloneManagerImpl.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipelines/standalone/StandaloneManagerImpl.java
index e76027f..579a3a2 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipelines/standalone/StandaloneManagerImpl.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipelines/standalone/StandaloneManagerImpl.java
@@ -17,6 +17,7 @@
 package org.apache.hadoop.hdds.scm.pipelines.standalone;
 
 import com.google.common.base.Preconditions;
+import org.apache.hadoop.hdds.scm.container.common.helpers.ContainerInfo;
 import org.apache.hadoop.hdds.scm.container.common.helpers.Pipeline;
 import org.apache.hadoop.hdds.scm.container.placement.algorithms
     .ContainerPlacementPolicy;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/7ca4f0ce/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMClientProtocolServer.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMClientProtocolServer.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMClientProtocolServer.java
index d73cccd..e1d478f 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMClientProtocolServer.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMClientProtocolServer.java
@@ -31,6 +31,7 @@ import org.apache.hadoop.hdds.protocol.proto
     .StorageContainerLocationProtocolProtos;
 import org.apache.hadoop.hdds.scm.HddsServerUtil;
 import org.apache.hadoop.hdds.scm.ScmInfo;
+import org.apache.hadoop.hdds.scm.container.common.helpers.ContainerWithPipeline;
 import org.apache.hadoop.hdds.scm.container.common.helpers.ContainerInfo;
 import org.apache.hadoop.hdds.scm.container.common.helpers.Pipeline;
 import org.apache.hadoop.hdds.scm.protocol.StorageContainerLocationProtocol;
@@ -145,11 +146,12 @@ public class SCMClientProtocolServer implements
   }
 
   @Override
-  public ContainerInfo allocateContainer(HddsProtos.ReplicationType
+  public ContainerWithPipeline allocateContainer(HddsProtos.ReplicationType
       replicationType, HddsProtos.ReplicationFactor factor,
       String owner) throws IOException {
     String remoteUser = getRpcRemoteUsername();
     getScm().checkAdminAccess(remoteUser);
+
     return scm.getScmContainerManager()
         .allocateContainer(replicationType, factor, owner);
   }
@@ -163,6 +165,14 @@ public class SCMClientProtocolServer implements
   }
 
   @Override
+  public ContainerWithPipeline getContainerWithPipeline(long containerID) throws IOException {
+    String remoteUser = getRpcRemoteUsername();
+    getScm().checkAdminAccess(remoteUser);
+    return scm.getScmContainerManager()
+        .getContainerWithPipeline(containerID);
+  }
+
+  @Override
   public List<ContainerInfo> listContainer(long startContainerID,
       int count) throws IOException {
     return scm.getScmContainerManager().
@@ -248,7 +258,7 @@ public class SCMClientProtocolServer implements
       throws IOException {
     // TODO: will be addressed in future patch.
     // This is needed only for debugging purposes to make sure cluster is
-    // working correctly. 
+    // working correctly.
     return null;
   }
 


---------------------------------------------------------------------
To unsubscribe, e-mail: common-commits-unsubscribe@hadoop.apache.org
For additional commands, e-mail: common-commits-help@hadoop.apache.org


[48/50] [abbrv] hadoop git commit: HDFS-13710. RBF: setQuota and getQuotaUsage should check the dfs.federation.router.quota.enable. Contributed by yanghuafeng.

Posted by vi...@apache.org.
HDFS-13710. RBF: setQuota and getQuotaUsage should check the dfs.federation.router.quota.enable. Contributed by yanghuafeng.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/43f7fe8a
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/43f7fe8a
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/43f7fe8a

Branch: refs/heads/HDFS-12090
Commit: 43f7fe8aae0eca89cce4d67bfc4965fe8ce63e38
Parents: 7a68ac6
Author: Yiqun Lin <yq...@apache.org>
Authored: Mon Jul 9 15:06:07 2018 +0800
Committer: Yiqun Lin <yq...@apache.org>
Committed: Mon Jul 9 15:06:07 2018 +0800

----------------------------------------------------------------------
 .../hdfs/server/federation/router/Quota.java    |  8 ++
 .../federation/router/RouterRpcServer.java      |  1 -
 .../router/TestDisableRouterQuota.java          | 94 ++++++++++++++++++++
 3 files changed, 102 insertions(+), 1 deletion(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/43f7fe8a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/Quota.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/Quota.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/Quota.java
index 413a4e1..75d3e04 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/Quota.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/Quota.java
@@ -67,6 +67,9 @@ public class Quota {
   public void setQuota(String path, long namespaceQuota,
       long storagespaceQuota, StorageType type) throws IOException {
     rpcServer.checkOperation(OperationCategory.WRITE);
+    if (!router.isQuotaEnabled()) {
+      throw new IOException("The quota system is disabled in Router.");
+    }
 
     // Set quota for current path and its children mount table path.
     final List<RemoteLocation> locations = getQuotaRemoteLocations(path);
@@ -91,6 +94,11 @@ public class Quota {
    * @throws IOException
    */
   public QuotaUsage getQuotaUsage(String path) throws IOException {
+    rpcServer.checkOperation(OperationCategory.READ);
+    if (!router.isQuotaEnabled()) {
+      throw new IOException("The quota system is disabled in Router.");
+    }
+
     final List<RemoteLocation> quotaLocs = getValidQuotaLocations(path);
     RemoteMethod method = new RemoteMethod("getQuotaUsage",
         new Class<?>[] {String.class}, new RemoteParam());

http://git-wip-us.apache.org/repos/asf/hadoop/blob/43f7fe8a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterRpcServer.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterRpcServer.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterRpcServer.java
index 716ebee..7031af7 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterRpcServer.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterRpcServer.java
@@ -1996,7 +1996,6 @@ public class RouterRpcServer extends AbstractService
 
   @Override // ClientProtocol
   public QuotaUsage getQuotaUsage(String path) throws IOException {
-    checkOperation(OperationCategory.READ);
     return this.quotaCall.getQuotaUsage(path);
   }
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/43f7fe8a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestDisableRouterQuota.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestDisableRouterQuota.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestDisableRouterQuota.java
new file mode 100644
index 0000000..2632f59
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestDisableRouterQuota.java
@@ -0,0 +1,94 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs.server.federation.router;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hdfs.server.federation.RouterConfigBuilder;
+
+import java.io.IOException;
+
+import org.apache.hadoop.test.GenericTestUtils;
+import org.junit.AfterClass;
+import org.junit.Before;
+import org.junit.BeforeClass;
+import org.junit.Test;
+
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.fail;
+
+/**
+ * Test the behavior when disabling the Router quota.
+ */
+public class TestDisableRouterQuota {
+
+  private static Router router;
+
+  @BeforeClass
+  public static void setUp() throws Exception {
+    // Build and start a router
+    router = new Router();
+    Configuration routerConf = new RouterConfigBuilder()
+        .quota(false) //set false to verify the quota disabled in Router
+        .rpc()
+        .build();
+    router.init(routerConf);
+    router.setRouterId("TestRouterId");
+    router.start();
+  }
+
+  @AfterClass
+  public static void tearDown() throws IOException {
+    if (router != null) {
+      router.stop();
+      router.close();
+    }
+  }
+
+  @Before
+  public void checkDisableQuota() {
+    assertFalse(router.isQuotaEnabled());
+  }
+
+  @Test
+  public void testSetQuota() throws Exception {
+    long nsQuota = 1024;
+    long ssQuota = 1024;
+
+    try {
+      Quota quotaModule = router.getRpcServer().getQuotaModule();
+      quotaModule.setQuota("/test", nsQuota, ssQuota, null);
+      fail("The setQuota call should fail.");
+    } catch (IOException ioe) {
+      GenericTestUtils.assertExceptionContains(
+          "The quota system is disabled in Router.", ioe);
+    }
+  }
+
+  @Test
+  public void testGetQuotaUsage() throws Exception {
+    try {
+      Quota quotaModule = router.getRpcServer().getQuotaModule();
+      quotaModule.getQuotaUsage("/test");
+      fail("The getQuotaUsage call should fail.");
+    } catch (IOException ioe) {
+      GenericTestUtils.assertExceptionContains(
+          "The quota system is disabled in Router.", ioe);
+    }
+  }
+
+}


---------------------------------------------------------------------
To unsubscribe, e-mail: common-commits-unsubscribe@hadoop.apache.org
For additional commands, e-mail: common-commits-help@hadoop.apache.org


[47/50] [abbrv] hadoop git commit: HDFS-13674. Improve documentation on Metrics. Contributed by Chao Sun.

Posted by vi...@apache.org.
HDFS-13674. Improve documentation on Metrics. Contributed by Chao Sun.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/7a68ac60
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/7a68ac60
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/7a68ac60

Branch: refs/heads/HDFS-12090
Commit: 7a68ac607c52c8a28dcd75a367ae77331787a3b4
Parents: 790c563
Author: Yiqun Lin <yq...@apache.org>
Authored: Mon Jul 9 14:27:34 2018 +0800
Committer: Yiqun Lin <yq...@apache.org>
Committed: Mon Jul 9 14:27:34 2018 +0800

----------------------------------------------------------------------
 .../hadoop-common/src/site/markdown/Metrics.md  | 39 ++++++++++++--------
 1 file changed, 24 insertions(+), 15 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/7a68ac60/hadoop-common-project/hadoop-common/src/site/markdown/Metrics.md
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/site/markdown/Metrics.md b/hadoop-common-project/hadoop-common/src/site/markdown/Metrics.md
index 676ab0b..2c7bd4d 100644
--- a/hadoop-common-project/hadoop-common/src/site/markdown/Metrics.md
+++ b/hadoop-common-project/hadoop-common/src/site/markdown/Metrics.md
@@ -159,14 +159,17 @@ Each metrics record contains tags such as ProcessName, SessionId, and Hostname a
 | `TransactionsAvgTime` | Average time of Journal transactions in milliseconds |
 | `SyncsNumOps` | Total number of Journal syncs |
 | `SyncsAvgTime` | Average time of Journal syncs in milliseconds |
+| `SyncsTime`*num*`s(50/75/90/95/99)thPercentileLatency` | The 50/75/90/95/99th percentile of Journal sync time in milliseconds (*num* seconds granularity). Percentile measurement is off by default, by watching no intervals. The intervals are specified by `dfs.metrics.percentiles.intervals`. |
 | `TransactionsBatchedInSync` | Total number of Journal transactions batched in sync |
+| `TransactionsBatchedInSync`*num*`s(50/75/90/95/99)thPercentileCount` | The 50/75/90/95/99th percentile of number of batched Journal transactions (*num* seconds granularity). Percentile measurement is off by default, by watching no intervals. The intervals are specified by `dfs.metrics.percentiles.intervals`. |
 | `StorageBlockReportNumOps` | Total number of processing block reports from individual storages in DataNode |
 | `StorageBlockReportAvgTime` | Average time of processing block reports in milliseconds |
+| `StorageBlockReport`*num*`s(50/75/90/95/99)thPercentileLatency` | The 50/75/90/95/99th percentile of block report processing time in milliseconds (*num* seconds granularity). Percentile measurement is off by default, by watching no intervals. The intervals are specified by `dfs.metrics.percentiles.intervals`. |
 | `CacheReportNumOps` | Total number of processing cache reports from DataNode |
 | `CacheReportAvgTime` | Average time of processing cache reports in milliseconds |
+| `CacheReport`*num*`s(50/75/90/95/99)thPercentileLatency` | The 50/75/90/95/99th percentile of cached report processing time in milliseconds (*num* seconds granularity). Percentile measurement is off by default, by watching no intervals. The intervals are specified by `dfs.metrics.percentiles.intervals`. |
 | `SafeModeTime` | The interval between FSNameSystem starts and the last time safemode leaves in milliseconds.  (sometimes not equal to the time in SafeMode, see [HDFS-5156](https://issues.apache.org/jira/browse/HDFS-5156)) |
 | `FsImageLoadTime` | Time loading FS Image at startup in milliseconds |
-| `FsImageLoadTime` | Time loading FS Image at startup in milliseconds |
 | `GetEditNumOps` | Total number of edits downloads from SecondaryNameNode |
 | `GetEditAvgTime` | Average edits download time in milliseconds |
 | `GetImageNumOps` | Total number of fsimage downloads from SecondaryNameNode |
@@ -177,22 +180,23 @@ Each metrics record contains tags such as ProcessName, SessionId, and Hostname a
 | `NNStartedTimeInMillis`| NameNode start time in milliseconds |
 | `GenerateEDEKTimeNumOps` | Total number of generating EDEK |
 | `GenerateEDEKTimeAvgTime` | Average time of generating EDEK in milliseconds |
+| `GenerateEDEKTime`*num*`s(50/75/90/95/99)thPercentileLatency` | The 50/75/90/95/99th percentile of time spent in generating EDEK in milliseconds (*num* seconds granularity). Percentile measurement is off by default, by watching no intervals. The intervals are specified by `dfs.metrics.percentiles.intervals`. |
 | `WarmUpEDEKTimeNumOps` | Total number of warming up EDEK |
 | `WarmUpEDEKTimeAvgTime` | Average time of warming up EDEK in milliseconds |
-| `ResourceCheckTime`*num*`s(50/75/90/95/99)thPercentileLatency` | The 50/75/90/95/99th percentile of NameNode resource check latency in milliseconds. Percentile measurement is off by default, by watching no intervals. The intervals are specified by `dfs.metrics.percentiles.intervals`. |
-| `StorageBlockReport`*num*`s(50/75/90/95/99)thPercentileLatency` | The 50/75/90/95/99th percentile of storage block report latency in milliseconds. Percentile measurement is off by default, by watching no intervals. The intervals are specified by `dfs.metrics.percentiles.intervals`. |
+| `WarmUpEDEKTime`*num*`s(50/75/90/95/99)thPercentileLatency` | The 50/75/90/95/99th percentile of time spent in warming up EDEK in milliseconds (*num* seconds granularity). Percentile measurement is off by default, by watching no intervals. The intervals are specified by `dfs.metrics.percentiles.intervals`. |
+| `ResourceCheckTime`*num*`s(50/75/90/95/99)thPercentileLatency` | The 50/75/90/95/99th percentile of of NameNode resource check latency in milliseconds (*num* seconds granularity). Percentile measurement is off by default, by watching no intervals. The intervals are specified by `dfs.metrics.percentiles.intervals`. |
 | `EditLogTailTimeNumOps` | Total number of times the standby NameNode tailed the edit log |
 | `EditLogTailTimeAvgTime` | Average time (in milliseconds) spent by standby NameNode in tailing edit log |
-| `EditLogTailTime`*num*`s(50/75/90/95/99)thPercentileLatency` | The 50/75/90/95/99th percentile of time spent in tailing edit logs by standby NameNode, in milliseconds. Percentile measurement is off by default, by watching no intervals. The intervals are specified by `dfs.metrics.percentiles.intervals`. |
+| `EditLogTailTime`*num*`s(50/75/90/95/99)thPercentileLatency` | The 50/75/90/95/99th percentile of time spent in tailing edit logs by standby NameNode in milliseconds (*num* seconds granularity). Percentile measurement is off by default, by watching no intervals. The intervals are specified by `dfs.metrics.percentiles.intervals`. |
 | `EditLogFetchTimeNumOps` | Total number of times the standby NameNode fetched remote edit streams from journal nodes |
 | `EditLogFetchTimeAvgTime` | Average time (in milliseconds) spent by standby NameNode in fetching remote edit streams from journal nodes |
-| `EditLogFetchTime`*num*`s(50/75/90/95/99)thPercentileLatency` | The 50/75/90/95/99th percentile of time spent in fetching edit streams from journal nodes by standby NameNode, in milliseconds. Percentile measurement is off by default, by watching no intervals. The intervals are specified by `dfs.metrics.percentiles.intervals`. |
+| `EditLogFetchTime`*num*`s(50/75/90/95/99)thPercentileLatency` | The 50/75/90/95/99th percentile of time spent in fetching edit streams from journal nodes by standby NameNode in milliseconds (*num* seconds granularity). Percentile measurement is off by default, by watching no intervals. The intervals are specified by `dfs.metrics.percentiles.intervals`. |
 | `NumEditLogLoadedNumOps` | Total number of times edits were loaded by standby NameNode |
 | `NumEditLogLoadedAvgCount` | Average number of edits loaded by standby NameNode in each edit log tailing |
-| `NumEditLogLoaded`*num*`s(50/75/90/95/99)thPercentileCount` | The 50/75/90/95/99th percentile of number of edits loaded by standby NameNode in each edit log tailing. Percentile measurement is off by default, by watching no intervals. The intervals are specified by `dfs.metrics.percentiles.intervals`. |
+| `NumEditLogLoaded`*num*`s(50/75/90/95/99)thPercentileCount` | The 50/75/90/95/99th percentile of number of edits loaded by standby NameNode in each edit log tailing (*num* seconds granularity). Percentile measurement is off by default, by watching no intervals. The intervals are specified by `dfs.metrics.percentiles.intervals`. |
 | `EditLogTailIntervalNumOps` | Total number of intervals between edit log tailings by standby NameNode |
 | `EditLogTailIntervalAvgTime` | Average time of intervals between edit log tailings by standby NameNode in milliseconds |
-| `EditLogTailInterval`*num*`s(50/75/90/95/99)thPercentileLatency` | The 50/75/90/95/99th percentile of time between edit log tailings by standby NameNode, in milliseconds. Percentile measurement is off by default, by watching no intervals. The intervals are specified by `dfs.metrics.percentiles.intervals`. |
+| `EditLogTailInterval`*num*`s(50/75/90/95/99)thPercentileLatency` | The 50/75/90/95/99th percentile of time between edit log tailings by standby NameNode in milliseconds (*num* seconds granularity). Percentile measurement is off by default, by watching no intervals. The intervals are specified by `dfs.metrics.percentiles.intervals`. |
 
 FSNamesystem
 ------------
@@ -338,13 +342,13 @@ Each metrics record contains tags such as SessionId and Hostname as additional i
 | `RamDiskBlocksEvictedWithoutRead` | Total number of blocks evicted in memory without ever being read from memory |
 | `RamDiskBlocksEvictionWindowMsNumOps` | Number of blocks evicted in memory|
 | `RamDiskBlocksEvictionWindowMsAvgTime` | Average time of blocks in memory before being evicted in milliseconds |
-| `RamDiskBlocksEvictionWindows`*num*`s(50/75/90/95/99)thPercentileLatency` | The 50/75/90/95/99th percentile of latency between memory write and eviction in milliseconds. Percentile measurement is off by default, by watching no intervals. The intervals are specified by `dfs.metrics.percentiles.intervals`. |
+| `RamDiskBlocksEvictionWindows`*num*`s(50/75/90/95/99)thPercentileLatency` | The 50/75/90/95/99th percentile of latency between memory write and eviction in milliseconds (*num* seconds granularity). Percentile measurement is off by default, by watching no intervals. The intervals are specified by `dfs.metrics.percentiles.intervals`. |
 | `RamDiskBlocksLazyPersisted` | Total number of blocks written to disk by lazy writer |
 | `RamDiskBlocksDeletedBeforeLazyPersisted` | Total number of blocks deleted by application before being persisted to disk |
 | `RamDiskBytesLazyPersisted` | Total number of bytes written to disk by lazy writer |
 | `RamDiskBlocksLazyPersistWindowMsNumOps` | Number of blocks written to disk by lazy writer |
 | `RamDiskBlocksLazyPersistWindowMsAvgTime` | Average time of blocks written to disk by lazy writer in milliseconds |
-| `RamDiskBlocksLazyPersistWindows`*num*`s(50/75/90/95/99)thPercentileLatency` | The 50/75/90/95/99th percentile of latency between memory write and disk persist in milliseconds. Percentile measurement is off by default, by watching no intervals. The intervals are specified by `dfs.metrics.percentiles.intervals`. |
+| `RamDiskBlocksLazyPersistWindows`*num*`s(50/75/90/95/99)thPercentileLatency` | The 50/75/90/95/99th percentile of latency between memory write and disk persist in milliseconds (*num* seconds granularity). Percentile measurement is off by default, by watching no intervals. The intervals are specified by `dfs.metrics.percentiles.intervals`. |
 | `FsyncCount` | Total number of fsync |
 | `VolumeFailures` | Total number of volume failures occurred |
 | `ReadBlockOpNumOps` | Total number of read operations |
@@ -371,14 +375,19 @@ Each metrics record contains tags such as SessionId and Hostname as additional i
 | `CacheReportsAvgTime` | Average time of cache report operations in milliseconds |
 | `PacketAckRoundTripTimeNanosNumOps` | Total number of ack round trip |
 | `PacketAckRoundTripTimeNanosAvgTime` | Average time from ack send to receive minus the downstream ack time in nanoseconds |
+| `PacketAckRoundTripTimeNanos`*num*`s(50/75/90/95/99)thPercentileLatency` | The 50/75/90/95/99th percentile latency from ack send to receive minus the downstream ack time in nanoseconds (*num* seconds granularity). Percentile measurement is off by default, by watching no intervals. The intervals are specified by `dfs.metrics.percentiles.intervals`. |
 | `FlushNanosNumOps` | Total number of flushes |
 | `FlushNanosAvgTime` | Average flush time in nanoseconds |
+| `FlushNanos`*num*`s(50/75/90/95/99)thPercentileLatency` | The 50/75/90/95/99th percentile flush time in nanoseconds (*num* seconds granularity). Percentile measurement is off by default, by watching no intervals. The intervals are specified by `dfs.metrics.percentiles.intervals`. |
 | `FsyncNanosNumOps` | Total number of fsync |
 | `FsyncNanosAvgTime` | Average fsync time in nanoseconds |
+| `FsyncNanos`*num*`s(50/75/90/95/99)thPercentileLatency` | The 50/75/90/95/99th percentile fsync time in nanoseconds (*num* seconds granularity). Percentile measurement is off by default, by watching no intervals. The intervals are specified by `dfs.metrics.percentiles.intervals`. |
 | `SendDataPacketBlockedOnNetworkNanosNumOps` | Total number of sending packets |
 | `SendDataPacketBlockedOnNetworkNanosAvgTime` | Average waiting time of sending packets in nanoseconds |
+| `SendDataPacketBlockedOnNetworkNanos`*num*`s(50/75/90/95/99)thPercentileLatency` | The 50/75/90/95/99th percentile waiting time of sending packets in nanoseconds (*num* seconds granularity). Percentile measurement is off by default, by watching no intervals. The intervals are specified by `dfs.metrics.percentiles.intervals`. |
 | `SendDataPacketTransferNanosNumOps` | Total number of sending packets |
 | `SendDataPacketTransferNanosAvgTime` | Average transfer time of sending packets in nanoseconds |
+| `SendDataPacketTransferNanos`*num*`s(50/75/90/95/99)thPercentileLatency` | The 50/75/90/95/99th percentile transfer time of sending packets in nanoseconds (*num* seconds granularity). Percentile measurement is off by default, by watching no intervals. The intervals are specified by `dfs.metrics.percentiles.intervals`. |
 | `TotalWriteTime`| Total number of milliseconds spent on write operation |
 | `TotalReadTime` | Total number of milliseconds spent on read operation |
 | `RemoteBytesRead` | Number of bytes read by remote clients |
@@ -410,23 +419,23 @@ contains tags such as Hostname as additional information along with metrics.
 | `TotalMetadataOperations` | Total number (monotonically increasing) of metadata operations. Metadata operations include stat, list, mkdir, delete, move, open and posix_fadvise. |
 | `MetadataOperationRateNumOps` | The number of metadata operations within an interval time of metric |
 | `MetadataOperationRateAvgTime` | Mean time of metadata operations in milliseconds |
-| `MetadataOperationLatency`*num*`s(50/75/90/95/99)thPercentileLatency` | The 50/75/90/95/99th percentile of metadata operations latency in milliseconds. Percentile measurement is off by default, by watching no intervals. The intervals are specified by `dfs.metrics.percentiles.intervals`. |
+| `MetadataOperationLatency`*num*`s(50/75/90/95/99)thPercentileLatency` | The 50/75/90/95/99th percentile of metadata operations latency in milliseconds (*num* seconds granularity). Percentile measurement is off by default, by watching no intervals. The intervals are specified by `dfs.metrics.percentiles.intervals`. |
 | `TotalDataFileIos` | Total number (monotonically increasing) of data file io operations |
 | `DataFileIoRateNumOps` | The number of data file io operations within an interval time of metric |
 | `DataFileIoRateAvgTime` | Mean time of data file io operations in milliseconds |
-| `DataFileIoLatency`*num*`s(50/75/90/95/99)thPercentileLatency` | The 50/75/90/95/99th percentile of data file io operations latency in milliseconds. Percentile measurement is off by default, by watching no intervals. The intervals are specified by `dfs.metrics.percentiles.intervals`. |
+| `DataFileIoLatency`*num*`s(50/75/90/95/99)thPercentileLatency` | The 50/75/90/95/99th percentile of data file io operations latency in milliseconds (*num* seconds granularity). Percentile measurement is off by default, by watching no intervals. The intervals are specified by `dfs.metrics.percentiles.intervals`. |
 | `FlushIoRateNumOps` | The number of file flush io operations within an interval time of metric |
 | `FlushIoRateAvgTime` | Mean time of file flush io operations in milliseconds |
-| `FlushIoLatency`*num*`s(50/75/90/95/99)thPercentileLatency` | The 50/75/90/95/99th percentile of file flush io operations latency in milliseconds. Percentile measurement is off by default, by watching no intervals. The intervals are specified by `dfs.metrics.percentiles.intervals`. |
+| `FlushIoLatency`*num*`s(50/75/90/95/99)thPercentileLatency` | The 50/75/90/95/99th percentile of file flush io operations latency in milliseconds (*num* seconds granularity). Percentile measurement is off by default, by watching no intervals. The intervals are specified by `dfs.metrics.percentiles.intervals`. |
 | `SyncIoRateNumOps` | The number of file sync io operations within an interval time of metric |
 | `SyncIoRateAvgTime` | Mean time of file sync io operations in milliseconds |
-| `SyncIoLatency`*num*`s(50/75/90/95/99)thPercentileLatency` | The 50/75/90/95/99th percentile of file sync io operations latency in milliseconds. Percentile measurement is off by default, by watching no intervals. The intervals are specified by `dfs.metrics.percentiles.intervals`. |
+| `SyncIoLatency`*num*`s(50/75/90/95/99)thPercentileLatency` | The 50/75/90/95/99th percentile of file sync io operations latency in milliseconds (*num* seconds granularity). Percentile measurement is off by default, by watching no intervals. The intervals are specified by `dfs.metrics.percentiles.intervals`. |
 | `ReadIoRateNumOps` | The number of file read io operations within an interval time of metric |
 | `ReadIoRateAvgTime` | Mean time of file read io operations in milliseconds |
-| `ReadIoLatency`*num*`s(50/75/90/95/99)thPercentileLatency` | The 50/75/90/95/99th percentile of file read io operations latency in milliseconds. Percentile measurement is off by default, by watching no intervals. The intervals are specified by `dfs.metrics.percentiles.intervals`. |
+| `ReadIoLatency`*num*`s(50/75/90/95/99)thPercentileLatency` | The 50/75/90/95/99th percentile of file read io operations latency in milliseconds (*num* seconds granularity). Percentile measurement is off by default, by watching no intervals. The intervals are specified by `dfs.metrics.percentiles.intervals`. |
 | `WriteIoRateNumOps` | The number of file write io operations within an interval time of metric |
 | `WriteIoRateAvgTime` | Mean time of file write io operations in milliseconds |
-| `WriteIoLatency`*num*`s(50/75/90/95/99)thPercentileLatency` | The 50/75/90/95/99th percentile of file write io operations latency in milliseconds. Percentile measurement is off by default, by watching no intervals. The intervals are specified by `dfs.metrics.percentiles.intervals`. |
+| `WriteIoLatency`*num*`s(50/75/90/95/99)thPercentileLatency` | The 50/75/90/95/99th percentile of file write io operations latency in milliseconds (*num* seconds granularity). Percentile measurement is off by default, by watching no intervals. The intervals are specified by `dfs.metrics.percentiles.intervals`. |
 | `TotalFileIoErrors` | Total number (monotonically increasing) of file io error operations |
 | `FileIoErrorRateNumOps` | The number of file io error operations within an interval time of metric |
 | `FileIoErrorRateAvgTime` | It measures the mean time in milliseconds from the start of an operation to hitting a failure |


---------------------------------------------------------------------
To unsubscribe, e-mail: common-commits-unsubscribe@hadoop.apache.org
For additional commands, e-mail: common-commits-help@hadoop.apache.org


[27/50] [abbrv] hadoop git commit: YARN-8492. ATSv2 HBase tests are failing with ClassNotFoundException. Contributed by Rohith Sharma K S.

Posted by vi...@apache.org.
YARN-8492. ATSv2 HBase tests are failing with ClassNotFoundException. Contributed by Rohith Sharma K S.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/e4bf38cf
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/e4bf38cf
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/e4bf38cf

Branch: refs/heads/HDFS-12090
Commit: e4bf38cf50943565796c00f8b5711a2882813488
Parents: 498e3bf
Author: Sunil G <su...@apache.org>
Authored: Fri Jul 6 12:05:32 2018 -0700
Committer: Sunil G <su...@apache.org>
Committed: Fri Jul 6 12:05:32 2018 -0700

----------------------------------------------------------------------
 .../pom.xml                                               | 10 ++++++++++
 1 file changed, 10 insertions(+)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/e4bf38cf/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase-tests/pom.xml
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase-tests/pom.xml b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase-tests/pom.xml
index 8c143d3..05a5c65 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase-tests/pom.xml
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase-tests/pom.xml
@@ -133,6 +133,10 @@
           <groupId>org.apache.hadoop</groupId>
           <artifactId>hadoop-auth</artifactId>
         </exclusion>
+        <exclusion>
+          <groupId>org.apache.hadoop</groupId>
+          <artifactId>hadoop-hdfs-client</artifactId>
+        </exclusion>
       </exclusions>
     </dependency>
 
@@ -355,6 +359,12 @@
       <artifactId>jetty-webapp</artifactId>
       <scope>test</scope>
     </dependency>
+
+    <dependency>
+      <groupId>org.apache.commons</groupId>
+      <artifactId>commons-lang3</artifactId>
+      <scope>test</scope>
+    </dependency>
   </dependencies>
 
   <build>


---------------------------------------------------------------------
To unsubscribe, e-mail: common-commits-unsubscribe@hadoop.apache.org
For additional commands, e-mail: common-commits-help@hadoop.apache.org


[44/50] [abbrv] hadoop git commit: HDFS-13721. NPE in DataNode due to uninitialized DiskBalancer.

Posted by vi...@apache.org.
HDFS-13721. NPE in DataNode due to uninitialized DiskBalancer.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/936e0df0
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/936e0df0
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/936e0df0

Branch: refs/heads/HDFS-12090
Commit: 936e0df0d344f13eea97fe624b154e8356cdea7c
Parents: ba68320
Author: Xiao Chen <xi...@apache.org>
Authored: Fri Jul 6 20:45:27 2018 -0700
Committer: Xiao Chen <xi...@apache.org>
Committed: Fri Jul 6 21:00:32 2018 -0700

----------------------------------------------------------------------
 .../hadoop/hdfs/server/datanode/DataNode.java    | 19 +++++++++++++------
 .../server/diskbalancer/TestDiskBalancer.java    |  5 +++++
 2 files changed, 18 insertions(+), 6 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/936e0df0/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
index 4baafb9..7df92f6 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
@@ -3132,7 +3132,7 @@ public class DataNode extends ReconfigurableBase
   @Override // DataNodeMXBean
   public String getDiskBalancerStatus() {
     try {
-      return this.diskBalancer.queryWorkStatus().toJsonString();
+      return getDiskBalancer().queryWorkStatus().toJsonString();
     } catch (IOException ex) {
       LOG.debug("Reading diskbalancer Status failed. ex:{}", ex);
       return "";
@@ -3510,7 +3510,7 @@ public class DataNode extends ReconfigurableBase
           DiskBalancerException.Result.DATANODE_STATUS_NOT_REGULAR);
     }
 
-    this.diskBalancer.submitPlan(planID, planVersion, planFile, planData,
+    getDiskBalancer().submitPlan(planID, planVersion, planFile, planData,
             skipDateCheck);
   }
 
@@ -3522,7 +3522,7 @@ public class DataNode extends ReconfigurableBase
   public void cancelDiskBalancePlan(String planID) throws
       IOException {
     checkSuperuserPrivilege();
-    this.diskBalancer.cancelPlan(planID);
+    getDiskBalancer().cancelPlan(planID);
   }
 
   /**
@@ -3533,7 +3533,7 @@ public class DataNode extends ReconfigurableBase
   @Override
   public DiskBalancerWorkStatus queryDiskBalancerPlan() throws IOException {
     checkSuperuserPrivilege();
-    return this.diskBalancer.queryWorkStatus();
+    return getDiskBalancer().queryWorkStatus();
   }
 
   /**
@@ -3550,9 +3550,9 @@ public class DataNode extends ReconfigurableBase
     Preconditions.checkNotNull(key);
     switch (key) {
     case DiskBalancerConstants.DISKBALANCER_VOLUME_NAME:
-      return this.diskBalancer.getVolumeNames();
+      return getDiskBalancer().getVolumeNames();
     case DiskBalancerConstants.DISKBALANCER_BANDWIDTH :
-      return Long.toString(this.diskBalancer.getBandwidth());
+      return Long.toString(getDiskBalancer().getBandwidth());
     default:
       LOG.error("Disk Balancer - Unknown key in get balancer setting. Key: {}",
           key);
@@ -3606,4 +3606,11 @@ public class DataNode extends ReconfigurableBase
     }
     return volumeInfoList;
   }
+
+  private DiskBalancer getDiskBalancer() throws IOException {
+    if (this.diskBalancer == null) {
+      throw new IOException("DiskBalancer is not initialized");
+    }
+    return this.diskBalancer;
+  }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/936e0df0/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/diskbalancer/TestDiskBalancer.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/diskbalancer/TestDiskBalancer.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/diskbalancer/TestDiskBalancer.java
index 55cc57e..e789694 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/diskbalancer/TestDiskBalancer.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/diskbalancer/TestDiskBalancer.java
@@ -109,6 +109,11 @@ public class TestDiskBalancer {
           .getFsVolumeReferences()) {
         assertEquals(ref.size(), dbDnNode.getVolumeCount());
       }
+
+      // Shutdown the DN first, to verify that calling diskbalancer APIs on
+      // uninitialized DN doesn't NPE
+      dnNode.shutdown();
+      assertEquals("", dnNode.getDiskBalancerStatus());
     } finally {
       cluster.shutdown();
     }


---------------------------------------------------------------------
To unsubscribe, e-mail: common-commits-unsubscribe@hadoop.apache.org
For additional commands, e-mail: common-commits-help@hadoop.apache.org


[04/50] [abbrv] hadoop git commit: HDFS-13715:diskbalancer does not work if one of the blockpools are empty on a Federated cluster. Contributed by Bharat Viswanadham

Posted by vi...@apache.org.
HDFS-13715:diskbalancer does not work if one of the blockpools are empty on a Federated cluster. Contributed by Bharat Viswanadham


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/59a3038b
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/59a3038b
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/59a3038b

Branch: refs/heads/HDFS-12090
Commit: 59a3038bc3d7913dca3de971026bc32cef536a2d
Parents: 7296b64
Author: Bharat Viswanadham <bh...@apache.org>
Authored: Mon Jul 2 21:43:18 2018 -0700
Committer: Bharat Viswanadham <bh...@apache.org>
Committed: Mon Jul 2 21:43:18 2018 -0700

----------------------------------------------------------------------
 .../hdfs/server/datanode/DiskBalancer.java      | 29 +++++---
 .../datanode/fsdataset/impl/FsDatasetImpl.java  |  8 +++
 .../server/diskbalancer/TestDiskBalancer.java   | 75 ++++++++++++++++++--
 3 files changed, 96 insertions(+), 16 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/59a3038b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DiskBalancer.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DiskBalancer.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DiskBalancer.java
index 91c3624..53db022 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DiskBalancer.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DiskBalancer.java
@@ -18,6 +18,7 @@
  */
 package org.apache.hadoop.hdfs.server.datanode;
 
+import com.google.common.annotations.VisibleForTesting;
 import com.google.common.base.Preconditions;
 import org.apache.commons.codec.digest.DigestUtils;
 import org.apache.hadoop.classification.InterfaceAudience;
@@ -77,7 +78,8 @@ import static java.util.concurrent.TimeUnit.SECONDS;
 @InterfaceAudience.Private
 public class DiskBalancer {
 
-  private static final Logger LOG = LoggerFactory.getLogger(DiskBalancer
+  @VisibleForTesting
+  public static final Logger LOG = LoggerFactory.getLogger(DiskBalancer
       .class);
   private final FsDatasetSpi<?> dataset;
   private final String dataNodeUUID;
@@ -902,15 +904,19 @@ public class DiskBalancer {
         try {
           ExtendedBlock block = iter.nextBlock();
 
-          // A valid block is a finalized block, we iterate until we get
-          // finalized blocks
-          if (!this.dataset.isValidBlock(block)) {
-            continue;
-          }
+          if (block != null) {
+            // A valid block is a finalized block, we iterate until we get
+            // finalized blocks
+            if (!this.dataset.isValidBlock(block)) {
+              continue;
+            }
 
-          // We don't look for the best, we just do first fit
-          if (isLessThanNeeded(block.getNumBytes(), item)) {
-            return block;
+            // We don't look for the best, we just do first fit
+            if (isLessThanNeeded(block.getNumBytes(), item)) {
+              return block;
+            }
+          } else {
+            LOG.info("There are no blocks in the blockPool {}", iter.getBlockPoolId());
           }
 
         } catch (IOException e) {
@@ -1126,6 +1132,11 @@ public class DiskBalancer {
             Thread.currentThread().interrupt();
             item.incErrorCount();
             this.setExitFlag();
+          } catch (RuntimeException ex) {
+            // Exiting if any run time exceptions.
+            LOG.error("Got an unexpected Runtime Exception {}", ex);
+            item.incErrorCount();
+            this.setExitFlag();
           }
         }
       } finally {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/59a3038b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java
index 695a421..89c278a 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java
@@ -1927,6 +1927,10 @@ class FsDatasetImpl implements FsDatasetSpi<FsVolumeImpl> {
    */
   @Override // FsDatasetSpi
   public boolean isValidBlock(ExtendedBlock b) {
+    // If block passed is null, we should return false.
+    if (b == null) {
+      return false;
+    }
     return isValid(b, ReplicaState.FINALIZED);
   }
   
@@ -1935,6 +1939,10 @@ class FsDatasetImpl implements FsDatasetSpi<FsVolumeImpl> {
    */
   @Override // {@link FsDatasetSpi}
   public boolean isValidRbw(final ExtendedBlock b) {
+    // If block passed is null, we should return false.
+    if (b == null) {
+      return false;
+    }
     return isValid(b, ReplicaState.RBW);
   }
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/59a3038b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/diskbalancer/TestDiskBalancer.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/diskbalancer/TestDiskBalancer.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/diskbalancer/TestDiskBalancer.java
index deae6ea..55cc57e 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/diskbalancer/TestDiskBalancer.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/diskbalancer/TestDiskBalancer.java
@@ -154,7 +154,7 @@ public class TestDiskBalancer {
       NodePlan plan = dataMover.generatePlan();
       dataMover.executePlan(plan);
       dataMover.verifyPlanExectionDone();
-      dataMover.verifyAllVolumesHaveData();
+      dataMover.verifyAllVolumesHaveData(true);
       dataMover.verifyTolerance(plan, 0, sourceDiskIndex, 10);
     } finally {
       cluster.shutdown();
@@ -209,7 +209,7 @@ public class TestDiskBalancer {
       NodePlan plan = dataMover.generatePlan();
       dataMover.executePlan(plan);
       dataMover.verifyPlanExectionDone();
-      dataMover.verifyAllVolumesHaveData();
+      dataMover.verifyAllVolumesHaveData(true);
       dataMover.verifyTolerance(plan, 0, sourceDiskIndex, 10);
     } finally {
       cluster.shutdown();
@@ -217,6 +217,66 @@ public class TestDiskBalancer {
 
   }
 
+
+  @Test
+  public void testDiskBalancerWithFedClusterWithOneNameServiceEmpty() throws
+      Exception {
+    Configuration conf = new HdfsConfiguration();
+    conf.setBoolean(DFSConfigKeys.DFS_DISK_BALANCER_ENABLED, true);
+    final int blockCount = 100;
+    final int blockSize = 1024;
+    final int diskCount = 2;
+    final int dataNodeCount = 1;
+    final int dataNodeIndex = 0;
+    final int sourceDiskIndex = 0;
+    final long cap = blockSize * 3L * blockCount;
+
+    conf.setInt(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, blockSize);
+    conf.setInt(DFSConfigKeys.DFS_BYTES_PER_CHECKSUM_KEY, blockSize);
+
+    final MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
+        .nnTopology(MiniDFSNNTopology.simpleFederatedTopology(2))
+        .numDataNodes(dataNodeCount)
+        .storagesPerDatanode(diskCount)
+        .storageCapacities(new long[] {cap, cap})
+        .build();
+    cluster.waitActive();
+
+    DFSTestUtil.setFederatedConfiguration(cluster, conf);
+
+    final String fileName = "/tmp.txt";
+    final Path filePath = new Path(fileName);
+    long fileLen = blockCount * blockSize;
+
+
+    //Writing data only to one nameservice.
+    FileSystem fs = cluster.getFileSystem(0);
+    TestBalancer.createFile(cluster, filePath, fileLen, (short) 1,
+        0);
+    DFSTestUtil.waitReplication(fs, filePath, (short) 1);
+
+
+    GenericTestUtils.LogCapturer logCapturer = GenericTestUtils.LogCapturer
+        .captureLogs(DiskBalancer.LOG);
+
+    try {
+      DataMover dataMover = new DataMover(cluster, dataNodeIndex,
+          sourceDiskIndex, conf, blockSize, blockCount);
+      dataMover.moveDataToSourceDisk();
+      NodePlan plan = dataMover.generatePlan();
+      dataMover.executePlan(plan);
+      dataMover.verifyPlanExectionDone();
+      //Because here we have one nameservice empty, don't check
+      // blockPoolCount.
+      dataMover.verifyAllVolumesHaveData(false);
+    } finally {
+      Assert.assertTrue(logCapturer.getOutput().contains("There are no " +
+          "blocks in the blockPool"));
+      cluster.shutdown();
+    }
+
+  }
+
   @Test
   public void testBalanceDataBetweenMultiplePairsOfVolumes()
       throws Exception {
@@ -255,7 +315,7 @@ public class TestDiskBalancer {
 
       dataMover.executePlan(plan);
       dataMover.verifyPlanExectionDone();
-      dataMover.verifyAllVolumesHaveData();
+      dataMover.verifyAllVolumesHaveData(true);
       dataMover.verifyTolerance(plan, 0, sourceDiskIndex, 10);
     } finally {
       cluster.shutdown();
@@ -296,7 +356,7 @@ public class TestDiskBalancer {
       dataMover.moveDataToSourceDisk();
       NodePlan plan = dataMover.generatePlan();
       dataMover.executePlanDuringDiskRemove(plan);
-      dataMover.verifyAllVolumesHaveData();
+      dataMover.verifyAllVolumesHaveData(true);
       dataMover.verifyTolerance(plan, 0, sourceDiskIndex, 10);
     } catch (Exception e) {
       Assert.fail("Unexpected exception: " + e);
@@ -651,14 +711,15 @@ public class TestDiskBalancer {
     /**
      * Once diskBalancer is run, all volumes mush has some data.
      */
-    public void verifyAllVolumesHaveData() throws IOException {
+    public void verifyAllVolumesHaveData(boolean checkblockPoolCount) throws
+        IOException {
       node = cluster.getDataNodes().get(dataNodeIndex);
       try (FsDatasetSpi.FsVolumeReferences refs =
                node.getFSDataset().getFsVolumeReferences()) {
         for (FsVolumeSpi volume : refs) {
-          assertTrue(DiskBalancerTestUtil.getBlockCount(volume, true) > 0);
+          assertTrue(DiskBalancerTestUtil.getBlockCount(volume, checkblockPoolCount) > 0);
           LOG.info("{} : Block Count : {}", refs, DiskBalancerTestUtil
-              .getBlockCount(volume, true));
+              .getBlockCount(volume, checkblockPoolCount));
         }
       }
     }


---------------------------------------------------------------------
To unsubscribe, e-mail: common-commits-unsubscribe@hadoop.apache.org
For additional commands, e-mail: common-commits-help@hadoop.apache.org


[36/50] [abbrv] hadoop git commit: HDDS-167. Rename KeySpaceManager to OzoneManager. Contributed by Arpit Agarwal.

Posted by vi...@apache.org.
http://git-wip-us.apache.org/repos/asf/hadoop/blob/061b1685/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/ksm/TestKSMMetrcis.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/ksm/TestKSMMetrcis.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/ksm/TestKSMMetrcis.java
deleted file mode 100644
index bf7d870..0000000
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/ksm/TestKSMMetrcis.java
+++ /dev/null
@@ -1,306 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with this
- * work for additional information regarding copyright ownership.  The ASF
- * licenses this file to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations under
- * the License.
- */
-package org.apache.hadoop.ozone.ksm;
-
-import static org.apache.hadoop.test.MetricsAsserts.assertCounter;
-import static org.apache.hadoop.test.MetricsAsserts.getMetrics;
-
-import java.io.IOException;
-
-import org.apache.hadoop.metrics2.MetricsRecordBuilder;
-import org.apache.hadoop.ozone.MiniOzoneCluster;
-import org.apache.hadoop.ozone.OzoneConfigKeys;
-import org.apache.hadoop.hdds.conf.OzoneConfiguration;
-import org.apache.hadoop.ozone.OzoneConsts;
-import org.apache.hadoop.test.Whitebox;
-import org.junit.After;
-import org.junit.Before;
-import org.junit.Test;
-import org.mockito.Mockito;
-
-/**
- * Test for KSM metrics.
- */
-public class TestKSMMetrcis {
-  private MiniOzoneCluster cluster;
-  private KeySpaceManager ksmManager;
-
-  /**
-   * The exception used for testing failure metrics.
-   */
-  private IOException exception = new IOException();
-
-  /**
-   * Create a MiniDFSCluster for testing.
-   *
-   * @throws IOException
-   */
-  @Before
-  public void setup() throws Exception {
-    OzoneConfiguration conf = new OzoneConfiguration();
-    conf.set(OzoneConfigKeys.OZONE_HANDLER_TYPE_KEY,
-        OzoneConsts.OZONE_HANDLER_DISTRIBUTED);
-    cluster = MiniOzoneCluster.newBuilder(conf).build();
-    cluster.waitForClusterToBeReady();
-    ksmManager = cluster.getKeySpaceManager();
-  }
-
-  /**
-   * Shutdown MiniDFSCluster.
-   */
-  @After
-  public void shutdown() {
-    if (cluster != null) {
-      cluster.shutdown();
-    }
-  }
-
-  @Test
-  public void testVolumeOps() throws IOException {
-    VolumeManager volumeManager = (VolumeManager) Whitebox
-        .getInternalState(ksmManager, "volumeManager");
-    VolumeManager mockVm = Mockito.spy(volumeManager);
-
-    Mockito.doNothing().when(mockVm).createVolume(null);
-    Mockito.doNothing().when(mockVm).deleteVolume(null);
-    Mockito.doReturn(null).when(mockVm).getVolumeInfo(null);
-    Mockito.doReturn(true).when(mockVm).checkVolumeAccess(null, null);
-    Mockito.doNothing().when(mockVm).setOwner(null, null);
-    Mockito.doReturn(null).when(mockVm).listVolumes(null, null, null, 0);
-
-    Whitebox.setInternalState(ksmManager, "volumeManager", mockVm);
-    doVolumeOps();
-
-    MetricsRecordBuilder ksmMetrics = getMetrics("KSMMetrics");
-    assertCounter("NumVolumeOps", 6L, ksmMetrics);
-    assertCounter("NumVolumeCreates", 1L, ksmMetrics);
-    assertCounter("NumVolumeUpdates", 1L, ksmMetrics);
-    assertCounter("NumVolumeInfos", 1L, ksmMetrics);
-    assertCounter("NumVolumeCheckAccesses", 1L, ksmMetrics);
-    assertCounter("NumVolumeDeletes", 1L, ksmMetrics);
-    assertCounter("NumVolumeLists", 1L, ksmMetrics);
-
-    // inject exception to test for Failure Metrics
-    Mockito.doThrow(exception).when(mockVm).createVolume(null);
-    Mockito.doThrow(exception).when(mockVm).deleteVolume(null);
-    Mockito.doThrow(exception).when(mockVm).getVolumeInfo(null);
-    Mockito.doThrow(exception).when(mockVm).checkVolumeAccess(null, null);
-    Mockito.doThrow(exception).when(mockVm).setOwner(null, null);
-    Mockito.doThrow(exception).when(mockVm).listVolumes(null, null, null, 0);
-
-    Whitebox.setInternalState(ksmManager, "volumeManager", mockVm);
-    doVolumeOps();
-
-    ksmMetrics = getMetrics("KSMMetrics");
-    assertCounter("NumVolumeOps", 12L, ksmMetrics);
-    assertCounter("NumVolumeCreates", 2L, ksmMetrics);
-    assertCounter("NumVolumeUpdates", 2L, ksmMetrics);
-    assertCounter("NumVolumeInfos", 2L, ksmMetrics);
-    assertCounter("NumVolumeCheckAccesses", 2L, ksmMetrics);
-    assertCounter("NumVolumeDeletes", 2L, ksmMetrics);
-    assertCounter("NumVolumeLists", 2L, ksmMetrics);
-
-    assertCounter("NumVolumeCreateFails", 1L, ksmMetrics);
-    assertCounter("NumVolumeUpdateFails", 1L, ksmMetrics);
-    assertCounter("NumVolumeInfoFails", 1L, ksmMetrics);
-    assertCounter("NumVolumeCheckAccessFails", 1L, ksmMetrics);
-    assertCounter("NumVolumeDeleteFails", 1L, ksmMetrics);
-    assertCounter("NumVolumeListFails", 1L, ksmMetrics);
-  }
-
-  @Test
-  public void testBucketOps() throws IOException {
-    BucketManager bucketManager = (BucketManager) Whitebox
-        .getInternalState(ksmManager, "bucketManager");
-    BucketManager mockBm = Mockito.spy(bucketManager);
-
-    Mockito.doNothing().when(mockBm).createBucket(null);
-    Mockito.doNothing().when(mockBm).deleteBucket(null, null);
-    Mockito.doReturn(null).when(mockBm).getBucketInfo(null, null);
-    Mockito.doNothing().when(mockBm).setBucketProperty(null);
-    Mockito.doReturn(null).when(mockBm).listBuckets(null, null, null, 0);
-
-    Whitebox.setInternalState(ksmManager, "bucketManager", mockBm);
-    doBucketOps();
-
-    MetricsRecordBuilder ksmMetrics = getMetrics("KSMMetrics");
-    assertCounter("NumBucketOps", 5L, ksmMetrics);
-    assertCounter("NumBucketCreates", 1L, ksmMetrics);
-    assertCounter("NumBucketUpdates", 1L, ksmMetrics);
-    assertCounter("NumBucketInfos", 1L, ksmMetrics);
-    assertCounter("NumBucketDeletes", 1L, ksmMetrics);
-    assertCounter("NumBucketLists", 1L, ksmMetrics);
-
-    // inject exception to test for Failure Metrics
-    Mockito.doThrow(exception).when(mockBm).createBucket(null);
-    Mockito.doThrow(exception).when(mockBm).deleteBucket(null, null);
-    Mockito.doThrow(exception).when(mockBm).getBucketInfo(null, null);
-    Mockito.doThrow(exception).when(mockBm).setBucketProperty(null);
-    Mockito.doThrow(exception).when(mockBm).listBuckets(null, null, null, 0);
-
-    Whitebox.setInternalState(ksmManager, "bucketManager", mockBm);
-    doBucketOps();
-
-    ksmMetrics = getMetrics("KSMMetrics");
-    assertCounter("NumBucketOps", 10L, ksmMetrics);
-    assertCounter("NumBucketCreates", 2L, ksmMetrics);
-    assertCounter("NumBucketUpdates", 2L, ksmMetrics);
-    assertCounter("NumBucketInfos", 2L, ksmMetrics);
-    assertCounter("NumBucketDeletes", 2L, ksmMetrics);
-    assertCounter("NumBucketLists", 2L, ksmMetrics);
-
-    assertCounter("NumBucketCreateFails", 1L, ksmMetrics);
-    assertCounter("NumBucketUpdateFails", 1L, ksmMetrics);
-    assertCounter("NumBucketInfoFails", 1L, ksmMetrics);
-    assertCounter("NumBucketDeleteFails", 1L, ksmMetrics);
-    assertCounter("NumBucketListFails", 1L, ksmMetrics);
-  }
-
-  @Test
-  public void testKeyOps() throws IOException {
-    KeyManager bucketManager = (KeyManager) Whitebox
-        .getInternalState(ksmManager, "keyManager");
-    KeyManager mockKm = Mockito.spy(bucketManager);
-
-    Mockito.doReturn(null).when(mockKm).openKey(null);
-    Mockito.doNothing().when(mockKm).deleteKey(null);
-    Mockito.doReturn(null).when(mockKm).lookupKey(null);
-    Mockito.doReturn(null).when(mockKm).listKeys(null, null, null, null, 0);
-
-    Whitebox.setInternalState(ksmManager, "keyManager", mockKm);
-    doKeyOps();
-
-    MetricsRecordBuilder ksmMetrics = getMetrics("KSMMetrics");
-    assertCounter("NumKeyOps", 4L, ksmMetrics);
-    assertCounter("NumKeyAllocate", 1L, ksmMetrics);
-    assertCounter("NumKeyLookup", 1L, ksmMetrics);
-    assertCounter("NumKeyDeletes", 1L, ksmMetrics);
-    assertCounter("NumKeyLists", 1L, ksmMetrics);
-
-    // inject exception to test for Failure Metrics
-    Mockito.doThrow(exception).when(mockKm).openKey(null);
-    Mockito.doThrow(exception).when(mockKm).deleteKey(null);
-    Mockito.doThrow(exception).when(mockKm).lookupKey(null);
-    Mockito.doThrow(exception).when(mockKm).listKeys(
-        null, null, null, null, 0);
-
-    Whitebox.setInternalState(ksmManager, "keyManager", mockKm);
-    doKeyOps();
-
-    ksmMetrics = getMetrics("KSMMetrics");
-    assertCounter("NumKeyOps", 8L, ksmMetrics);
-    assertCounter("NumKeyAllocate", 2L, ksmMetrics);
-    assertCounter("NumKeyLookup", 2L, ksmMetrics);
-    assertCounter("NumKeyDeletes", 2L, ksmMetrics);
-    assertCounter("NumKeyLists", 2L, ksmMetrics);
-
-    assertCounter("NumKeyAllocateFails", 1L, ksmMetrics);
-    assertCounter("NumKeyLookupFails", 1L, ksmMetrics);
-    assertCounter("NumKeyDeleteFails", 1L, ksmMetrics);
-    assertCounter("NumKeyListFails", 1L, ksmMetrics);
-  }
-
-  /**
-   * Test volume operations with ignoring thrown exception.
-   */
-  private void doVolumeOps() {
-    try {
-      ksmManager.createVolume(null);
-    } catch (IOException ignored) {
-    }
-
-    try {
-      ksmManager.deleteVolume(null);
-    } catch (IOException ignored) {
-    }
-
-    try {
-      ksmManager.getVolumeInfo(null);
-    } catch (IOException ignored) {
-    }
-
-    try {
-      ksmManager.checkVolumeAccess(null, null);
-    } catch (IOException ignored) {
-    }
-
-    try {
-      ksmManager.setOwner(null, null);
-    } catch (IOException ignored) {
-    }
-
-    try {
-      ksmManager.listAllVolumes(null, null, 0);
-    } catch (IOException ignored) {
-    }
-  }
-
-  /**
-   * Test bucket operations with ignoring thrown exception.
-   */
-  private void doBucketOps() {
-    try {
-      ksmManager.createBucket(null);
-    } catch (IOException ignored) {
-    }
-
-    try {
-      ksmManager.deleteBucket(null, null);
-    } catch (IOException ignored) {
-    }
-
-    try {
-      ksmManager.getBucketInfo(null, null);
-    } catch (IOException ignored) {
-    }
-
-    try {
-      ksmManager.setBucketProperty(null);
-    } catch (IOException ignored) {
-    }
-
-    try {
-      ksmManager.listBuckets(null, null, null, 0);
-    } catch (IOException ignored) {
-    }
-  }
-
-  /**
-   * Test key operations with ignoring thrown exception.
-   */
-  private void doKeyOps() {
-    try {
-      ksmManager.openKey(null);
-    } catch (IOException ignored) {
-    }
-
-    try {
-      ksmManager.deleteKey(null);
-    } catch (IOException ignored) {
-    }
-
-    try {
-      ksmManager.lookupKey(null);
-    } catch (IOException ignored) {
-    }
-
-    try {
-      ksmManager.listKeys(null, null, null, null, 0);
-    } catch (IOException ignored) {
-    }
-  }
-}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/061b1685/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/ksm/TestKSMSQLCli.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/ksm/TestKSMSQLCli.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/ksm/TestKSMSQLCli.java
deleted file mode 100644
index 7b92ec7..0000000
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/ksm/TestKSMSQLCli.java
+++ /dev/null
@@ -1,284 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with this
- * work for additional information regarding copyright ownership.  The ASF
- * licenses this file to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations under
- * the License.
- */
-package org.apache.hadoop.ozone.ksm;
-
-import org.apache.hadoop.hdfs.server.datanode.ObjectStoreHandler;
-import org.apache.hadoop.ozone.MiniOzoneCluster;
-import org.apache.hadoop.ozone.OzoneConfigKeys;
-import org.apache.hadoop.hdds.conf.OzoneConfiguration;
-import org.apache.hadoop.ozone.OzoneConsts;
-import org.apache.hadoop.ozone.scm.cli.SQLCLI;
-import org.apache.hadoop.ozone.web.handlers.BucketArgs;
-import org.apache.hadoop.ozone.web.handlers.KeyArgs;
-import org.apache.hadoop.ozone.web.handlers.UserArgs;
-import org.apache.hadoop.ozone.web.handlers.VolumeArgs;
-import org.apache.hadoop.ozone.web.interfaces.StorageHandler;
-import org.apache.hadoop.ozone.web.utils.OzoneUtils;
-import org.apache.hadoop.test.GenericTestUtils;
-import org.junit.After;
-import org.junit.Before;
-import org.junit.Test;
-import org.junit.runner.RunWith;
-import org.junit.runners.Parameterized;
-
-import java.io.IOException;
-import java.io.OutputStream;
-import java.nio.file.Files;
-import java.nio.file.Paths;
-import java.sql.Connection;
-import java.sql.DriverManager;
-import java.sql.ResultSet;
-import java.sql.SQLException;
-import java.sql.Statement;
-import java.util.Arrays;
-import java.util.Collection;
-import java.util.HashMap;
-import java.util.LinkedList;
-import java.util.List;
-import java.util.UUID;
-
-import static org.apache.hadoop.ozone.OzoneConsts.KSM_DB_NAME;
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertFalse;
-import static org.junit.Assert.assertNotNull;
-import static org.junit.Assert.assertTrue;
-
-/**
- * This class tests the CLI that transforms ksm.db into SQLite DB files.
- */
-@RunWith(Parameterized.class)
-public class TestKSMSQLCli {
-  private MiniOzoneCluster cluster = null;
-  private StorageHandler storageHandler;
-  private UserArgs userArgs;
-  private OzoneConfiguration conf;
-  private SQLCLI cli;
-
-  private String userName = "userTest";
-  private String adminName = "adminTest";
-  private String volumeName0 = "volumeTest0";
-  private String volumeName1 = "volumeTest1";
-  private String bucketName0 = "bucketTest0";
-  private String bucketName1 = "bucketTest1";
-  private String bucketName2 = "bucketTest2";
-  private String keyName0 = "key0";
-  private String keyName1 = "key1";
-  private String keyName2 = "key2";
-  private String keyName3 = "key3";
-
-  @Parameterized.Parameters
-  public static Collection<Object[]> data() {
-    return Arrays.asList(new Object[][] {
-        {OzoneConfigKeys.OZONE_METADATA_STORE_IMPL_LEVELDB},
-        {OzoneConfigKeys.OZONE_METADATA_STORE_IMPL_ROCKSDB}
-    });
-  }
-
-  private String metaStoreType;
-
-  public TestKSMSQLCli(String type) {
-    metaStoreType = type;
-  }
-
-  /**
-   * Create a MiniDFSCluster for testing.
-   * <p>
-   * Ozone is made active by setting OZONE_ENABLED = true and
-   * OZONE_HANDLER_TYPE_KEY = "distributed"
-   *
-   * @throws IOException
-   */
-  @Before
-  public void setup() throws Exception {
-    conf = new OzoneConfiguration();
-    conf.set(OzoneConfigKeys.OZONE_HANDLER_TYPE_KEY,
-        OzoneConsts.OZONE_HANDLER_DISTRIBUTED);
-    cluster = MiniOzoneCluster.newBuilder(conf).build();
-    cluster.waitForClusterToBeReady();
-    storageHandler = new ObjectStoreHandler(conf).getStorageHandler();
-    userArgs = new UserArgs(null, OzoneUtils.getRequestID(),
-        null, null, null, null);
-    cluster.waitForClusterToBeReady();
-
-    VolumeArgs createVolumeArgs0 = new VolumeArgs(volumeName0, userArgs);
-    createVolumeArgs0.setUserName(userName);
-    createVolumeArgs0.setAdminName(adminName);
-    storageHandler.createVolume(createVolumeArgs0);
-    VolumeArgs createVolumeArgs1 = new VolumeArgs(volumeName1, userArgs);
-    createVolumeArgs1.setUserName(userName);
-    createVolumeArgs1.setAdminName(adminName);
-    storageHandler.createVolume(createVolumeArgs1);
-
-    BucketArgs bucketArgs0 = new BucketArgs(volumeName0, bucketName0, userArgs);
-    storageHandler.createBucket(bucketArgs0);
-    BucketArgs bucketArgs1 = new BucketArgs(volumeName1, bucketName1, userArgs);
-    storageHandler.createBucket(bucketArgs1);
-    BucketArgs bucketArgs2 = new BucketArgs(volumeName0, bucketName2, userArgs);
-    storageHandler.createBucket(bucketArgs2);
-
-    KeyArgs keyArgs0 =
-        new KeyArgs(volumeName0, bucketName0, keyName0, userArgs);
-    keyArgs0.setSize(100);
-    KeyArgs keyArgs1 =
-        new KeyArgs(volumeName1, bucketName1, keyName1, userArgs);
-    keyArgs1.setSize(200);
-    KeyArgs keyArgs2 =
-        new KeyArgs(volumeName0, bucketName2, keyName2, userArgs);
-    keyArgs2.setSize(300);
-    KeyArgs keyArgs3 =
-        new KeyArgs(volumeName0, bucketName2, keyName3, userArgs);
-    keyArgs3.setSize(400);
-
-    OutputStream stream = storageHandler.newKeyWriter(keyArgs0);
-    stream.close();
-    stream = storageHandler.newKeyWriter(keyArgs1);
-    stream.close();
-    stream = storageHandler.newKeyWriter(keyArgs2);
-    stream.close();
-    stream = storageHandler.newKeyWriter(keyArgs3);
-    stream.close();
-
-    cluster.getKeySpaceManager().stop();
-    cluster.getStorageContainerManager().stop();
-    conf.set(OzoneConfigKeys.OZONE_METADATA_STORE_IMPL, metaStoreType);
-    cli = new SQLCLI(conf);
-  }
-
-  @After
-  public void shutdown() {
-    if (cluster != null) {
-      cluster.shutdown();
-    }
-  }
-
-  @Test
-  public void testKSMDB() throws Exception {
-    String dbOutPath =  GenericTestUtils.getTempPath(
-        UUID.randomUUID() + "/out_sql.db");
-
-    String dbRootPath = conf.get(OzoneConfigKeys.OZONE_METADATA_DIRS);
-    String dbPath = dbRootPath + "/" + KSM_DB_NAME;
-    String[] args = {"-p", dbPath, "-o", dbOutPath};
-
-    cli.run(args);
-
-    Connection conn = connectDB(dbOutPath);
-    String sql = "SELECT * FROM volumeList";
-    ResultSet rs = executeQuery(conn, sql);
-    List<String> expectedValues =
-        new LinkedList<>(Arrays.asList(volumeName0, volumeName1));
-    while (rs.next()) {
-      String userNameRs = rs.getString("userName");
-      String volumeNameRs = rs.getString("volumeName");
-      assertEquals(userName,  userNameRs.substring(1));
-      assertTrue(expectedValues.remove(volumeNameRs));
-    }
-    assertEquals(0, expectedValues.size());
-
-    sql = "SELECT * FROM volumeInfo";
-    rs = executeQuery(conn, sql);
-    expectedValues =
-        new LinkedList<>(Arrays.asList(volumeName0, volumeName1));
-    while (rs.next()) {
-      String adName = rs.getString("adminName");
-      String ownerName = rs.getString("ownerName");
-      String volumeName = rs.getString("volumeName");
-      assertEquals(adminName, adName);
-      assertEquals(userName, ownerName);
-      assertTrue(expectedValues.remove(volumeName));
-    }
-    assertEquals(0, expectedValues.size());
-
-    sql = "SELECT * FROM aclInfo";
-    rs = executeQuery(conn, sql);
-    expectedValues =
-        new LinkedList<>(Arrays.asList(volumeName0, volumeName1));
-    while (rs.next()) {
-      String adName = rs.getString("adminName");
-      String ownerName = rs.getString("ownerName");
-      String volumeName = rs.getString("volumeName");
-      String type = rs.getString("type");
-      String uName = rs.getString("userName");
-      String rights = rs.getString("rights");
-      assertEquals(adminName, adName);
-      assertEquals(userName, ownerName);
-      assertEquals("USER", type);
-      assertEquals(userName, uName);
-      assertEquals("READ_WRITE", rights);
-      assertTrue(expectedValues.remove(volumeName));
-    }
-    assertEquals(0, expectedValues.size());
-
-    sql = "SELECT * FROM bucketInfo";
-    rs = executeQuery(conn, sql);
-    HashMap<String, String> expectedMap = new HashMap<>();
-    expectedMap.put(bucketName0, volumeName0);
-    expectedMap.put(bucketName2, volumeName0);
-    expectedMap.put(bucketName1, volumeName1);
-    while (rs.next()) {
-      String volumeName = rs.getString("volumeName");
-      String bucketName = rs.getString("bucketName");
-      boolean versionEnabled = rs.getBoolean("versionEnabled");
-      String storegeType = rs.getString("storageType");
-      assertEquals(volumeName, expectedMap.remove(bucketName));
-      assertFalse(versionEnabled);
-      assertEquals("DISK", storegeType);
-    }
-    assertEquals(0, expectedMap.size());
-
-    sql = "SELECT * FROM keyInfo";
-    rs = executeQuery(conn, sql);
-    HashMap<String, List<String>> expectedMap2 = new HashMap<>();
-    // no data written, data size will be 0
-    expectedMap2.put(keyName0,
-        Arrays.asList(volumeName0, bucketName0, "0"));
-    expectedMap2.put(keyName1,
-        Arrays.asList(volumeName1, bucketName1, "0"));
-    expectedMap2.put(keyName2,
-        Arrays.asList(volumeName0, bucketName2, "0"));
-    expectedMap2.put(keyName3,
-        Arrays.asList(volumeName0, bucketName2, "0"));
-    while (rs.next()) {
-      String volumeName = rs.getString("volumeName");
-      String bucketName = rs.getString("bucketName");
-      String keyName = rs.getString("keyName");
-      int dataSize = rs.getInt("dataSize");
-      List<String> vals = expectedMap2.remove(keyName);
-      assertNotNull(vals);
-      assertEquals(vals.get(0), volumeName);
-      assertEquals(vals.get(1), bucketName);
-      assertEquals(vals.get(2), Integer.toString(dataSize));
-    }
-    assertEquals(0, expectedMap2.size());
-
-    conn.close();
-    Files.delete(Paths.get(dbOutPath));
-  }
-
-  private ResultSet executeQuery(Connection conn, String sql)
-      throws SQLException {
-    Statement stmt = conn.createStatement();
-    return stmt.executeQuery(sql);
-  }
-
-  private Connection connectDB(String dbPath) throws Exception {
-    Class.forName("org.sqlite.JDBC");
-    String connectPath =
-        String.format("jdbc:sqlite:%s", dbPath);
-    return DriverManager.getConnection(connectPath);
-  }
-}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/061b1685/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/ksm/TestKeySpaceManager.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/ksm/TestKeySpaceManager.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/ksm/TestKeySpaceManager.java
deleted file mode 100644
index 8a16bfe..0000000
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/ksm/TestKeySpaceManager.java
+++ /dev/null
@@ -1,1350 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with this
- * work for additional information regarding copyright ownership.  The ASF
- * licenses this file to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations under
- * the License.
- */
-package org.apache.hadoop.ozone.ksm;
-
-
-import org.apache.commons.lang3.RandomStringUtils;
-import org.apache.hadoop.fs.StorageType;
-import org.apache.hadoop.hdds.scm.client.HddsClientUtils;
-import org.apache.hadoop.hdfs.DFSUtil;
-import org.apache.hadoop.hdfs.server.datanode.ObjectStoreHandler;
-import org.apache.hadoop.net.NetUtils;
-import org.apache.hadoop.ozone.MiniOzoneCluster;
-import org.apache.hadoop.ozone.OzoneConfigKeys;
-import org.apache.hadoop.hdds.conf.OzoneConfiguration;
-import org.apache.hadoop.ozone.OzoneConsts;
-import org.apache.hadoop.ozone.common.BlockGroup;
-import org.apache.hadoop.ozone.client.rest.OzoneException;
-import org.apache.hadoop.ozone.ksm.exceptions.KSMException;
-import org.apache.hadoop.hdds.scm.server.SCMStorage;
-import org.apache.hadoop.ozone.ksm.helpers.ServiceInfo;
-import org.apache.hadoop.ozone.protocol.proto
-    .KeySpaceManagerProtocolProtos.ServicePort;
-import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
-import org.apache.hadoop.ozone.web.handlers.BucketArgs;
-import org.apache.hadoop.ozone.web.handlers.KeyArgs;
-import org.apache.hadoop.ozone.web.handlers.UserArgs;
-import org.apache.hadoop.ozone.web.handlers.VolumeArgs;
-import org.apache.hadoop.ozone.web.interfaces.StorageHandler;
-import org.apache.hadoop.ozone.OzoneAcl;
-import org.apache.hadoop.ozone.web.request.OzoneQuota;
-import org.apache.hadoop.ozone.web.response.BucketInfo;
-import org.apache.hadoop.ozone.web.response.KeyInfo;
-import org.apache.hadoop.ozone.web.response.VolumeInfo;
-import org.apache.hadoop.ozone.web.utils.OzoneUtils;
-import org.apache.hadoop.hdds.scm.ScmConfigKeys;
-import org.apache.hadoop.hdds.scm.ScmInfo;
-import org.apache.hadoop.test.GenericTestUtils;
-import org.apache.hadoop.ozone.protocol.proto
-    .KeySpaceManagerProtocolProtos.Status;
-import org.apache.hadoop.ozone.web.handlers.ListArgs;
-import org.apache.hadoop.ozone.web.response.ListBuckets;
-import org.apache.hadoop.ozone.web.response.ListKeys;
-import org.apache.hadoop.ozone.web.response.ListVolumes;
-import org.apache.hadoop.util.Time;
-import org.apache.hadoop.utils.BackgroundService;
-import org.apache.hadoop.utils.MetadataKeyFilters;
-import org.apache.hadoop.utils.MetadataStore;
-import org.junit.AfterClass;
-import org.junit.Assert;
-import org.junit.BeforeClass;
-import org.junit.Rule;
-import org.junit.Test;
-import org.junit.rules.ExpectedException;
-
-
-import java.io.IOException;
-import java.io.InputStream;
-import java.io.OutputStream;
-import java.nio.file.Path;
-import java.nio.file.Paths;
-import java.net.InetSocketAddress;
-import java.text.ParseException;
-import java.util.HashSet;
-import java.util.LinkedList;
-import java.util.Map;
-import java.util.Random;
-import java.util.Set;
-import java.util.List;
-import java.util.UUID;
-import java.util.stream.Collectors;
-import java.util.stream.Stream;
-
-import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_OPEN_KEY_EXPIRE_THRESHOLD_SECONDS;
-import static org.apache.hadoop.ozone.OzoneConsts.DELETING_KEY_PREFIX;
-import static org.apache.hadoop.ozone.ksm.KSMConfigKeys.OZONE_KSM_ADDRESS_KEY;
-import static org.apache.hadoop.hdds.scm.ScmConfigKeys
-    .OZONE_SCM_CLIENT_ADDRESS_KEY;
-
-/**
- * Test Key Space Manager operation in distributed handler scenario.
- */
-public class TestKeySpaceManager {
-  private static MiniOzoneCluster cluster = null;
-  private static StorageHandler storageHandler;
-  private static UserArgs userArgs;
-  private static KSMMetrics ksmMetrics;
-  private static OzoneConfiguration conf;
-  private static String clusterId;
-  private static String scmId;
-  private static String ksmId;
-
-  @Rule
-  public ExpectedException exception = ExpectedException.none();
-
-  /**
-   * Create a MiniDFSCluster for testing.
-   * <p>
-   * Ozone is made active by setting OZONE_ENABLED = true and
-   * OZONE_HANDLER_TYPE_KEY = "distributed"
-   *
-   * @throws IOException
-   */
-  @BeforeClass
-  public static void init() throws Exception {
-    conf = new OzoneConfiguration();
-    clusterId = UUID.randomUUID().toString();
-    scmId = UUID.randomUUID().toString();
-    ksmId = UUID.randomUUID().toString();
-    conf.set(OzoneConfigKeys.OZONE_HANDLER_TYPE_KEY,
-        OzoneConsts.OZONE_HANDLER_DISTRIBUTED);
-    conf.setInt(OZONE_OPEN_KEY_EXPIRE_THRESHOLD_SECONDS, 2);
-    cluster =  MiniOzoneCluster.newBuilder(conf)
-        .setClusterId(clusterId)
-        .setScmId(scmId)
-        .setKsmId(ksmId)
-        .build();
-    cluster.waitForClusterToBeReady();
-    storageHandler = new ObjectStoreHandler(conf).getStorageHandler();
-    userArgs = new UserArgs(null, OzoneUtils.getRequestID(),
-        null, null, null, null);
-    ksmMetrics = cluster.getKeySpaceManager().getMetrics();
-  }
-
-  /**
-   * Shutdown MiniDFSCluster.
-   */
-  @AfterClass
-  public static void shutdown() {
-    if (cluster != null) {
-      cluster.shutdown();
-    }
-  }
-
-  // Create a volume and test its attribute after creating them
-  @Test(timeout = 60000)
-  public void testCreateVolume() throws IOException, OzoneException {
-    long volumeCreateFailCount = ksmMetrics.getNumVolumeCreateFails();
-    String userName = "user" + RandomStringUtils.randomNumeric(5);
-    String adminName = "admin" + RandomStringUtils.randomNumeric(5);
-    String volumeName = "volume" + RandomStringUtils.randomNumeric(5);
-
-    VolumeArgs createVolumeArgs = new VolumeArgs(volumeName, userArgs);
-    createVolumeArgs.setUserName(userName);
-    createVolumeArgs.setAdminName(adminName);
-    storageHandler.createVolume(createVolumeArgs);
-
-    VolumeArgs getVolumeArgs = new VolumeArgs(volumeName, userArgs);
-    VolumeInfo retVolumeinfo = storageHandler.getVolumeInfo(getVolumeArgs);
-    Assert.assertTrue(retVolumeinfo.getVolumeName().equals(volumeName));
-    Assert.assertTrue(retVolumeinfo.getOwner().getName().equals(userName));
-    Assert.assertEquals(volumeCreateFailCount,
-        ksmMetrics.getNumVolumeCreateFails());
-  }
-
-  // Create a volume and modify the volume owner and then test its attributes
-  @Test(timeout = 60000)
-  public void testChangeVolumeOwner() throws IOException, OzoneException {
-    long volumeCreateFailCount = ksmMetrics.getNumVolumeCreateFails();
-    long volumeInfoFailCount = ksmMetrics.getNumVolumeInfoFails();
-    String userName = "user" + RandomStringUtils.randomNumeric(5);
-    String adminName = "admin" + RandomStringUtils.randomNumeric(5);
-    String volumeName = "volume" + RandomStringUtils.randomNumeric(5);
-
-    VolumeArgs createVolumeArgs = new VolumeArgs(volumeName, userArgs);
-    createVolumeArgs.setUserName(userName);
-    createVolumeArgs.setAdminName(adminName);
-    storageHandler.createVolume(createVolumeArgs);
-
-    String newUserName = "user" + RandomStringUtils.randomNumeric(5);
-    createVolumeArgs.setUserName(newUserName);
-    storageHandler.setVolumeOwner(createVolumeArgs);
-
-    VolumeArgs getVolumeArgs = new VolumeArgs(volumeName, userArgs);
-    VolumeInfo retVolumeInfo = storageHandler.getVolumeInfo(getVolumeArgs);
-
-    Assert.assertTrue(retVolumeInfo.getVolumeName().equals(volumeName));
-    Assert.assertFalse(retVolumeInfo.getOwner().getName().equals(userName));
-    Assert.assertTrue(retVolumeInfo.getOwner().getName().equals(newUserName));
-    Assert.assertEquals(volumeCreateFailCount,
-        ksmMetrics.getNumVolumeCreateFails());
-    Assert.assertEquals(volumeInfoFailCount,
-        ksmMetrics.getNumVolumeInfoFails());
-  }
-
-  // Create a volume and modify the volume owner and then test its attributes
-  @Test(timeout = 60000)
-  public void testChangeVolumeQuota() throws IOException, OzoneException {
-    long numVolumeCreateFail = ksmMetrics.getNumVolumeCreateFails();
-    long numVolumeInfoFail = ksmMetrics.getNumVolumeInfoFails();
-    String userName = "user" + RandomStringUtils.randomNumeric(5);
-    String adminName = "admin" + RandomStringUtils.randomNumeric(5);
-    String volumeName = "volume" + RandomStringUtils.randomNumeric(5);
-    Random rand = new Random();
-
-    // Create a new volume with a quota
-    OzoneQuota createQuota =
-        new OzoneQuota(rand.nextInt(100), OzoneQuota.Units.GB);
-    VolumeArgs createVolumeArgs = new VolumeArgs(volumeName, userArgs);
-    createVolumeArgs.setUserName(userName);
-    createVolumeArgs.setAdminName(adminName);
-    createVolumeArgs.setQuota(createQuota);
-    storageHandler.createVolume(createVolumeArgs);
-
-    VolumeArgs getVolumeArgs = new VolumeArgs(volumeName, userArgs);
-    VolumeInfo retVolumeInfo = storageHandler.getVolumeInfo(getVolumeArgs);
-    Assert.assertEquals(createQuota.sizeInBytes(),
-        retVolumeInfo.getQuota().sizeInBytes());
-
-    // Set a new quota and test it
-    OzoneQuota setQuota =
-        new OzoneQuota(rand.nextInt(100), OzoneQuota.Units.GB);
-    createVolumeArgs.setQuota(setQuota);
-    storageHandler.setVolumeQuota(createVolumeArgs, false);
-    getVolumeArgs = new VolumeArgs(volumeName, userArgs);
-    retVolumeInfo = storageHandler.getVolumeInfo(getVolumeArgs);
-    Assert.assertEquals(setQuota.sizeInBytes(),
-        retVolumeInfo.getQuota().sizeInBytes());
-
-    // Remove the quota and test it again
-    storageHandler.setVolumeQuota(createVolumeArgs, true);
-    getVolumeArgs = new VolumeArgs(volumeName, userArgs);
-    retVolumeInfo = storageHandler.getVolumeInfo(getVolumeArgs);
-    Assert.assertEquals(OzoneConsts.MAX_QUOTA_IN_BYTES,
-        retVolumeInfo.getQuota().sizeInBytes());
-    Assert.assertEquals(numVolumeCreateFail,
-        ksmMetrics.getNumVolumeCreateFails());
-    Assert.assertEquals(numVolumeInfoFail,
-        ksmMetrics.getNumVolumeInfoFails());
-  }
-
-  // Create a volume and then delete it and then check for deletion
-  @Test(timeout = 60000)
-  public void testDeleteVolume() throws IOException, OzoneException {
-    long volumeCreateFailCount = ksmMetrics.getNumVolumeCreateFails();
-    String userName = "user" + RandomStringUtils.randomNumeric(5);
-    String adminName = "admin" + RandomStringUtils.randomNumeric(5);
-    String volumeName = "volume" + RandomStringUtils.randomNumeric(5);
-    String volumeName1 = volumeName + "_A";
-    String volumeName2 = volumeName + "_AA";
-    VolumeArgs volumeArgs = null;
-    VolumeInfo volumeInfo = null;
-
-    // Create 2 empty volumes with same prefix.
-    volumeArgs = new VolumeArgs(volumeName1, userArgs);
-    volumeArgs.setUserName(userName);
-    volumeArgs.setAdminName(adminName);
-    storageHandler.createVolume(volumeArgs);
-
-    volumeArgs = new VolumeArgs(volumeName2, userArgs);
-    volumeArgs.setUserName(userName);
-    volumeArgs.setAdminName(adminName);
-    storageHandler.createVolume(volumeArgs);
-
-    volumeArgs  = new VolumeArgs(volumeName1, userArgs);
-    volumeInfo = storageHandler.getVolumeInfo(volumeArgs);
-    Assert.assertTrue(volumeInfo.getVolumeName().equals(volumeName1));
-    Assert.assertTrue(volumeInfo.getOwner().getName().equals(userName));
-    Assert.assertEquals(volumeCreateFailCount,
-        ksmMetrics.getNumVolumeCreateFails());
-
-    // Volume with _A should be able to delete as it is empty.
-    storageHandler.deleteVolume(volumeArgs);
-
-    // Make sure volume with _AA suffix still exists.
-    volumeArgs = new VolumeArgs(volumeName2, userArgs);
-    volumeInfo = storageHandler.getVolumeInfo(volumeArgs);
-    Assert.assertTrue(volumeInfo.getVolumeName().equals(volumeName2));
-
-    // Make sure volume with _A suffix is successfully deleted.
-    exception.expect(IOException.class);
-    exception.expectMessage("Info Volume failed, error:VOLUME_NOT_FOUND");
-    volumeArgs = new VolumeArgs(volumeName1, userArgs);
-    storageHandler.getVolumeInfo(volumeArgs);
-  }
-
-  // Create a volume and a bucket inside the volume,
-  // then delete it and then check for deletion failure
-  @Test(timeout = 60000)
-  public void testFailedDeleteVolume() throws IOException, OzoneException {
-    long numVolumeCreateFails = ksmMetrics.getNumVolumeCreateFails();
-    String userName = "user" + RandomStringUtils.randomNumeric(5);
-    String adminName = "admin" + RandomStringUtils.randomNumeric(5);
-    String volumeName = "volume" + RandomStringUtils.randomNumeric(5);
-    String bucketName = "bucket" + RandomStringUtils.randomNumeric(5);
-
-    VolumeArgs createVolumeArgs = new VolumeArgs(volumeName, userArgs);
-    createVolumeArgs.setUserName(userName);
-    createVolumeArgs.setAdminName(adminName);
-    storageHandler.createVolume(createVolumeArgs);
-
-    VolumeArgs getVolumeArgs = new VolumeArgs(volumeName, userArgs);
-    VolumeInfo retVolumeInfo = storageHandler.getVolumeInfo(getVolumeArgs);
-    Assert.assertTrue(retVolumeInfo.getVolumeName().equals(volumeName));
-    Assert.assertTrue(retVolumeInfo.getOwner().getName().equals(userName));
-    Assert.assertEquals(numVolumeCreateFails,
-        ksmMetrics.getNumVolumeCreateFails());
-
-    BucketArgs bucketArgs = new BucketArgs(volumeName, bucketName, userArgs);
-    storageHandler.createBucket(bucketArgs);
-
-    try {
-      storageHandler.deleteVolume(createVolumeArgs);
-      Assert.fail("Expecting deletion should fail "
-          + "because volume is not empty");
-    } catch (IOException ex) {
-      Assert.assertEquals(ex.getMessage(),
-          "Delete Volume failed, error:VOLUME_NOT_EMPTY");
-    }
-    retVolumeInfo = storageHandler.getVolumeInfo(getVolumeArgs);
-    Assert.assertTrue(retVolumeInfo.getVolumeName().equals(volumeName));
-    Assert.assertTrue(retVolumeInfo.getOwner().getName().equals(userName));
-  }
-
-  // Create a volume and test Volume access for a different user
-  @Test(timeout = 60000)
-  public void testAccessVolume() throws IOException, OzoneException {
-    String userName = "user" + RandomStringUtils.randomNumeric(5);
-    String adminName = "admin" + RandomStringUtils.randomNumeric(5);
-    String volumeName = "volume" + RandomStringUtils.randomNumeric(5);
-    String[] groupName =
-        {"group" + RandomStringUtils.randomNumeric(5)};
-
-    VolumeArgs createVolumeArgs = new VolumeArgs(volumeName, userArgs);
-    createVolumeArgs.setUserName(userName);
-    createVolumeArgs.setAdminName(adminName);
-    createVolumeArgs.setGroups(groupName);
-    storageHandler.createVolume(createVolumeArgs);
-
-    OzoneAcl userAcl = new OzoneAcl(OzoneAcl.OzoneACLType.USER, userName,
-        OzoneAcl.OzoneACLRights.READ_WRITE);
-    Assert.assertTrue(storageHandler.checkVolumeAccess(volumeName, userAcl));
-    OzoneAcl group = new OzoneAcl(OzoneAcl.OzoneACLType.GROUP, groupName[0],
-        OzoneAcl.OzoneACLRights.READ);
-    Assert.assertTrue(storageHandler.checkVolumeAccess(volumeName, group));
-
-    // Create a different user and access should fail
-    String falseUserName = "user" + RandomStringUtils.randomNumeric(5);
-    OzoneAcl falseUserAcl =
-        new OzoneAcl(OzoneAcl.OzoneACLType.USER, falseUserName,
-            OzoneAcl.OzoneACLRights.READ_WRITE);
-    Assert.assertFalse(storageHandler
-        .checkVolumeAccess(volumeName, falseUserAcl));
-    // Checking access with user name and Group Type should fail
-    OzoneAcl falseGroupAcl = new OzoneAcl(OzoneAcl.OzoneACLType.GROUP, userName,
-        OzoneAcl.OzoneACLRights.READ_WRITE);
-    Assert.assertFalse(storageHandler
-        .checkVolumeAccess(volumeName, falseGroupAcl));
-
-    // Access for acl type world should also fail
-    OzoneAcl worldAcl =
-        new OzoneAcl(OzoneAcl.OzoneACLType.WORLD, "",
-            OzoneAcl.OzoneACLRights.READ);
-    Assert.assertFalse(storageHandler.checkVolumeAccess(volumeName, worldAcl));
-
-    Assert.assertEquals(0, ksmMetrics.getNumVolumeCheckAccessFails());
-    Assert.assertEquals(0, ksmMetrics.getNumVolumeCreateFails());
-  }
-
-  @Test(timeout = 60000)
-  public void testCreateBucket() throws IOException, OzoneException {
-    long numVolumeCreateFail = ksmMetrics.getNumVolumeCreateFails();
-    long numBucketCreateFail = ksmMetrics.getNumBucketCreateFails();
-    long numBucketInfoFail = ksmMetrics.getNumBucketInfoFails();
-    String userName = "user" + RandomStringUtils.randomNumeric(5);
-    String adminName = "admin" + RandomStringUtils.randomNumeric(5);
-    String volumeName = "volume" + RandomStringUtils.randomNumeric(5);
-    String bucketName = "bucket" + RandomStringUtils.randomNumeric(5);
-
-    VolumeArgs volumeArgs = new VolumeArgs(volumeName, userArgs);
-    volumeArgs.setUserName(userName);
-    volumeArgs.setAdminName(adminName);
-    storageHandler.createVolume(volumeArgs);
-
-    BucketArgs bucketArgs = new BucketArgs(volumeName, bucketName, userArgs);
-    storageHandler.createBucket(bucketArgs);
-
-    BucketArgs getBucketArgs = new BucketArgs(volumeName, bucketName,
-        userArgs);
-    BucketInfo bucketInfo = storageHandler.getBucketInfo(getBucketArgs);
-    Assert.assertTrue(bucketInfo.getVolumeName().equals(volumeName));
-    Assert.assertTrue(bucketInfo.getBucketName().equals(bucketName));
-    Assert.assertEquals(numVolumeCreateFail,
-        ksmMetrics.getNumVolumeCreateFails());
-    Assert.assertEquals(numBucketCreateFail,
-        ksmMetrics.getNumBucketCreateFails());
-    Assert.assertEquals(numBucketInfoFail,
-        ksmMetrics.getNumBucketInfoFails());
-  }
-
-  @Test(timeout = 60000)
-  public void testDeleteBucket() throws IOException, OzoneException {
-    String userName = "user" + RandomStringUtils.randomNumeric(5);
-    String adminName = "admin" + RandomStringUtils.randomNumeric(5);
-    String volumeName = "volume" + RandomStringUtils.randomNumeric(5);
-    String bucketName = "bucket" + RandomStringUtils.randomNumeric(5);
-    VolumeArgs volumeArgs = new VolumeArgs(volumeName, userArgs);
-    volumeArgs.setUserName(userName);
-    volumeArgs.setAdminName(adminName);
-    storageHandler.createVolume(volumeArgs);
-    BucketArgs bucketArgs = new BucketArgs(volumeName, bucketName, userArgs);
-    storageHandler.createBucket(bucketArgs);
-    BucketArgs getBucketArgs = new BucketArgs(volumeName, bucketName,
-        userArgs);
-    BucketInfo bucketInfo = storageHandler.getBucketInfo(getBucketArgs);
-    Assert.assertTrue(bucketInfo.getVolumeName().equals(volumeName));
-    Assert.assertTrue(bucketInfo.getBucketName().equals(bucketName));
-    storageHandler.deleteBucket(bucketArgs);
-    exception.expect(IOException.class);
-    exception.expectMessage("Info Bucket failed, error: BUCKET_NOT_FOUND");
-    storageHandler.getBucketInfo(getBucketArgs);
-  }
-
-  @Test(timeout = 60000)
-  public void testDeleteNonExistingBucket() throws IOException, OzoneException {
-    String userName = "user" + RandomStringUtils.randomNumeric(5);
-    String adminName = "admin" + RandomStringUtils.randomNumeric(5);
-    String volumeName = "volume" + RandomStringUtils.randomNumeric(5);
-    String bucketName = "bucket" + RandomStringUtils.randomNumeric(5);
-    VolumeArgs volumeArgs = new VolumeArgs(volumeName, userArgs);
-    volumeArgs.setUserName(userName);
-    volumeArgs.setAdminName(adminName);
-    storageHandler.createVolume(volumeArgs);
-    BucketArgs bucketArgs = new BucketArgs(volumeName, bucketName, userArgs);
-    storageHandler.createBucket(bucketArgs);
-    BucketArgs getBucketArgs = new BucketArgs(volumeName, bucketName,
-        userArgs);
-    BucketInfo bucketInfo = storageHandler.getBucketInfo(getBucketArgs);
-    Assert.assertTrue(bucketInfo.getVolumeName().equals(volumeName));
-    Assert.assertTrue(bucketInfo.getBucketName().equals(bucketName));
-    BucketArgs newBucketArgs = new BucketArgs(
-        volumeName, bucketName + "_invalid", userArgs);
-    exception.expect(IOException.class);
-    exception.expectMessage("Delete Bucket failed, error:BUCKET_NOT_FOUND");
-    storageHandler.deleteBucket(newBucketArgs);
-  }
-
-
-  @Test(timeout = 60000)
-  public void testDeleteNonEmptyBucket() throws IOException, OzoneException {
-    String userName = "user" + RandomStringUtils.randomNumeric(5);
-    String adminName = "admin" + RandomStringUtils.randomNumeric(5);
-    String volumeName = "volume" + RandomStringUtils.randomNumeric(5);
-    String bucketName = "bucket" + RandomStringUtils.randomNumeric(5);
-    String keyName = "key" + RandomStringUtils.randomNumeric(5);
-    VolumeArgs volumeArgs = new VolumeArgs(volumeName, userArgs);
-    volumeArgs.setUserName(userName);
-    volumeArgs.setAdminName(adminName);
-    storageHandler.createVolume(volumeArgs);
-    BucketArgs bucketArgs = new BucketArgs(volumeName, bucketName, userArgs);
-    storageHandler.createBucket(bucketArgs);
-    BucketArgs getBucketArgs = new BucketArgs(volumeName, bucketName,
-        userArgs);
-    BucketInfo bucketInfo = storageHandler.getBucketInfo(getBucketArgs);
-    Assert.assertTrue(bucketInfo.getVolumeName().equals(volumeName));
-    Assert.assertTrue(bucketInfo.getBucketName().equals(bucketName));
-    String dataString = RandomStringUtils.randomAscii(100);
-    KeyArgs keyArgs = new KeyArgs(volumeName, bucketName, keyName, userArgs);
-    keyArgs.setSize(100);
-    try (OutputStream stream = storageHandler.newKeyWriter(keyArgs)) {
-      stream.write(dataString.getBytes());
-    }
-    exception.expect(IOException.class);
-    exception.expectMessage("Delete Bucket failed, error:BUCKET_NOT_EMPTY");
-    storageHandler.deleteBucket(bucketArgs);
-  }
-
-  /**
-   * Basic test of both putKey and getKey from KSM, as one can not be tested
-   * without the other.
-   *
-   * @throws IOException
-   * @throws OzoneException
-   */
-  @Test
-  public void testGetKeyWriterReader() throws IOException, OzoneException {
-    String userName = "user" + RandomStringUtils.randomNumeric(5);
-    String adminName = "admin" + RandomStringUtils.randomNumeric(5);
-    String volumeName = "volume" + RandomStringUtils.randomNumeric(5);
-    String bucketName = "bucket" + RandomStringUtils.randomNumeric(5);
-    String keyName = "key" + RandomStringUtils.randomNumeric(5);
-    long numKeyAllocates = ksmMetrics.getNumKeyAllocates();
-    long numKeyLookups = ksmMetrics.getNumKeyLookups();
-
-    VolumeArgs createVolumeArgs = new VolumeArgs(volumeName, userArgs);
-    createVolumeArgs.setUserName(userName);
-    createVolumeArgs.setAdminName(adminName);
-    storageHandler.createVolume(createVolumeArgs);
-
-    BucketArgs bucketArgs = new BucketArgs(bucketName, createVolumeArgs);
-    bucketArgs.setAddAcls(new LinkedList<>());
-    bucketArgs.setRemoveAcls(new LinkedList<>());
-    bucketArgs.setStorageType(StorageType.DISK);
-    storageHandler.createBucket(bucketArgs);
-
-    String dataString = RandomStringUtils.randomAscii(100);
-    KeyArgs keyArgs = new KeyArgs(volumeName, bucketName, keyName, userArgs);
-    keyArgs.setSize(100);
-    try (OutputStream stream = storageHandler.newKeyWriter(keyArgs)) {
-      stream.write(dataString.getBytes());
-    }
-    Assert.assertEquals(1 + numKeyAllocates, ksmMetrics.getNumKeyAllocates());
-
-    byte[] data = new byte[dataString.length()];
-    try (InputStream in = storageHandler.newKeyReader(keyArgs)) {
-      in.read(data);
-    }
-    Assert.assertEquals(dataString, DFSUtil.bytes2String(data));
-    Assert.assertEquals(1 + numKeyLookups, ksmMetrics.getNumKeyLookups());
-  }
-
-  /**
-   * Test write the same key twice, the second write should fail, as currently
-   * key overwrite is not supported.
-   *
-   * @throws IOException
-   * @throws OzoneException
-   */
-  @Test
-  public void testKeyOverwrite() throws IOException, OzoneException {
-    String userName = "user" + RandomStringUtils.randomNumeric(5);
-    String adminName = "admin" + RandomStringUtils.randomNumeric(5);
-    String volumeName = "volume" + RandomStringUtils.randomNumeric(5);
-    String bucketName = "bucket" + RandomStringUtils.randomNumeric(5);
-    String keyName = "key" + RandomStringUtils.randomNumeric(5);
-    long numKeyAllocateFails = ksmMetrics.getNumKeyAllocateFails();
-
-    VolumeArgs createVolumeArgs = new VolumeArgs(volumeName, userArgs);
-    createVolumeArgs.setUserName(userName);
-    createVolumeArgs.setAdminName(adminName);
-    storageHandler.createVolume(createVolumeArgs);
-
-    BucketArgs bucketArgs = new BucketArgs(bucketName, createVolumeArgs);
-    bucketArgs.setAddAcls(new LinkedList<>());
-    bucketArgs.setRemoveAcls(new LinkedList<>());
-    bucketArgs.setStorageType(StorageType.DISK);
-    storageHandler.createBucket(bucketArgs);
-
-    KeyArgs keyArgs = new KeyArgs(volumeName, bucketName, keyName, userArgs);
-    keyArgs.setSize(100);
-    String dataString = RandomStringUtils.randomAscii(100);
-    try (OutputStream stream = storageHandler.newKeyWriter(keyArgs)) {
-      stream.write(dataString.getBytes());
-    }
-
-    // We allow the key overwrite to be successful. Please note : Till
-    // HDFS-11922 is fixed this causes a data block leak on the data node side.
-    // That is this overwrite only overwrites the keys on KSM. We need to
-    // garbage collect those blocks from datanode.
-    KeyArgs keyArgs2 = new KeyArgs(volumeName, bucketName, keyName, userArgs);
-    storageHandler.newKeyWriter(keyArgs2);
-    Assert
-        .assertEquals(numKeyAllocateFails, ksmMetrics.getNumKeyAllocateFails());
-  }
-
-  /**
-   * Test get a non-exiting key.
-   *
-   * @throws IOException
-   * @throws OzoneException
-   */
-  @Test
-  public void testGetNonExistKey() throws IOException, OzoneException {
-    String userName = "user" + RandomStringUtils.randomNumeric(5);
-    String adminName = "admin" + RandomStringUtils.randomNumeric(5);
-    String volumeName = "volume" + RandomStringUtils.randomNumeric(5);
-    String bucketName = "bucket" + RandomStringUtils.randomNumeric(5);
-    String keyName = "key" + RandomStringUtils.randomNumeric(5);
-    long numKeyLookupFails = ksmMetrics.getNumKeyLookupFails();
-
-    VolumeArgs createVolumeArgs = new VolumeArgs(volumeName, userArgs);
-    createVolumeArgs.setUserName(userName);
-    createVolumeArgs.setAdminName(adminName);
-    storageHandler.createVolume(createVolumeArgs);
-
-    BucketArgs bucketArgs = new BucketArgs(bucketName, createVolumeArgs);
-    bucketArgs.setAddAcls(new LinkedList<>());
-    bucketArgs.setRemoveAcls(new LinkedList<>());
-    bucketArgs.setStorageType(StorageType.DISK);
-    storageHandler.createBucket(bucketArgs);
-
-    KeyArgs keyArgs = new KeyArgs(volumeName, bucketName, keyName, userArgs);
-    // try to get the key, should fail as it hasn't been created
-    exception.expect(IOException.class);
-    exception.expectMessage("KEY_NOT_FOUND");
-    storageHandler.newKeyReader(keyArgs);
-    Assert.assertEquals(1 + numKeyLookupFails,
-        ksmMetrics.getNumKeyLookupFails());
-  }
-
-  /**
-   * Test delete keys for ksm.
-   *
-   * @throws IOException
-   * @throws OzoneException
-   */
-  @Test
-  public void testDeleteKey() throws IOException, OzoneException {
-    String userName = "user" + RandomStringUtils.randomNumeric(5);
-    String adminName = "admin" + RandomStringUtils.randomNumeric(5);
-    String volumeName = "volume" + RandomStringUtils.randomNumeric(5);
-    String bucketName = "bucket" + RandomStringUtils.randomNumeric(5);
-    String keyName = "key" + RandomStringUtils.randomNumeric(5);
-    long numKeyDeletes = ksmMetrics.getNumKeyDeletes();
-    long numKeyDeleteFails = ksmMetrics.getNumKeyDeletesFails();
-
-    VolumeArgs createVolumeArgs = new VolumeArgs(volumeName, userArgs);
-    createVolumeArgs.setUserName(userName);
-    createVolumeArgs.setAdminName(adminName);
-    storageHandler.createVolume(createVolumeArgs);
-
-    BucketArgs bucketArgs = new BucketArgs(bucketName, createVolumeArgs);
-    storageHandler.createBucket(bucketArgs);
-
-    KeyArgs keyArgs = new KeyArgs(keyName, bucketArgs);
-    keyArgs.setSize(100);
-    String dataString = RandomStringUtils.randomAscii(100);
-    try (OutputStream stream = storageHandler.newKeyWriter(keyArgs)) {
-      stream.write(dataString.getBytes());
-    }
-
-    storageHandler.deleteKey(keyArgs);
-    Assert.assertEquals(1 + numKeyDeletes, ksmMetrics.getNumKeyDeletes());
-
-    // Make sure the deleted key has been renamed.
-    MetadataStore store = cluster.getKeySpaceManager().
-        getMetadataManager().getStore();
-    List<Map.Entry<byte[], byte[]>> list = store.getRangeKVs(null, 10,
-        new MetadataKeyFilters.KeyPrefixFilter()
-            .addFilter(DELETING_KEY_PREFIX));
-    Assert.assertEquals(1, list.size());
-
-    // Delete the key again to test deleting non-existing key.
-    try {
-      storageHandler.deleteKey(keyArgs);
-      Assert.fail("Expected exception not thrown.");
-    } catch (IOException ioe) {
-      Assert.assertTrue(ioe.getMessage().contains("KEY_NOT_FOUND"));
-    }
-    Assert.assertEquals(1 + numKeyDeleteFails,
-        ksmMetrics.getNumKeyDeletesFails());
-  }
-
-  /**
-   * Test rename key for ksm.
-   *
-   * @throws IOException
-   * @throws OzoneException
-   */
-  @Test
-  public void testRenameKey() throws IOException, OzoneException {
-    String userName = "user" + RandomStringUtils.randomNumeric(5);
-    String adminName = "admin" + RandomStringUtils.randomNumeric(5);
-    String volumeName = "volume" + RandomStringUtils.randomNumeric(5);
-    String bucketName = "bucket" + RandomStringUtils.randomNumeric(5);
-    String keyName = "key" + RandomStringUtils.randomNumeric(5);
-    long numKeyRenames = ksmMetrics.getNumKeyRenames();
-    long numKeyRenameFails = ksmMetrics.getNumKeyRenameFails();
-    int testRenameFails = 0;
-    int testRenames = 0;
-    IOException ioe = null;
-
-    VolumeArgs createVolumeArgs = new VolumeArgs(volumeName, userArgs);
-    createVolumeArgs.setUserName(userName);
-    createVolumeArgs.setAdminName(adminName);
-    storageHandler.createVolume(createVolumeArgs);
-
-    BucketArgs bucketArgs = new BucketArgs(bucketName, createVolumeArgs);
-    storageHandler.createBucket(bucketArgs);
-
-    KeyArgs keyArgs = new KeyArgs(keyName, bucketArgs);
-    keyArgs.setSize(100);
-    String toKeyName = "key" + RandomStringUtils.randomNumeric(5);
-
-    // Rename from non-existent key should fail
-    try {
-      testRenames++;
-      storageHandler.renameKey(keyArgs, toKeyName);
-    } catch (IOException e) {
-      testRenameFails++;
-      ioe = e;
-    }
-    Assert.assertTrue(ioe.getMessage().contains("Rename key failed, error"));
-
-    // Write the contents of the key to be renamed
-    String dataString = RandomStringUtils.randomAscii(100);
-    try (OutputStream stream = storageHandler.newKeyWriter(keyArgs)) {
-      stream.write(dataString.getBytes());
-    }
-
-    // Rename the key
-    toKeyName = "key" + RandomStringUtils.randomNumeric(5);
-    testRenames++;
-    storageHandler.renameKey(keyArgs, toKeyName);
-    Assert.assertEquals(numKeyRenames + testRenames,
-        ksmMetrics.getNumKeyRenames());
-    Assert.assertEquals(numKeyRenameFails + testRenameFails,
-        ksmMetrics.getNumKeyRenameFails());
-
-    // Try to get the key, should fail as it has been renamed
-    try {
-      storageHandler.newKeyReader(keyArgs);
-    } catch (IOException e) {
-      ioe = e;
-    }
-    Assert.assertTrue(ioe.getMessage().contains("KEY_NOT_FOUND"));
-
-    // Verify the contents of the renamed key
-    keyArgs = new KeyArgs(toKeyName, bucketArgs);
-    InputStream in = storageHandler.newKeyReader(keyArgs);
-    byte[] b = new byte[dataString.getBytes().length];
-    in.read(b);
-    Assert.assertEquals(new String(b), dataString);
-
-    // Rewrite the renamed key. Rename to key which already exists should fail.
-    keyArgs = new KeyArgs(keyName, bucketArgs);
-    keyArgs.setSize(100);
-    dataString = RandomStringUtils.randomAscii(100);
-    try (OutputStream stream = storageHandler.newKeyWriter(keyArgs)) {
-      stream.write(dataString.getBytes());
-      stream.close();
-      testRenames++;
-      storageHandler.renameKey(keyArgs, toKeyName);
-    } catch (IOException e) {
-      testRenameFails++;
-      ioe = e;
-    }
-    Assert.assertTrue(ioe.getMessage().contains("Rename key failed, error"));
-
-    // Rename to empty string should fail
-    toKeyName = "";
-    try {
-      testRenames++;
-      storageHandler.renameKey(keyArgs, toKeyName);
-    } catch (IOException e) {
-      testRenameFails++;
-      ioe = e;
-    }
-    Assert.assertTrue(ioe.getMessage().contains("Rename key failed, error"));
-
-    // Rename from empty string should fail
-    keyArgs = new KeyArgs("", bucketArgs);
-    toKeyName = "key" + RandomStringUtils.randomNumeric(5);
-    try {
-      testRenames++;
-      storageHandler.renameKey(keyArgs, toKeyName);
-    } catch (IOException e) {
-      testRenameFails++;
-      ioe = e;
-    }
-    Assert.assertTrue(ioe.getMessage().contains("Rename key failed, error"));
-
-    Assert.assertEquals(numKeyRenames + testRenames,
-        ksmMetrics.getNumKeyRenames());
-    Assert.assertEquals(numKeyRenameFails + testRenameFails,
-        ksmMetrics.getNumKeyRenameFails());
-  }
-
-  @Test(timeout = 60000)
-  public void testListBuckets() throws IOException, OzoneException {
-    ListBuckets result = null;
-    ListArgs listBucketArgs = null;
-
-    // Create volume - volA.
-    final String volAname = "volA";
-    VolumeArgs volAArgs = new VolumeArgs(volAname, userArgs);
-    volAArgs.setUserName("userA");
-    volAArgs.setAdminName("adminA");
-    storageHandler.createVolume(volAArgs);
-
-    // Create 20 buckets in volA for tests.
-    for (int i=0; i<10; i++) {
-      // Create "/volA/aBucket_0" to "/volA/aBucket_9" buckets in volA volume.
-      BucketArgs aBuckets = new BucketArgs(volAname,
-          "aBucket_" + i, userArgs);
-      if(i % 3 == 0) {
-        aBuckets.setStorageType(StorageType.ARCHIVE);
-      } else {
-        aBuckets.setStorageType(StorageType.DISK);
-      }
-      storageHandler.createBucket(aBuckets);
-
-      // Create "/volA/bBucket_0" to "/volA/bBucket_9" buckets in volA volume.
-      BucketArgs bBuckets = new BucketArgs(volAname,
-          "bBucket_" + i, userArgs);
-      if(i % 3 == 0) {
-        bBuckets.setStorageType(StorageType.RAM_DISK);
-      } else {
-        bBuckets.setStorageType(StorageType.SSD);
-      }
-      storageHandler.createBucket(bBuckets);
-    }
-
-    VolumeArgs volArgs = new VolumeArgs(volAname, userArgs);
-
-    // List all buckets in volA.
-    listBucketArgs = new ListArgs(volArgs, null, 100, null);
-    result = storageHandler.listBuckets(listBucketArgs);
-    Assert.assertEquals(20, result.getBuckets().size());
-    List<BucketInfo> archiveBuckets = result.getBuckets().stream()
-        .filter(item -> item.getStorageType() == StorageType.ARCHIVE)
-        .collect(Collectors.toList());
-    Assert.assertEquals(4, archiveBuckets.size());
-
-    // List buckets with prefix "aBucket".
-    listBucketArgs = new ListArgs(volArgs, "aBucket", 100, null);
-    result = storageHandler.listBuckets(listBucketArgs);
-    Assert.assertEquals(10, result.getBuckets().size());
-    Assert.assertTrue(result.getBuckets().stream()
-        .allMatch(entry -> entry.getBucketName().startsWith("aBucket")));
-
-    // List a certain number of buckets.
-    listBucketArgs = new ListArgs(volArgs, null, 3, null);
-    result = storageHandler.listBuckets(listBucketArgs);
-    Assert.assertEquals(3, result.getBuckets().size());
-    Assert.assertEquals("aBucket_0",
-        result.getBuckets().get(0).getBucketName());
-    Assert.assertEquals("aBucket_1",
-        result.getBuckets().get(1).getBucketName());
-    Assert.assertEquals("aBucket_2",
-        result.getBuckets().get(2).getBucketName());
-
-    // List a certain number of buckets from the startKey.
-    listBucketArgs = new ListArgs(volArgs, null, 2, "bBucket_3");
-    result = storageHandler.listBuckets(listBucketArgs);
-    Assert.assertEquals(2, result.getBuckets().size());
-    Assert.assertEquals("bBucket_4",
-        result.getBuckets().get(0).getBucketName());
-    Assert.assertEquals("bBucket_5",
-        result.getBuckets().get(1).getBucketName());
-
-    // Provide an invalid bucket name as start key.
-    listBucketArgs = new ListArgs(volArgs, null, 100, "unknown_bucket_name");
-    ListBuckets buckets = storageHandler.listBuckets(listBucketArgs);
-    Assert.assertEquals(buckets.getBuckets().size(), 0);
-
-    // Use all arguments.
-    listBucketArgs = new ListArgs(volArgs, "b", 5, "bBucket_7");
-    result = storageHandler.listBuckets(listBucketArgs);
-    Assert.assertEquals(2, result.getBuckets().size());
-    Assert.assertEquals("bBucket_8",
-        result.getBuckets().get(0).getBucketName());
-    Assert.assertEquals("bBucket_9",
-        result.getBuckets().get(1).getBucketName());
-
-    // Provide an invalid maxKeys argument.
-    try {
-      listBucketArgs = new ListArgs(volArgs, null, -1, null);
-      storageHandler.listBuckets(listBucketArgs);
-      Assert.fail("Expecting an error when the given"
-          + " maxKeys argument is invalid.");
-    } catch (Exception e) {
-      Assert.assertTrue(e.getMessage()
-          .contains(String.format("the value must be in range (0, %d]",
-              OzoneConsts.MAX_LISTBUCKETS_SIZE)));
-    }
-
-    // Provide an invalid volume name.
-    VolumeArgs invalidVolArgs = new VolumeArgs("invalid_name", userArgs);
-    try {
-      listBucketArgs = new ListArgs(invalidVolArgs, null, 100, null);
-      storageHandler.listBuckets(listBucketArgs);
-      Assert.fail("Expecting an error when the given volume name is invalid.");
-    } catch (Exception e) {
-      Assert.assertTrue(e instanceof IOException);
-      Assert.assertTrue(e.getMessage()
-          .contains(Status.VOLUME_NOT_FOUND.name()));
-    }
-  }
-
-  /**
-   * Test list keys.
-   * @throws IOException
-   * @throws OzoneException
-   */
-  @Test
-  public void testListKeys() throws IOException, OzoneException {
-    ListKeys result = null;
-    ListArgs listKeyArgs = null;
-
-    String userName = "user" + RandomStringUtils.randomNumeric(5);
-    String adminName = "admin" + RandomStringUtils.randomNumeric(5);
-    String volumeName = "volume" + RandomStringUtils.randomNumeric(5);
-    String bucketName = "bucket" + RandomStringUtils.randomNumeric(5);
-
-    VolumeArgs createVolumeArgs = new VolumeArgs(volumeName, userArgs);
-    createVolumeArgs.setUserName(userName);
-    createVolumeArgs.setAdminName(adminName);
-    storageHandler.createVolume(createVolumeArgs);
-
-    BucketArgs bucketArgs = new BucketArgs(bucketName, createVolumeArgs);
-    bucketArgs.setAddAcls(new LinkedList<>());
-    bucketArgs.setRemoveAcls(new LinkedList<>());
-    bucketArgs.setStorageType(StorageType.DISK);
-    storageHandler.createBucket(bucketArgs);
-
-    // Write 20 keys in bucket.
-    int numKeys = 20;
-    String keyName = "Key";
-    KeyArgs keyArgs = null;
-    for (int i = 0; i < numKeys; i++) {
-      if (i % 2 == 0) {
-        // Create /volume/bucket/aKey[0,2,4,...,18] in bucket.
-        keyArgs = new KeyArgs("a" + keyName + i, bucketArgs);
-      } else {
-        // Create /volume/bucket/bKey[1,3,5,...,19] in bucket.
-        keyArgs = new KeyArgs("b" + keyName + i, bucketArgs);
-      }
-      keyArgs.setSize(4096);
-
-      // Just for testing list keys call, so no need to write real data.
-      OutputStream stream = storageHandler.newKeyWriter(keyArgs);
-      stream.close();
-    }
-
-    // List all keys in bucket.
-    bucketArgs = new BucketArgs(volumeName, bucketName, userArgs);
-    listKeyArgs = new ListArgs(bucketArgs, null, 100, null);
-    result = storageHandler.listKeys(listKeyArgs);
-    Assert.assertEquals(numKeys, result.getKeyList().size());
-
-    // List keys with prefix "aKey".
-    listKeyArgs = new ListArgs(bucketArgs, "aKey", 100, null);
-    result = storageHandler.listKeys(listKeyArgs);
-    Assert.assertEquals(numKeys / 2, result.getKeyList().size());
-    Assert.assertTrue(result.getKeyList().stream()
-        .allMatch(entry -> entry.getKeyName().startsWith("aKey")));
-
-    // List a certain number of keys.
-    listKeyArgs = new ListArgs(bucketArgs, null, 3, null);
-    result = storageHandler.listKeys(listKeyArgs);
-    Assert.assertEquals(3, result.getKeyList().size());
-    Assert.assertEquals("aKey0",
-        result.getKeyList().get(0).getKeyName());
-    Assert.assertEquals("aKey10",
-        result.getKeyList().get(1).getKeyName());
-    Assert.assertEquals("aKey12",
-        result.getKeyList().get(2).getKeyName());
-
-    // List a certain number of keys from the startKey.
-    listKeyArgs = new ListArgs(bucketArgs, null, 2, "bKey1");
-    result = storageHandler.listKeys(listKeyArgs);
-    Assert.assertEquals(2, result.getKeyList().size());
-    Assert.assertEquals("bKey11",
-        result.getKeyList().get(0).getKeyName());
-    Assert.assertEquals("bKey13",
-        result.getKeyList().get(1).getKeyName());
-
-    // Provide an invalid key name as start key.
-    listKeyArgs = new ListArgs(bucketArgs, null, 100, "invalid_start_key");
-    ListKeys keys = storageHandler.listKeys(listKeyArgs);
-    Assert.assertEquals(keys.getKeyList().size(), 0);
-
-    // Provide an invalid maxKeys argument.
-    try {
-      listKeyArgs = new ListArgs(bucketArgs, null, -1, null);
-      storageHandler.listBuckets(listKeyArgs);
-      Assert.fail("Expecting an error when the given"
-          + " maxKeys argument is invalid.");
-    } catch (Exception e) {
-      GenericTestUtils.assertExceptionContains(
-          String.format("the value must be in range (0, %d]",
-              OzoneConsts.MAX_LISTKEYS_SIZE), e);
-    }
-
-    // Provide an invalid bucket name.
-    bucketArgs = new BucketArgs("invalid_bucket", createVolumeArgs);
-    try {
-      listKeyArgs = new ListArgs(bucketArgs, null, numKeys, null);
-      storageHandler.listKeys(listKeyArgs);
-      Assert.fail(
-          "Expecting an error when the given bucket name is invalid.");
-    } catch (IOException e) {
-      GenericTestUtils.assertExceptionContains(
-          Status.BUCKET_NOT_FOUND.name(), e);
-    }
-  }
-
-  @Test
-  public void testListVolumes() throws IOException, OzoneException {
-
-    String user0 = "testListVolumes-user-0";
-    String user1 = "testListVolumes-user-1";
-    String adminUser = "testListVolumes-admin";
-    ListArgs listVolumeArgs;
-    ListVolumes volumes;
-
-    // Create 10 volumes by user0 and user1
-    String[] user0vols = new String[10];
-    String[] user1vols = new String[10];
-    for (int i =0; i<10; i++) {
-      VolumeArgs createVolumeArgs;
-      String user0VolName = "Vol-" + user0 + "-" + i;
-      user0vols[i] = user0VolName;
-      createVolumeArgs = new VolumeArgs(user0VolName, userArgs);
-      createVolumeArgs.setUserName(user0);
-      createVolumeArgs.setAdminName(adminUser);
-      createVolumeArgs.setQuota(new OzoneQuota(i, OzoneQuota.Units.GB));
-      storageHandler.createVolume(createVolumeArgs);
-
-      String user1VolName = "Vol-" + user1 + "-" + i;
-      user1vols[i] = user1VolName;
-      createVolumeArgs = new VolumeArgs(user1VolName, userArgs);
-      createVolumeArgs.setUserName(user1);
-      createVolumeArgs.setAdminName(adminUser);
-      createVolumeArgs.setQuota(new OzoneQuota(i, OzoneQuota.Units.GB));
-      storageHandler.createVolume(createVolumeArgs);
-    }
-
-    // Test list all volumes
-    UserArgs userArgs0 = new UserArgs(user0, OzoneUtils.getRequestID(),
-        null, null, null, null);
-    listVolumeArgs = new ListArgs(userArgs0, "Vol-testListVolumes", 100, null);
-    listVolumeArgs.setRootScan(true);
-    volumes = storageHandler.listVolumes(listVolumeArgs);
-    Assert.assertEquals(20, volumes.getVolumes().size());
-
-    // Test list all volumes belongs to an user
-    listVolumeArgs = new ListArgs(userArgs0, null, 100, null);
-    listVolumeArgs.setRootScan(false);
-    volumes = storageHandler.listVolumes(listVolumeArgs);
-    Assert.assertEquals(10, volumes.getVolumes().size());
-
-    // Test prefix
-    listVolumeArgs = new ListArgs(userArgs0,
-        "Vol-" + user0 + "-3", 100, null);
-    volumes = storageHandler.listVolumes(listVolumeArgs);
-    Assert.assertEquals(1, volumes.getVolumes().size());
-    Assert.assertEquals(user0vols[3],
-        volumes.getVolumes().get(0).getVolumeName());
-    Assert.assertEquals(user0,
-        volumes.getVolumes().get(0).getOwner().getName());
-
-    // Test list volumes by user
-    UserArgs userArgs1 = new UserArgs(user1, OzoneUtils.getRequestID(),
-        null, null, null, null);
-    listVolumeArgs = new ListArgs(userArgs1, null, 100, null);
-    listVolumeArgs.setRootScan(false);
-    volumes = storageHandler.listVolumes(listVolumeArgs);
-    Assert.assertEquals(10, volumes.getVolumes().size());
-    Assert.assertEquals(user1,
-        volumes.getVolumes().get(3).getOwner().getName());
-
-    // Make sure all available fields are returned
-    final String user0vol4 = "Vol-" + user0 + "-4";
-    final String user0vol5 = "Vol-" + user0 + "-5";
-    listVolumeArgs = new ListArgs(userArgs0, null, 1, user0vol4);
-    listVolumeArgs.setRootScan(false);
-    volumes = storageHandler.listVolumes(listVolumeArgs);
-    Assert.assertEquals(1, volumes.getVolumes().size());
-    Assert.assertEquals(user0,
-        volumes.getVolumes().get(0).getOwner().getName());
-    Assert.assertEquals(user0vol5,
-        volumes.getVolumes().get(0).getVolumeName());
-    Assert.assertEquals(5,
-        volumes.getVolumes().get(0).getQuota().getSize());
-    Assert.assertEquals(OzoneQuota.Units.GB,
-        volumes.getVolumes().get(0).getQuota().getUnit());
-
-    // User doesn't have volumes
-    UserArgs userArgsX = new UserArgs("unknwonUser", OzoneUtils.getRequestID(),
-        null, null, null, null);
-    listVolumeArgs = new ListArgs(userArgsX, null, 100, null);
-    listVolumeArgs.setRootScan(false);
-    volumes = storageHandler.listVolumes(listVolumeArgs);
-    Assert.assertEquals(0, volumes.getVolumes().size());
-  }
-
-  /**
-   * Test get key information.
-   *
-   * @throws IOException
-   * @throws OzoneException
-   */
-  @Test
-  public void testGetKeyInfo() throws IOException,
-      OzoneException, ParseException {
-    String userName = "user" + RandomStringUtils.randomNumeric(5);
-    String adminName = "admin" + RandomStringUtils.randomNumeric(5);
-    String volumeName = "volume" + RandomStringUtils.randomNumeric(5);
-    String bucketName = "bucket" + RandomStringUtils.randomNumeric(5);
-    long currentTime = Time.now();
-
-    VolumeArgs createVolumeArgs = new VolumeArgs(volumeName, userArgs);
-    createVolumeArgs.setUserName(userName);
-    createVolumeArgs.setAdminName(adminName);
-    storageHandler.createVolume(createVolumeArgs);
-
-    BucketArgs bucketArgs = new BucketArgs(bucketName, createVolumeArgs);
-    bucketArgs.setAddAcls(new LinkedList<>());
-    bucketArgs.setRemoveAcls(new LinkedList<>());
-    bucketArgs.setStorageType(StorageType.DISK);
-    storageHandler.createBucket(bucketArgs);
-
-    String keyName = "testKey";
-    KeyArgs keyArgs = new KeyArgs(keyName, bucketArgs);
-    keyArgs.setSize(4096);
-
-
-    OutputStream stream = storageHandler.newKeyWriter(keyArgs);
-    stream.close();
-
-    KeyInfo keyInfo = storageHandler.getKeyInfo(keyArgs);
-    // Compare the time in second unit since the date string reparsed to
-    // millisecond will lose precision.
-    Assert.assertTrue(
-        (HddsClientUtils.formatDateTime(keyInfo.getCreatedOn()) / 1000) >= (
-            currentTime / 1000));
-    Assert.assertTrue(
-        (HddsClientUtils.formatDateTime(keyInfo.getModifiedOn()) / 1000) >= (
-            currentTime / 1000));
-    Assert.assertEquals(keyName, keyInfo.getKeyName());
-    // with out data written, the size would be 0
-    Assert.assertEquals(0, keyInfo.getSize());
-  }
-
-  /**
-   * Test that the write can proceed without having to set the right size.
-   *
-   * @throws IOException
-   */
-  @Test
-  public void testWriteSize() throws IOException, OzoneException {
-    String userName = "user" + RandomStringUtils.randomNumeric(5);
-    String adminName = "admin" + RandomStringUtils.randomNumeric(5);
-    String volumeName = "volume" + RandomStringUtils.randomNumeric(5);
-    String bucketName = "bucket" + RandomStringUtils.randomNumeric(5);
-
-    VolumeArgs createVolumeArgs = new VolumeArgs(volumeName, userArgs);
-    createVolumeArgs.setUserName(userName);
-    createVolumeArgs.setAdminName(adminName);
-    storageHandler.createVolume(createVolumeArgs);
-
-    BucketArgs bucketArgs = new BucketArgs(bucketName, createVolumeArgs);
-    bucketArgs.setAddAcls(new LinkedList<>());
-    bucketArgs.setRemoveAcls(new LinkedList<>());
-    bucketArgs.setStorageType(StorageType.DISK);
-    storageHandler.createBucket(bucketArgs);
-
-    String dataString = RandomStringUtils.randomAscii(100);
-    // write a key without specifying size at all
-    String keyName = "testKey";
-    KeyArgs keyArgs = new KeyArgs(keyName, bucketArgs);
-    try (OutputStream stream = storageHandler.newKeyWriter(keyArgs)) {
-      stream.write(dataString.getBytes());
-    }
-    byte[] data = new byte[dataString.length()];
-    try (InputStream in = storageHandler.newKeyReader(keyArgs)) {
-      in.read(data);
-    }
-    Assert.assertEquals(dataString, DFSUtil.bytes2String(data));
-
-    // write a key with a size, but write above it.
-    String keyName1 = "testKey1";
-    KeyArgs keyArgs1 = new KeyArgs(keyName1, bucketArgs);
-    keyArgs1.setSize(30);
-    try (OutputStream stream = storageHandler.newKeyWriter(keyArgs1)) {
-      stream.write(dataString.getBytes());
-    }
-    byte[] data1 = new byte[dataString.length()];
-    try (InputStream in = storageHandler.newKeyReader(keyArgs1)) {
-      in.read(data1);
-    }
-    Assert.assertEquals(dataString, DFSUtil.bytes2String(data1));
-  }
-
-  /**
-   * Tests the RPC call for getting scmId and clusterId from SCM.
-   * @throws IOException
-   */
-  @Test
-  public void testGetScmInfo() throws IOException {
-    ScmInfo info = cluster.getKeySpaceManager().getScmInfo();
-    Assert.assertEquals(clusterId, info.getClusterId());
-    Assert.assertEquals(scmId, info.getScmId());
-  }
-
-
-  @Test
-  public void testExpiredOpenKey() throws Exception {
-    BackgroundService openKeyCleanUpService = ((KeyManagerImpl)cluster
-        .getKeySpaceManager().getKeyManager()).getOpenKeyCleanupService();
-
-    String userName = "user" + RandomStringUtils.randomNumeric(5);
-    String adminName = "admin" + RandomStringUtils.randomNumeric(5);
-    String volumeName = "volume" + RandomStringUtils.randomNumeric(5);
-    String bucketName = "bucket" + RandomStringUtils.randomNumeric(5);
-
-    VolumeArgs createVolumeArgs = new VolumeArgs(volumeName, userArgs);
-    createVolumeArgs.setUserName(userName);
-    createVolumeArgs.setAdminName(adminName);
-    storageHandler.createVolume(createVolumeArgs);
-
-    BucketArgs bucketArgs = new BucketArgs(bucketName, createVolumeArgs);
-    bucketArgs.setAddAcls(new LinkedList<>());
-    bucketArgs.setRemoveAcls(new LinkedList<>());
-    bucketArgs.setStorageType(StorageType.DISK);
-    storageHandler.createBucket(bucketArgs);
-
-    // open some keys.
-
-    KeyArgs keyArgs1 = new KeyArgs("testKey1", bucketArgs);
-    KeyArgs keyArgs2 = new KeyArgs("testKey2", bucketArgs);
-    KeyArgs keyArgs3 = new KeyArgs("testKey3", bucketArgs);
-    KeyArgs keyArgs4 = new KeyArgs("testKey4", bucketArgs);
-    List<BlockGroup> openKeys;
-    storageHandler.newKeyWriter(keyArgs1);
-    storageHandler.newKeyWriter(keyArgs2);
-    storageHandler.newKeyWriter(keyArgs3);
-    storageHandler.newKeyWriter(keyArgs4);
-
-    Set<String> expected = Stream.of(
-        "testKey1", "testKey2", "testKey3", "testKey4")
-        .collect(Collectors.toSet());
-
-    // Now all k1-k4 should be in open state, so ExpiredOpenKeys should not
-    // contain these values.
-    openKeys = cluster.getKeySpaceManager()
-        .getMetadataManager().getExpiredOpenKeys();
-
-    for (BlockGroup bg : openKeys) {
-      String[] subs = bg.getGroupID().split("/");
-      String keyName = subs[subs.length - 1];
-      Assert.assertFalse(expected.contains(keyName));
-    }
-
-    Thread.sleep(2000);
-    // Now all k1-k4 should be in ExpiredOpenKeys
-    openKeys = cluster.getKeySpaceManager()
-        .getMetadataManager().getExpiredOpenKeys();
-    for (BlockGroup bg : openKeys) {
-      String[] subs = bg.getGroupID().split("/");
-      String keyName = subs[subs.length - 1];
-      if (expected.contains(keyName)) {
-        expected.remove(keyName);
-      }
-    }
-    Assert.assertEquals(0, expected.size());
-
-    KeyArgs keyArgs5 = new KeyArgs("testKey5", bucketArgs);
-    storageHandler.newKeyWriter(keyArgs5);
-
-    openKeyCleanUpService.triggerBackgroundTaskForTesting();
-    Thread.sleep(2000);
-    // now all k1-k4 should have been removed by the clean-up task, only k5
-    // should be present in ExpiredOpenKeys.
-    openKeys =
-        cluster.getKeySpaceManager().getMetadataManager().getExpiredOpenKeys();
-    System.out.println(openKeys);
-    boolean key5found = false;
-    Set<String> removed = Stream.of(
-        "testKey1", "testKey2", "testKey3", "testKey4")
-        .collect(Collectors.toSet());
-    for (BlockGroup bg : openKeys) {
-      String[] subs = bg.getGroupID().split("/");
-      String keyName = subs[subs.length - 1];
-      Assert.assertFalse(removed.contains(keyName));
-      if (keyName.equals("testKey5")) {
-        key5found = true;
-      }
-    }
-    Assert.assertTrue(key5found);
-  }
-
-  /**
-   * Tests the KSM Initialization.
-   * @throws IOException
-   */
-  @Test
-  public void testKSMInitialization() throws IOException {
-    // Read the version file info from KSM version file
-    KSMStorage ksmStorage = cluster.getKeySpaceManager().getKsmStorage();
-    SCMStorage scmStorage = new SCMStorage(conf);
-    // asserts whether cluster Id and SCM ID are properly set in SCM Version
-    // file.
-    Assert.assertEquals(clusterId, scmStorage.getClusterID());
-    Assert.assertEquals(scmId, scmStorage.getScmId());
-    // asserts whether KSM Id is properly set in KSM Version file.
-    Assert.assertEquals(ksmId, ksmStorage.getKsmId());
-    // asserts whether the SCM info is correct in KSM Version file.
-    Assert.assertEquals(clusterId, ksmStorage.getClusterID());
-    Assert.assertEquals(scmId, ksmStorage.getScmId());
-  }
-
-  /**
-   * Tests the KSM Initialization Failure.
-   * @throws IOException
-   */
-  @Test
-  public void testKSMInitializationFailure() throws Exception {
-    OzoneConfiguration config = new OzoneConfiguration();
-    final String path =
-        GenericTestUtils.getTempPath(UUID.randomUUID().toString());
-    Path metaDirPath = Paths.get(path, "ksm-meta");
-    config.set(OzoneConfigKeys.OZONE_METADATA_DIRS, metaDirPath.toString());
-    config.setBoolean(OzoneConfigKeys.OZONE_ENABLED, true);
-    config.set(ScmConfigKeys.OZONE_SCM_CLIENT_ADDRESS_KEY, "127.0.0.1:0");
-    config.set(ScmConfigKeys.OZONE_SCM_BLOCK_CLIENT_ADDRESS_KEY,
-        conf.get(ScmConfigKeys.OZONE_SCM_BLOCK_CLIENT_ADDRESS_KEY));
-    exception.expect(KSMException.class);
-    exception.expectMessage("KSM not initialized.");
-    KeySpaceManager.createKSM(null, config);
-    KSMStorage ksmStore = new KSMStorage(config);
-    ksmStore.setClusterId("testClusterId");
-    ksmStore.setScmId("testScmId");
-    // writes the version file properties
-    ksmStore.initialize();
-    exception.expect(KSMException.class);
-    exception.expectMessage("SCM version info mismatch.");
-    KeySpaceManager.createKSM(null, conf);
-  }
-
-  @Test
-  public void testGetServiceList() throws IOException {
-    long numGetServiceListCalls = ksmMetrics.getNumGetServiceLists();
-    List<ServiceInfo> services = cluster.getKeySpaceManager().getServiceList();
-
-    Assert.assertEquals(numGetServiceListCalls + 1,
-        ksmMetrics.getNumGetServiceLists());
-
-    ServiceInfo ksmInfo = services.stream().filter(
-        a -> a.getNodeType().equals(HddsProtos.NodeType.KSM))
-        .collect(Collectors.toList()).get(0);
-    InetSocketAddress ksmAddress = new InetSocketAddress(ksmInfo.getHostname(),
-        ksmInfo.getPort(ServicePort.Type.RPC));
-    Assert.assertEquals(NetUtils.createSocketAddr(
-        conf.get(OZONE_KSM_ADDRESS_KEY)), ksmAddress);
-
-    ServiceInfo scmInfo = services.stream().filter(
-        a -> a.getNodeType().equals(HddsProtos.NodeType.SCM))
-        .collect(Collectors.toList()).get(0);
-    InetSocketAddress scmAddress = new InetSocketAddress(scmInfo.getHostname(),
-        scmInfo.getPort(ServicePort.Type.RPC));
-    Assert.assertEquals(NetUtils.createSocketAddr(
-        conf.get(OZONE_SCM_CLIENT_ADDRESS_KEY)), scmAddress);
-  }
-}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/061b1685/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/ksm/TestKeySpaceManagerRestInterface.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/ksm/TestKeySpaceManagerRestInterface.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/ksm/TestKeySpaceManagerRestInterface.java
deleted file mode 100644
index feb83d3..0000000
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/ksm/TestKeySpaceManagerRestInterface.java
+++ /dev/null
@@ -1,135 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.ozone.ksm;
-
-import com.fasterxml.jackson.databind.ObjectMapper;
-import com.fasterxml.jackson.core.type.TypeReference;
-import org.apache.hadoop.hdds.conf.OzoneConfiguration;
-import org.apache.hadoop.hdds.protocol.DatanodeDetails;
-import org.apache.hadoop.net.NetUtils;
-import org.apache.hadoop.ozone.MiniOzoneCluster;
-import org.apache.hadoop.ozone.ksm.helpers.ServiceInfo;
-import org.apache.hadoop.ozone.protocol.proto
-    .KeySpaceManagerProtocolProtos.ServicePort;
-import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
-import org.apache.http.HttpResponse;
-import org.apache.http.client.HttpClient;
-import org.apache.http.client.methods.HttpGet;
-import org.apache.http.impl.client.HttpClients;
-import org.apache.http.util.EntityUtils;
-import org.junit.AfterClass;
-import org.junit.Assert;
-import org.junit.BeforeClass;
-import org.junit.Test;
-
-import java.net.InetSocketAddress;
-import java.util.HashMap;
-import java.util.List;
-import java.util.Map;
-
-import static org.apache.hadoop.hdds.HddsUtils.getScmAddressForClients;
-import static org.apache.hadoop.ozone.KsmUtils.getKsmAddressForClients;
-
-/**
- * This class is to test the REST interface exposed by KeySpaceManager.
- */
-public class TestKeySpaceManagerRestInterface {
-
-  private static MiniOzoneCluster cluster;
-  private static OzoneConfiguration conf;
-
-  @BeforeClass
-  public static void setUp() throws Exception {
-    conf = new OzoneConfiguration();
-    cluster = MiniOzoneCluster.newBuilder(conf).build();
-    cluster.waitForClusterToBeReady();
-  }
-
-  @AfterClass
-  public static void tearDown() throws Exception {
-    if (cluster != null) {
-      cluster.shutdown();
-    }
-  }
-
-  @Test
-  public void testGetServiceList() throws Exception {
-    KeySpaceManagerHttpServer server =
-        cluster.getKeySpaceManager().getHttpServer();
-    HttpClient client = HttpClients.createDefault();
-    String connectionUri = "http://" +
-        NetUtils.getHostPortString(server.getHttpAddress());
-    HttpGet httpGet = new HttpGet(connectionUri + "/serviceList");
-    HttpResponse response = client.execute(httpGet);
-    String serviceListJson = EntityUtils.toString(response.getEntity());
-
-    ObjectMapper objectMapper = new ObjectMapper();
-    TypeReference<List<ServiceInfo>> serviceInfoReference =
-        new TypeReference<List<ServiceInfo>>() {};
-    List<ServiceInfo> serviceInfos = objectMapper.readValue(
-        serviceListJson, serviceInfoReference);
-    Map<HddsProtos.NodeType, ServiceInfo> serviceMap = new HashMap<>();
-    for (ServiceInfo serviceInfo : serviceInfos) {
-      serviceMap.put(serviceInfo.getNodeType(), serviceInfo);
-    }
-
-    InetSocketAddress ksmAddress =
-        getKsmAddressForClients(conf);
-    ServiceInfo ksmInfo = serviceMap.get(HddsProtos.NodeType.KSM);
-
-    Assert.assertEquals(ksmAddress.getHostName(), ksmInfo.getHostname());
-    Assert.assertEquals(ksmAddress.getPort(),
-        ksmInfo.getPort(ServicePort.Type.RPC));
-    Assert.assertEquals(server.getHttpAddress().getPort(),
-        ksmInfo.getPort(ServicePort.Type.HTTP));
-
-    InetSocketAddress scmAddress =
-        getScmAddressForClients(conf);
-    ServiceInfo scmInfo = serviceMap.get(HddsProtos.NodeType.SCM);
-
-    Assert.assertEquals(scmAddress.getHostName(), scmInfo.getHostname());
-    Assert.assertEquals(scmAddress.getPort(),
-        scmInfo.getPort(ServicePort.Type.RPC));
-
-    ServiceInfo datanodeInfo = serviceMap.get(HddsProtos.NodeType.DATANODE);
-    DatanodeDetails datanodeDetails = cluster.getHddsDatanodes().get(0)
-        .getDatanodeDetails();
-    Assert.assertEquals(datanodeDetails.getHostName(),
-        datanodeInfo.getHostname());
-
-    Map<ServicePort.Type, Integer> ports = datanodeInfo.getPorts();
-    for(ServicePort.Type type : ports.keySet()) {
-      switch (type) {
-      case HTTP:
-      case HTTPS:
-        Assert.assertEquals(
-            datanodeDetails.getPort(DatanodeDetails.Port.Name.REST).getValue(),
-            ports.get(type));
-        break;
-      default:
-        // KSM only sends Datanode's info port details
-        // i.e. HTTP or HTTPS
-        // Other ports are not expected as of now.
-        Assert.fail();
-        break;
-      }
-    }
-  }
-
-}


---------------------------------------------------------------------
To unsubscribe, e-mail: common-commits-unsubscribe@hadoop.apache.org
For additional commands, e-mail: common-commits-help@hadoop.apache.org


[17/50] [abbrv] hadoop git commit: YARN-7451. Add missing tests to verify the presence of custom resources of RM apps and scheduler webservice endpoints (snemeth via rkanter)

Posted by vi...@apache.org.
http://git-wip-us.apache.org/repos/asf/hadoop/blob/99febe7f/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/helper/AppInfoXmlVerifications.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/helper/AppInfoXmlVerifications.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/helper/AppInfoXmlVerifications.java
new file mode 100644
index 0000000..7c5b6db
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/helper/AppInfoXmlVerifications.java
@@ -0,0 +1,132 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.server.resourcemanager.webapp.helper;
+
+import org.apache.hadoop.yarn.server.resourcemanager.ResourceManager;
+import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMApp;
+import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.AppInfo;
+import org.apache.hadoop.yarn.webapp.WebServicesTestUtils;
+import org.w3c.dom.Element;
+import static org.apache.hadoop.yarn.webapp.WebServicesTestUtils.checkStringMatch;
+import static org.apache.hadoop.yarn.webapp.WebServicesTestUtils.getXmlBoolean;
+import static org.apache.hadoop.yarn.webapp.WebServicesTestUtils.getXmlFloat;
+import static org.apache.hadoop.yarn.webapp.WebServicesTestUtils.getXmlInt;
+import static org.apache.hadoop.yarn.webapp.WebServicesTestUtils.getXmlLong;
+import static org.apache.hadoop.yarn.webapp.WebServicesTestUtils.getXmlString;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertNotNull;
+import static org.junit.Assert.assertTrue;
+
+/**
+ * Contains all value verifications that are needed to verify {@link AppInfo}
+ * XML documents.
+ */
+public final class AppInfoXmlVerifications {
+
+  private AppInfoXmlVerifications() {
+    //utility class
+  }
+
+  /**
+   * Tests whether {@link AppInfo} representation object contains the required
+   * values as per defined in the specified app parameter.
+   * @param info
+   * @param  app  an RMApp instance that contains the required values
+   */
+  public static void verify(Element info, RMApp app) {
+    checkStringMatch("id", app.getApplicationId()
+            .toString(), getXmlString(info, "id"));
+    checkStringMatch("user", app.getUser(),
+            getXmlString(info, "user"));
+    checkStringMatch("name", app.getName(),
+            getXmlString(info, "name"));
+    checkStringMatch("applicationType",
+            app.getApplicationType(), getXmlString(info, "applicationType"));
+    checkStringMatch("queue", app.getQueue(),
+            getXmlString(info, "queue"));
+    assertEquals("priority doesn't match", 0, getXmlInt(info, "priority"));
+    checkStringMatch("state", app.getState().toString(),
+            getXmlString(info, "state"));
+    checkStringMatch("finalStatus", app
+            .getFinalApplicationStatus().toString(),
+            getXmlString(info, "finalStatus"));
+    assertEquals("progress doesn't match", 0, getXmlFloat(info, "progress"),
+        0.0);
+    if ("UNASSIGNED".equals(getXmlString(info, "trackingUI"))) {
+      checkStringMatch("trackingUI", "UNASSIGNED",
+              getXmlString(info, "trackingUI"));
+    }
+    WebServicesTestUtils.checkStringEqual("diagnostics",
+            app.getDiagnostics().toString(), getXmlString(info, "diagnostics"));
+    assertEquals("clusterId doesn't match",
+            ResourceManager.getClusterTimeStamp(),
+            getXmlLong(info, "clusterId"));
+    assertEquals("startedTime doesn't match", app.getStartTime(),
+            getXmlLong(info, "startedTime"));
+    assertEquals("finishedTime doesn't match", app.getFinishTime(),
+            getXmlLong(info, "finishedTime"));
+    assertTrue("elapsed time not greater than 0",
+            getXmlLong(info, "elapsedTime") > 0);
+    checkStringMatch("amHostHttpAddress", app
+                    .getCurrentAppAttempt().getMasterContainer()
+                    .getNodeHttpAddress(),
+            getXmlString(info, "amHostHttpAddress"));
+    assertTrue("amContainerLogs doesn't match",
+        getXmlString(info, "amContainerLogs").startsWith("http://"));
+    assertTrue("amContainerLogs doesn't contain user info",
+        getXmlString(info, "amContainerLogs").endsWith("/" + app.getUser()));
+    assertEquals("allocatedMB doesn't match", 1024,
+            getXmlInt(info, "allocatedMB"));
+    assertEquals("allocatedVCores doesn't match", 1,
+            getXmlInt(info, "allocatedVCores"));
+    assertEquals("queueUsagePerc doesn't match", 50.0f,
+            getXmlFloat(info, "queueUsagePercentage"), 0.01f);
+    assertEquals("clusterUsagePerc doesn't match", 50.0f,
+            getXmlFloat(info, "clusterUsagePercentage"), 0.01f);
+    assertEquals("numContainers doesn't match", 1,
+        getXmlInt(info, "runningContainers"));
+    assertNotNull("preemptedResourceSecondsMap should not be null",
+            info.getElementsByTagName("preemptedResourceSecondsMap"));
+    assertEquals("preemptedResourceMB doesn't match", app
+                    .getRMAppMetrics().getResourcePreempted().getMemorySize(),
+            getXmlInt(info, "preemptedResourceMB"));
+    assertEquals("preemptedResourceVCores doesn't match", app
+                    .getRMAppMetrics().getResourcePreempted().getVirtualCores(),
+            getXmlInt(info, "preemptedResourceVCores"));
+    assertEquals("numNonAMContainerPreempted doesn't match", app
+                    .getRMAppMetrics().getNumNonAMContainersPreempted(),
+            getXmlInt(info, "numNonAMContainerPreempted"));
+    assertEquals("numAMContainerPreempted doesn't match", app
+                    .getRMAppMetrics().getNumAMContainersPreempted(),
+            getXmlInt(info, "numAMContainerPreempted"));
+    assertEquals("Log aggregation Status doesn't match", app
+                    .getLogAggregationStatusForAppReport().toString(),
+            getXmlString(info, "logAggregationStatus"));
+    assertEquals("unmanagedApplication doesn't match", app
+                    .getApplicationSubmissionContext().getUnmanagedAM(),
+            getXmlBoolean(info, "unmanagedApplication"));
+    assertEquals("unmanagedApplication doesn't match",
+            app.getApplicationSubmissionContext().getNodeLabelExpression(),
+            getXmlString(info, "appNodeLabelExpression"));
+    assertEquals("unmanagedApplication doesn't match",
+            app.getAMResourceRequests().get(0).getNodeLabelExpression(),
+            getXmlString(info, "amNodeLabelExpression"));
+    assertEquals("amRPCAddress",
+            AppInfo.getAmRPCAddressFromRMAppAttempt(app.getCurrentAppAttempt()),
+            getXmlString(info, "amRPCAddress"));
+  }
+}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/99febe7f/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/helper/BufferedClientResponse.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/helper/BufferedClientResponse.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/helper/BufferedClientResponse.java
new file mode 100644
index 0000000..a8990ca
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/helper/BufferedClientResponse.java
@@ -0,0 +1,57 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.server.resourcemanager.webapp.helper;
+
+
+import com.sun.jersey.api.client.ClientHandlerException;
+import com.sun.jersey.api.client.ClientResponse;
+import com.sun.jersey.api.client.UniformInterfaceException;
+
+import javax.ws.rs.core.MediaType;
+import java.io.IOException;
+
+/**
+ * This class is merely a wrapper for {@link ClientResponse}. Given that the
+ * entity input stream of {@link ClientResponse} can be read only once by
+ * default and for some tests it is convenient to read the input stream many
+ * times, this class hides the details of how to do that and prevents
+ * unnecessary code duplication in tests.
+ */
+public class BufferedClientResponse {
+  private ClientResponse response;
+
+  public BufferedClientResponse(ClientResponse response) {
+    response.bufferEntity();
+    this.response = response;
+  }
+
+  public <T> T getEntity(Class<T> clazz)
+          throws ClientHandlerException, UniformInterfaceException {
+    try {
+      response.getEntityInputStream().reset();
+    } catch (IOException e) {
+      throw new RuntimeException(e);
+    }
+    return response.getEntity(clazz);
+  }
+
+  public MediaType getType() {
+    return response.getType();
+  }
+}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/99febe7f/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/helper/JsonCustomResourceTypeTestcase.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/helper/JsonCustomResourceTypeTestcase.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/helper/JsonCustomResourceTypeTestcase.java
new file mode 100644
index 0000000..9d6a111
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/helper/JsonCustomResourceTypeTestcase.java
@@ -0,0 +1,77 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.server.resourcemanager.webapp.helper;
+
+import com.sun.jersey.api.client.WebResource;
+import org.apache.hadoop.http.JettyUtils;
+import org.codehaus.jettison.json.JSONObject;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import javax.ws.rs.core.MediaType;
+
+import java.util.function.Consumer;
+
+import static org.junit.Assert.*;
+
+/**
+ * This class hides the implementation details of how to verify the structure of
+ * JSON responses. Tests should only provide the path of the
+ * {@link WebResource}, the response from the resource and
+ * the verifier Consumer to
+ * {@link JsonCustomResourceTypeTestcase#verify(Consumer)}. An instance of
+ * {@link JSONObject} will be passed to that consumer to be able to
+ * verify the response.
+ */
+public class JsonCustomResourceTypeTestcase {
+  private static final Logger LOG =
+      LoggerFactory.getLogger(JsonCustomResourceTypeTestcase.class);
+
+  private final WebResource path;
+  private final BufferedClientResponse response;
+  private final JSONObject parsedResponse;
+
+  public JsonCustomResourceTypeTestcase(WebResource path,
+                                        BufferedClientResponse response) {
+    this.path = path;
+    this.response = response;
+    this.parsedResponse = response.getEntity(JSONObject.class);
+  }
+
+  public void verify(Consumer<JSONObject> verifier) {
+    assertEquals(MediaType.APPLICATION_JSON_TYPE + "; " + JettyUtils.UTF_8,
+        response.getType().toString());
+
+    logResponse();
+
+    String responseStr = response.getEntity(String.class);
+    if (responseStr == null || responseStr.isEmpty()) {
+      throw new IllegalStateException("Response is null or empty!");
+    }
+    verifier.accept(parsedResponse);
+  }
+
+  private void logResponse() {
+    String responseStr = response.getEntity(String.class);
+    LOG.info("Raw response from service URL {}: {}", path.toString(),
+        responseStr);
+    LOG.info("Parsed response from service URL {}: {}", path.toString(),
+        parsedResponse);
+  }
+}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hadoop/blob/99febe7f/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/helper/ResourceRequestsJsonVerifications.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/helper/ResourceRequestsJsonVerifications.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/helper/ResourceRequestsJsonVerifications.java
new file mode 100644
index 0000000..6e58a89
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/helper/ResourceRequestsJsonVerifications.java
@@ -0,0 +1,252 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.server.resourcemanager.webapp.helper;
+
+import com.google.common.collect.Lists;
+import com.google.common.collect.Maps;
+import org.apache.hadoop.yarn.api.protocolrecords.ResourceTypes;
+import org.apache.hadoop.yarn.api.records.ResourceInformation;
+import org.apache.hadoop.yarn.api.records.ResourceRequest;
+import org.codehaus.jettison.json.JSONArray;
+import org.codehaus.jettison.json.JSONException;
+import org.codehaus.jettison.json.JSONObject;
+
+import java.util.List;
+import java.util.Map;
+
+import static junit.framework.TestCase.assertTrue;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertNotNull;
+
+/**
+ * Performs value verifications on
+ * {@link org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.ResourceRequestInfo}
+ * objects against the values of {@link ResourceRequest}. With the help of the
+ * {@link Builder}, users can also make verifications of the custom resource
+ * types and its values.
+ */
+public class ResourceRequestsJsonVerifications {
+  private final ResourceRequest resourceRequest;
+  private final JSONObject requestInfo;
+  private final Map<String, Long> customResourceTypes;
+  private final List<String> expectedCustomResourceTypes;
+
+  ResourceRequestsJsonVerifications(Builder builder) {
+    this.resourceRequest = builder.resourceRequest;
+    this.requestInfo = builder.requestInfo;
+    this.customResourceTypes = builder.customResourceTypes;
+    this.expectedCustomResourceTypes = builder.expectedCustomResourceTypes;
+  }
+
+  public static void verify(JSONObject requestInfo, ResourceRequest rr)
+      throws JSONException {
+    createDefaultBuilder(requestInfo, rr).build().verify();
+  }
+
+  public static void verifyWithCustomResourceTypes(JSONObject requestInfo,
+      ResourceRequest resourceRequest, List<String> expectedResourceTypes)
+      throws JSONException {
+
+    createDefaultBuilder(requestInfo, resourceRequest)
+        .withExpectedCustomResourceTypes(expectedResourceTypes)
+        .withCustomResourceTypes(
+            extractActualCustomResourceTypes(requestInfo, expectedResourceTypes))
+        .build().verify();
+  }
+
+  private static Builder createDefaultBuilder(JSONObject requestInfo,
+      ResourceRequest resourceRequest) {
+    return new ResourceRequestsJsonVerifications.Builder()
+            .withRequest(resourceRequest)
+            .withRequestInfoJson(requestInfo);
+  }
+
+  private static Map<String, Long> extractActualCustomResourceTypes(
+      JSONObject requestInfo, List<String> expectedResourceTypes)
+      throws JSONException {
+    JSONObject capability = requestInfo.getJSONObject("capability");
+    Map<String, Long> resourceAndValue =
+        extractCustomResorceTypeValues(capability, expectedResourceTypes);
+    Map.Entry<String, Long> resourceEntry =
+        resourceAndValue.entrySet().iterator().next();
+
+    assertTrue(
+        "Found resource type: " + resourceEntry.getKey()
+            + " is not in expected resource types: " + expectedResourceTypes,
+        expectedResourceTypes.contains(resourceEntry.getKey()));
+
+    return resourceAndValue;
+  }
+
+  private static Map<String, Long> extractCustomResorceTypeValues(
+      JSONObject capability, List<String> expectedResourceTypes)
+      throws JSONException {
+    assertTrue(
+        "resourceCategory does not have resourceInformations: " + capability,
+        capability.has("resourceInformations"));
+
+    JSONObject resourceInformations =
+        capability.getJSONObject("resourceInformations");
+    assertTrue(
+        "resourceInformations does not have resourceInformation object: "
+            + resourceInformations,
+        resourceInformations.has("resourceInformation"));
+    JSONArray customResources =
+        resourceInformations.getJSONArray("resourceInformation");
+
+    // customResources will include vcores / memory as well
+    assertEquals(
+        "Different number of custom resource types found than expected",
+        expectedResourceTypes.size(), customResources.length() - 2);
+
+    Map<String, Long> resourceValues = Maps.newHashMap();
+    for (int i = 0; i < customResources.length(); i++) {
+      JSONObject customResource = customResources.getJSONObject(i);
+      assertTrue("Resource type does not have name field: " + customResource,
+          customResource.has("name"));
+      assertTrue("Resource type does not have name resourceType field: "
+          + customResource, customResource.has("resourceType"));
+      assertTrue(
+          "Resource type does not have name units field: " + customResource,
+          customResource.has("units"));
+      assertTrue(
+          "Resource type does not have name value field: " + customResource,
+          customResource.has("value"));
+
+      String name = customResource.getString("name");
+      String unit = customResource.getString("units");
+      String resourceType = customResource.getString("resourceType");
+      Long value = customResource.getLong("value");
+
+      if (ResourceInformation.MEMORY_URI.equals(name)
+          || ResourceInformation.VCORES_URI.equals(name)) {
+        continue;
+      }
+
+      assertTrue("Custom resource type " + name + " not found",
+          expectedResourceTypes.contains(name));
+      assertEquals("k", unit);
+      assertEquals(ResourceTypes.COUNTABLE,
+          ResourceTypes.valueOf(resourceType));
+      assertNotNull("Custom resource value " + value + " is null!", value);
+      resourceValues.put(name, value);
+    }
+
+    return resourceValues;
+  }
+
+  private void verify() throws JSONException {
+    assertEquals("nodeLabelExpression doesn't match",
+        resourceRequest.getNodeLabelExpression(),
+            requestInfo.getString("nodeLabelExpression"));
+    assertEquals("numContainers doesn't match",
+            resourceRequest.getNumContainers(),
+            requestInfo.getInt("numContainers"));
+    assertEquals("relaxLocality doesn't match",
+            resourceRequest.getRelaxLocality(),
+            requestInfo.getBoolean("relaxLocality"));
+    assertEquals("priority does not match",
+            resourceRequest.getPriority().getPriority(),
+            requestInfo.getInt("priority"));
+    assertEquals("resourceName does not match",
+            resourceRequest.getResourceName(),
+            requestInfo.getString("resourceName"));
+    assertEquals("memory does not match",
+        resourceRequest.getCapability().getMemorySize(),
+            requestInfo.getJSONObject("capability").getLong("memory"));
+    assertEquals("vCores does not match",
+        resourceRequest.getCapability().getVirtualCores(),
+            requestInfo.getJSONObject("capability").getLong("vCores"));
+
+    verifyAtLeastOneCustomResourceIsSerialized();
+
+    JSONObject executionTypeRequest =
+            requestInfo.getJSONObject("executionTypeRequest");
+    assertEquals("executionType does not match",
+        resourceRequest.getExecutionTypeRequest().getExecutionType().name(),
+            executionTypeRequest.getString("executionType"));
+    assertEquals("enforceExecutionType does not match",
+            resourceRequest.getExecutionTypeRequest().getEnforceExecutionType(),
+            executionTypeRequest.getBoolean("enforceExecutionType"));
+  }
+
+  /**
+   * JSON serialization produces "invalid JSON" by default as maps are
+   * serialized like this:
+   * "customResources":{"entry":{"key":"customResource-1","value":"0"}}
+   * If the map has multiple keys then multiple entries will be serialized.
+   * Our json parser in tests cannot handle duplicates therefore only one
+   * custom resource will be in the parsed json. See:
+   * https://issues.apache.org/jira/browse/YARN-7505
+   */
+  private void verifyAtLeastOneCustomResourceIsSerialized() {
+    boolean resourceFound = false;
+    for (String expectedCustomResourceType : expectedCustomResourceTypes) {
+      if (customResourceTypes.containsKey(expectedCustomResourceType)) {
+        resourceFound = true;
+        Long resourceValue =
+            customResourceTypes.get(expectedCustomResourceType);
+        assertNotNull("Resource value should not be null!", resourceValue);
+      }
+    }
+    assertTrue("No custom resource type can be found in the response!",
+        resourceFound);
+  }
+
+  /**
+   * Builder class for {@link ResourceRequestsJsonVerifications}.
+   */
+  public static final class Builder {
+    private List<String> expectedCustomResourceTypes = Lists.newArrayList();
+    private Map<String, Long> customResourceTypes;
+    private ResourceRequest resourceRequest;
+    private JSONObject requestInfo;
+
+    Builder() {
+    }
+
+    public static Builder create() {
+      return new Builder();
+    }
+
+    Builder withExpectedCustomResourceTypes(
+            List<String> expectedCustomResourceTypes) {
+      this.expectedCustomResourceTypes = expectedCustomResourceTypes;
+      return this;
+    }
+
+    Builder withCustomResourceTypes(
+            Map<String, Long> customResourceTypes) {
+      this.customResourceTypes = customResourceTypes;
+      return this;
+    }
+
+    Builder withRequest(ResourceRequest resourceRequest) {
+      this.resourceRequest = resourceRequest;
+      return this;
+    }
+
+    Builder withRequestInfoJson(JSONObject requestInfo) {
+      this.requestInfo = requestInfo;
+      return this;
+    }
+
+    public ResourceRequestsJsonVerifications build() {
+      return new ResourceRequestsJsonVerifications(this);
+    }
+  }
+}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/99febe7f/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/helper/ResourceRequestsXmlVerifications.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/helper/ResourceRequestsXmlVerifications.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/helper/ResourceRequestsXmlVerifications.java
new file mode 100644
index 0000000..af9b0f3
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/helper/ResourceRequestsXmlVerifications.java
@@ -0,0 +1,215 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.server.resourcemanager.webapp.helper;
+
+import com.google.common.collect.Lists;
+import com.google.common.collect.Maps;
+import com.google.common.collect.Sets;
+import org.apache.hadoop.yarn.api.protocolrecords.ResourceTypes;
+import org.apache.hadoop.yarn.api.records.ResourceInformation;
+import org.apache.hadoop.yarn.api.records.ResourceRequest;
+import org.w3c.dom.Element;
+import org.w3c.dom.NodeList;
+
+import java.util.List;
+import java.util.Map;
+import java.util.Set;
+
+import static junit.framework.TestCase.assertTrue;
+import static org.apache.hadoop.yarn.server.resourcemanager.webapp.helper.XmlCustomResourceTypeTestCase.toXml;
+import static org.apache.hadoop.yarn.webapp.WebServicesTestUtils.getXmlBoolean;
+import static org.apache.hadoop.yarn.webapp.WebServicesTestUtils.getXmlInt;
+import static org.apache.hadoop.yarn.webapp.WebServicesTestUtils.getXmlLong;
+import static org.apache.hadoop.yarn.webapp.WebServicesTestUtils.getXmlString;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertNotNull;
+
+/**
+ * Performs value verifications on
+ * {@link org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.ResourceRequestInfo}
+ * objects against the values of {@link ResourceRequest}. With the help of the
+ * {@link Builder}, users can also make verifications of the custom resource
+ * types and its values.
+ */
+public class ResourceRequestsXmlVerifications {
+  private final ResourceRequest resourceRequest;
+  private final Element requestInfo;
+  private final Map<String, Long> customResourceTypes;
+  private final List<String> expectedCustomResourceTypes;
+
+  ResourceRequestsXmlVerifications(Builder builder) {
+    this.resourceRequest = builder.resourceRequest;
+    this.requestInfo = builder.requestInfo;
+    this.customResourceTypes = builder.customResourceTypes;
+    this.expectedCustomResourceTypes = builder.expectedCustomResourceTypes;
+  }
+
+  public static void verifyWithCustomResourceTypes(Element requestInfo,
+      ResourceRequest resourceRequest, List<String> expectedResourceTypes) {
+
+    createDefaultBuilder(requestInfo, resourceRequest)
+        .withExpectedCustomResourceTypes(expectedResourceTypes)
+        .withCustomResourceTypes(extractActualCustomResourceType(requestInfo,
+            expectedResourceTypes))
+        .build().verify();
+  }
+
+  private static Builder createDefaultBuilder(Element requestInfo,
+      ResourceRequest resourceRequest) {
+    return new ResourceRequestsXmlVerifications.Builder()
+        .withRequest(resourceRequest).withRequestInfo(requestInfo);
+  }
+
+  private static Map<String, Long> extractActualCustomResourceType(
+      Element requestInfo, List<String> expectedResourceTypes) {
+    Element capability =
+        (Element) requestInfo.getElementsByTagName("capability").item(0);
+
+    return extractCustomResorceTypes(capability,
+        Sets.newHashSet(expectedResourceTypes));
+  }
+
+  private static Map<String, Long> extractCustomResorceTypes(Element capability,
+      Set<String> expectedResourceTypes) {
+    assertEquals(
+        toXml(capability) + " should have only one resourceInformations child!",
+        1, capability.getElementsByTagName("resourceInformations").getLength());
+    Element resourceInformations = (Element) capability
+        .getElementsByTagName("resourceInformations").item(0);
+
+    NodeList customResources =
+        resourceInformations.getElementsByTagName("resourceInformation");
+
+    // customResources will include vcores / memory as well
+    assertEquals(
+        "Different number of custom resource types found than expected",
+        expectedResourceTypes.size(), customResources.getLength() - 2);
+
+    Map<String, Long> resourceTypesAndValues = Maps.newHashMap();
+    for (int i = 0; i < customResources.getLength(); i++) {
+      Element customResource = (Element) customResources.item(i);
+      String name = getXmlString(customResource, "name");
+      String unit = getXmlString(customResource, "units");
+      String resourceType = getXmlString(customResource, "resourceType");
+      Long value = getXmlLong(customResource, "value");
+
+      if (ResourceInformation.MEMORY_URI.equals(name)
+          || ResourceInformation.VCORES_URI.equals(name)) {
+        continue;
+      }
+
+      assertTrue("Custom resource type " + name + " not found",
+          expectedResourceTypes.contains(name));
+      assertEquals("k", unit);
+      assertEquals(ResourceTypes.COUNTABLE,
+          ResourceTypes.valueOf(resourceType));
+      assertNotNull("Resource value should not be null for resource type "
+          + resourceType + ", listing xml contents: " + toXml(customResource),
+          value);
+      resourceTypesAndValues.put(name, value);
+    }
+
+    return resourceTypesAndValues;
+  }
+
+  private void verify() {
+    assertEquals("nodeLabelExpression doesn't match",
+        resourceRequest.getNodeLabelExpression(),
+        getXmlString(requestInfo, "nodeLabelExpression"));
+    assertEquals("numContainers doesn't match",
+        resourceRequest.getNumContainers(),
+        getXmlInt(requestInfo, "numContainers"));
+    assertEquals("relaxLocality doesn't match",
+        resourceRequest.getRelaxLocality(),
+        getXmlBoolean(requestInfo, "relaxLocality"));
+    assertEquals("priority does not match",
+        resourceRequest.getPriority().getPriority(),
+        getXmlInt(requestInfo, "priority"));
+    assertEquals("resourceName does not match",
+        resourceRequest.getResourceName(),
+        getXmlString(requestInfo, "resourceName"));
+    Element capability = (Element) requestInfo
+            .getElementsByTagName("capability").item(0);
+    assertEquals("memory does not match",
+        resourceRequest.getCapability().getMemorySize(),
+        getXmlLong(capability, "memory"));
+    assertEquals("vCores does not match",
+        resourceRequest.getCapability().getVirtualCores(),
+        getXmlLong(capability, "vCores"));
+
+    for (String expectedCustomResourceType : expectedCustomResourceTypes) {
+      assertTrue(
+          "Custom resource type " + expectedCustomResourceType
+              + " cannot be found!",
+          customResourceTypes.containsKey(expectedCustomResourceType));
+
+      Long resourceValue = customResourceTypes.get(expectedCustomResourceType);
+      assertNotNull("Resource value should not be null!", resourceValue);
+    }
+
+    Element executionTypeRequest = (Element) requestInfo
+        .getElementsByTagName("executionTypeRequest").item(0);
+    assertEquals("executionType does not match",
+        resourceRequest.getExecutionTypeRequest().getExecutionType().name(),
+        getXmlString(executionTypeRequest, "executionType"));
+    assertEquals("enforceExecutionType does not match",
+        resourceRequest.getExecutionTypeRequest().getEnforceExecutionType(),
+        getXmlBoolean(executionTypeRequest, "enforceExecutionType"));
+  }
+
+  /**
+   * Builder class for {@link ResourceRequestsXmlVerifications}.
+   */
+  public static final class Builder {
+    private List<String> expectedCustomResourceTypes = Lists.newArrayList();
+    private Map<String, Long> customResourceTypes;
+    private ResourceRequest resourceRequest;
+    private Element requestInfo;
+
+    Builder() {
+    }
+
+    public static Builder create() {
+      return new Builder();
+    }
+
+    Builder withExpectedCustomResourceTypes(
+        List<String> expectedCustomResourceTypes) {
+      this.expectedCustomResourceTypes = expectedCustomResourceTypes;
+      return this;
+    }
+
+    Builder withCustomResourceTypes(Map<String, Long> customResourceTypes) {
+      this.customResourceTypes = customResourceTypes;
+      return this;
+    }
+
+    Builder withRequest(ResourceRequest resourceRequest) {
+      this.resourceRequest = resourceRequest;
+      return this;
+    }
+
+    Builder withRequestInfo(Element requestInfo) {
+      this.requestInfo = requestInfo;
+      return this;
+    }
+
+    public ResourceRequestsXmlVerifications build() {
+      return new ResourceRequestsXmlVerifications(this);
+    }
+  }
+}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/99febe7f/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/helper/XmlCustomResourceTypeTestCase.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/helper/XmlCustomResourceTypeTestCase.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/helper/XmlCustomResourceTypeTestCase.java
new file mode 100644
index 0000000..29260aa
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/helper/XmlCustomResourceTypeTestCase.java
@@ -0,0 +1,112 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.server.resourcemanager.webapp.helper;
+
+import com.sun.jersey.api.client.WebResource;
+import org.apache.hadoop.http.JettyUtils;
+import org.codehaus.jettison.json.JSONObject;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+import org.w3c.dom.Document;
+import org.w3c.dom.Node;
+import org.xml.sax.InputSource;
+
+import javax.ws.rs.core.MediaType;
+import javax.xml.parsers.DocumentBuilder;
+import javax.xml.parsers.DocumentBuilderFactory;
+import javax.xml.transform.*;
+import javax.xml.transform.dom.DOMSource;
+import javax.xml.transform.stream.StreamResult;
+import java.io.StringReader;
+import java.io.StringWriter;
+import java.util.function.Consumer;
+
+import static org.junit.Assert.assertEquals;
+
+/**
+ * This class hides the implementation details of how to verify the structure of
+ * XML responses. Tests should only provide the path of the
+ * {@link WebResource}, the response from the resource and
+ * the verifier Consumer to
+ * {@link XmlCustomResourceTypeTestCase#verify(Consumer)}. An instance of
+ * {@link JSONObject} will be passed to that consumer to be able to
+ * verify the response.
+ */
+public class XmlCustomResourceTypeTestCase {
+  private static final Logger LOG =
+      LoggerFactory.getLogger(XmlCustomResourceTypeTestCase.class);
+
+  private WebResource path;
+  private BufferedClientResponse response;
+  private Document parsedResponse;
+
+  public XmlCustomResourceTypeTestCase(WebResource path,
+                                       BufferedClientResponse response) {
+    this.path = path;
+    this.response = response;
+  }
+
+  public void verify(Consumer<Document> verifier) {
+    assertEquals(MediaType.APPLICATION_XML + "; " + JettyUtils.UTF_8,
+        response.getType().toString());
+
+    parsedResponse = parseXml(response);
+    logResponse(parsedResponse);
+    verifier.accept(parsedResponse);
+  }
+
+  private Document parseXml(BufferedClientResponse response) {
+    try {
+      String xml = response.getEntity(String.class);
+      DocumentBuilder db =
+          DocumentBuilderFactory.newInstance().newDocumentBuilder();
+      InputSource is = new InputSource();
+      is.setCharacterStream(new StringReader(xml));
+
+      return db.parse(is);
+    } catch (Exception e) {
+      throw new RuntimeException(e);
+    }
+  }
+
+  private void logResponse(Document doc) {
+    String responseStr = response.getEntity(String.class);
+    LOG.info("Raw response from service URL {}: {}", path.toString(),
+        responseStr);
+    LOG.info("Parsed response from service URL {}: {}", path.toString(),
+        toXml(doc));
+  }
+
+  public static String toXml(Node node) {
+    StringWriter writer;
+    try {
+      TransformerFactory tf = TransformerFactory.newInstance();
+      Transformer transformer = tf.newTransformer();
+      transformer.setOutputProperty(OutputKeys.INDENT, "yes");
+      transformer.setOutputProperty(
+          "{http://xml.apache.org/xslt}indent" + "-amount", "2");
+      writer = new StringWriter();
+      transformer.transform(new DOMSource(node), new StreamResult(writer));
+    } catch (TransformerException e) {
+      throw new RuntimeException(e);
+    }
+
+    return writer.getBuffer().toString();
+  }
+}


---------------------------------------------------------------------
To unsubscribe, e-mail: common-commits-unsubscribe@hadoop.apache.org
For additional commands, e-mail: common-commits-help@hadoop.apache.org


[08/50] [abbrv] hadoop git commit: HDDS-198. Create AuditLogger mechanism to be used by OM, SCM and Datanode. Contributed by Dinesh Chitlangia.

Posted by vi...@apache.org.
HDDS-198. Create AuditLogger mechanism to be used by OM, SCM and Datanode.
Contributed by Dinesh Chitlangia.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/c0ef7e76
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/c0ef7e76
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/c0ef7e76

Branch: refs/heads/HDFS-12090
Commit: c0ef7e7680d882e2182f48f033109678a48742ab
Parents: 51654a3
Author: Anu Engineer <ae...@apache.org>
Authored: Tue Jul 3 11:38:14 2018 -0700
Committer: Anu Engineer <ae...@apache.org>
Committed: Tue Jul 3 11:39:50 2018 -0700

----------------------------------------------------------------------
 hadoop-hdds/common/pom.xml                      |  18 +++
 .../apache/hadoop/ozone/audit/AuditAction.java  |  30 +++++
 .../hadoop/ozone/audit/AuditEventStatus.java    |  36 +++++
 .../apache/hadoop/ozone/audit/AuditLogger.java  | 128 ++++++++++++++++++
 .../hadoop/ozone/audit/AuditLoggerType.java     |  37 ++++++
 .../apache/hadoop/ozone/audit/AuditMarker.java  |  38 ++++++
 .../apache/hadoop/ozone/audit/Auditable.java    |  32 +++++
 .../apache/hadoop/ozone/audit/package-info.java | 123 +++++++++++++++++
 .../apache/hadoop/ozone/audit/DummyAction.java  |  51 ++++++++
 .../apache/hadoop/ozone/audit/DummyEntity.java  |  57 ++++++++
 .../ozone/audit/TestOzoneAuditLogger.java       | 131 +++++++++++++++++++
 .../apache/hadoop/ozone/audit/package-info.java |  23 ++++
 .../common/src/test/resources/log4j2.properties |  76 +++++++++++
 .../org/apache/hadoop/ozone/audit/OMAction.java |  51 ++++++++
 .../apache/hadoop/ozone/audit/package-info.java |  22 ++++
 15 files changed, 853 insertions(+)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/c0ef7e76/hadoop-hdds/common/pom.xml
----------------------------------------------------------------------
diff --git a/hadoop-hdds/common/pom.xml b/hadoop-hdds/common/pom.xml
index a8a634c..4068522 100644
--- a/hadoop-hdds/common/pom.xml
+++ b/hadoop-hdds/common/pom.xml
@@ -31,6 +31,8 @@ http://maven.apache.org/xsd/maven-4.0.0.xsd">
   <properties>
     <hadoop.component>hdds</hadoop.component>
     <is.hadoop.component>true</is.hadoop.component>
+    <log4j2.version>2.11.0</log4j2.version>
+    <disruptor.version>3.4.2</disruptor.version>
   </properties>
 
   <dependencies>
@@ -81,6 +83,22 @@ http://maven.apache.org/xsd/maven-4.0.0.xsd">
       <type>test-jar</type>
     </dependency>
 
+    <dependency>
+      <groupId>org.apache.logging.log4j</groupId>
+      <artifactId>log4j-api</artifactId>
+      <version>${log4j2.version}</version>
+    </dependency>
+    <dependency>
+      <groupId>org.apache.logging.log4j</groupId>
+      <artifactId>log4j-core</artifactId>
+      <version>${log4j2.version}</version>
+    </dependency>
+    <dependency>
+      <groupId>com.lmax</groupId>
+      <artifactId>disruptor</artifactId>
+      <version>${disruptor.version}</version>
+    </dependency>
+
   </dependencies>
 
   <build>

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c0ef7e76/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/audit/AuditAction.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/audit/AuditAction.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/audit/AuditAction.java
new file mode 100644
index 0000000..8c1d6f0
--- /dev/null
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/audit/AuditAction.java
@@ -0,0 +1,30 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with this
+ * work for additional information regarding copyright ownership.  The ASF
+ * licenses this file to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations under
+ * the License.
+ */
+
+package org.apache.hadoop.ozone.audit;
+
+/**
+ * Interface to define AuditAction.
+ */
+public interface AuditAction {
+  /**
+   * Implementation must override.
+   * @return String
+   */
+  String getAction();
+}
+

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c0ef7e76/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/audit/AuditEventStatus.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/audit/AuditEventStatus.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/audit/AuditEventStatus.java
new file mode 100644
index 0000000..098ab6b
--- /dev/null
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/audit/AuditEventStatus.java
@@ -0,0 +1,36 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ *  with the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ *  Unless required by applicable law or agreed to in writing, software
+ *  distributed under the License is distributed on an "AS IS" BASIS,
+ *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ *  See the License for the specific language governing permissions and
+ *  limitations under the License.
+ */
+package org.apache.hadoop.ozone.audit;
+
+/**
+ * Enum to define AuditEventStatus values.
+ */
+public enum AuditEventStatus {
+  SUCCESS("SUCCESS"),
+  FAILURE("FAILURE");
+
+  private String status;
+
+  AuditEventStatus(String status){
+    this.status = status;
+  }
+
+  public String getStatus() {
+    return status;
+  }
+}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c0ef7e76/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/audit/AuditLogger.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/audit/AuditLogger.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/audit/AuditLogger.java
new file mode 100644
index 0000000..46ffaab
--- /dev/null
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/audit/AuditLogger.java
@@ -0,0 +1,128 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with this
+ * work for additional information regarding copyright ownership.  The ASF
+ * licenses this file to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations under
+ * the License.
+ */
+
+package org.apache.hadoop.ozone.audit;
+
+import com.google.common.annotations.VisibleForTesting;
+import org.apache.logging.log4j.Level;
+import org.apache.logging.log4j.LogManager;
+import org.apache.logging.log4j.Marker;
+import org.apache.logging.log4j.message.StructuredDataMessage;
+import org.apache.logging.log4j.spi.ExtendedLogger;
+
+import java.util.Map;
+
+/**
+ * Class to define Audit Logger for Ozone.
+ */
+public class AuditLogger {
+
+  private ExtendedLogger logger;
+
+  private static final String SUCCESS = AuditEventStatus.SUCCESS.getStatus();
+  private static final String FAILURE = AuditEventStatus.FAILURE.getStatus();
+  private static final String FQCN = AuditLogger.class.getName();
+  private static final Marker WRITE_MARKER = AuditMarker.WRITE.getMarker();
+  private static final Marker READ_MARKER = AuditMarker.READ.getMarker();
+
+  /**
+   * Parametrized Constructor to initialize logger.
+   * @param type
+   */
+  public AuditLogger(AuditLoggerType type){
+    initializeLogger(type);
+  }
+
+  /**
+   * Initializes the logger with specific type.
+   * @param loggerType specified one of the values from enum AuditLoggerType.
+   */
+  private void initializeLogger(AuditLoggerType loggerType){
+    this.logger = LogManager.getContext(false).getLogger(loggerType.getType());
+  }
+
+  @VisibleForTesting
+  public ExtendedLogger getLogger() {
+    return logger;
+  }
+
+  public void logWriteSuccess(AuditAction type, Map<String, String> data) {
+    logWriteSuccess(type, data, Level.INFO);
+  }
+
+  public void logWriteSuccess(AuditAction type, Map<String, String> data, Level
+      level) {
+    StructuredDataMessage msg = new StructuredDataMessage("", SUCCESS,
+        type.getAction(), data);
+    this.logger.logIfEnabled(FQCN, level, WRITE_MARKER, msg, null);
+  }
+
+
+  public void logWriteFailure(AuditAction type, Map<String, String> data) {
+    logWriteFailure(type, data, Level.INFO, null);
+  }
+
+  public void logWriteFailure(AuditAction type, Map<String, String> data, Level
+      level) {
+    logWriteFailure(type, data, level, null);
+  }
+
+  public void logWriteFailure(AuditAction type, Map<String, String> data,
+      Throwable exception) {
+    logWriteFailure(type, data, Level.INFO, exception);
+  }
+
+  public void logWriteFailure(AuditAction type, Map<String, String> data, Level
+      level, Throwable exception) {
+    StructuredDataMessage msg = new StructuredDataMessage("", FAILURE,
+        type.getAction(), data);
+    this.logger.logIfEnabled(FQCN, level, WRITE_MARKER, msg, exception);
+  }
+
+  public void logReadSuccess(AuditAction type, Map<String, String> data) {
+    logReadSuccess(type, data, Level.INFO);
+  }
+
+  public void logReadSuccess(AuditAction type, Map<String, String> data, Level
+      level) {
+    StructuredDataMessage msg = new StructuredDataMessage("", SUCCESS,
+        type.getAction(), data);
+    this.logger.logIfEnabled(FQCN, level, READ_MARKER, msg, null);
+  }
+
+  public void logReadFailure(AuditAction type, Map<String, String> data) {
+    logReadFailure(type, data, Level.INFO, null);
+  }
+
+  public void logReadFailure(AuditAction type, Map<String, String> data, Level
+      level) {
+    logReadFailure(type, data, level, null);
+  }
+
+  public void logReadFailure(AuditAction type, Map<String, String> data,
+      Throwable exception) {
+    logReadFailure(type, data, Level.INFO, exception);
+  }
+
+  public void logReadFailure(AuditAction type, Map<String, String> data, Level
+      level, Throwable exception) {
+    StructuredDataMessage msg = new StructuredDataMessage("", FAILURE,
+        type.getAction(), data);
+    this.logger.logIfEnabled(FQCN, level, READ_MARKER, msg, exception);
+  }
+
+}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c0ef7e76/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/audit/AuditLoggerType.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/audit/AuditLoggerType.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/audit/AuditLoggerType.java
new file mode 100644
index 0000000..18241c7
--- /dev/null
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/audit/AuditLoggerType.java
@@ -0,0 +1,37 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with this
+ * work for additional information regarding copyright ownership.  The ASF
+ * licenses this file to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations under
+ * the License.
+ */
+
+package org.apache.hadoop.ozone.audit;
+
+/**
+ * Enumeration for defining types of Audit Loggers in Ozone.
+ */
+public enum AuditLoggerType {
+  DNLOGGER("DNAudit"),
+  OMLOGGER("OMAudit"),
+  SCMLOGGER("SCMAudit");
+
+  private String type;
+
+  public String getType() {
+    return type;
+  }
+
+  AuditLoggerType(String type){
+    this.type = type;
+  }
+}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c0ef7e76/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/audit/AuditMarker.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/audit/AuditMarker.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/audit/AuditMarker.java
new file mode 100644
index 0000000..505b958
--- /dev/null
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/audit/AuditMarker.java
@@ -0,0 +1,38 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with this
+ * work for additional information regarding copyright ownership.  The ASF
+ * licenses this file to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations under
+ * the License.
+ */
+package org.apache.hadoop.ozone.audit;
+
+import org.apache.logging.log4j.Marker;
+import org.apache.logging.log4j.MarkerManager;
+
+/**
+ * Defines audit marker types.
+ */
+public enum AuditMarker {
+  WRITE(MarkerManager.getMarker("WRITE")),
+  READ(MarkerManager.getMarker("READ"));
+
+  private Marker marker;
+
+  AuditMarker(Marker marker){
+    this.marker = marker;
+  }
+
+  public Marker getMarker(){
+    return marker;
+  }
+}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c0ef7e76/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/audit/Auditable.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/audit/Auditable.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/audit/Auditable.java
new file mode 100644
index 0000000..d388bca
--- /dev/null
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/audit/Auditable.java
@@ -0,0 +1,32 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with this
+ * work for additional information regarding copyright ownership.  The ASF
+ * licenses this file to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations under
+ * the License.
+ */
+
+package org.apache.hadoop.ozone.audit;
+
+import java.util.Map;
+
+/**
+ * Interface to make an entity auditable.
+ */
+public interface Auditable {
+  /**
+   * Must override in implementation.
+   * @return Map<String, String> with values to be logged in audit.
+   */
+  Map<String, String> toAuditMap();
+}
+

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c0ef7e76/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/audit/package-info.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/audit/package-info.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/audit/package-info.java
new file mode 100644
index 0000000..3743fdd
--- /dev/null
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/audit/package-info.java
@@ -0,0 +1,123 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with this
+ * work for additional information regarding copyright ownership.  The ASF
+ * licenses this file to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations under
+ * the License.
+ */
+package org.apache.hadoop.ozone.audit;
+/**
+ ******************************************************************************
+ *                              Important
+ * 1. Any changes to classes in this package can render the logging
+ * framework broken.
+ * 2. The logger framework has been designed keeping in mind future
+ * plans to build a log parser.
+ * 3. Please exercise great caution when attempting changes in this package.
+ ******************************************************************************
+ *
+ *
+ * This package lays the foundation for Audit logging in Ozone.
+ * AuditLogging in Ozone has been built using log4j2 which brings in new
+ * features that facilitate turning on/off selective audit events by using
+ * MarkerFilter, checking for change in logging configuration periodically
+ * and reloading the changes, use of disruptor framework for improved
+ * Asynchronous logging.
+ *
+ * The log4j2 configurations can be specified in XML, YAML, JSON and
+ * Properties file. For Ozone, we are using the Properties file due to sheer
+ * simplicity, readability and ease of modification.
+ *
+ * log4j2 configuration file can be passed to startup command with option
+ * -Dlog4j.configurationFile unlike -Dlog4j.configuration in log4j 1.x
+ *
+ ******************************************************************************
+ *          Understanding the Audit Logging framework in Ozone.
+ ******************************************************************************
+ * **** Auditable ***
+ * This is an interface to mark an entity as auditable.
+ * This interface must be implemented by entities requiring audit logging.
+ * For example - KSMVolumeArgs, KSMBucketArgs.
+ * The implementing class must override toAuditMap() to return an
+ * instance of Map<Key, Value> where both Key and Value are String.
+ *
+ * Key: must not contain any spaces. If the key is multi word then use
+ * camel case.
+ * Value: if it is a collection/array, then it must be converted to a comma
+ * delimited string
+ *
+ * *** AuditAction ***
+ * This is an interface to define the various type of actions to be audited.
+ * To ensure separation of concern, for each sub-component you must create an
+ * Enum to implement AuditAction.
+ * Structure of Enum can be referred from the test class DummyAction.
+ *
+ * For starters, we expect following 3 implementations of AuditAction:
+ * OMAction - to define action types for Ozone Manager
+ * SCMAction - to define action types for Storage Container manager
+ * DNAction - to define action types for Datanode
+ *
+ * *** AuditEventStatus ***
+ * Enum to define Audit event status like success and failure.
+ * This is used in AuditLogger.logXXX() methods.
+ *
+ *  * *** AuditLogger ***
+ * This is where the audit logging magic unfolds.
+ * The class has 2 Markers defined - READ and WRITE.
+ * These markers are used to tag when logging events.
+ *
+ * *** AuditLoggerType ***
+ * Enum to define the various AuditLoggers in Ozone
+ *
+ * *** AuditMarker ***
+ * Enum to define various Audit Markers used in AuditLogging.
+ *
+ * ****************************************************************************
+ *                              Usage
+ * ****************************************************************************
+ * Using the AuditLogger to log events:
+ * 1. Get a logger by specifying the appropriate logger type
+ * Example: ExtendedLogger AUDIT = new AuditLogger(AuditLoggerType.OMLogger)
+ *
+ * 2. Log Read/Write and Success/Failure event as needed.
+ * Example
+ * AUDIT.logWriteSuccess(AuditAction type, Map<String, String> data, Level
+ * level)
+ *
+ * If logging is done without specifying Level, then Level implicitly
+ * defaults to INFO
+ * AUDIT.logWriteSuccess(AuditAction type, Map<String, String> data)
+ *
+ * See sample invocations in src/test in the following class:
+ * org.apache.hadoop.ozone.audit.TestOzoneAuditLogger
+ *
+ * ****************************************************************************
+ *                      Defining new Logger types
+ * ****************************************************************************
+ * New Logger type can be added with following steps:
+ * 1. Update AuditLoggerType to add the new type
+ * 2. Create new Enum by implementing AuditAction if needed
+ * 3. Ensure the required entity implements Auditable
+ *
+ * ****************************************************************************
+ *                      Defining new Marker types
+ * ****************************************************************************
+ * New Markers can be configured as follows:
+ * 1. Define new markers in AuditMarker
+ * 2. Get the Marker in AuditLogger for use in the log methods, example:
+ * private static final Marker WRITE_MARKER = AuditMarker.WRITE.getMarker();
+ * 3. Define log methods in AuditLogger to use the new Marker type
+ * 4. Call these new methods from the required classes to audit with these
+ * new markers
+ * 5. The marker based filtering can be configured in log4j2 configurations
+ * Refer log4j2.properties in src/test/resources for a sample.
+ */

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c0ef7e76/hadoop-hdds/common/src/test/java/org/apache/hadoop/ozone/audit/DummyAction.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/common/src/test/java/org/apache/hadoop/ozone/audit/DummyAction.java b/hadoop-hdds/common/src/test/java/org/apache/hadoop/ozone/audit/DummyAction.java
new file mode 100644
index 0000000..6044c0a
--- /dev/null
+++ b/hadoop-hdds/common/src/test/java/org/apache/hadoop/ozone/audit/DummyAction.java
@@ -0,0 +1,51 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with this
+ * work for additional information regarding copyright ownership.  The ASF
+ * licenses this file to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations under
+ * the License.
+ */
+
+package org.apache.hadoop.ozone.audit;
+
+/**
+ * Enum to define Dummy AuditAction Type for test.
+ */
+public enum DummyAction implements AuditAction {
+
+  CREATE_VOLUME("CREATE_VOLUME"),
+  CREATE_BUCKET("CREATE_BUCKET"),
+  CREATE_KEY("CREATE_KEY"),
+  READ_VOLUME("READ_VOLUME"),
+  READ_BUCKET("READ_BUCKET"),
+  READ_KEY("READ_BUCKET"),
+  UPDATE_VOLUME("UPDATE_VOLUME"),
+  UPDATE_BUCKET("UPDATE_BUCKET"),
+  UPDATE_KEY("UPDATE_KEY"),
+  DELETE_VOLUME("DELETE_VOLUME"),
+  DELETE_BUCKET("DELETE_BUCKET"),
+  DELETE_KEY("DELETE_KEY"),
+  SET_OWNER("SET_OWNER"),
+  SET_QUOTA("SET_QUOTA");
+
+  private String action;
+
+  DummyAction(String action) {
+    this.action = action;
+  }
+
+  @Override
+  public String getAction() {
+    return this.action;
+  }
+
+}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c0ef7e76/hadoop-hdds/common/src/test/java/org/apache/hadoop/ozone/audit/DummyEntity.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/common/src/test/java/org/apache/hadoop/ozone/audit/DummyEntity.java b/hadoop-hdds/common/src/test/java/org/apache/hadoop/ozone/audit/DummyEntity.java
new file mode 100644
index 0000000..0c2d98f
--- /dev/null
+++ b/hadoop-hdds/common/src/test/java/org/apache/hadoop/ozone/audit/DummyEntity.java
@@ -0,0 +1,57 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with this
+ * work for additional information regarding copyright ownership.  The ASF
+ * licenses this file to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations under
+ * the License.
+ */
+package org.apache.hadoop.ozone.audit;
+
+import java.util.HashMap;
+import java.util.Map;
+
+/**
+ * DummyEntity that implements Auditable for test purpose.
+ */
+public class DummyEntity implements Auditable {
+
+  private String key1;
+  private String key2;
+
+  public DummyEntity(){
+    this.key1 = "value1";
+    this.key2 = "value2";
+  }
+  public String getKey1() {
+    return key1;
+  }
+
+  public void setKey1(String key1) {
+    this.key1 = key1;
+  }
+
+  public String getKey2() {
+    return key2;
+  }
+
+  public void setKey2(String key2) {
+    this.key2 = key2;
+  }
+
+  @Override
+  public Map<String, String> toAuditMap() {
+    Map<String, String> auditMap = new HashMap<>();
+    auditMap.put("key1", this.key1);
+    auditMap.put("key2", this.key2);
+    return auditMap;
+  }
+}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c0ef7e76/hadoop-hdds/common/src/test/java/org/apache/hadoop/ozone/audit/TestOzoneAuditLogger.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/common/src/test/java/org/apache/hadoop/ozone/audit/TestOzoneAuditLogger.java b/hadoop-hdds/common/src/test/java/org/apache/hadoop/ozone/audit/TestOzoneAuditLogger.java
new file mode 100644
index 0000000..d3cc9e4
--- /dev/null
+++ b/hadoop-hdds/common/src/test/java/org/apache/hadoop/ozone/audit/TestOzoneAuditLogger.java
@@ -0,0 +1,131 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ *  with the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ *  Unless required by applicable law or agreed to in writing, software
+ *  distributed under the License is distributed on an "AS IS" BASIS,
+ *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ *  See the License for the specific language governing permissions and
+ *  limitations under the License.
+ */
+package org.apache.hadoop.ozone.audit;
+
+import org.apache.commons.io.FileUtils;
+import org.apache.logging.log4j.Level;
+import org.junit.AfterClass;
+import org.junit.BeforeClass;
+import org.junit.Test;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.io.File;
+import java.io.IOException;
+import java.util.List;
+
+import static org.junit.Assert.assertTrue;
+
+/**
+ * Test Ozone Audit Logger.
+ */
+public class TestOzoneAuditLogger {
+
+  private static final Logger LOG = LoggerFactory.getLogger
+      (TestOzoneAuditLogger.class.getName());
+  private static AuditLogger AUDIT = new AuditLogger(AuditLoggerType.OMLOGGER);
+  public DummyEntity auditableObj = new DummyEntity();
+
+  @BeforeClass
+  public static void setUp(){
+    System.setProperty("log4j.configurationFile", "log4j2.properties");
+  }
+
+  @AfterClass
+  public static void tearDown() {
+      File file = new File("audit.log");
+      if (FileUtils.deleteQuietly(file)) {
+        LOG.info(file.getName() +
+            " has been deleted as all tests have completed.");
+      } else {
+        LOG.info("audit.log could not be deleted.");
+      }
+  }
+
+  /**
+   * Ensures WriteSuccess events are logged @ INFO and above.
+   */
+  @Test
+  public void logInfoWriteSuccess() throws IOException {
+    AUDIT.logWriteSuccess(DummyAction.CREATE_VOLUME, auditableObj.toAuditMap(), Level.INFO);
+    String expected = "[INFO ] OMAudit - CREATE_VOLUME [ key1=\"value1\" " +
+        "key2=\"value2\"] SUCCESS";
+    verifyLog(expected);
+  }
+
+  /**
+   * Test to verify default log level is INFO
+   */
+  @Test
+  public void verifyDefaultLogLevel() throws IOException {
+    AUDIT.logWriteSuccess(DummyAction.CREATE_VOLUME, auditableObj.toAuditMap());
+    String expected = "[INFO ] OMAudit - CREATE_VOLUME [ key1=\"value1\" " +
+        "key2=\"value2\"] SUCCESS";
+    verifyLog(expected);
+  }
+
+  /**
+   * Test to verify WriteFailure events are logged as ERROR.
+   */
+  @Test
+  public void logErrorWriteFailure() throws IOException {
+    AUDIT.logWriteFailure(DummyAction.CREATE_VOLUME, auditableObj.toAuditMap(), Level.ERROR);
+    String expected = "[ERROR] OMAudit - CREATE_VOLUME [ key1=\"value1\" " +
+        "key2=\"value2\"] FAILURE";
+    verifyLog(expected);
+  }
+
+  /**
+   * Test to verify no READ event is logged.
+   */
+  @Test
+  public void notLogReadEvents() throws IOException {
+    AUDIT.logReadSuccess(DummyAction.READ_VOLUME, auditableObj.toAuditMap(), Level.INFO);
+    AUDIT.logReadFailure(DummyAction.READ_VOLUME, auditableObj.toAuditMap(), Level.INFO);
+    AUDIT.logReadFailure(DummyAction.READ_VOLUME, auditableObj.toAuditMap(), Level.ERROR);
+    AUDIT.logReadFailure(DummyAction.READ_VOLUME, auditableObj.toAuditMap(), Level.ERROR,
+        new Exception("test"));
+    verifyLog(null);
+  }
+
+  /**
+   * Test to ensure DEBUG level messages are not logged when INFO is enabled.
+   */
+  @Test
+  public void notLogDebugEvents() throws IOException {
+    AUDIT.logWriteSuccess(DummyAction.CREATE_VOLUME, auditableObj.toAuditMap(), Level.DEBUG);
+    AUDIT.logReadSuccess(DummyAction.READ_VOLUME, auditableObj.toAuditMap(), Level.DEBUG);
+    verifyLog(null);
+  }
+
+  public void verifyLog(String expected) throws IOException {
+      File file = new File("audit.log");
+      List<String> lines = FileUtils.readLines(file, (String)null);
+      if(expected == null){
+        // When no log entry is expected, the log file must be empty
+        assertTrue(lines.size() == 0);
+      } else {
+        // When log entry is expected, the log file will contain one line and
+        // that must be equal to the expected string
+        assertTrue(expected.equalsIgnoreCase(lines.get(0)));
+        //empty the file
+        lines.remove(0);
+        FileUtils.writeLines(file, lines, false);
+      }
+  }
+}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c0ef7e76/hadoop-hdds/common/src/test/java/org/apache/hadoop/ozone/audit/package-info.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/common/src/test/java/org/apache/hadoop/ozone/audit/package-info.java b/hadoop-hdds/common/src/test/java/org/apache/hadoop/ozone/audit/package-info.java
new file mode 100644
index 0000000..1222ad0
--- /dev/null
+++ b/hadoop-hdds/common/src/test/java/org/apache/hadoop/ozone/audit/package-info.java
@@ -0,0 +1,23 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ *  with the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ *  Unless required by applicable law or agreed to in writing, software
+ *  distributed under the License is distributed on an "AS IS" BASIS,
+ *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ *  See the License for the specific language governing permissions and
+ *  limitations under the License.
+ */
+package org.apache.hadoop.ozone.audit;
+/**
+ * Unit tests of Ozone Audit Logger.
+ * For test purpose, the log4j2 configuration is loaded from file at:
+ * src/test/resources/log4j2.properties
+ */

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c0ef7e76/hadoop-hdds/common/src/test/resources/log4j2.properties
----------------------------------------------------------------------
diff --git a/hadoop-hdds/common/src/test/resources/log4j2.properties b/hadoop-hdds/common/src/test/resources/log4j2.properties
new file mode 100644
index 0000000..d60df18
--- /dev/null
+++ b/hadoop-hdds/common/src/test/resources/log4j2.properties
@@ -0,0 +1,76 @@
+#
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements.  See the NOTICE file distributed with this
+# work for additional information regarding copyright ownership.  The ASF
+# licenses this file to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+# <p>
+# http://www.apache.org/licenses/LICENSE-2.0
+# <p>
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations under
+# the License.
+#
+name=PropertiesConfig
+
+# Checks for config change periodically and reloads
+monitorInterval=5
+
+filter=read, write
+# filter.read.onMatch = DENY avoids logging all READ events
+# filter.read.onMatch = ACCEPT permits logging all READ events
+# The above two settings ignore the log levels in configuration
+# filter.read.onMatch = NEUTRAL permits logging of only those READ events
+# which are attempted at log level equal or greater than log level specified
+# in the configuration
+filter.read.type = MarkerFilter
+filter.read.marker = READ
+filter.read.onMatch = DENY
+filter.read.onMismatch = NEUTRAL
+
+# filter.write.onMatch = DENY avoids logging all WRITE events
+# filter.write.onMatch = ACCEPT permits logging all WRITE events
+# The above two settings ignore the log levels in configuration
+# filter.write.onMatch = NEUTRAL permits logging of only those WRITE events
+# which are attempted at log level equal or greater than log level specified
+# in the configuration
+filter.write.type = MarkerFilter
+filter.write.marker = WRITE
+filter.write.onMatch = NEUTRAL
+filter.write.onMismatch = NEUTRAL
+
+# Log Levels are organized from most specific to least:
+# OFF (most specific, no logging)
+# FATAL (most specific, little data)
+# ERROR
+# WARN
+# INFO
+# DEBUG
+# TRACE (least specific, a lot of data)
+# ALL (least specific, all data)
+
+appenders = console, audit
+appender.console.type = Console
+appender.console.name = STDOUT
+appender.console.layout.type = PatternLayout
+appender.console.layout.pattern = [%-5level] %c{1} - %msg%n
+
+appender.audit.type = File
+appender.audit.name = AUDITLOG
+appender.audit.fileName=audit.log
+appender.audit.layout.type=PatternLayout
+appender.audit.layout.pattern= [%-5level] %c{1} - %msg%n
+
+loggers=audit
+logger.audit.type=AsyncLogger
+logger.audit.name=OMAudit
+logger.audit.level = INFO
+logger.audit.appenderRefs = audit
+logger.audit.appenderRef.file.ref = AUDITLOG
+
+rootLogger.level = INFO
+rootLogger.appenderRefs = stdout
+rootLogger.appenderRef.stdout.ref = STDOUT

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c0ef7e76/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/audit/OMAction.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/audit/OMAction.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/audit/OMAction.java
new file mode 100644
index 0000000..d780ea2
--- /dev/null
+++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/audit/OMAction.java
@@ -0,0 +1,51 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with this
+ * work for additional information regarding copyright ownership.  The ASF
+ * licenses this file to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations under
+ * the License.
+ */
+
+package org.apache.hadoop.ozone.audit;
+
+/**
+ * Enum to define OM Action types for Audit.
+ */
+public enum OMAction implements AuditAction {
+
+  CREATE_VOLUME("CREATE_VOLUME"),
+  CREATE_BUCKET("CREATE_BUCKET"),
+  CREATE_KEY("CREATE_KEY"),
+  READ_VOLUME("READ_VOLUME"),
+  READ_BUCKET("READ_BUCKET"),
+  READ_KEY("READ_BUCKET"),
+  UPDATE_VOLUME("UPDATE_VOLUME"),
+  UPDATE_BUCKET("UPDATE_BUCKET"),
+  UPDATE_KEY("UPDATE_KEY"),
+  DELETE_VOLUME("DELETE_VOLUME"),
+  DELETE_BUCKET("DELETE_BUCKET"),
+  DELETE_KEY("DELETE_KEY"),
+  SET_OWNER("SET_OWNER"),
+  SET_QUOTA("SET_QUOTA");
+
+  private String action;
+
+  OMAction(String action) {
+    this.action = action;
+  }
+
+  @Override
+  public String getAction() {
+    return this.action;
+  }
+
+}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c0ef7e76/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/audit/package-info.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/audit/package-info.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/audit/package-info.java
new file mode 100644
index 0000000..0f88790
--- /dev/null
+++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/audit/package-info.java
@@ -0,0 +1,22 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with this
+ * work for additional information regarding copyright ownership.  The ASF
+ * licenses this file to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations under
+ * the License.
+ */
+package org.apache.hadoop.ozone.audit;
+/**
+ * This package defines OMAction - an implementation of AuditAction
+ * OMAction defines audit action types for various actions that will be
+ * audited in OzoneManager.
+ */


---------------------------------------------------------------------
To unsubscribe, e-mail: common-commits-unsubscribe@hadoop.apache.org
For additional commands, e-mail: common-commits-help@hadoop.apache.org