Posted to common-commits@hadoop.apache.org by sh...@apache.org on 2018/07/11 23:10:02 UTC

[02/56] [abbrv] hadoop git commit: HDDS-167. Rename KeySpaceManager to OzoneManager. Contributed by Arpit Agarwal.

http://git-wip-us.apache.org/repos/asf/hadoop/blob/061b1685/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/VolumeManagerImpl.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/VolumeManagerImpl.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/VolumeManagerImpl.java
new file mode 100644
index 0000000..e50145d
--- /dev/null
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/VolumeManagerImpl.java
@@ -0,0 +1,390 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with this
+ * work for additional information regarding copyright ownership.  The ASF
+ * licenses this file to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations under
+ * the License.
+ */
+package org.apache.hadoop.ozone.om;
+
+import com.google.common.base.Preconditions;
+import org.apache.hadoop.ozone.om.helpers.OmVolumeArgs;
+import org.apache.hadoop.hdds.conf.OzoneConfiguration;
+import org.apache.hadoop.ozone.om.exceptions.OMException;
+import org.apache.hadoop.ozone.protocol.proto
+    .OzoneManagerProtocolProtos.OzoneAclInfo;
+import org.apache.hadoop.ozone.protocol.proto
+    .OzoneManagerProtocolProtos.VolumeList;
+import org.apache.hadoop.ozone.protocol.proto
+    .OzoneManagerProtocolProtos.VolumeInfo;
+import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
+import org.apache.hadoop.util.Time;
+import org.apache.hadoop.utils.BatchOperation;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.io.IOException;
+import java.util.LinkedList;
+import java.util.List;
+import java.util.Map;
+
+import static org.apache.hadoop.ozone.om.OMConfigKeys
+    .OZONE_OM_USER_MAX_VOLUME_DEFAULT;
+import static org.apache.hadoop.ozone.om.OMConfigKeys
+    .OZONE_OM_USER_MAX_VOLUME;
+import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes;
+
+/**
+ * OM volume management code.
+ */
+public class VolumeManagerImpl implements VolumeManager {
+  private static final Logger LOG =
+      LoggerFactory.getLogger(VolumeManagerImpl.class);
+
+  private final OMMetadataManager metadataManager;
+  private final int maxUserVolumeCount;
+
+  /**
+   * Constructor.
+   * @param conf - Ozone configuration.
+   * @throws IOException
+   */
+  public VolumeManagerImpl(OMMetadataManager metadataManager,
+      OzoneConfiguration conf) throws IOException {
+    this.metadataManager = metadataManager;
+    this.maxUserVolumeCount = conf.getInt(OZONE_OM_USER_MAX_VOLUME,
+        OZONE_OM_USER_MAX_VOLUME_DEFAULT);
+  }
+
+  // Helpers to add and delete volume from user list
+  private void addVolumeToOwnerList(String volume, String owner,
+      BatchOperation batchOperation) throws IOException {
+    // Get the volume list
+    byte[] dbUserKey = metadataManager.getUserKey(owner);
+    byte[] volumeList  = metadataManager.get(dbUserKey);
+    List<String> prevVolList = new LinkedList<>();
+    if (volumeList != null) {
+      VolumeList vlist = VolumeList.parseFrom(volumeList);
+      prevVolList.addAll(vlist.getVolumeNamesList());
+    }
+
+    // Check the volume count
+    if (prevVolList.size() >= maxUserVolumeCount) {
+      LOG.debug("Too many volumes for user:{}", owner);
+      throw new OMException(ResultCodes.FAILED_TOO_MANY_USER_VOLUMES);
+    }
+
+    // Add the new volume to the list
+    prevVolList.add(volume);
+    VolumeList newVolList = VolumeList.newBuilder()
+        .addAllVolumeNames(prevVolList).build();
+    batchOperation.put(dbUserKey, newVolList.toByteArray());
+  }
+
+  private void delVolumeFromOwnerList(String volume, String owner,
+                                      BatchOperation batchOperation)
+      throws IOException {
+    // Get the volume list
+    byte[] dbUserKey = metadataManager.getUserKey(owner);
+    byte[] volumeList  = metadataManager.get(dbUserKey);
+    List<String> prevVolList = new LinkedList<>();
+    if (volumeList != null) {
+      VolumeList vlist = VolumeList.parseFrom(volumeList);
+      prevVolList.addAll(vlist.getVolumeNamesList());
+    } else {
+      LOG.debug("volume:{} not found for user:{}");
+      throw new OMException(ResultCodes.FAILED_USER_NOT_FOUND);
+    }
+
+    // Remove the volume from the list
+    prevVolList.remove(volume);
+    if (prevVolList.size() == 0) {
+      batchOperation.delete(dbUserKey);
+    } else {
+      VolumeList newVolList = VolumeList.newBuilder()
+          .addAllVolumeNames(prevVolList).build();
+      batchOperation.put(dbUserKey, newVolList.toByteArray());
+    }
+  }
+
+  /**
+   * Creates a volume.
+   * @param args - OmVolumeArgs.
+   */
+  @Override
+  public void createVolume(OmVolumeArgs args) throws IOException {
+    Preconditions.checkNotNull(args);
+    metadataManager.writeLock().lock();
+    try {
+      byte[] dbVolumeKey = metadataManager.getVolumeKey(args.getVolume());
+      byte[] volumeInfo = metadataManager.get(dbVolumeKey);
+
+      // Check if the volume already exists
+      if (volumeInfo != null) {
+        LOG.debug("volume:{} already exists", args.getVolume());
+        throw new OMException(ResultCodes.FAILED_VOLUME_ALREADY_EXISTS);
+      }
+
+      BatchOperation batch = new BatchOperation();
+      // Write the vol info
+      List<HddsProtos.KeyValue> metadataList = new LinkedList<>();
+      for (Map.Entry<String, String> entry : args.getKeyValueMap().entrySet()) {
+        metadataList.add(HddsProtos.KeyValue.newBuilder()
+            .setKey(entry.getKey()).setValue(entry.getValue()).build());
+      }
+      List<OzoneAclInfo> aclList = args.getAclMap().ozoneAclGetProtobuf();
+
+      VolumeInfo newVolumeInfo = VolumeInfo.newBuilder()
+          .setAdminName(args.getAdminName())
+          .setOwnerName(args.getOwnerName())
+          .setVolume(args.getVolume())
+          .setQuotaInBytes(args.getQuotaInBytes())
+          .addAllMetadata(metadataList)
+          .addAllVolumeAcls(aclList)
+          .setCreationTime(Time.now())
+          .build();
+      batch.put(dbVolumeKey, newVolumeInfo.toByteArray());
+
+      // Add volume to user list
+      addVolumeToOwnerList(args.getVolume(), args.getOwnerName(), batch);
+      metadataManager.writeBatch(batch);
+      LOG.debug("created volume:{} user:{}", args.getVolume(),
+          args.getOwnerName());
+    } catch (IOException ex) {
+      if (!(ex instanceof OMException)) {
+        LOG.error("Volume creation failed for user:{} volume:{}",
+            args.getOwnerName(), args.getVolume(), ex);
+      }
+      throw ex;
+    } finally {
+      metadataManager.writeLock().unlock();
+    }
+  }
+
+  /**
+   * Changes the owner of a volume.
+   *
+   * @param volume - Name of the volume.
+   * @param owner - Name of the owner.
+   * @throws IOException
+   */
+  @Override
+  public void setOwner(String volume, String owner) throws IOException {
+    Preconditions.checkNotNull(volume);
+    Preconditions.checkNotNull(owner);
+    metadataManager.writeLock().lock();
+    try {
+      byte[] dbVolumeKey = metadataManager.getVolumeKey(volume);
+      byte[] volInfo = metadataManager.get(dbVolumeKey);
+      if (volInfo == null) {
+        LOG.debug("Changing volume ownership failed for user:{} volume:{}",
+            owner, volume);
+        throw new OMException(ResultCodes.FAILED_VOLUME_NOT_FOUND);
+      }
+
+      VolumeInfo volumeInfo = VolumeInfo.parseFrom(volInfo);
+      OmVolumeArgs volumeArgs = OmVolumeArgs.getFromProtobuf(volumeInfo);
+      Preconditions.checkState(volume.equals(volumeInfo.getVolume()));
+
+      BatchOperation batch = new BatchOperation();
+      delVolumeFromOwnerList(volume, volumeArgs.getOwnerName(), batch);
+      addVolumeToOwnerList(volume, owner, batch);
+
+      OmVolumeArgs newVolumeArgs =
+          OmVolumeArgs.newBuilder().setVolume(volumeArgs.getVolume())
+              .setAdminName(volumeArgs.getAdminName())
+              .setOwnerName(owner)
+              .setQuotaInBytes(volumeArgs.getQuotaInBytes())
+              .setCreationTime(volumeArgs.getCreationTime())
+              .build();
+
+      VolumeInfo newVolumeInfo = newVolumeArgs.getProtobuf();
+      batch.put(dbVolumeKey, newVolumeInfo.toByteArray());
+
+      metadataManager.writeBatch(batch);
+    } catch (IOException ex) {
+      if (!(ex instanceof OMException)) {
+        LOG.error("Changing volume ownership failed for user:{} volume:{}",
+            owner, volume, ex);
+      }
+      throw ex;
+    } finally {
+      metadataManager.writeLock().unlock();
+    }
+  }
+
+  /**
+   * Changes the Quota on a volume.
+   *
+   * @param volume - Name of the volume.
+   * @param quota - Quota in bytes.
+   * @throws IOException
+   */
+  public void setQuota(String volume, long quota) throws IOException {
+    Preconditions.checkNotNull(volume);
+    metadataManager.writeLock().lock();
+    try {
+      byte[] dbVolumeKey = metadataManager.getVolumeKey(volume);
+      byte[] volInfo = metadataManager.get(dbVolumeKey);
+      if (volInfo == null) {
+        LOG.debug("volume:{} does not exist", volume);
+        throw new OMException(ResultCodes.FAILED_VOLUME_NOT_FOUND);
+      }
+
+      VolumeInfo volumeInfo = VolumeInfo.parseFrom(volInfo);
+      OmVolumeArgs volumeArgs = OmVolumeArgs.getFromProtobuf(volumeInfo);
+      Preconditions.checkState(volume.equals(volumeInfo.getVolume()));
+
+      OmVolumeArgs newVolumeArgs =
+          OmVolumeArgs.newBuilder()
+              .setVolume(volumeArgs.getVolume())
+              .setAdminName(volumeArgs.getAdminName())
+              .setOwnerName(volumeArgs.getOwnerName())
+              .setQuotaInBytes(quota)
+              .setCreationTime(volumeArgs.getCreationTime()).build();
+
+      VolumeInfo newVolumeInfo = newVolumeArgs.getProtobuf();
+      metadataManager.put(dbVolumeKey, newVolumeInfo.toByteArray());
+    } catch (IOException ex) {
+      if (!(ex instanceof OMException)) {
+        LOG.error("Changing volume quota failed for volume:{} quota:{}", volume,
+            quota, ex);
+      }
+      throw ex;
+    } finally {
+      metadataManager.writeLock().unlock();
+    }
+  }
+
+  /**
+   * Gets the volume information.
+   * @param volume - Volume name.
+   * @return OmVolumeArgs for the volume, or an exception if it does not exist.
+   * @throws IOException
+   */
+  public OmVolumeArgs getVolumeInfo(String volume) throws IOException {
+    Preconditions.checkNotNull(volume);
+    metadataManager.readLock().lock();
+    try {
+      byte[] dbVolumeKey = metadataManager.getVolumeKey(volume);
+      byte[] volInfo = metadataManager.get(dbVolumeKey);
+      if (volInfo == null) {
+        LOG.debug("volume:{} does not exist", volume);
+        throw new OMException(ResultCodes.FAILED_VOLUME_NOT_FOUND);
+      }
+
+      VolumeInfo volumeInfo = VolumeInfo.parseFrom(volInfo);
+      OmVolumeArgs volumeArgs = OmVolumeArgs.getFromProtobuf(volumeInfo);
+      Preconditions.checkState(volume.equals(volumeInfo.getVolume()));
+      return volumeArgs;
+    } catch (IOException ex) {
+      if (!(ex instanceof OMException)) {
+        LOG.warn("Info volume failed for volume:{}", volume, ex);
+      }
+      throw ex;
+    } finally {
+      metadataManager.readLock().unlock();
+    }
+  }
+
+  /**
+   * Deletes an existing empty volume.
+   *
+   * @param volume - Name of the volume.
+   * @throws IOException
+   */
+  @Override
+  public void deleteVolume(String volume) throws IOException {
+    Preconditions.checkNotNull(volume);
+    metadataManager.writeLock().lock();
+    try {
+      BatchOperation batch = new BatchOperation();
+      byte[] dbVolumeKey = metadataManager.getVolumeKey(volume);
+      byte[] volInfo = metadataManager.get(dbVolumeKey);
+      if (volInfo == null) {
+        LOG.debug("volume:{} does not exist", volume);
+        throw new OMException(ResultCodes.FAILED_VOLUME_NOT_FOUND);
+      }
+
+      if (!metadataManager.isVolumeEmpty(volume)) {
+        LOG.debug("volume:{} is not empty", volume);
+        throw new OMException(ResultCodes.FAILED_VOLUME_NOT_EMPTY);
+      }
+
+      VolumeInfo volumeInfo = VolumeInfo.parseFrom(volInfo);
+      Preconditions.checkState(volume.equals(volumeInfo.getVolume()));
+      // delete the volume from the owner list
+      // as well as delete the volume entry
+      delVolumeFromOwnerList(volume, volumeInfo.getOwnerName(), batch);
+      batch.delete(dbVolumeKey);
+      metadataManager.writeBatch(batch);
+    } catch (IOException ex) {
+      if (!(ex instanceof OMException)) {
+        LOG.error("Delete volume failed for volume:{}", volume, ex);
+      }
+      throw ex;
+    } finally {
+      metadataManager.writeLock().unlock();
+    }
+  }
+
+  /**
+   * Checks if the specified user with a role can access this volume.
+   *
+   * @param volume - volume
+   * @param userAcl - user acl which needs to be checked for access
+   * @return true if the user has access for the volume, false otherwise
+   * @throws IOException
+   */
+  public boolean checkVolumeAccess(String volume, OzoneAclInfo userAcl)
+      throws IOException {
+    Preconditions.checkNotNull(volume);
+    Preconditions.checkNotNull(userAcl);
+    metadataManager.readLock().lock();
+    try {
+      byte[] dbVolumeKey = metadataManager.getVolumeKey(volume);
+      byte[] volInfo = metadataManager.get(dbVolumeKey);
+      if (volInfo == null) {
+        LOG.debug("volume:{} does not exist", volume);
+        throw new OMException(ResultCodes.FAILED_VOLUME_NOT_FOUND);
+      }
+
+      VolumeInfo volumeInfo = VolumeInfo.parseFrom(volInfo);
+      OmVolumeArgs volumeArgs = OmVolumeArgs.getFromProtobuf(volumeInfo);
+      Preconditions.checkState(volume.equals(volumeInfo.getVolume()));
+      return volumeArgs.getAclMap().hasAccess(userAcl);
+    } catch (IOException ex) {
+      if (!(ex instanceof OMException)) {
+        LOG.error("Check volume access failed for volume:{} user:{} rights:{}",
+            volume, userAcl.getName(), userAcl.getRights(), ex);
+      }
+      throw ex;
+    } finally {
+      metadataManager.readLock().unlock();
+    }
+  }
+
+  /**
+   * {@inheritDoc}
+   */
+  @Override
+  public List<OmVolumeArgs> listVolumes(String userName,
+      String prefix, String startKey, int maxKeys) throws IOException {
+    metadataManager.readLock().lock();
+    try {
+      return metadataManager.listVolumes(
+          userName, prefix, startKey, maxKeys);
+    } finally {
+      metadataManager.readLock().unlock();
+    }
+  }
+}
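
For context, a minimal caller-side sketch of the new VolumeManagerImpl (assuming an OMMetadataManager instance is supplied by the Ozone Manager at startup; the sample volume/owner names and the helper class name are illustrative and not part of this patch, while the builder methods are the ones used in setOwner() above):

    package org.apache.hadoop.ozone.om;

    import java.io.IOException;

    import org.apache.hadoop.hdds.conf.OzoneConfiguration;
    import org.apache.hadoop.ozone.om.helpers.OmVolumeArgs;

    /** Sketch only: driving VolumeManagerImpl the way OzoneManager would. */
    public final class VolumeManagerUsageSketch {
      private VolumeManagerUsageSketch() { }

      static void createAndDelete(OMMetadataManager metadataManager,
          OzoneConfiguration conf) throws IOException {
        VolumeManagerImpl volumeManager =
            new VolumeManagerImpl(metadataManager, conf);

        // Quota is in bytes; the creation time is stamped inside createVolume().
        OmVolumeArgs args = OmVolumeArgs.newBuilder()
            .setVolume("vol1")
            .setAdminName("ozone")
            .setOwnerName("hive")
            .setQuotaInBytes(1024L * 1024 * 1024)
            .build();

        volumeManager.createVolume(args);   // writes VolumeInfo and the owner's VolumeList
        volumeManager.setQuota("vol1", 2L * 1024 * 1024 * 1024);
        volumeManager.deleteVolume("vol1"); // only succeeds while the volume is empty
      }
    }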

http://git-wip-us.apache.org/repos/asf/hadoop/blob/061b1685/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/exceptions/OMException.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/exceptions/OMException.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/exceptions/OMException.java
new file mode 100644
index 0000000..55cef97
--- /dev/null
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/exceptions/OMException.java
@@ -0,0 +1,118 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.ozone.om.exceptions;
+
+import java.io.IOException;
+
+/**
+ * Exception thrown by Ozone Manager.
+ */
+public class OMException extends IOException {
+  private final OMException.ResultCodes result;
+
+  /**
+   * Constructs an {@code OMException} with {@code null}
+   * as its error detail message.
+   */
+  public OMException(OMException.ResultCodes result) {
+    this.result = result;
+  }
+
+  /**
+   * Constructs an {@code OMException} with the specified detail message.
+   *
+   * @param message The detail message (which is saved for later retrieval by
+   * the
+   * {@link #getMessage()} method)
+   */
+  public OMException(String message, OMException.ResultCodes result) {
+    super(message);
+    this.result = result;
+  }
+
+  /**
+   * Constructs an {@code OMException} with the specified detail message
+   * and cause.
+   * <p>
+   * <p> Note that the detail message associated with {@code cause} is
+   * <i>not</i> automatically incorporated into this exception's detail
+   * message.
+   *
+   * @param message The detail message (which is saved for later retrieval by
+   * the
+   * {@link #getMessage()} method)
+   * @param cause The cause (which is saved for later retrieval by the {@link
+   * #getCause()} method).  (A null value is permitted, and indicates that the
+   * cause is nonexistent or unknown.)
+   * @since 1.6
+   */
+  public OMException(String message, Throwable cause,
+                     OMException.ResultCodes result) {
+    super(message, cause);
+    this.result = result;
+  }
+
+  /**
+   * Constructs an {@code OMException} with the specified cause and a
+   * detail message of {@code (cause==null ? null : cause.toString())}
+   * (which typically contains the class and detail message of {@code cause}).
+   * This constructor is useful for IO exceptions that are little more
+   * than wrappers for other throwables.
+   *
+   * @param cause The cause (which is saved for later retrieval by the {@link
+   * #getCause()} method).  (A null value is permitted, and indicates that the
+   * cause is nonexistent or unknown.)
+   * @since 1.6
+   */
+  public OMException(Throwable cause, OMException.ResultCodes result) {
+    super(cause);
+    this.result = result;
+  }
+
+  /**
+   * Returns resultCode.
+   * @return ResultCode
+   */
+  public OMException.ResultCodes getResult() {
+    return result;
+  }
+
+  /**
+   * Error codes to make it easy to decode these exceptions.
+   */
+  public enum ResultCodes {
+    FAILED_TOO_MANY_USER_VOLUMES,
+    FAILED_VOLUME_ALREADY_EXISTS,
+    FAILED_VOLUME_NOT_FOUND,
+    FAILED_VOLUME_NOT_EMPTY,
+    FAILED_USER_NOT_FOUND,
+    FAILED_BUCKET_ALREADY_EXISTS,
+    FAILED_BUCKET_NOT_FOUND,
+    FAILED_BUCKET_NOT_EMPTY,
+    FAILED_KEY_ALREADY_EXISTS,
+    FAILED_KEY_NOT_FOUND,
+    FAILED_KEY_ALLOCATION,
+    FAILED_KEY_DELETION,
+    FAILED_KEY_RENAME,
+    FAILED_INVALID_KEY_NAME,
+    FAILED_METADATA_ERROR,
+    FAILED_INTERNAL_ERROR,
+    OM_NOT_INITIALIZED,
+    SCM_VERSION_MISMATCH_ERROR
+  }
+}
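
Callers are expected to branch on the result code rather than parse the message. A small sketch of that pattern, mirroring exceptionToResponseStatus() in the protocol translator later in this patch (the helper class, method name, and the subset of codes handled are illustrative):

    import java.io.IOException;

    import org.apache.hadoop.ozone.om.exceptions.OMException;

    /** Sketch only: mapping an OMException result code to a caller-visible status. */
    final class OmErrorMappingSketch {
      private OmErrorMappingSketch() { }

      static String describeFailure(IOException ex) {
        if (!(ex instanceof OMException)) {
          return "INTERNAL_ERROR";
        }
        switch (((OMException) ex).getResult()) {
        case FAILED_VOLUME_NOT_FOUND:
          return "VOLUME_NOT_FOUND";
        case FAILED_VOLUME_NOT_EMPTY:
          return "VOLUME_NOT_EMPTY";
        case FAILED_TOO_MANY_USER_VOLUMES:
          return "USER_TOO_MANY_VOLUMES";
        default:
          return "INTERNAL_ERROR";
        }
      }
    }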

http://git-wip-us.apache.org/repos/asf/hadoop/blob/061b1685/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/exceptions/package-info.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/exceptions/package-info.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/exceptions/package-info.java
new file mode 100644
index 0000000..5091545
--- /dev/null
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/exceptions/package-info.java
@@ -0,0 +1,19 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with this
+ * work for additional information regarding copyright ownership.  The ASF
+ * licenses this file to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations under
+ * the License.
+ */
+
+package org.apache.hadoop.ozone.om.exceptions;
+// Exception thrown by OM.

http://git-wip-us.apache.org/repos/asf/hadoop/blob/061b1685/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/package-info.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/package-info.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/package-info.java
new file mode 100644
index 0000000..7904d5d
--- /dev/null
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/package-info.java
@@ -0,0 +1,21 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with this
+ * work for additional information regarding copyright ownership.  The ASF
+ * licenses this file to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations under
+ * the License.
+ */
+
+package org.apache.hadoop.ozone.om;
+/*
+ This package contains the Ozone Manager classes.
+ */
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hadoop/blob/061b1685/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/protocolPB/KeySpaceManagerProtocolServerSideTranslatorPB.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/protocolPB/KeySpaceManagerProtocolServerSideTranslatorPB.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/protocolPB/KeySpaceManagerProtocolServerSideTranslatorPB.java
deleted file mode 100644
index 38e7797..0000000
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/protocolPB/KeySpaceManagerProtocolServerSideTranslatorPB.java
+++ /dev/null
@@ -1,559 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with this
- * work for additional information regarding copyright ownership.  The ASF
- * licenses this file to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations under
- * the License.
- */
-package org.apache.hadoop.ozone.protocolPB;
-
-import com.google.common.collect.Lists;
-import com.google.protobuf.RpcController;
-import com.google.protobuf.ServiceException;
-import org.apache.hadoop.ozone.ksm.helpers.KsmBucketArgs;
-import org.apache.hadoop.ozone.ksm.helpers.KsmBucketInfo;
-import org.apache.hadoop.ozone.ksm.helpers.KsmKeyArgs;
-import org.apache.hadoop.ozone.ksm.helpers.KsmKeyInfo;
-import org.apache.hadoop.ozone.ksm.helpers.KsmKeyLocationInfo;
-import org.apache.hadoop.ozone.ksm.helpers.KsmVolumeArgs;
-import org.apache.hadoop.ozone.ksm.helpers.OpenKeySession;
-import org.apache.hadoop.ozone.ksm.helpers.ServiceInfo;
-import org.apache.hadoop.ozone.ksm.protocol.KeySpaceManagerProtocol;
-import org.apache.hadoop.ozone.ksm.protocolPB.KeySpaceManagerProtocolPB;
-import org.apache.hadoop.ozone.ksm.exceptions.KSMException;
-import org.apache.hadoop.ozone.protocol.proto
-    .KeySpaceManagerProtocolProtos.AllocateBlockRequest;
-import org.apache.hadoop.ozone.protocol.proto
-    .KeySpaceManagerProtocolProtos.AllocateBlockResponse;
-import org.apache.hadoop.ozone.protocol.proto
-    .KeySpaceManagerProtocolProtos.CommitKeyRequest;
-import org.apache.hadoop.ozone.protocol.proto
-    .KeySpaceManagerProtocolProtos.CommitKeyResponse;
-import org.apache.hadoop.ozone.protocol.proto
-    .KeySpaceManagerProtocolProtos.CreateBucketRequest;
-import org.apache.hadoop.ozone.protocol.proto
-    .KeySpaceManagerProtocolProtos.CreateBucketResponse;
-import org.apache.hadoop.ozone.protocol.proto
-    .KeySpaceManagerProtocolProtos.InfoBucketRequest;
-import org.apache.hadoop.ozone.protocol.proto
-    .KeySpaceManagerProtocolProtos.InfoBucketResponse;
-import org.apache.hadoop.ozone.protocol.proto
-    .KeySpaceManagerProtocolProtos.SetBucketPropertyRequest;
-import org.apache.hadoop.ozone.protocol.proto
-    .KeySpaceManagerProtocolProtos.SetBucketPropertyResponse;
-import org.apache.hadoop.ozone.protocol.proto
-    .KeySpaceManagerProtocolProtos.DeleteBucketRequest;
-import org.apache.hadoop.ozone.protocol.proto
-    .KeySpaceManagerProtocolProtos.DeleteBucketResponse;
-import org.apache.hadoop.ozone.protocol.proto
-    .KeySpaceManagerProtocolProtos.CreateVolumeRequest;
-import org.apache.hadoop.ozone.protocol.proto
-    .KeySpaceManagerProtocolProtos.CreateVolumeResponse;
-import org.apache.hadoop.ozone.protocol.proto
-    .KeySpaceManagerProtocolProtos.LocateKeyRequest;
-import org.apache.hadoop.ozone.protocol.proto
-    .KeySpaceManagerProtocolProtos.LocateKeyResponse;
-import org.apache.hadoop.ozone.protocol.proto
-    .KeySpaceManagerProtocolProtos.RenameKeyRequest;
-import org.apache.hadoop.ozone.protocol.proto
-    .KeySpaceManagerProtocolProtos.RenameKeyResponse;
-import org.apache.hadoop.ozone.protocol.proto
-    .KeySpaceManagerProtocolProtos.KeyArgs;
-import org.apache.hadoop.ozone.protocol.proto
-    .KeySpaceManagerProtocolProtos.SetVolumePropertyRequest;
-import org.apache.hadoop.ozone.protocol.proto
-    .KeySpaceManagerProtocolProtos.SetVolumePropertyResponse;
-import org.apache.hadoop.ozone.protocol.proto
-    .KeySpaceManagerProtocolProtos.CheckVolumeAccessRequest;
-import org.apache.hadoop.ozone.protocol.proto
-    .KeySpaceManagerProtocolProtos.CheckVolumeAccessResponse;
-import org.apache.hadoop.ozone.protocol.proto
-    .KeySpaceManagerProtocolProtos.InfoVolumeRequest;
-import org.apache.hadoop.ozone.protocol.proto
-    .KeySpaceManagerProtocolProtos.InfoVolumeResponse;
-import org.apache.hadoop.ozone.protocol.proto
-    .KeySpaceManagerProtocolProtos.DeleteVolumeRequest;
-import org.apache.hadoop.ozone.protocol.proto
-    .KeySpaceManagerProtocolProtos.DeleteVolumeResponse;
-import org.apache.hadoop.ozone.protocol.proto
-    .KeySpaceManagerProtocolProtos.ListVolumeRequest;
-import org.apache.hadoop.ozone.protocol.proto
-    .KeySpaceManagerProtocolProtos.ListVolumeResponse;
-import org.apache.hadoop.ozone.protocol.proto
-    .KeySpaceManagerProtocolProtos.ListBucketsRequest;
-import org.apache.hadoop.ozone.protocol.proto
-    .KeySpaceManagerProtocolProtos.ListBucketsResponse;
-import org.apache.hadoop.ozone.protocol.proto
-    .KeySpaceManagerProtocolProtos.ListKeysRequest;
-import org.apache.hadoop.ozone.protocol.proto
-    .KeySpaceManagerProtocolProtos.ListKeysResponse;
-import org.apache.hadoop.ozone.protocol.proto
-    .KeySpaceManagerProtocolProtos.Status;
-import org.apache.hadoop.ozone.protocol.proto
-    .KeySpaceManagerProtocolProtos.ServiceListRequest;
-import org.apache.hadoop.ozone.protocol.proto
-    .KeySpaceManagerProtocolProtos.ServiceListResponse;
-import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import java.io.IOException;
-import java.util.List;
-import java.util.stream.Collectors;
-
-/**
- * This class is the server-side translator that forwards requests received on
- * {@link org.apache.hadoop.ozone.ksm.protocolPB.KeySpaceManagerProtocolPB}
- * to the KeySpaceManagerService server implementation.
- */
-public class KeySpaceManagerProtocolServerSideTranslatorPB implements
-    KeySpaceManagerProtocolPB {
-  private static final Logger LOG = LoggerFactory
-      .getLogger(KeySpaceManagerProtocolServerSideTranslatorPB.class);
-  private final KeySpaceManagerProtocol impl;
-
-  /**
-   * Constructs an instance of the server handler.
-   *
-   * @param impl KeySpaceManagerProtocolPB
-   */
-  public KeySpaceManagerProtocolServerSideTranslatorPB(
-      KeySpaceManagerProtocol impl) {
-    this.impl = impl;
-  }
-
-  // Convert and exception to corresponding status code
-  private Status exceptionToResponseStatus(IOException ex) {
-    if (ex instanceof KSMException) {
-      KSMException ksmException = (KSMException)ex;
-      switch (ksmException.getResult()) {
-      case FAILED_VOLUME_ALREADY_EXISTS:
-        return Status.VOLUME_ALREADY_EXISTS;
-      case FAILED_TOO_MANY_USER_VOLUMES:
-        return Status.USER_TOO_MANY_VOLUMES;
-      case FAILED_VOLUME_NOT_FOUND:
-        return Status.VOLUME_NOT_FOUND;
-      case FAILED_VOLUME_NOT_EMPTY:
-        return Status.VOLUME_NOT_EMPTY;
-      case FAILED_USER_NOT_FOUND:
-        return Status.USER_NOT_FOUND;
-      case FAILED_BUCKET_ALREADY_EXISTS:
-        return Status.BUCKET_ALREADY_EXISTS;
-      case FAILED_BUCKET_NOT_FOUND:
-        return Status.BUCKET_NOT_FOUND;
-      case FAILED_BUCKET_NOT_EMPTY:
-        return Status.BUCKET_NOT_EMPTY;
-      case FAILED_KEY_ALREADY_EXISTS:
-        return Status.KEY_ALREADY_EXISTS;
-      case FAILED_KEY_NOT_FOUND:
-        return Status.KEY_NOT_FOUND;
-      case FAILED_INVALID_KEY_NAME:
-        return Status.INVALID_KEY_NAME;
-      default:
-        return Status.INTERNAL_ERROR;
-      }
-    } else {
-      if (LOG.isDebugEnabled()) {
-        LOG.debug("Unknown error occurs", ex);
-      }
-      return Status.INTERNAL_ERROR;
-    }
-  }
-
-  @Override
-  public CreateVolumeResponse createVolume(
-      RpcController controller, CreateVolumeRequest request)
-      throws ServiceException {
-    CreateVolumeResponse.Builder resp = CreateVolumeResponse.newBuilder();
-    resp.setStatus(Status.OK);
-    try {
-      impl.createVolume(KsmVolumeArgs.getFromProtobuf(request.getVolumeInfo()));
-    } catch (IOException e) {
-      resp.setStatus(exceptionToResponseStatus(e));
-    }
-    return resp.build();
-  }
-
-  @Override
-  public SetVolumePropertyResponse setVolumeProperty(
-      RpcController controller, SetVolumePropertyRequest request)
-      throws ServiceException {
-    SetVolumePropertyResponse.Builder resp =
-        SetVolumePropertyResponse.newBuilder();
-    resp.setStatus(Status.OK);
-    String volume = request.getVolumeName();
-
-    try {
-      if (request.hasQuotaInBytes()) {
-        long quota = request.getQuotaInBytes();
-        impl.setQuota(volume, quota);
-      } else {
-        String owner = request.getOwnerName();
-        impl.setOwner(volume, owner);
-      }
-    } catch (IOException e) {
-      resp.setStatus(exceptionToResponseStatus(e));
-    }
-    return resp.build();
-  }
-
-  @Override
-  public CheckVolumeAccessResponse checkVolumeAccess(
-      RpcController controller, CheckVolumeAccessRequest request)
-      throws ServiceException {
-    CheckVolumeAccessResponse.Builder resp =
-        CheckVolumeAccessResponse.newBuilder();
-    resp.setStatus(Status.OK);
-    try {
-      boolean access = impl.checkVolumeAccess(request.getVolumeName(),
-          request.getUserAcl());
-      // if no access, set the response status as access denied
-      if (!access) {
-        resp.setStatus(Status.ACCESS_DENIED);
-      }
-    } catch (IOException e) {
-      resp.setStatus(exceptionToResponseStatus(e));
-    }
-
-    return resp.build();
-  }
-
-  @Override
-  public InfoVolumeResponse infoVolume(
-      RpcController controller, InfoVolumeRequest request)
-      throws ServiceException {
-    InfoVolumeResponse.Builder resp = InfoVolumeResponse.newBuilder();
-    resp.setStatus(Status.OK);
-    String volume = request.getVolumeName();
-    try {
-      KsmVolumeArgs ret = impl.getVolumeInfo(volume);
-      resp.setVolumeInfo(ret.getProtobuf());
-    } catch (IOException e) {
-      resp.setStatus(exceptionToResponseStatus(e));
-    }
-    return resp.build();
-  }
-
-  @Override
-  public DeleteVolumeResponse deleteVolume(
-      RpcController controller, DeleteVolumeRequest request)
-      throws ServiceException {
-    DeleteVolumeResponse.Builder resp = DeleteVolumeResponse.newBuilder();
-    resp.setStatus(Status.OK);
-    try {
-      impl.deleteVolume(request.getVolumeName());
-    } catch (IOException e) {
-      resp.setStatus(exceptionToResponseStatus(e));
-    }
-    return resp.build();
-  }
-
-  @Override
-  public ListVolumeResponse listVolumes(
-      RpcController controller, ListVolumeRequest request)
-      throws ServiceException {
-    ListVolumeResponse.Builder resp = ListVolumeResponse.newBuilder();
-    List<KsmVolumeArgs> result = Lists.newArrayList();
-    try {
-      if (request.getScope()
-          == ListVolumeRequest.Scope.VOLUMES_BY_USER) {
-        result = impl.listVolumeByUser(request.getUserName(),
-            request.getPrefix(), request.getPrevKey(), request.getMaxKeys());
-      } else if (request.getScope()
-          == ListVolumeRequest.Scope.VOLUMES_BY_CLUSTER) {
-        result = impl.listAllVolumes(request.getPrefix(), request.getPrevKey(),
-            request.getMaxKeys());
-      }
-
-      if (result == null) {
-        throw new ServiceException("Failed to get volumes for given scope "
-            + request.getScope());
-      }
-
-      result.forEach(item -> resp.addVolumeInfo(item.getProtobuf()));
-      resp.setStatus(Status.OK);
-    } catch (IOException e) {
-      resp.setStatus(exceptionToResponseStatus(e));
-    }
-    return resp.build();
-  }
-
-  @Override
-  public CreateBucketResponse createBucket(
-      RpcController controller, CreateBucketRequest
-      request) throws ServiceException {
-    CreateBucketResponse.Builder resp =
-        CreateBucketResponse.newBuilder();
-    try {
-      impl.createBucket(KsmBucketInfo.getFromProtobuf(
-          request.getBucketInfo()));
-      resp.setStatus(Status.OK);
-    } catch (IOException e) {
-      resp.setStatus(exceptionToResponseStatus(e));
-    }
-    return resp.build();
-  }
-
-  @Override
-  public InfoBucketResponse infoBucket(
-      RpcController controller, InfoBucketRequest request)
-      throws ServiceException {
-    InfoBucketResponse.Builder resp =
-        InfoBucketResponse.newBuilder();
-    try {
-      KsmBucketInfo ksmBucketInfo = impl.getBucketInfo(
-          request.getVolumeName(), request.getBucketName());
-      resp.setStatus(Status.OK);
-      resp.setBucketInfo(ksmBucketInfo.getProtobuf());
-    } catch(IOException e) {
-      resp.setStatus(exceptionToResponseStatus(e));
-    }
-    return resp.build();
-  }
-
-  @Override
-  public LocateKeyResponse createKey(
-      RpcController controller, LocateKeyRequest request
-  ) throws ServiceException {
-    LocateKeyResponse.Builder resp =
-        LocateKeyResponse.newBuilder();
-    try {
-      KeyArgs keyArgs = request.getKeyArgs();
-      HddsProtos.ReplicationType type =
-          keyArgs.hasType()? keyArgs.getType() : null;
-      HddsProtos.ReplicationFactor factor =
-          keyArgs.hasFactor()? keyArgs.getFactor() : null;
-      KsmKeyArgs ksmKeyArgs = new KsmKeyArgs.Builder()
-          .setVolumeName(keyArgs.getVolumeName())
-          .setBucketName(keyArgs.getBucketName())
-          .setKeyName(keyArgs.getKeyName())
-          .setDataSize(keyArgs.getDataSize())
-          .setType(type)
-          .setFactor(factor)
-          .build();
-      if (keyArgs.hasDataSize()) {
-        ksmKeyArgs.setDataSize(keyArgs.getDataSize());
-      } else {
-        ksmKeyArgs.setDataSize(0);
-      }
-      OpenKeySession openKey = impl.openKey(ksmKeyArgs);
-      resp.setKeyInfo(openKey.getKeyInfo().getProtobuf());
-      resp.setID(openKey.getId());
-      resp.setOpenVersion(openKey.getOpenVersion());
-      resp.setStatus(Status.OK);
-    } catch (IOException e) {
-      resp.setStatus(exceptionToResponseStatus(e));
-    }
-    return resp.build();
-  }
-
-  @Override
-  public LocateKeyResponse lookupKey(
-      RpcController controller, LocateKeyRequest request
-  ) throws ServiceException {
-    LocateKeyResponse.Builder resp =
-        LocateKeyResponse.newBuilder();
-    try {
-      KeyArgs keyArgs = request.getKeyArgs();
-      KsmKeyArgs ksmKeyArgs = new KsmKeyArgs.Builder()
-          .setVolumeName(keyArgs.getVolumeName())
-          .setBucketName(keyArgs.getBucketName())
-          .setKeyName(keyArgs.getKeyName())
-          .build();
-      KsmKeyInfo keyInfo = impl.lookupKey(ksmKeyArgs);
-      resp.setKeyInfo(keyInfo.getProtobuf());
-      resp.setStatus(Status.OK);
-    } catch (IOException e) {
-      resp.setStatus(exceptionToResponseStatus(e));
-    }
-    return resp.build();
-  }
-
-  @Override
-  public RenameKeyResponse renameKey(
-      RpcController controller, RenameKeyRequest request)
-      throws ServiceException {
-    RenameKeyResponse.Builder resp = RenameKeyResponse.newBuilder();
-    try {
-      KeyArgs keyArgs = request.getKeyArgs();
-      KsmKeyArgs ksmKeyArgs = new KsmKeyArgs.Builder()
-          .setVolumeName(keyArgs.getVolumeName())
-          .setBucketName(keyArgs.getBucketName())
-          .setKeyName(keyArgs.getKeyName())
-          .build();
-      impl.renameKey(ksmKeyArgs, request.getToKeyName());
-      resp.setStatus(Status.OK);
-    } catch (IOException e){
-      resp.setStatus(exceptionToResponseStatus(e));
-    }
-    return resp.build();
-  }
-
-  @Override
-  public SetBucketPropertyResponse setBucketProperty(
-      RpcController controller, SetBucketPropertyRequest request)
-      throws ServiceException {
-    SetBucketPropertyResponse.Builder resp =
-        SetBucketPropertyResponse.newBuilder();
-    try {
-      impl.setBucketProperty(KsmBucketArgs.getFromProtobuf(
-          request.getBucketArgs()));
-      resp.setStatus(Status.OK);
-    } catch(IOException e) {
-      resp.setStatus(exceptionToResponseStatus(e));
-    }
-    return resp.build();
-  }
-
-  @Override
-  public LocateKeyResponse deleteKey(RpcController controller,
-      LocateKeyRequest request) throws ServiceException {
-    LocateKeyResponse.Builder resp =
-        LocateKeyResponse.newBuilder();
-    try {
-      KeyArgs keyArgs = request.getKeyArgs();
-      KsmKeyArgs ksmKeyArgs = new KsmKeyArgs.Builder()
-          .setVolumeName(keyArgs.getVolumeName())
-          .setBucketName(keyArgs.getBucketName())
-          .setKeyName(keyArgs.getKeyName())
-          .build();
-      impl.deleteKey(ksmKeyArgs);
-      resp.setStatus(Status.OK);
-    } catch (IOException e) {
-      resp.setStatus(exceptionToResponseStatus(e));
-    }
-    return resp.build();
-  }
-
-  @Override
-  public DeleteBucketResponse deleteBucket(
-      RpcController controller, DeleteBucketRequest request)
-      throws ServiceException {
-    DeleteBucketResponse.Builder resp = DeleteBucketResponse.newBuilder();
-    resp.setStatus(Status.OK);
-    try {
-      impl.deleteBucket(request.getVolumeName(), request.getBucketName());
-    } catch (IOException e) {
-      resp.setStatus(exceptionToResponseStatus(e));
-    }
-    return resp.build();
-  }
-
-  @Override
-  public ListBucketsResponse listBuckets(
-      RpcController controller, ListBucketsRequest request)
-      throws ServiceException {
-    ListBucketsResponse.Builder resp =
-        ListBucketsResponse.newBuilder();
-    try {
-      List<KsmBucketInfo> buckets = impl.listBuckets(
-          request.getVolumeName(),
-          request.getStartKey(),
-          request.getPrefix(),
-          request.getCount());
-      for(KsmBucketInfo bucket : buckets) {
-        resp.addBucketInfo(bucket.getProtobuf());
-      }
-      resp.setStatus(Status.OK);
-    } catch (IOException e) {
-      resp.setStatus(exceptionToResponseStatus(e));
-    }
-    return resp.build();
-  }
-
-  @Override
-  public ListKeysResponse listKeys(RpcController controller,
-      ListKeysRequest request) throws ServiceException {
-    ListKeysResponse.Builder resp =
-        ListKeysResponse.newBuilder();
-    try {
-      List<KsmKeyInfo> keys = impl.listKeys(
-          request.getVolumeName(),
-          request.getBucketName(),
-          request.getStartKey(),
-          request.getPrefix(),
-          request.getCount());
-      for(KsmKeyInfo key : keys) {
-        resp.addKeyInfo(key.getProtobuf());
-      }
-      resp.setStatus(Status.OK);
-    } catch (IOException e) {
-      resp.setStatus(exceptionToResponseStatus(e));
-    }
-    return resp.build();
-  }
-
-  @Override
-  public CommitKeyResponse commitKey(RpcController controller,
-      CommitKeyRequest request) throws ServiceException {
-    CommitKeyResponse.Builder resp =
-        CommitKeyResponse.newBuilder();
-    try {
-      KeyArgs keyArgs = request.getKeyArgs();
-      HddsProtos.ReplicationType type =
-          keyArgs.hasType()? keyArgs.getType() : null;
-      HddsProtos.ReplicationFactor factor =
-          keyArgs.hasFactor()? keyArgs.getFactor() : null;
-      KsmKeyArgs ksmKeyArgs = new KsmKeyArgs.Builder()
-          .setVolumeName(keyArgs.getVolumeName())
-          .setBucketName(keyArgs.getBucketName())
-          .setKeyName(keyArgs.getKeyName())
-          .setDataSize(keyArgs.getDataSize())
-          .setType(type)
-          .setFactor(factor)
-          .build();
-      int id = request.getClientID();
-      impl.commitKey(ksmKeyArgs, id);
-      resp.setStatus(Status.OK);
-    } catch (IOException e) {
-      resp.setStatus(exceptionToResponseStatus(e));
-    }
-    return resp.build();
-  }
-
-  @Override
-  public AllocateBlockResponse allocateBlock(RpcController controller,
-      AllocateBlockRequest request) throws ServiceException {
-    AllocateBlockResponse.Builder resp =
-        AllocateBlockResponse.newBuilder();
-    try {
-      KeyArgs keyArgs = request.getKeyArgs();
-      KsmKeyArgs ksmKeyArgs = new KsmKeyArgs.Builder()
-          .setVolumeName(keyArgs.getVolumeName())
-          .setBucketName(keyArgs.getBucketName())
-          .setKeyName(keyArgs.getKeyName())
-          .build();
-      int id = request.getClientID();
-      KsmKeyLocationInfo newLocation = impl.allocateBlock(ksmKeyArgs, id);
-      resp.setKeyLocation(newLocation.getProtobuf());
-      resp.setStatus(Status.OK);
-    } catch (IOException e) {
-      resp.setStatus(exceptionToResponseStatus(e));
-    }
-    return resp.build();
-  }
-
-  @Override
-  public ServiceListResponse getServiceList(RpcController controller,
-      ServiceListRequest request) throws ServiceException {
-    ServiceListResponse.Builder resp = ServiceListResponse.newBuilder();
-    try {
-      resp.addAllServiceInfo(impl.getServiceList().stream()
-          .map(ServiceInfo::getProtobuf)
-          .collect(Collectors.toList()));
-      resp.setStatus(Status.OK);
-    } catch (IOException e) {
-      resp.setStatus(exceptionToResponseStatus(e));
-    }
-    return resp.build();
-  }
-}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/061b1685/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/protocolPB/OzoneManagerProtocolServerSideTranslatorPB.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/protocolPB/OzoneManagerProtocolServerSideTranslatorPB.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/protocolPB/OzoneManagerProtocolServerSideTranslatorPB.java
new file mode 100644
index 0000000..40a88b6
--- /dev/null
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/protocolPB/OzoneManagerProtocolServerSideTranslatorPB.java
@@ -0,0 +1,571 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with this
+ * work for additional information regarding copyright ownership.  The ASF
+ * licenses this file to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations under
+ * the License.
+ */
+package org.apache.hadoop.ozone.protocolPB;
+
+import com.google.common.collect.Lists;
+import com.google.protobuf.RpcController;
+import com.google.protobuf.ServiceException;
+import org.apache.hadoop.ozone.om.helpers.OmBucketArgs;
+import org.apache.hadoop.ozone.om.helpers.OmBucketInfo;
+import org.apache.hadoop.ozone.om.helpers.OmKeyArgs;
+import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;
+import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfo;
+import org.apache.hadoop.ozone.om.helpers.OmVolumeArgs;
+import org.apache.hadoop.ozone.om.helpers.OpenKeySession;
+import org.apache.hadoop.ozone.om.helpers.ServiceInfo;
+import org.apache.hadoop.ozone.om.protocol.OzoneManagerProtocol;
+import org.apache.hadoop.ozone.om.protocolPB.OzoneManagerProtocolPB;
+import org.apache.hadoop.ozone.om.exceptions.OMException;
+import org.apache.hadoop.ozone.protocol.proto
+    .OzoneManagerProtocolProtos.AllocateBlockRequest;
+import org.apache.hadoop.ozone.protocol.proto
+    .OzoneManagerProtocolProtos.AllocateBlockResponse;
+import org.apache.hadoop.ozone.protocol.proto
+    .OzoneManagerProtocolProtos.CommitKeyRequest;
+import org.apache.hadoop.ozone.protocol.proto
+    .OzoneManagerProtocolProtos.CommitKeyResponse;
+import org.apache.hadoop.ozone.protocol.proto
+    .OzoneManagerProtocolProtos.CreateBucketRequest;
+import org.apache.hadoop.ozone.protocol.proto
+    .OzoneManagerProtocolProtos.CreateBucketResponse;
+import org.apache.hadoop.ozone.protocol.proto
+    .OzoneManagerProtocolProtos.InfoBucketRequest;
+import org.apache.hadoop.ozone.protocol.proto
+    .OzoneManagerProtocolProtos.InfoBucketResponse;
+import org.apache.hadoop.ozone.protocol.proto
+    .OzoneManagerProtocolProtos.SetBucketPropertyRequest;
+import org.apache.hadoop.ozone.protocol.proto
+    .OzoneManagerProtocolProtos.SetBucketPropertyResponse;
+import org.apache.hadoop.ozone.protocol.proto
+    .OzoneManagerProtocolProtos.DeleteBucketRequest;
+import org.apache.hadoop.ozone.protocol.proto
+    .OzoneManagerProtocolProtos.DeleteBucketResponse;
+import org.apache.hadoop.ozone.protocol.proto
+    .OzoneManagerProtocolProtos.CreateVolumeRequest;
+import org.apache.hadoop.ozone.protocol.proto
+    .OzoneManagerProtocolProtos.CreateVolumeResponse;
+import org.apache.hadoop.ozone.protocol.proto
+    .OzoneManagerProtocolProtos.LocateKeyRequest;
+import org.apache.hadoop.ozone.protocol.proto
+    .OzoneManagerProtocolProtos.LocateKeyResponse;
+import org.apache.hadoop.ozone.protocol.proto
+    .OzoneManagerProtocolProtos.RenameKeyRequest;
+import org.apache.hadoop.ozone.protocol.proto
+    .OzoneManagerProtocolProtos.RenameKeyResponse;
+import org.apache.hadoop.ozone.protocol.proto
+    .OzoneManagerProtocolProtos.KeyArgs;
+import org.apache.hadoop.ozone.protocol.proto
+    .OzoneManagerProtocolProtos.SetVolumePropertyRequest;
+import org.apache.hadoop.ozone.protocol.proto
+    .OzoneManagerProtocolProtos.SetVolumePropertyResponse;
+import org.apache.hadoop.ozone.protocol.proto
+    .OzoneManagerProtocolProtos.CheckVolumeAccessRequest;
+import org.apache.hadoop.ozone.protocol.proto
+    .OzoneManagerProtocolProtos.CheckVolumeAccessResponse;
+import org.apache.hadoop.ozone.protocol.proto
+    .OzoneManagerProtocolProtos.InfoVolumeRequest;
+import org.apache.hadoop.ozone.protocol.proto
+    .OzoneManagerProtocolProtos.InfoVolumeResponse;
+import org.apache.hadoop.ozone.protocol.proto
+    .OzoneManagerProtocolProtos.DeleteVolumeRequest;
+import org.apache.hadoop.ozone.protocol.proto
+    .OzoneManagerProtocolProtos.DeleteVolumeResponse;
+import org.apache.hadoop.ozone.protocol.proto
+    .OzoneManagerProtocolProtos.ListVolumeRequest;
+import org.apache.hadoop.ozone.protocol.proto
+    .OzoneManagerProtocolProtos.ListVolumeResponse;
+import org.apache.hadoop.ozone.protocol.proto
+    .OzoneManagerProtocolProtos.ListBucketsRequest;
+import org.apache.hadoop.ozone.protocol.proto
+    .OzoneManagerProtocolProtos.ListBucketsResponse;
+import org.apache.hadoop.ozone.protocol.proto
+    .OzoneManagerProtocolProtos.ListKeysRequest;
+import org.apache.hadoop.ozone.protocol.proto
+    .OzoneManagerProtocolProtos.ListKeysResponse;
+import org.apache.hadoop.ozone.protocol.proto
+    .OzoneManagerProtocolProtos.Status;
+import org.apache.hadoop.ozone.protocol.proto
+    .OzoneManagerProtocolProtos.ServiceListRequest;
+import org.apache.hadoop.ozone.protocol.proto
+    .OzoneManagerProtocolProtos.ServiceListResponse;
+import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.io.IOException;
+import java.util.List;
+import java.util.stream.Collectors;
+
+/**
+ * This class is the server-side translator that forwards requests received on
+ * {@link OzoneManagerProtocolPB}
+ * to the OzoneManagerService server implementation.
+ */
+public class OzoneManagerProtocolServerSideTranslatorPB implements
+    OzoneManagerProtocolPB {
+  private static final Logger LOG = LoggerFactory
+      .getLogger(OzoneManagerProtocolServerSideTranslatorPB.class);
+  private final OzoneManagerProtocol impl;
+
+  /**
+   * Constructs an instance of the server handler.
+   *
+   * @param impl OzoneManagerProtocolPB
+   */
+  public OzoneManagerProtocolServerSideTranslatorPB(
+      OzoneManagerProtocol impl) {
+    this.impl = impl;
+  }
+
+  // Convert an exception to the corresponding status code
+  private Status exceptionToResponseStatus(IOException ex) {
+    if (ex instanceof OMException) {
+      OMException omException = (OMException)ex;
+      switch (omException.getResult()) {
+      case FAILED_VOLUME_ALREADY_EXISTS:
+        return Status.VOLUME_ALREADY_EXISTS;
+      case FAILED_TOO_MANY_USER_VOLUMES:
+        return Status.USER_TOO_MANY_VOLUMES;
+      case FAILED_VOLUME_NOT_FOUND:
+        return Status.VOLUME_NOT_FOUND;
+      case FAILED_VOLUME_NOT_EMPTY:
+        return Status.VOLUME_NOT_EMPTY;
+      case FAILED_USER_NOT_FOUND:
+        return Status.USER_NOT_FOUND;
+      case FAILED_BUCKET_ALREADY_EXISTS:
+        return Status.BUCKET_ALREADY_EXISTS;
+      case FAILED_BUCKET_NOT_FOUND:
+        return Status.BUCKET_NOT_FOUND;
+      case FAILED_BUCKET_NOT_EMPTY:
+        return Status.BUCKET_NOT_EMPTY;
+      case FAILED_KEY_ALREADY_EXISTS:
+        return Status.KEY_ALREADY_EXISTS;
+      case FAILED_KEY_NOT_FOUND:
+        return Status.KEY_NOT_FOUND;
+      case FAILED_INVALID_KEY_NAME:
+        return Status.INVALID_KEY_NAME;
+      case FAILED_KEY_ALLOCATION:
+        return Status.KEY_ALLOCATION_ERROR;
+      case FAILED_KEY_DELETION:
+        return Status.KEY_DELETION_ERROR;
+      case FAILED_KEY_RENAME:
+        return Status.KEY_RENAME_ERROR;
+      case FAILED_METADATA_ERROR:
+        return Status.METADATA_ERROR;
+      case OM_NOT_INITIALIZED:
+        return Status.OM_NOT_INITIALIZED;
+      case SCM_VERSION_MISMATCH_ERROR:
+        return Status.SCM_VERSION_MISMATCH_ERROR;
+      default:
+        return Status.INTERNAL_ERROR;
+      }
+    } else {
+      if (LOG.isDebugEnabled()) {
+        LOG.debug("Unknown error occurs", ex);
+      }
+      return Status.INTERNAL_ERROR;
+    }
+  }
+
+  @Override
+  public CreateVolumeResponse createVolume(
+      RpcController controller, CreateVolumeRequest request)
+      throws ServiceException {
+    CreateVolumeResponse.Builder resp = CreateVolumeResponse.newBuilder();
+    resp.setStatus(Status.OK);
+    try {
+      impl.createVolume(OmVolumeArgs.getFromProtobuf(request.getVolumeInfo()));
+    } catch (IOException e) {
+      resp.setStatus(exceptionToResponseStatus(e));
+    }
+    return resp.build();
+  }
+
+  @Override
+  public SetVolumePropertyResponse setVolumeProperty(
+      RpcController controller, SetVolumePropertyRequest request)
+      throws ServiceException {
+    SetVolumePropertyResponse.Builder resp =
+        SetVolumePropertyResponse.newBuilder();
+    resp.setStatus(Status.OK);
+    String volume = request.getVolumeName();
+
+    try {
+      if (request.hasQuotaInBytes()) {
+        long quota = request.getQuotaInBytes();
+        impl.setQuota(volume, quota);
+      } else {
+        String owner = request.getOwnerName();
+        impl.setOwner(volume, owner);
+      }
+    } catch (IOException e) {
+      resp.setStatus(exceptionToResponseStatus(e));
+    }
+    return resp.build();
+  }
+
+  @Override
+  public CheckVolumeAccessResponse checkVolumeAccess(
+      RpcController controller, CheckVolumeAccessRequest request)
+      throws ServiceException {
+    CheckVolumeAccessResponse.Builder resp =
+        CheckVolumeAccessResponse.newBuilder();
+    resp.setStatus(Status.OK);
+    try {
+      boolean access = impl.checkVolumeAccess(request.getVolumeName(),
+          request.getUserAcl());
+      // if no access, set the response status as access denied
+      if (!access) {
+        resp.setStatus(Status.ACCESS_DENIED);
+      }
+    } catch (IOException e) {
+      resp.setStatus(exceptionToResponseStatus(e));
+    }
+
+    return resp.build();
+  }
+
+  @Override
+  public InfoVolumeResponse infoVolume(
+      RpcController controller, InfoVolumeRequest request)
+      throws ServiceException {
+    InfoVolumeResponse.Builder resp = InfoVolumeResponse.newBuilder();
+    resp.setStatus(Status.OK);
+    String volume = request.getVolumeName();
+    try {
+      OmVolumeArgs ret = impl.getVolumeInfo(volume);
+      resp.setVolumeInfo(ret.getProtobuf());
+    } catch (IOException e) {
+      resp.setStatus(exceptionToResponseStatus(e));
+    }
+    return resp.build();
+  }
+
+  @Override
+  public DeleteVolumeResponse deleteVolume(
+      RpcController controller, DeleteVolumeRequest request)
+      throws ServiceException {
+    DeleteVolumeResponse.Builder resp = DeleteVolumeResponse.newBuilder();
+    resp.setStatus(Status.OK);
+    try {
+      impl.deleteVolume(request.getVolumeName());
+    } catch (IOException e) {
+      resp.setStatus(exceptionToResponseStatus(e));
+    }
+    return resp.build();
+  }
+
+  @Override
+  public ListVolumeResponse listVolumes(
+      RpcController controller, ListVolumeRequest request)
+      throws ServiceException {
+    ListVolumeResponse.Builder resp = ListVolumeResponse.newBuilder();
+    List<OmVolumeArgs> result = Lists.newArrayList();
+    try {
+      if (request.getScope()
+          == ListVolumeRequest.Scope.VOLUMES_BY_USER) {
+        result = impl.listVolumeByUser(request.getUserName(),
+            request.getPrefix(), request.getPrevKey(), request.getMaxKeys());
+      } else if (request.getScope()
+          == ListVolumeRequest.Scope.VOLUMES_BY_CLUSTER) {
+        result = impl.listAllVolumes(request.getPrefix(), request.getPrevKey(),
+            request.getMaxKeys());
+      }
+
+      if (result == null) {
+        throw new ServiceException("Failed to get volumes for given scope "
+            + request.getScope());
+      }
+
+      result.forEach(item -> resp.addVolumeInfo(item.getProtobuf()));
+      resp.setStatus(Status.OK);
+    } catch (IOException e) {
+      resp.setStatus(exceptionToResponseStatus(e));
+    }
+    return resp.build();
+  }
+
+  @Override
+  public CreateBucketResponse createBucket(
+      RpcController controller, CreateBucketRequest request)
+      throws ServiceException {
+    CreateBucketResponse.Builder resp =
+        CreateBucketResponse.newBuilder();
+    try {
+      impl.createBucket(OmBucketInfo.getFromProtobuf(
+          request.getBucketInfo()));
+      resp.setStatus(Status.OK);
+    } catch (IOException e) {
+      resp.setStatus(exceptionToResponseStatus(e));
+    }
+    return resp.build();
+  }
+
+  @Override
+  public InfoBucketResponse infoBucket(
+      RpcController controller, InfoBucketRequest request)
+      throws ServiceException {
+    InfoBucketResponse.Builder resp =
+        InfoBucketResponse.newBuilder();
+    try {
+      OmBucketInfo omBucketInfo = impl.getBucketInfo(
+          request.getVolumeName(), request.getBucketName());
+      resp.setStatus(Status.OK);
+      resp.setBucketInfo(omBucketInfo.getProtobuf());
+    } catch (IOException e) {
+      resp.setStatus(exceptionToResponseStatus(e));
+    }
+    return resp.build();
+  }
+
+  @Override
+  public LocateKeyResponse createKey(
+      RpcController controller, LocateKeyRequest request)
+      throws ServiceException {
+    LocateKeyResponse.Builder resp =
+        LocateKeyResponse.newBuilder();
+    try {
+      KeyArgs keyArgs = request.getKeyArgs();
+      HddsProtos.ReplicationType type =
+          keyArgs.hasType() ? keyArgs.getType() : null;
+      HddsProtos.ReplicationFactor factor =
+          keyArgs.hasFactor() ? keyArgs.getFactor() : null;
+      // keyArgs.getDataSize() already yields 0 when the client did not set a
+      // size, so no post-build adjustment of the data size is needed.
+      OmKeyArgs omKeyArgs = new OmKeyArgs.Builder()
+          .setVolumeName(keyArgs.getVolumeName())
+          .setBucketName(keyArgs.getBucketName())
+          .setKeyName(keyArgs.getKeyName())
+          .setDataSize(keyArgs.getDataSize())
+          .setType(type)
+          .setFactor(factor)
+          .build();
+      OpenKeySession openKey = impl.openKey(omKeyArgs);
+      resp.setKeyInfo(openKey.getKeyInfo().getProtobuf());
+      resp.setID(openKey.getId());
+      resp.setOpenVersion(openKey.getOpenVersion());
+      resp.setStatus(Status.OK);
+    } catch (IOException e) {
+      resp.setStatus(exceptionToResponseStatus(e));
+    }
+    return resp.build();
+  }
+
+  @Override
+  public LocateKeyResponse lookupKey(
+      RpcController controller, LocateKeyRequest request)
+      throws ServiceException {
+    LocateKeyResponse.Builder resp =
+        LocateKeyResponse.newBuilder();
+    try {
+      KeyArgs keyArgs = request.getKeyArgs();
+      OmKeyArgs omKeyArgs = new OmKeyArgs.Builder()
+          .setVolumeName(keyArgs.getVolumeName())
+          .setBucketName(keyArgs.getBucketName())
+          .setKeyName(keyArgs.getKeyName())
+          .build();
+      OmKeyInfo keyInfo = impl.lookupKey(omKeyArgs);
+      resp.setKeyInfo(keyInfo.getProtobuf());
+      resp.setStatus(Status.OK);
+    } catch (IOException e) {
+      resp.setStatus(exceptionToResponseStatus(e));
+    }
+    return resp.build();
+  }
+
+  @Override
+  public RenameKeyResponse renameKey(
+      RpcController controller, RenameKeyRequest request)
+      throws ServiceException {
+    RenameKeyResponse.Builder resp = RenameKeyResponse.newBuilder();
+    try {
+      KeyArgs keyArgs = request.getKeyArgs();
+      OmKeyArgs omKeyArgs = new OmKeyArgs.Builder()
+          .setVolumeName(keyArgs.getVolumeName())
+          .setBucketName(keyArgs.getBucketName())
+          .setKeyName(keyArgs.getKeyName())
+          .build();
+      impl.renameKey(omKeyArgs, request.getToKeyName());
+      resp.setStatus(Status.OK);
+    } catch (IOException e) {
+      resp.setStatus(exceptionToResponseStatus(e));
+    }
+    return resp.build();
+  }
+
+  @Override
+  public SetBucketPropertyResponse setBucketProperty(
+      RpcController controller, SetBucketPropertyRequest request)
+      throws ServiceException {
+    SetBucketPropertyResponse.Builder resp =
+        SetBucketPropertyResponse.newBuilder();
+    try {
+      impl.setBucketProperty(OmBucketArgs.getFromProtobuf(
+          request.getBucketArgs()));
+      resp.setStatus(Status.OK);
+    } catch (IOException e) {
+      resp.setStatus(exceptionToResponseStatus(e));
+    }
+    return resp.build();
+  }
+
+  @Override
+  public LocateKeyResponse deleteKey(RpcController controller,
+      LocateKeyRequest request) throws ServiceException {
+    LocateKeyResponse.Builder resp =
+        LocateKeyResponse.newBuilder();
+    try {
+      KeyArgs keyArgs = request.getKeyArgs();
+      OmKeyArgs omKeyArgs = new OmKeyArgs.Builder()
+          .setVolumeName(keyArgs.getVolumeName())
+          .setBucketName(keyArgs.getBucketName())
+          .setKeyName(keyArgs.getKeyName())
+          .build();
+      impl.deleteKey(omKeyArgs);
+      resp.setStatus(Status.OK);
+    } catch (IOException e) {
+      resp.setStatus(exceptionToResponseStatus(e));
+    }
+    return resp.build();
+  }
+
+  @Override
+  public DeleteBucketResponse deleteBucket(
+      RpcController controller, DeleteBucketRequest request)
+      throws ServiceException {
+    DeleteBucketResponse.Builder resp = DeleteBucketResponse.newBuilder();
+    resp.setStatus(Status.OK);
+    try {
+      impl.deleteBucket(request.getVolumeName(), request.getBucketName());
+    } catch (IOException e) {
+      resp.setStatus(exceptionToResponseStatus(e));
+    }
+    return resp.build();
+  }
+
+  @Override
+  public ListBucketsResponse listBuckets(
+      RpcController controller, ListBucketsRequest request)
+      throws ServiceException {
+    ListBucketsResponse.Builder resp =
+        ListBucketsResponse.newBuilder();
+    try {
+      List<OmBucketInfo> buckets = impl.listBuckets(
+          request.getVolumeName(),
+          request.getStartKey(),
+          request.getPrefix(),
+          request.getCount());
+      for (OmBucketInfo bucket : buckets) {
+        resp.addBucketInfo(bucket.getProtobuf());
+      }
+      resp.setStatus(Status.OK);
+    } catch (IOException e) {
+      resp.setStatus(exceptionToResponseStatus(e));
+    }
+    return resp.build();
+  }
+
+  @Override
+  public ListKeysResponse listKeys(RpcController controller,
+      ListKeysRequest request) throws ServiceException {
+    ListKeysResponse.Builder resp =
+        ListKeysResponse.newBuilder();
+    try {
+      List<OmKeyInfo> keys = impl.listKeys(
+          request.getVolumeName(),
+          request.getBucketName(),
+          request.getStartKey(),
+          request.getPrefix(),
+          request.getCount());
+      for (OmKeyInfo key : keys) {
+        resp.addKeyInfo(key.getProtobuf());
+      }
+      resp.setStatus(Status.OK);
+    } catch (IOException e) {
+      resp.setStatus(exceptionToResponseStatus(e));
+    }
+    return resp.build();
+  }
+
+  @Override
+  public CommitKeyResponse commitKey(RpcController controller,
+      CommitKeyRequest request) throws ServiceException {
+    CommitKeyResponse.Builder resp =
+        CommitKeyResponse.newBuilder();
+    try {
+      KeyArgs keyArgs = request.getKeyArgs();
+      HddsProtos.ReplicationType type =
+          keyArgs.hasType() ? keyArgs.getType() : null;
+      HddsProtos.ReplicationFactor factor =
+          keyArgs.hasFactor() ? keyArgs.getFactor() : null;
+      OmKeyArgs omKeyArgs = new OmKeyArgs.Builder()
+          .setVolumeName(keyArgs.getVolumeName())
+          .setBucketName(keyArgs.getBucketName())
+          .setKeyName(keyArgs.getKeyName())
+          .setDataSize(keyArgs.getDataSize())
+          .setType(type)
+          .setFactor(factor)
+          .build();
+      int id = request.getClientID();
+      impl.commitKey(omKeyArgs, id);
+      resp.setStatus(Status.OK);
+    } catch (IOException e) {
+      resp.setStatus(exceptionToResponseStatus(e));
+    }
+    return resp.build();
+  }
+
+  @Override
+  public AllocateBlockResponse allocateBlock(RpcController controller,
+      AllocateBlockRequest request) throws ServiceException {
+    AllocateBlockResponse.Builder resp =
+        AllocateBlockResponse.newBuilder();
+    try {
+      KeyArgs keyArgs = request.getKeyArgs();
+      OmKeyArgs omKeyArgs = new OmKeyArgs.Builder()
+          .setVolumeName(keyArgs.getVolumeName())
+          .setBucketName(keyArgs.getBucketName())
+          .setKeyName(keyArgs.getKeyName())
+          .build();
+      int id = request.getClientID();
+      OmKeyLocationInfo newLocation = impl.allocateBlock(omKeyArgs, id);
+      resp.setKeyLocation(newLocation.getProtobuf());
+      resp.setStatus(Status.OK);
+    } catch (IOException e) {
+      resp.setStatus(exceptionToResponseStatus(e));
+    }
+    return resp.build();
+  }
+
+  @Override
+  public ServiceListResponse getServiceList(RpcController controller,
+      ServiceListRequest request) throws ServiceException {
+    ServiceListResponse.Builder resp = ServiceListResponse.newBuilder();
+    try {
+      resp.addAllServiceInfo(impl.getServiceList().stream()
+          .map(ServiceInfo::getProtobuf)
+          .collect(Collectors.toList()));
+      resp.setStatus(Status.OK);
+    } catch (IOException e) {
+      resp.setStatus(exceptionToResponseStatus(e));
+    }
+    return resp.build();
+  }
+}
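
Every handler in the translator above follows the same translate-and-delegate shape: build the response, delegate the call to the OzoneManagerProtocol implementation held in impl, and fold any IOException into a protobuf Status via exceptionToResponseStatus() rather than letting it escape the RPC layer. The standalone sketch below restates that shape in plain Java so the control flow is visible without the generated protobuf classes; the Status, VolumeStore, VolumeRecord and InfoVolumeReply types are hypothetical stand-ins, not part of this patch.

    import java.io.IOException;
    import java.util.Optional;

    public final class TranslatorPatternSketch {

      /** Stand-in for the protobuf Status enum carried in every response. */
      enum Status { OK, VOLUME_NOT_FOUND, INTERNAL_ERROR }

      /** Stand-in for the OzoneManagerProtocol implementation ("impl" above). */
      interface VolumeStore {
        VolumeRecord getVolume(String name) throws IOException;
      }

      record VolumeRecord(String name, String owner) { }

      /** Stand-in for a generated *Response message: a status plus a payload. */
      record InfoVolumeReply(Status status, Optional<VolumeRecord> volume) { }

      /**
       * Mirrors infoVolume() above: delegate, and on failure report a status
       * instead of throwing, so the RPC response always carries a result code.
       */
      static InfoVolumeReply infoVolume(VolumeStore impl, String volumeName) {
        try {
          VolumeRecord volume = impl.getVolume(volumeName);
          return new InfoVolumeReply(Status.OK, Optional.of(volume));
        } catch (IOException e) {
          // The real translator maps the exception via exceptionToResponseStatus().
          return new InfoVolumeReply(Status.INTERNAL_ERROR, Optional.empty());
        }
      }

      public static void main(String[] args) {
        VolumeStore store = name -> new VolumeRecord(name, "hadoop");
        System.out.println(infoVolume(store, "volume-one"));
      }
    }

Returning a status inside the response keeps application-level failures (such as the ACCESS_DENIED case above) separate from RPC transport failures, which surface as ServiceException.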

http://git-wip-us.apache.org/repos/asf/hadoop/blob/061b1685/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/protocolPB/package-info.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/protocolPB/package-info.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/protocolPB/package-info.java
index e9c2430..9bc393d 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/protocolPB/package-info.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/protocolPB/package-info.java
@@ -18,5 +18,5 @@
 package org.apache.hadoop.ozone.protocolPB;
 
 /**
- * KSM protocol buffer translators.
+ * OM protocol buffer translators.
  */
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hadoop/blob/061b1685/hadoop-ozone/ozone-manager/src/main/webapps/ksm/index.html
----------------------------------------------------------------------
diff --git a/hadoop-ozone/ozone-manager/src/main/webapps/ksm/index.html b/hadoop-ozone/ozone-manager/src/main/webapps/ksm/index.html
deleted file mode 100644
index 7f18028..0000000
--- a/hadoop-ozone/ozone-manager/src/main/webapps/ksm/index.html
+++ /dev/null
@@ -1,70 +0,0 @@
-<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Strict//EN"
-        "http://www.w3.org/TR/xhtml1/DTD/xhtml1-strict.dtd">
-<!--
-   Licensed to the Apache Software Foundation (ASF) under one or more
-   contributor license agreements.  See the NOTICE file distributed with
-   this work for additional information regarding copyright ownership.
-   The ASF licenses this file to You under the Apache License, Version 2.0
-   (the "License"); you may not use this file except in compliance with
-   the License.  You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
--->
-<html lang="en">
-<head>
-    <meta charset="utf-8">
-    <meta http-equiv="X-UA-Compatible" content="IE=edge">
-    <meta name="viewport" content="width=device-width, initial-scale=1">
-    <!-- The above 3 meta tags *must* come first in the head; any other head content must come *after* these tags -->
-    <meta name="description" content="HDFS Key Space Manager">
-
-    <title>HDFS Key Space Manager</title>
-
-    <link href="static/bootstrap-3.3.7/css/bootstrap.min.css" rel="stylesheet">
-    <link href="static/hadoop.css" rel="stylesheet">
-    <link href="static/nvd3-1.8.5.min.css" rel="stylesheet">
-
-    <link href="static/ozone.css" rel="stylesheet">
-
-</head>
-
-<body ng-app="ksm">
-
-<header class="navbar navbar-inverse navbar-fixed-top bs-docs-nav">
-    <div class="container-fluid">
-        <div class="navbar-header">
-            <button type="button" class="navbar-toggle collapsed" data-toggle="collapse" data-target="#navbar"
-                    aria-expanded="false" aria-controls="navbar">
-                <span class="sr-only">Toggle navigation</span>
-                <span class="icon-bar"></span>
-                <span class="icon-bar"></span>
-                <span class="icon-bar"></span>
-            </button>
-            <a class="navbar-brand" href="#">HDFS KSM</a>
-        </div>
-        <navmenu
-                metrics="{ 'Ksm metrics' : '#!/metrics/ksm', 'Rpc metrics' : '#!/metrics/rpc'}"></navmenu>
-    </div>
-</header>
-
-<div class="container-fluid">
-    <ng-view></ng-view>
-</div><!-- /.container -->
-
-<script src="static/jquery-3.3.1.min.js"></script>
-<script src="static/angular-1.6.4.min.js"></script>
-<script src="static/angular-route-1.6.4.min.js"></script>
-<script src="static/d3-3.5.17.min.js"></script>
-<script src="static/nvd3-1.8.5.min.js"></script>
-<script src="static/angular-nvd3-1.0.9.min.js"></script>
-<script src="static/ozone.js"></script>
-<script src="ksm.js"></script>
-<script src="static/bootstrap-3.3.7/js/bootstrap.min.js"></script>
-</body>
-</html>

http://git-wip-us.apache.org/repos/asf/hadoop/blob/061b1685/hadoop-ozone/ozone-manager/src/main/webapps/ksm/ksm-metrics.html
----------------------------------------------------------------------
diff --git a/hadoop-ozone/ozone-manager/src/main/webapps/ksm/ksm-metrics.html b/hadoop-ozone/ozone-manager/src/main/webapps/ksm/ksm-metrics.html
deleted file mode 100644
index e63fb00..0000000
--- a/hadoop-ozone/ozone-manager/src/main/webapps/ksm/ksm-metrics.html
+++ /dev/null
@@ -1,44 +0,0 @@
-<!--
-   Licensed to the Apache Software Foundation (ASF) under one or more
-   contributor license agreements.  See the NOTICE file distributed with
-   this work for additional information regarding copyright ownership.
-   The ASF licenses this file to You under the Apache License, Version 2.0
-   (the "License"); you may not use this file except in compliance with
-   the License.  You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
--->
-<h1>KSM Metrics</h1>
-
-<div ng-repeat="(type,numbers) in $ctrl.metrics.nums">
-    <h2>{{type}}</h2>
-    <div class="container">
-        <div class="col-md-6">
-            <h3>Requests ({{numbers.ops}} ops)</h3>
-            <nvd3 options="$ctrl.graphOptions"
-                  data="numbers.all"></nvd3>
-        </div>
-        <div class="col-md-6">
-            <h3>Failures</h3>
-            <nvd3 options="$ctrl.graphOptions"
-                  data="numbers.failures"></nvd3>
-        </div>
-    </div>
-</div>
-
-<div ng-show="$ctrl.metrics.others.length > 0">
-    <h2>Other JMX properties</h2>
-
-    <table class="table">
-        <tr ng-repeat="metric in $ctrl.metrics.others">
-            <td>{{metric.key}}</td>
-            <td>{{metric.value}}</td>
-        </tr>
-    </table>
-</div>

http://git-wip-us.apache.org/repos/asf/hadoop/blob/061b1685/hadoop-ozone/ozone-manager/src/main/webapps/ksm/ksm.js
----------------------------------------------------------------------
diff --git a/hadoop-ozone/ozone-manager/src/main/webapps/ksm/ksm.js b/hadoop-ozone/ozone-manager/src/main/webapps/ksm/ksm.js
deleted file mode 100644
index ab6f73b..0000000
--- a/hadoop-ozone/ozone-manager/src/main/webapps/ksm/ksm.js
+++ /dev/null
@@ -1,110 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-(function () {
-    "use strict";
-
-    var isIgnoredJmxKeys = function (key) {
-        return key == 'name' || key == 'modelerType' || key.match(/tag.*/);
-    };
-
-    angular.module('ksm', ['ozone', 'nvd3']);
-    angular.module('ksm').config(function ($routeProvider) {
-        $routeProvider
-            .when("/metrics/ksm", {
-                template: "<ksm-metrics></ksm-metrics>"
-            });
-    });
-    angular.module('ksm').component('ksmMetrics', {
-        templateUrl: 'ksm-metrics.html',
-        controller: function ($http) {
-            var ctrl = this;
-
-            ctrl.graphOptions = {
-                chart: {
-                    type: 'pieChart',
-                    height: 500,
-                    x: function (d) {
-                        return d.key;
-                    },
-                    y: function (d) {
-                        return d.value;
-                    },
-                    showLabels: true,
-                    labelType: 'value',
-                    duration: 500,
-                    labelThreshold: 0.01,
-                    valueFormat: function(d) {
-                        return d3.format('d')(d);
-                    },
-                    legend: {
-                        margin: {
-                            top: 5,
-                            right: 35,
-                            bottom: 5,
-                            left: 0
-                        }
-                    }
-                }
-            };
-
-
-            $http.get("jmx?qry=Hadoop:service=KeySpaceManager,name=KSMMetrics")
-                .then(function (result) {
-
-                    var groupedMetrics = {others: [], nums: {}};
-                    var metrics = result.data.beans[0]
-                    for (var key in metrics) {
-                        var numericalStatistic = key.match(/Num([A-Z][a-z]+)(.+?)(Fails)?$/);
-                        if (numericalStatistic) {
-                            var type = numericalStatistic[1];
-                            var name = numericalStatistic[2];
-                            var failed = numericalStatistic[3];
-                            groupedMetrics.nums[type] = groupedMetrics.nums[type] || {
-                                    failures: [],
-                                    all: []
-                                };
-                            if (failed) {
-                                groupedMetrics.nums[type].failures.push({
-                                    key: name,
-                                    value: metrics[key]
-                                })
-                            } else {
-                                if (name == "Ops") {
-                                    groupedMetrics.nums[type].ops = metrics[key]
-                                } else {
-                                    groupedMetrics.nums[type].all.push({
-                                        key: name,
-                                        value: metrics[key]
-                                    })
-                                }
-                            }
-                        } else if (isIgnoredJmxKeys(key)) {
-                            //ignore
-                        } else {
-                            groupedMetrics.others.push({
-                                'key': key,
-                                'value': metrics[key]
-                            });
-                        }
-                    }
-                    ctrl.metrics = groupedMetrics;
-                })
-        }
-    });
-
-})();
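
The dashboard script deleted above derives its charts by grouping JMX counter names of the form Num<Type><Name>, with an optional Fails suffix marking the failure series for that operation type (its renamed counterpart, ozoneManager.js, is referenced from the new index.html and presumably keeps the same logic). The sketch below restates that grouping rule as a small self-contained Java program, for consistency with the rest of the patch; the sample counter names are invented for illustration.

    import java.util.Map;
    import java.util.TreeMap;
    import java.util.regex.Matcher;
    import java.util.regex.Pattern;

    public final class MetricGroupingSketch {

      // Same shape as the pattern in the dashboard script: Num<Type><Name>[Fails]
      private static final Pattern NUM_METRIC =
          Pattern.compile("Num([A-Z][a-z]+)(.+?)(Fails)?$");

      public static void main(String[] args) {
        // Hypothetical counter names; the real ones come from the OM metrics bean.
        Map<String, Long> metrics = Map.of(
            "NumVolumeCreates", 12L,
            "NumVolumeCreateFails", 1L,
            "NumKeyLookups", 40L);

        // type -> ("all" or "failures", plus metric name) -> value
        Map<String, Map<String, Long>> grouped = new TreeMap<>();
        metrics.forEach((key, value) -> {
          Matcher m = NUM_METRIC.matcher(key);
          if (m.matches()) {
            String type = m.group(1);                          // e.g. "Volume"
            String series = m.group(3) != null ? "failures" : "all";
            grouped.computeIfAbsent(type, t -> new TreeMap<>())
                .put(series + ":" + m.group(2), value);
          }
        });
        grouped.forEach((type, series) ->
            System.out.println(type + " -> " + series));
      }
    }

Run standalone, it prints one line per operation type with its success and failure series, which is essentially what feeds the per-type request and failure charts in the metrics page.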

http://git-wip-us.apache.org/repos/asf/hadoop/blob/061b1685/hadoop-ozone/ozone-manager/src/main/webapps/ksm/main.css
----------------------------------------------------------------------
diff --git a/hadoop-ozone/ozone-manager/src/main/webapps/ksm/main.css b/hadoop-ozone/ozone-manager/src/main/webapps/ksm/main.css
deleted file mode 100644
index e442adc..0000000
--- a/hadoop-ozone/ozone-manager/src/main/webapps/ksm/main.css
+++ /dev/null
@@ -1,23 +0,0 @@
-/**
- *   Licensed to the Apache Software Foundation (ASF) under one or more
- *  contributor license agreements.  See the NOTICE file distributed with
- *  this work for additional information regarding copyright ownership.
- *  The ASF licenses this file to You under the Apache License, Version 2.0
- *  (the "License"); you may not use this file except in compliance with
- *  the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- *  Unless required by applicable law or agreed to in writing, software
- *  distributed under the License is distributed on an "AS IS" BASIS,
- *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- *  See the License for the specific language governing permissions and
- *  limitations under the License.
-*/
-body {
-  padding-top: 50px;
-}
-.starter-template {
-  padding: 40px 15px;
-  text-align: center;
-}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hadoop/blob/061b1685/hadoop-ozone/ozone-manager/src/main/webapps/ksm/main.html
----------------------------------------------------------------------
diff --git a/hadoop-ozone/ozone-manager/src/main/webapps/ksm/main.html b/hadoop-ozone/ozone-manager/src/main/webapps/ksm/main.html
deleted file mode 100644
index 0821899..0000000
--- a/hadoop-ozone/ozone-manager/src/main/webapps/ksm/main.html
+++ /dev/null
@@ -1,18 +0,0 @@
-<!--
-    Licensed to the Apache Software Foundation (ASF) under one or more
-    contributor license agreements.  See the NOTICE file distributed with
-    this work for additional information regarding copyright ownership.
-    The ASF licenses this file to You under the Apache License, Version 2.0
-    (the "License"); you may not use this file except in compliance with
-    the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-    Unless required by applicable law or agreed to in writing, software
-    distributed under the License is distributed on an "AS IS" BASIS,
-    WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-    See the License for the specific language governing permissions and
-    limitations under the License.
-  -->
-<overview>
-</overview>
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hadoop/blob/061b1685/hadoop-ozone/ozone-manager/src/main/webapps/ozoneManager/index.html
----------------------------------------------------------------------
diff --git a/hadoop-ozone/ozone-manager/src/main/webapps/ozoneManager/index.html b/hadoop-ozone/ozone-manager/src/main/webapps/ozoneManager/index.html
new file mode 100644
index 0000000..ba54cb2
--- /dev/null
+++ b/hadoop-ozone/ozone-manager/src/main/webapps/ozoneManager/index.html
@@ -0,0 +1,70 @@
+<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Strict//EN"
+        "http://www.w3.org/TR/xhtml1/DTD/xhtml1-strict.dtd">
+<!--
+   Licensed to the Apache Software Foundation (ASF) under one or more
+   contributor license agreements.  See the NOTICE file distributed with
+   this work for additional information regarding copyright ownership.
+   The ASF licenses this file to You under the Apache License, Version 2.0
+   (the "License"); you may not use this file except in compliance with
+   the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+-->
+<html lang="en">
+<head>
+    <meta charset="utf-8">
+    <meta http-equiv="X-UA-Compatible" content="IE=edge">
+    <meta name="viewport" content="width=device-width, initial-scale=1">
+    <!-- The above 3 meta tags *must* come first in the head; any other head content must come *after* these tags -->
+    <meta name="description" content="Ozone Manager">
+
+    <title>Ozone Manager</title>
+
+    <link href="static/bootstrap-3.3.7/css/bootstrap.min.css" rel="stylesheet">
+    <link href="static/hadoop.css" rel="stylesheet">
+    <link href="static/nvd3-1.8.5.min.css" rel="stylesheet">
+
+    <link href="static/ozone.css" rel="stylesheet">
+
+</head>
+
+<body ng-app="ozoneManager">
+
+<header class="navbar navbar-inverse navbar-fixed-top bs-docs-nav">
+    <div class="container-fluid">
+        <div class="navbar-header">
+            <button type="button" class="navbar-toggle collapsed" data-toggle="collapse" data-target="#navbar"
+                    aria-expanded="false" aria-controls="navbar">
+                <span class="sr-only">Toggle navigation</span>
+                <span class="icon-bar"></span>
+                <span class="icon-bar"></span>
+                <span class="icon-bar"></span>
+            </button>
+            <a class="navbar-brand" href="#">Ozone Manager</a>
+        </div>
+        <navmenu
+                metrics="{ 'OM metrics' : '#!/metrics/ozoneManager', 'Rpc metrics' : '#!/metrics/rpc'}"></navmenu>
+    </div>
+</header>
+
+<div class="container-fluid">
+    <ng-view></ng-view>
+</div><!-- /.container -->
+
+<script src="static/jquery-3.3.1.min.js"></script>
+<script src="static/angular-1.6.4.min.js"></script>
+<script src="static/angular-route-1.6.4.min.js"></script>
+<script src="static/d3-3.5.17.min.js"></script>
+<script src="static/nvd3-1.8.5.min.js"></script>
+<script src="static/angular-nvd3-1.0.9.min.js"></script>
+<script src="static/ozone.js"></script>
+<script src="ozoneManager.js"></script>
+<script src="static/bootstrap-3.3.7/js/bootstrap.min.js"></script>
+</body>
+</html>

http://git-wip-us.apache.org/repos/asf/hadoop/blob/061b1685/hadoop-ozone/ozone-manager/src/main/webapps/ozoneManager/main.css
----------------------------------------------------------------------
diff --git a/hadoop-ozone/ozone-manager/src/main/webapps/ozoneManager/main.css b/hadoop-ozone/ozone-manager/src/main/webapps/ozoneManager/main.css
new file mode 100644
index 0000000..e442adc
--- /dev/null
+++ b/hadoop-ozone/ozone-manager/src/main/webapps/ozoneManager/main.css
@@ -0,0 +1,23 @@
+/**
+ *   Licensed to the Apache Software Foundation (ASF) under one or more
+ *  contributor license agreements.  See the NOTICE file distributed with
+ *  this work for additional information regarding copyright ownership.
+ *  The ASF licenses this file to You under the Apache License, Version 2.0
+ *  (the "License"); you may not use this file except in compliance with
+ *  the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ *  Unless required by applicable law or agreed to in writing, software
+ *  distributed under the License is distributed on an "AS IS" BASIS,
+ *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ *  See the License for the specific language governing permissions and
+ *  limitations under the License.
+*/
+body {
+  padding-top: 50px;
+}
+.starter-template {
+  padding: 40px 15px;
+  text-align: center;
+}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hadoop/blob/061b1685/hadoop-ozone/ozone-manager/src/main/webapps/ozoneManager/main.html
----------------------------------------------------------------------
diff --git a/hadoop-ozone/ozone-manager/src/main/webapps/ozoneManager/main.html b/hadoop-ozone/ozone-manager/src/main/webapps/ozoneManager/main.html
new file mode 100644
index 0000000..0821899
--- /dev/null
+++ b/hadoop-ozone/ozone-manager/src/main/webapps/ozoneManager/main.html
@@ -0,0 +1,18 @@
+<!--
+    Licensed to the Apache Software Foundation (ASF) under one or more
+    contributor license agreements.  See the NOTICE file distributed with
+    this work for additional information regarding copyright ownership.
+    The ASF licenses this file to You under the Apache License, Version 2.0
+    (the "License"); you may not use this file except in compliance with
+    the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+    Unless required by applicable law or agreed to in writing, software
+    distributed under the License is distributed on an "AS IS" BASIS,
+    WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+    See the License for the specific language governing permissions and
+    limitations under the License.
+  -->
+<overview>
+</overview>
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hadoop/blob/061b1685/hadoop-ozone/ozone-manager/src/main/webapps/ozoneManager/om-metrics.html
----------------------------------------------------------------------
diff --git a/hadoop-ozone/ozone-manager/src/main/webapps/ozoneManager/om-metrics.html b/hadoop-ozone/ozone-manager/src/main/webapps/ozoneManager/om-metrics.html
new file mode 100644
index 0000000..15fba2f
--- /dev/null
+++ b/hadoop-ozone/ozone-manager/src/main/webapps/ozoneManager/om-metrics.html
@@ -0,0 +1,44 @@
+<!--
+   Licensed to the Apache Software Foundation (ASF) under one or more
+   contributor license agreements.  See the NOTICE file distributed with
+   this work for additional information regarding copyright ownership.
+   The ASF licenses this file to You under the Apache License, Version 2.0
+   (the "License"); you may not use this file except in compliance with
+   the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+-->
+<h1>OzoneManager Metrics</h1>
+
+<div ng-repeat="(type,numbers) in $ctrl.metrics.nums">
+    <h2>{{type}}</h2>
+    <div class="container">
+        <div class="col-md-6">
+            <h3>Requests ({{numbers.ops}} ops)</h3>
+            <nvd3 options="$ctrl.graphOptions"
+                  data="numbers.all"></nvd3>
+        </div>
+        <div class="col-md-6">
+            <h3>Failures</h3>
+            <nvd3 options="$ctrl.graphOptions"
+                  data="numbers.failures"></nvd3>
+        </div>
+    </div>
+</div>
+
+<div ng-show="$ctrl.metrics.others.length > 0">
+    <h2>Other JMX properties</h2>
+
+    <table class="table">
+        <tr ng-repeat="metric in $ctrl.metrics.others">
+            <td>{{metric.key}}</td>
+            <td>{{metric.value}}</td>
+        </tr>
+    </table>
+</div>


---------------------------------------------------------------------
To unsubscribe, e-mail: common-commits-unsubscribe@hadoop.apache.org
For additional commands, e-mail: common-commits-help@hadoop.apache.org