You are viewing a plain text version of this content. The canonical link for it is here.
Posted to common-commits@hadoop.apache.org by sh...@apache.org on 2018/07/11 23:10:11 UTC
[11/56] [abbrv] hadoop git commit: HDDS-167. Rename KeySpaceManager
to OzoneManager. Contributed by Arpit Agarwal.
http://git-wip-us.apache.org/repos/asf/hadoop/blob/061b1685/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmKeyInfo.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmKeyInfo.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmKeyInfo.java
new file mode 100644
index 0000000..05c8d45
--- /dev/null
+++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmKeyInfo.java
@@ -0,0 +1,277 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.ozone.om.helpers;
+
+import com.google.common.base.Preconditions;
+import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
+import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.KeyInfo;
+import org.apache.hadoop.util.Time;
+
+import java.io.IOException;
+import java.util.List;
+import java.util.stream.Collectors;
+
+/**
+ * Args for key block. The block instance for the key requested in putKey.
+ * This is returned from OM to client, and client use class to talk to
+ * datanode. Also, this is the metadata written to om.db on server side.
+ */
+public final class OmKeyInfo {
+ private final String volumeName;
+ private final String bucketName;
+ // name of key client specified
+ private String keyName;
+ private long dataSize;
+ private List<OmKeyLocationInfoGroup> keyLocationVersions;
+ private final long creationTime;
+ private long modificationTime;
+ private HddsProtos.ReplicationType type;
+ private HddsProtos.ReplicationFactor factor;
+
+ private OmKeyInfo(String volumeName, String bucketName, String keyName,
+ List<OmKeyLocationInfoGroup> versions, long dataSize,
+ long creationTime, long modificationTime, HddsProtos.ReplicationType type,
+ HddsProtos.ReplicationFactor factor) {
+ this.volumeName = volumeName;
+ this.bucketName = bucketName;
+ this.keyName = keyName;
+ this.dataSize = dataSize;
+ // it is important that the versions are ordered from old to new.
+ // Do this sanity check when versions got loaded on creating OmKeyInfo.
+ // TODO : this is not necessary, here only because versioning is still a
+ // work in-progress, remove this following check when versioning is
+ // complete and prove correctly functioning
+ long currentVersion = -1;
+ for (OmKeyLocationInfoGroup version : versions) {
+ Preconditions.checkArgument(
+ currentVersion + 1 == version.getVersion());
+ currentVersion = version.getVersion();
+ }
+ this.keyLocationVersions = versions;
+ this.creationTime = creationTime;
+ this.modificationTime = modificationTime;
+ this.factor = factor;
+ this.type = type;
+ }
+
+ public String getVolumeName() {
+ return volumeName;
+ }
+
+ public String getBucketName() {
+ return bucketName;
+ }
+
+ public HddsProtos.ReplicationType getType() {
+ return type;
+ }
+
+ public HddsProtos.ReplicationFactor getFactor() {
+ return factor;
+ }
+
+ public String getKeyName() {
+ return keyName;
+ }
+
+ public void setKeyName(String keyName) {
+ this.keyName = keyName;
+ }
+
+ public long getDataSize() {
+ return dataSize;
+ }
+
+ public void setDataSize(long size) {
+ this.dataSize = size;
+ }
+
+ public synchronized OmKeyLocationInfoGroup getLatestVersionLocations()
+ throws IOException {
+ return keyLocationVersions.size() == 0? null :
+ keyLocationVersions.get(keyLocationVersions.size() - 1);
+ }
+
+ public List<OmKeyLocationInfoGroup> getKeyLocationVersions() {
+ return keyLocationVersions;
+ }
+
+ public void updateModifcationTime() {
+ this.modificationTime = Time.monotonicNow();
+ }
+
+ /**
+ * Append a set of blocks to the latest version. Note that these blocks are
+ * part of the latest version, not a new version.
+ *
+ * @param newLocationList the list of new blocks to be added.
+ * @throws IOException
+ */
+ public synchronized void appendNewBlocks(
+ List<OmKeyLocationInfo> newLocationList) throws IOException {
+ if (keyLocationVersions.size() == 0) {
+ throw new IOException("Appending new block, but no version exist");
+ }
+ OmKeyLocationInfoGroup currentLatestVersion =
+ keyLocationVersions.get(keyLocationVersions.size() - 1);
+ currentLatestVersion.appendNewBlocks(newLocationList);
+ setModificationTime(Time.now());
+ }
+
+ /**
+ * Add a new set of blocks. The new blocks will be added as appending a new
+ * version to the all version list.
+ *
+ * @param newLocationList the list of new blocks to be added.
+ * @throws IOException
+ */
+ public synchronized long addNewVersion(
+ List<OmKeyLocationInfo> newLocationList) throws IOException {
+ long latestVersionNum;
+ if (keyLocationVersions.size() == 0) {
+ // no version exist, these blocks are the very first version.
+ keyLocationVersions.add(new OmKeyLocationInfoGroup(0, newLocationList));
+ latestVersionNum = 0;
+ } else {
+ // it is important that the new version are always at the tail of the list
+ OmKeyLocationInfoGroup currentLatestVersion =
+ keyLocationVersions.get(keyLocationVersions.size() - 1);
+ // the new version is created based on the current latest version
+ OmKeyLocationInfoGroup newVersion =
+ currentLatestVersion.generateNextVersion(newLocationList);
+ keyLocationVersions.add(newVersion);
+ latestVersionNum = newVersion.getVersion();
+ }
+ setModificationTime(Time.now());
+ return latestVersionNum;
+ }
+
+ public long getCreationTime() {
+ return creationTime;
+ }
+
+ public long getModificationTime() {
+ return modificationTime;
+ }
+
+ public void setModificationTime(long modificationTime) {
+ this.modificationTime = modificationTime;
+ }
+
+ /**
+ * Builder of OmKeyInfo.
+ */
+ public static class Builder {
+ private String volumeName;
+ private String bucketName;
+ private String keyName;
+ private long dataSize;
+ private List<OmKeyLocationInfoGroup> omKeyLocationInfoGroups;
+ private long creationTime;
+ private long modificationTime;
+ private HddsProtos.ReplicationType type;
+ private HddsProtos.ReplicationFactor factor;
+
+ public Builder setVolumeName(String volume) {
+ this.volumeName = volume;
+ return this;
+ }
+
+ public Builder setBucketName(String bucket) {
+ this.bucketName = bucket;
+ return this;
+ }
+
+ public Builder setKeyName(String key) {
+ this.keyName = key;
+ return this;
+ }
+
+ public Builder setOmKeyLocationInfos(
+ List<OmKeyLocationInfoGroup> omKeyLocationInfoList) {
+ this.omKeyLocationInfoGroups = omKeyLocationInfoList;
+ return this;
+ }
+
+ public Builder setDataSize(long size) {
+ this.dataSize = size;
+ return this;
+ }
+
+ public Builder setCreationTime(long crTime) {
+ this.creationTime = crTime;
+ return this;
+ }
+
+ public Builder setModificationTime(long mTime) {
+ this.modificationTime = mTime;
+ return this;
+ }
+
+ public Builder setReplicationFactor(HddsProtos.ReplicationFactor factor) {
+ this.factor = factor;
+ return this;
+ }
+
+ public Builder setReplicationType(HddsProtos.ReplicationType type) {
+ this.type = type;
+ return this;
+ }
+
+ public OmKeyInfo build() {
+ return new OmKeyInfo(
+ volumeName, bucketName, keyName, omKeyLocationInfoGroups,
+ dataSize, creationTime, modificationTime, type, factor);
+ }
+ }
+
+ public KeyInfo getProtobuf() {
+ long latestVersion = keyLocationVersions.size() == 0 ? -1 :
+ keyLocationVersions.get(keyLocationVersions.size() - 1).getVersion();
+ return KeyInfo.newBuilder()
+ .setVolumeName(volumeName)
+ .setBucketName(bucketName)
+ .setKeyName(keyName)
+ .setDataSize(dataSize)
+ .setFactor(factor)
+ .setType(type)
+ .addAllKeyLocationList(keyLocationVersions.stream()
+ .map(OmKeyLocationInfoGroup::getProtobuf)
+ .collect(Collectors.toList()))
+ .setLatestVersion(latestVersion)
+ .setCreationTime(creationTime)
+ .setModificationTime(modificationTime)
+ .build();
+ }
+
+ public static OmKeyInfo getFromProtobuf(KeyInfo keyInfo) {
+ return new OmKeyInfo(
+ keyInfo.getVolumeName(),
+ keyInfo.getBucketName(),
+ keyInfo.getKeyName(),
+ keyInfo.getKeyLocationListList().stream()
+ .map(OmKeyLocationInfoGroup::getFromProtobuf)
+ .collect(Collectors.toList()),
+ keyInfo.getDataSize(),
+ keyInfo.getCreationTime(),
+ keyInfo.getModificationTime(),
+ keyInfo.getType(),
+ keyInfo.getFactor());
+ }
+
+}
http://git-wip-us.apache.org/repos/asf/hadoop/blob/061b1685/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmKeyLocationInfo.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmKeyLocationInfo.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmKeyLocationInfo.java
new file mode 100644
index 0000000..3f6666d
--- /dev/null
+++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmKeyLocationInfo.java
@@ -0,0 +1,129 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with this
+ * work for additional information regarding copyright ownership. The ASF
+ * licenses this file to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations under
+ * the License.
+ */
+package org.apache.hadoop.ozone.om.helpers;
+
+import org.apache.hadoop.hdds.client.BlockID;
+import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.KeyLocation;
+
+/**
+ * One key can be too huge to fit in one container. In which case it gets split
+ * into a number of subkeys. This class represents one such subkey instance.
+ */
+public final class OmKeyLocationInfo {
+ private final BlockID blockID;
+ private final boolean shouldCreateContainer;
+ // the id of this subkey in all the subkeys.
+ private final long length;
+ private final long offset;
+ // the version number indicating when this block was added
+ private long createVersion;
+
+ private OmKeyLocationInfo(BlockID blockID, boolean shouldCreateContainer,
+ long length, long offset) {
+ this.blockID = blockID;
+ this.shouldCreateContainer = shouldCreateContainer;
+ this.length = length;
+ this.offset = offset;
+ }
+
+ public void setCreateVersion(long version) {
+ createVersion = version;
+ }
+
+ public long getCreateVersion() {
+ return createVersion;
+ }
+
+ public BlockID getBlockID() {
+ return blockID;
+ }
+
+ public long getContainerID() {
+ return blockID.getContainerID();
+ }
+
+ public long getLocalID() {
+ return blockID.getLocalID();
+ }
+
+ public boolean getShouldCreateContainer() {
+ return shouldCreateContainer;
+ }
+
+ public long getLength() {
+ return length;
+ }
+
+ public long getOffset() {
+ return offset;
+ }
+
+ /**
+ * Builder of OmKeyLocationInfo.
+ */
+ public static class Builder {
+ private BlockID blockID;
+ private boolean shouldCreateContainer;
+ private long length;
+ private long offset;
+
+ public Builder setBlockID(BlockID blockId) {
+ this.blockID = blockId;
+ return this;
+ }
+
+ public Builder setShouldCreateContainer(boolean create) {
+ this.shouldCreateContainer = create;
+ return this;
+ }
+
+ public Builder setLength(long len) {
+ this.length = len;
+ return this;
+ }
+
+ public Builder setOffset(long off) {
+ this.offset = off;
+ return this;
+ }
+
+ public OmKeyLocationInfo build() {
+ return new OmKeyLocationInfo(blockID,
+ shouldCreateContainer, length, offset);
+ }
+ }
+
+ public KeyLocation getProtobuf() {
+ return KeyLocation.newBuilder()
+ .setBlockID(blockID.getProtobuf())
+ .setShouldCreateContainer(shouldCreateContainer)
+ .setLength(length)
+ .setOffset(offset)
+ .setCreateVersion(createVersion)
+ .build();
+ }
+
+ public static OmKeyLocationInfo getFromProtobuf(KeyLocation keyLocation) {
+ OmKeyLocationInfo info = new OmKeyLocationInfo(
+ BlockID.getFromProtobuf(keyLocation.getBlockID()),
+ keyLocation.getShouldCreateContainer(),
+ keyLocation.getLength(),
+ keyLocation.getOffset());
+ info.setCreateVersion(keyLocation.getCreateVersion());
+ return info;
+ }
+}
http://git-wip-us.apache.org/repos/asf/hadoop/blob/061b1685/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmKeyLocationInfoGroup.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmKeyLocationInfoGroup.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmKeyLocationInfoGroup.java
new file mode 100644
index 0000000..8bdcee3
--- /dev/null
+++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmKeyLocationInfoGroup.java
@@ -0,0 +1,118 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with this
+ * work for additional information regarding copyright ownership. The ASF
+ * licenses this file to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations under
+ * the License.
+ */
+package org.apache.hadoop.ozone.om.helpers;
+
+import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.KeyLocationList;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.List;
+import java.util.stream.Collectors;
+
+/**
+ * A list of key locations. This class represents one single version of the
+ * blocks of a key.
+ */
+public class OmKeyLocationInfoGroup {
+ private final long version;
+ private final List<OmKeyLocationInfo> locationList;
+
+ public OmKeyLocationInfoGroup(long version,
+ List<OmKeyLocationInfo> locations) {
+ this.version = version;
+ this.locationList = locations;
+ }
+
+ /**
+ * Return only the blocks that are created in the most recent version.
+ *
+ * @return the list of blocks that are created in the latest version.
+ */
+ public List<OmKeyLocationInfo> getBlocksLatestVersionOnly() {
+ List<OmKeyLocationInfo> list = new ArrayList<>();
+ locationList.stream().filter(x -> x.getCreateVersion() == version)
+ .forEach(list::add);
+ return list;
+ }
+
+ public long getVersion() {
+ return version;
+ }
+
+ public List<OmKeyLocationInfo> getLocationList() {
+ return locationList;
+ }
+
+ public KeyLocationList getProtobuf() {
+ return KeyLocationList.newBuilder()
+ .setVersion(version)
+ .addAllKeyLocations(
+ locationList.stream().map(OmKeyLocationInfo::getProtobuf)
+ .collect(Collectors.toList()))
+ .build();
+ }
+
+ public static OmKeyLocationInfoGroup getFromProtobuf(
+ KeyLocationList keyLocationList) {
+ return new OmKeyLocationInfoGroup(
+ keyLocationList.getVersion(),
+ keyLocationList.getKeyLocationsList().stream()
+ .map(OmKeyLocationInfo::getFromProtobuf)
+ .collect(Collectors.toList()));
+ }
+
+ /**
+ * Given a new block location, generate a new version list based upon this
+ * one.
+ *
+ * @param newLocationList a list of new location to be added.
+ * @return
+ */
+ OmKeyLocationInfoGroup generateNextVersion(
+ List<OmKeyLocationInfo> newLocationList) throws IOException {
+ // TODO : revisit if we can do this method more efficiently
+ // one potential inefficiency here is that later version always include
+ // older ones. e.g. v1 has B1, then v2, v3...will all have B1 and only add
+ // more
+ List<OmKeyLocationInfo> newList = new ArrayList<>();
+ newList.addAll(locationList);
+ for (OmKeyLocationInfo newInfo : newLocationList) {
+ // all these new blocks will have addVersion of current version + 1
+ newInfo.setCreateVersion(version + 1);
+ newList.add(newInfo);
+ }
+ return new OmKeyLocationInfoGroup(version + 1, newList);
+ }
+
+ void appendNewBlocks(List<OmKeyLocationInfo> newLocationList)
+ throws IOException {
+ for (OmKeyLocationInfo info : newLocationList) {
+ info.setCreateVersion(version);
+ locationList.add(info);
+ }
+ }
+
+ @Override
+ public String toString() {
+ StringBuilder sb = new StringBuilder();
+ sb.append("version:").append(version).append(" ");
+ for (OmKeyLocationInfo kli : locationList) {
+ sb.append(kli.getLocalID()).append(" || ");
+ }
+ return sb.toString();
+ }
+}
http://git-wip-us.apache.org/repos/asf/hadoop/blob/061b1685/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmOzoneAclMap.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmOzoneAclMap.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmOzoneAclMap.java
new file mode 100644
index 0000000..de75a05
--- /dev/null
+++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmOzoneAclMap.java
@@ -0,0 +1,110 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.ozone.om.helpers;
+
+import org.apache.hadoop.ozone.protocol.proto
+ .OzoneManagerProtocolProtos.OzoneAclInfo;
+import org.apache.hadoop.ozone.protocol.proto
+ .OzoneManagerProtocolProtos.OzoneAclInfo.OzoneAclRights;
+import org.apache.hadoop.ozone.protocol.proto
+ .OzoneManagerProtocolProtos.OzoneAclInfo.OzoneAclType;
+
import java.util.ArrayList;
import java.util.EnumMap;
import java.util.HashMap;
import java.util.LinkedList;
import java.util.List;
import java.util.Map;
+
+/**
+ * This helper class keeps a map of all user and their permissions.
+ */
+public class OmOzoneAclMap {
+ // per Acl Type user:rights map
+ private ArrayList<Map<String, OzoneAclRights>> aclMaps;
+
+ OmOzoneAclMap() {
+ aclMaps = new ArrayList<>();
+ for (OzoneAclType aclType : OzoneAclType.values()) {
+ aclMaps.add(aclType.ordinal(), new HashMap<>());
+ }
+ }
+
+ private Map<String, OzoneAclRights> getMap(OzoneAclType type) {
+ return aclMaps.get(type.ordinal());
+ }
+
+ // For a given acl type and user, get the stored acl
+ private OzoneAclRights getAcl(OzoneAclType type, String user) {
+ return getMap(type).get(user);
+ }
+
+ // Add a new acl to the map
+ public void addAcl(OzoneAclInfo acl) {
+ getMap(acl.getType()).put(acl.getName(), acl.getRights());
+ }
+
+ // for a given acl, check if the user has access rights
+ public boolean hasAccess(OzoneAclInfo acl) {
+ OzoneAclRights storedRights = getAcl(acl.getType(), acl.getName());
+ if (storedRights != null) {
+ switch (acl.getRights()) {
+ case READ:
+ return (storedRights == OzoneAclRights.READ)
+ || (storedRights == OzoneAclRights.READ_WRITE);
+ case WRITE:
+ return (storedRights == OzoneAclRights.WRITE)
+ || (storedRights == OzoneAclRights.READ_WRITE);
+ case READ_WRITE:
+ return (storedRights == OzoneAclRights.READ_WRITE);
+ default:
+ return false;
+ }
+ } else {
+ return false;
+ }
+ }
+
+ // Convert this map to OzoneAclInfo Protobuf List
+ public List<OzoneAclInfo> ozoneAclGetProtobuf() {
+ List<OzoneAclInfo> aclList = new LinkedList<>();
+ for (OzoneAclType type: OzoneAclType.values()) {
+ for (Map.Entry<String, OzoneAclRights> entry :
+ aclMaps.get(type.ordinal()).entrySet()) {
+ OzoneAclInfo aclInfo = OzoneAclInfo.newBuilder()
+ .setName(entry.getKey())
+ .setType(type)
+ .setRights(entry.getValue())
+ .build();
+ aclList.add(aclInfo);
+ }
+ }
+
+ return aclList;
+ }
+
+ // Create map from list of OzoneAclInfos
+ public static OmOzoneAclMap ozoneAclGetFromProtobuf(
+ List<OzoneAclInfo> aclList) {
+ OmOzoneAclMap aclMap = new OmOzoneAclMap();
+ for (OzoneAclInfo acl : aclList) {
+ aclMap.addAcl(acl);
+ }
+ return aclMap;
+ }
+}
http://git-wip-us.apache.org/repos/asf/hadoop/blob/061b1685/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmVolumeArgs.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmVolumeArgs.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmVolumeArgs.java
new file mode 100644
index 0000000..c8b59b6
--- /dev/null
+++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmVolumeArgs.java
@@ -0,0 +1,223 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.ozone.om.helpers;
+
+import com.google.common.base.Preconditions;
+import org.apache.hadoop.ozone.protocol.proto
+ .OzoneManagerProtocolProtos.OzoneAclInfo;
+import org.apache.hadoop.ozone.protocol.proto
+ .OzoneManagerProtocolProtos.VolumeInfo;
+import org.apache.hadoop.hdds.protocol.proto.HddsProtos.KeyValue;
+
+import java.io.IOException;
+import java.util.HashMap;
+import java.util.LinkedList;
+import java.util.List;
+import java.util.Map;
+import java.util.stream.Collectors;
+
+
+/**
+ * A class that encapsulates the OmVolumeArgs Args.
+ */
+public final class OmVolumeArgs {
+ private final String adminName;
+ private final String ownerName;
+ private final String volume;
+ private final long creationTime;
+ private final long quotaInBytes;
+ private final Map<String, String> keyValueMap;
+ private final OmOzoneAclMap aclMap;
+
+ /**
+ * Private constructor, constructed via builder.
+ * @param adminName - Administrator's name.
+ * @param ownerName - Volume owner's name
+ * @param volume - volume name
+ * @param quotaInBytes - Volume Quota in bytes.
+ * @param keyValueMap - keyValue map.
+ * @param aclMap - User to access rights map.
+ * @param creationTime - Volume creation time.
+ */
+ private OmVolumeArgs(String adminName, String ownerName, String volume,
+ long quotaInBytes, Map<String, String> keyValueMap,
+ OmOzoneAclMap aclMap, long creationTime) {
+ this.adminName = adminName;
+ this.ownerName = ownerName;
+ this.volume = volume;
+ this.quotaInBytes = quotaInBytes;
+ this.keyValueMap = keyValueMap;
+ this.aclMap = aclMap;
+ this.creationTime = creationTime;
+ }
+
+ /**
+ * Returns the Admin Name.
+ * @return String.
+ */
+ public String getAdminName() {
+ return adminName;
+ }
+
+ /**
+ * Returns the owner Name.
+ * @return String
+ */
+ public String getOwnerName() {
+ return ownerName;
+ }
+
+ /**
+ * Returns the volume Name.
+ * @return String
+ */
+ public String getVolume() {
+ return volume;
+ }
+
+ /**
+ * Returns creation time.
+ * @return long
+ */
+ public long getCreationTime() {
+ return creationTime;
+ }
+
+ /**
+ * Returns Quota in Bytes.
+ * @return long, Quota in bytes.
+ */
+ public long getQuotaInBytes() {
+ return quotaInBytes;
+ }
+
+ public Map<String, String> getKeyValueMap() {
+ return keyValueMap;
+ }
+
+ public OmOzoneAclMap getAclMap() {
+ return aclMap;
+ }
+ /**
+ * Returns new builder class that builds a OmVolumeArgs.
+ *
+ * @return Builder
+ */
+ public static Builder newBuilder() {
+ return new Builder();
+ }
+
+ /**
+ * Builder for OmVolumeArgs.
+ */
+ public static class Builder {
+ private String adminName;
+ private String ownerName;
+ private String volume;
+ private long creationTime;
+ private long quotaInBytes;
+ private Map<String, String> keyValueMap;
+ private OmOzoneAclMap aclMap;
+
+ /**
+ * Constructs a builder.
+ */
+ Builder() {
+ keyValueMap = new HashMap<>();
+ aclMap = new OmOzoneAclMap();
+ }
+
+ public Builder setAdminName(String admin) {
+ this.adminName = admin;
+ return this;
+ }
+
+ public Builder setOwnerName(String owner) {
+ this.ownerName = owner;
+ return this;
+ }
+
+ public Builder setVolume(String volumeName) {
+ this.volume = volumeName;
+ return this;
+ }
+
+ public Builder setCreationTime(long createdOn) {
+ this.creationTime = createdOn;
+ return this;
+ }
+
+ public Builder setQuotaInBytes(long quota) {
+ this.quotaInBytes = quota;
+ return this;
+ }
+
+ public Builder addMetadata(String key, String value) {
+ keyValueMap.put(key, value); // overwrite if present.
+ return this;
+ }
+
+ public Builder addOzoneAcls(OzoneAclInfo acl) throws IOException {
+ aclMap.addAcl(acl);
+ return this;
+ }
+
+ /**
+ * Constructs a CreateVolumeArgument.
+ * @return CreateVolumeArgs.
+ */
+ public OmVolumeArgs build() {
+ Preconditions.checkNotNull(adminName);
+ Preconditions.checkNotNull(ownerName);
+ Preconditions.checkNotNull(volume);
+ return new OmVolumeArgs(adminName, ownerName, volume, quotaInBytes,
+ keyValueMap, aclMap, creationTime);
+ }
+ }
+
+ public VolumeInfo getProtobuf() {
+ List<KeyValue> metadataList = new LinkedList<>();
+ for (Map.Entry<String, String> entry : keyValueMap.entrySet()) {
+ metadataList.add(KeyValue.newBuilder().setKey(entry.getKey()).
+ setValue(entry.getValue()).build());
+ }
+ List<OzoneAclInfo> aclList = aclMap.ozoneAclGetProtobuf();
+
+ return VolumeInfo.newBuilder()
+ .setAdminName(adminName)
+ .setOwnerName(ownerName)
+ .setVolume(volume)
+ .setQuotaInBytes(quotaInBytes)
+ .addAllMetadata(metadataList)
+ .addAllVolumeAcls(aclList)
+ .setCreationTime(creationTime)
+ .build();
+ }
+
+ public static OmVolumeArgs getFromProtobuf(VolumeInfo volInfo) {
+ Map<String, String> kvMap = volInfo.getMetadataList().stream()
+ .collect(Collectors.toMap(KeyValue::getKey,
+ KeyValue::getValue));
+ OmOzoneAclMap aclMap =
+ OmOzoneAclMap.ozoneAclGetFromProtobuf(volInfo.getVolumeAclsList());
+
+ return new OmVolumeArgs(volInfo.getAdminName(), volInfo.getOwnerName(),
+ volInfo.getVolume(), volInfo.getQuotaInBytes(), kvMap, aclMap,
+ volInfo.getCreationTime());
+ }
+}
http://git-wip-us.apache.org/repos/asf/hadoop/blob/061b1685/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OpenKeySession.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OpenKeySession.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OpenKeySession.java
new file mode 100644
index 0000000..bc364e6
--- /dev/null
+++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OpenKeySession.java
@@ -0,0 +1,50 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.ozone.om.helpers;
+
+/**
+ * This class represents a open key "session". A session here means a key is
+ * opened by a specific client, the client sends the handler to server, such
+ * that servers can recognize this client, and thus know how to close the key.
+ */
+public class OpenKeySession {
+ private final int id;
+ private final OmKeyInfo keyInfo;
+ // the version of the key when it is being opened in this session.
+ // a block that has a create version equals to open version means it will
+ // be committed only when this open session is closed.
+ private long openVersion;
+
+ public OpenKeySession(int id, OmKeyInfo info, long version) {
+ this.id = id;
+ this.keyInfo = info;
+ this.openVersion = version;
+ }
+
+ public long getOpenVersion() {
+ return this.openVersion;
+ }
+
+ public OmKeyInfo getKeyInfo() {
+ return keyInfo;
+ }
+
+ public int getId() {
+ return id;
+ }
+}
http://git-wip-us.apache.org/repos/asf/hadoop/blob/061b1685/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/ServiceInfo.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/ServiceInfo.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/ServiceInfo.java
new file mode 100644
index 0000000..9b03aef
--- /dev/null
+++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/ServiceInfo.java
@@ -0,0 +1,237 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.ozone.om.helpers;
+
+
+import com.fasterxml.jackson.annotation.JsonIgnore;
+import com.fasterxml.jackson.databind.ObjectMapper;
+import com.fasterxml.jackson.databind.ObjectReader;
+import com.fasterxml.jackson.databind.ObjectWriter;
+import com.google.common.base.Preconditions;
+import org.apache.hadoop.ozone.client.rest.response.BucketInfo;
+import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos;
+import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos
+ .ServicePort;
+import org.apache.hadoop.hdds.protocol.proto.HddsProtos.NodeType;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.stream.Collectors;
+
+/**
+ * ServiceInfo holds the config details of Ozone services.
+ */
+public final class ServiceInfo {
+
+ // Jackson reader/writer instances are thread-safe and expensive to build,
+ // so they are shared as constants.
+ private static final ObjectReader READER =
+ new ObjectMapper().readerFor(ServiceInfo.class);
+ private static final ObjectWriter WRITER =
+ new ObjectMapper().writerWithDefaultPrettyPrinter();
+
+ /**
+ * Type of node/service.
+ */
+ private NodeType nodeType;
+ /**
+ * Hostname of the node in which the service is running.
+ */
+ private String hostname;
+
+ /**
+ * List of ports the service listens to.
+ */
+ private Map<ServicePort.Type, Integer> ports;
+
+ /**
+ * Default constructor for JSON deserialization.
+ */
+ public ServiceInfo() {}
+
+ /**
+ * Constructs the ServiceInfo for the {@code nodeType}.
+ * @param nodeType type of node/service
+ * @param hostname hostname of the service
+ * @param portList list of ports the service listens to
+ */
+ private ServiceInfo(
+ NodeType nodeType, String hostname, List<ServicePort> portList) {
+ Preconditions.checkNotNull(nodeType);
+ Preconditions.checkNotNull(hostname);
+ this.nodeType = nodeType;
+ this.hostname = hostname;
+ this.ports = new HashMap<>();
+ for (ServicePort port : portList) {
+ ports.put(port.getType(), port.getValue());
+ }
+ }
+
+ /**
+ * Returns the type of node/service.
+ * @return node type
+ */
+ public NodeType getNodeType() {
+ return nodeType;
+ }
+
+ /**
+ * Returns the hostname of the service.
+ * @return hostname
+ */
+ public String getHostname() {
+ return hostname;
+ }
+
+ /**
+ * Returns ServicePort.Type to port mappings.
+ * @return ports
+ */
+ public Map<ServicePort.Type, Integer> getPorts() {
+ return ports;
+ }
+
+ /**
+ * Returns the port for given type.
+ *
+ * @param type the type of port.
+ * ex: RPC, HTTP, HTTPS, etc..
+ * @throws NullPointerException if the service does not expose a port of
+ * the given type (the missing map entry unboxes a null Integer).
+ */
+ @JsonIgnore
+ public int getPort(ServicePort.Type type) {
+ return ports.get(type);
+ }
+
+ /**
+ * Converts {@link ServiceInfo} to OzoneManagerProtocolProtos.ServiceInfo.
+ *
+ * @return OzoneManagerProtocolProtos.ServiceInfo
+ */
+ @JsonIgnore
+ public OzoneManagerProtocolProtos.ServiceInfo getProtobuf() {
+ OzoneManagerProtocolProtos.ServiceInfo.Builder builder =
+ OzoneManagerProtocolProtos.ServiceInfo.newBuilder();
+ builder.setNodeType(nodeType)
+ .setHostname(hostname)
+ .addAllServicePorts(
+ ports.entrySet().stream()
+ .map(
+ entry ->
+ ServicePort.newBuilder()
+ .setType(entry.getKey())
+ .setValue(entry.getValue()).build())
+ .collect(Collectors.toList()));
+ return builder.build();
+ }
+
+ /**
+ * Converts OzoneManagerProtocolProtos.ServiceInfo to {@link ServiceInfo}.
+ *
+ * @return {@link ServiceInfo}
+ */
+ @JsonIgnore
+ public static ServiceInfo getFromProtobuf(
+ OzoneManagerProtocolProtos.ServiceInfo serviceInfo) {
+ return new ServiceInfo(serviceInfo.getNodeType(),
+ serviceInfo.getHostname(),
+ serviceInfo.getServicePortsList());
+ }
+
+ /**
+ * Returns a JSON string of this object.
+ *
+ * @return String - json string
+ * @throws IOException
+ */
+ public String toJsonString() throws IOException {
+ return WRITER.writeValueAsString(this);
+ }
+
+ /**
+ * Parse a JSON string into ServiceInfo Object.
+ *
+ * @param jsonString Json String
+ * @return ServiceInfo
+ * @throws IOException
+ */
+ public static ServiceInfo parse(String jsonString) throws IOException {
+ // READER is configured for ServiceInfo, so the parsed object is a
+ // ServiceInfo; the previous BucketInfo return type caused a
+ // ClassCastException at every call site.
+ return READER.readValue(jsonString);
+ }
+
+ /**
+ * Creates a new builder to build {@link ServiceInfo}.
+ * @return {@link ServiceInfo.Builder}
+ */
+ public static Builder newBuilder() {
+ return new Builder();
+ }
+
+ /**
+ * Builder used to build/construct {@link ServiceInfo}.
+ */
+ public static class Builder {
+
+ private NodeType node;
+ private String host;
+ private List<ServicePort> portList = new ArrayList<>();
+
+
+ /**
+ * Sets the node/service type.
+ * @param nodeType type of node
+ * @return the builder
+ */
+ public Builder setNodeType(NodeType nodeType) {
+ node = nodeType;
+ return this;
+ }
+
+ /**
+ * Sets the hostname of the service.
+ * @param hostname service hostname
+ * @return the builder
+ */
+ public Builder setHostname(String hostname) {
+ host = hostname;
+ return this;
+ }
+
+ /**
+ * Adds the service port to the service port list.
+ * @param servicePort RPC port
+ * @return the builder
+ */
+ public Builder addServicePort(ServicePort servicePort) {
+ portList.add(servicePort);
+ return this;
+ }
+
+
+ /**
+ * Builds and returns {@link ServiceInfo} with the set values.
+ * @return {@link ServiceInfo}
+ */
+ public ServiceInfo build() {
+ return new ServiceInfo(node, host, portList);
+ }
+ }
+
+}
http://git-wip-us.apache.org/repos/asf/hadoop/blob/061b1685/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/VolumeArgs.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/VolumeArgs.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/VolumeArgs.java
new file mode 100644
index 0000000..6fc7c8f
--- /dev/null
+++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/VolumeArgs.java
@@ -0,0 +1,140 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.ozone.om.helpers;
+
+import com.google.common.base.Preconditions;
+
+import java.util.HashMap;
+import java.util.Map;
+
+/**
+ * A class that encapsulates the createVolume Args.
+ */
+public final class VolumeArgs {
+ private final String adminName;
+ private final String ownerName;
+ private final String volume;
+ private final long quotaInBytes;
+ private final Map<String, String> extendedAttributes;
+
+ /**
+ * Private constructor, constructed via builder.
+ *
+ * @param adminName - Administrator name.
+ * @param ownerName - Volume owner's name
+ * @param volume - volume name
+ * @param quotaInBytes - Volume Quota in bytes.
+ * @param keyValueMap - keyValue map.
+ */
+ private VolumeArgs(String adminName, String ownerName, String volume,
+ long quotaInBytes, Map<String, String> keyValueMap) {
+ this.adminName = adminName;
+ this.ownerName = ownerName;
+ this.volume = volume;
+ this.quotaInBytes = quotaInBytes;
+ this.extendedAttributes = keyValueMap;
+ }
+
+ /**
+ * Returns the Admin Name.
+ *
+ * @return String.
+ */
+ public String getAdminName() {
+ return adminName;
+ }
+
+ /**
+ * Returns the owner Name.
+ *
+ * @return String
+ */
+ public String getOwnerName() {
+ return ownerName;
+ }
+
+ /**
+ * Returns the volume Name.
+ *
+ * @return String
+ */
+ public String getVolume() {
+ return volume;
+ }
+
+ /**
+ * Returns Quota in Bytes.
+ *
+ * @return long, Quota in bytes.
+ */
+ public long getQuotaInBytes() {
+ return quotaInBytes;
+ }
+
+ /**
+ * Returns the extended attribute (metadata) key/value map.
+ * Note: the backing map is exposed directly, not copied.
+ *
+ * @return Map of metadata key/value pairs.
+ */
+ public Map<String, String> getExtendedAttributes() {
+ return extendedAttributes;
+ }
+
+ /**
+ * Builder used to construct {@link VolumeArgs}.
+ */
+ static class Builder {
+ private String adminName;
+ private String ownerName;
+ private String volume;
+ private long quotaInBytes;
+ private Map<String, String> extendedAttributes;
+
+ /**
+ * Constructs a builder.
+ */
+ Builder() {
+ extendedAttributes = new HashMap<>();
+ }
+
+ /** Sets the administrator name. */
+ public void setAdminName(String adminName) {
+ this.adminName = adminName;
+ }
+
+ /** Sets the volume owner's name. */
+ public void setOwnerName(String ownerName) {
+ this.ownerName = ownerName;
+ }
+
+ /** Sets the volume name. */
+ public void setVolume(String volume) {
+ this.volume = volume;
+ }
+
+ /** Sets the volume quota in bytes. */
+ public void setQuotaInBytes(long quotaInBytes) {
+ this.quotaInBytes = quotaInBytes;
+ }
+
+ /** Adds a metadata key/value pair, replacing any existing value. */
+ public void addMetadata(String key, String value) {
+ extendedAttributes.put(key, value); // overwrite if present.
+ }
+
+ /**
+ * Constructs a CreateVolumeArgument.
+ *
+ * @return CreateVolumeArgs.
+ * @throws NullPointerException if adminName, ownerName or volume is unset
+ */
+ public VolumeArgs build() {
+ Preconditions.checkNotNull(adminName);
+ Preconditions.checkNotNull(ownerName);
+ Preconditions.checkNotNull(volume);
+ return new VolumeArgs(adminName, ownerName, volume, quotaInBytes,
+ extendedAttributes);
+ }
+ }
+}
http://git-wip-us.apache.org/repos/asf/hadoop/blob/061b1685/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/package-info.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/package-info.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/package-info.java
new file mode 100644
index 0000000..b1211d8
--- /dev/null
+++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/package-info.java
@@ -0,0 +1,18 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.ozone.om.helpers;
\ No newline at end of file
http://git-wip-us.apache.org/repos/asf/hadoop/blob/061b1685/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/package-info.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/package-info.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/package-info.java
new file mode 100644
index 0000000..1744cff
--- /dev/null
+++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/package-info.java
@@ -0,0 +1,21 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.ozone.om;
+/**
+ This package contains client side protocol library to communicate with OM.
+ */
\ No newline at end of file
http://git-wip-us.apache.org/repos/asf/hadoop/blob/061b1685/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/protocol/OzoneManagerProtocol.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/protocol/OzoneManagerProtocol.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/protocol/OzoneManagerProtocol.java
new file mode 100644
index 0000000..b7a099d
--- /dev/null
+++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/protocol/OzoneManagerProtocol.java
@@ -0,0 +1,252 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.ozone.om.protocol;
+
+import org.apache.hadoop.ozone.om.helpers.OmBucketArgs;
+import org.apache.hadoop.ozone.om.helpers.OmBucketInfo;
+import org.apache.hadoop.ozone.om.helpers.OmKeyArgs;
+import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;
+import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfo;
+import org.apache.hadoop.ozone.om.helpers.OmVolumeArgs;
+import org.apache.hadoop.ozone.om.helpers.OpenKeySession;
+import org.apache.hadoop.ozone.om.helpers.ServiceInfo;
+import org.apache.hadoop.ozone.protocol.proto
+ .OzoneManagerProtocolProtos.OzoneAclInfo;
+import java.io.IOException;
+import java.util.List;
+
+/**
+ * Protocol to talk to OM.
+ */
+public interface OzoneManagerProtocol {
+
+ /**
+ * Creates a volume.
+ * @param args - Arguments to create Volume.
+ * @throws IOException
+ */
+ void createVolume(OmVolumeArgs args) throws IOException;
+
+ /**
+ * Changes the owner of a volume.
+ * @param volume - Name of the volume.
+ * @param owner - Name of the owner.
+ * @throws IOException
+ */
+ void setOwner(String volume, String owner) throws IOException;
+
+ /**
+ * Changes the Quota on a volume.
+ * @param volume - Name of the volume.
+ * @param quota - Quota in bytes.
+ * @throws IOException
+ */
+ void setQuota(String volume, long quota) throws IOException;
+
+ /**
+ * Checks if the specified user can access this volume.
+ * @param volume - volume
+ * @param userAcl - user acls which needs to be checked for access
+ * @return true if the user has required access for the volume,
+ * false otherwise
+ * @throws IOException
+ */
+ boolean checkVolumeAccess(String volume, OzoneAclInfo userAcl)
+ throws IOException;
+
+ /**
+ * Gets the volume information.
+ * @param volume - Volume name.
+ * @return VolumeArgs or exception is thrown.
+ * @throws IOException
+ */
+ OmVolumeArgs getVolumeInfo(String volume) throws IOException;
+
+ /**
+ * Deletes an existing empty volume.
+ * @param volume - Name of the volume.
+ * @throws IOException
+ */
+ void deleteVolume(String volume) throws IOException;
+
+ /**
+ * Lists volumes owned by a specific user.
+ * @param userName - user name
+ * @param prefix - Filter prefix -- Return only entries that match this.
+ * @param prevKey - Previous key -- List starts from the next from the prevKey
+ * @param maxKeys - Max number of keys to return.
+ * @return List of Volumes.
+ * @throws IOException
+ */
+ List<OmVolumeArgs> listVolumeByUser(String userName, String prefix, String
+ prevKey, int maxKeys) throws IOException;
+
+ /**
+ * Lists all volumes in the cluster.
+ * @param prefix - Filter prefix -- Return only entries that match this.
+ * @param prevKey - Previous key -- List starts from the next from the prevKey
+ * @param maxKeys - Max number of keys to return.
+ * @return List of Volumes.
+ * @throws IOException
+ */
+ List<OmVolumeArgs> listAllVolumes(String prefix, String
+ prevKey, int maxKeys) throws IOException;
+
+ /**
+ * Creates a bucket.
+ * @param bucketInfo - BucketInfo to create Bucket.
+ * @throws IOException
+ */
+ void createBucket(OmBucketInfo bucketInfo) throws IOException;
+
+ /**
+ * Gets the bucket information.
+ * @param volumeName - Volume name.
+ * @param bucketName - Bucket name.
+ * @return OmBucketInfo or exception is thrown.
+ * @throws IOException
+ */
+ OmBucketInfo getBucketInfo(String volumeName, String bucketName)
+ throws IOException;
+
+ /**
+ * Sets bucket property from args.
+ * @param args - BucketArgs.
+ * @throws IOException
+ */
+ void setBucketProperty(OmBucketArgs args) throws IOException;
+
+ /**
+ * Open the given key and return an open key session.
+ *
+ * @param args the args of the key.
+ * @return OpenKeySession instance that client uses to talk to container.
+ * @throws IOException
+ */
+ OpenKeySession openKey(OmKeyArgs args) throws IOException;
+
+ /**
+ * Commit a key. This will make the change from the client visible. The client
+ * is identified by the clientID.
+ *
+ * @param args the key to commit
+ * @param clientID the client identification
+ * @throws IOException
+ */
+ void commitKey(OmKeyArgs args, int clientID) throws IOException;
+
+ /**
+ * Allocate a new block, it is assumed that the client is having an open key
+ * session going on. This block will be appended to this open key session.
+ *
+ * @param args the key to append
+ * @param clientID the client identification
+ * @return an allocated block
+ * @throws IOException
+ */
+ OmKeyLocationInfo allocateBlock(OmKeyArgs args, int clientID)
+ throws IOException;
+
+ /**
+ * Look up for the container of an existing key.
+ *
+ * @param args the args of the key.
+ * @return OmKeyInfo instance that client uses to talk to container.
+ * @throws IOException
+ */
+ OmKeyInfo lookupKey(OmKeyArgs args) throws IOException;
+
+ /**
+ * Renames an existing key within a bucket.
+ * @param args the args of the key.
+ * @param toKeyName New name to be used for the Key
+ * @throws IOException
+ */
+ void renameKey(OmKeyArgs args, String toKeyName) throws IOException;
+
+ /**
+ * Deletes an existing key.
+ *
+ * @param args the args of the key.
+ * @throws IOException
+ */
+ void deleteKey(OmKeyArgs args) throws IOException;
+
+ /**
+ * Deletes an existing empty bucket from volume.
+ * @param volume - Name of the volume.
+ * @param bucket - Name of the bucket.
+ * @throws IOException
+ */
+ void deleteBucket(String volume, String bucket) throws IOException;
+
+ /**
+ * Returns a list of buckets represented by {@link OmBucketInfo}
+ * in the given volume. Argument volumeName is required, others
+ * are optional.
+ *
+ * @param volumeName
+ * the name of the volume.
+ * @param startBucketName
+ * the start bucket name, only the buckets whose name is
+ * after this value will be included in the result.
+ * @param bucketPrefix
+ * bucket name prefix, only the buckets whose name has
+ * this prefix will be included in the result.
+ * @param maxNumOfBuckets
+ * the maximum number of buckets to return. It ensures
+ * the size of the result will not exceed this limit.
+ * @return a list of buckets.
+ * @throws IOException
+ */
+ List<OmBucketInfo> listBuckets(String volumeName,
+ String startBucketName, String bucketPrefix, int maxNumOfBuckets)
+ throws IOException;
+
+ /**
+ * Returns a list of keys represented by {@link OmKeyInfo}
+ * in the given bucket. Argument volumeName, bucketName is required,
+ * others are optional.
+ *
+ * @param volumeName
+ * the name of the volume.
+ * @param bucketName
+ * the name of the bucket.
+ * @param startKeyName
+ * the start key name, only the keys whose name is
+ * after this value will be included in the result.
+ * @param keyPrefix
+ * key name prefix, only the keys whose name has
+ * this prefix will be included in the result.
+ * @param maxKeys
+ * the maximum number of keys to return. It ensures
+ * the size of the result will not exceed this limit.
+ * @return a list of keys.
+ * @throws IOException
+ */
+ List<OmKeyInfo> listKeys(String volumeName,
+ String bucketName, String startKeyName, String keyPrefix, int maxKeys)
+ throws IOException;
+
+ /**
+ * Returns list of Ozone services with its configuration details.
+ *
+ * @return list of Ozone services
+ * @throws IOException
+ */
+ List<ServiceInfo> getServiceList() throws IOException;
+}
http://git-wip-us.apache.org/repos/asf/hadoop/blob/061b1685/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/protocol/package-info.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/protocol/package-info.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/protocol/package-info.java
new file mode 100644
index 0000000..9c7f388
--- /dev/null
+++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/protocol/package-info.java
@@ -0,0 +1,19 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.ozone.om.protocol;
\ No newline at end of file
http://git-wip-us.apache.org/repos/asf/hadoop/blob/061b1685/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/protocolPB/OzoneManagerProtocolClientSideTranslatorPB.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/protocolPB/OzoneManagerProtocolClientSideTranslatorPB.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/protocolPB/OzoneManagerProtocolClientSideTranslatorPB.java
new file mode 100644
index 0000000..37151fb
--- /dev/null
+++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/protocolPB/OzoneManagerProtocolClientSideTranslatorPB.java
@@ -0,0 +1,769 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.ozone.om.protocolPB;
+
+import com.google.common.base.Strings;
+import com.google.common.collect.Lists;
+import com.google.protobuf.RpcController;
+import com.google.protobuf.ServiceException;
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.ipc.ProtobufHelper;
+import org.apache.hadoop.ipc.ProtocolTranslator;
+import org.apache.hadoop.ozone.om.helpers.OmBucketArgs;
+import org.apache.hadoop.ozone.om.helpers.OmBucketInfo;
+import org.apache.hadoop.ozone.om.helpers.OmKeyArgs;
+import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;
+import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfo;
+import org.apache.hadoop.ozone.om.helpers.OmVolumeArgs;
+import org.apache.hadoop.ozone.om.helpers.OpenKeySession;
+import org.apache.hadoop.ozone.om.helpers.ServiceInfo;
+import org.apache.hadoop.ozone.om.protocol.OzoneManagerProtocol;
+import org.apache.hadoop.ozone.protocol.proto
+ .OzoneManagerProtocolProtos.AllocateBlockRequest;
+import org.apache.hadoop.ozone.protocol.proto
+ .OzoneManagerProtocolProtos.AllocateBlockResponse;
+import org.apache.hadoop.ozone.protocol.proto
+ .OzoneManagerProtocolProtos.CommitKeyRequest;
+import org.apache.hadoop.ozone.protocol.proto
+ .OzoneManagerProtocolProtos.CommitKeyResponse;
+import org.apache.hadoop.ozone.protocol.proto
+ .OzoneManagerProtocolProtos.BucketArgs;
+import org.apache.hadoop.ozone.protocol.proto
+ .OzoneManagerProtocolProtos.BucketInfo;
+import org.apache.hadoop.ozone.protocol.proto
+ .OzoneManagerProtocolProtos.CreateBucketRequest;
+import org.apache.hadoop.ozone.protocol.proto
+ .OzoneManagerProtocolProtos.CreateBucketResponse;
+import org.apache.hadoop.ozone.protocol.proto
+ .OzoneManagerProtocolProtos.InfoBucketRequest;
+import org.apache.hadoop.ozone.protocol.proto
+ .OzoneManagerProtocolProtos.InfoBucketResponse;
+import org.apache.hadoop.ozone.protocol.proto
+ .OzoneManagerProtocolProtos.SetBucketPropertyRequest;
+import org.apache.hadoop.ozone.protocol.proto
+ .OzoneManagerProtocolProtos.SetBucketPropertyResponse;
+import org.apache.hadoop.ozone.protocol.proto
+ .OzoneManagerProtocolProtos.DeleteBucketRequest;
+import org.apache.hadoop.ozone.protocol.proto
+ .OzoneManagerProtocolProtos.DeleteBucketResponse;
+import org.apache.hadoop.ozone.protocol.proto
+ .OzoneManagerProtocolProtos.CreateVolumeRequest;
+import org.apache.hadoop.ozone.protocol.proto
+ .OzoneManagerProtocolProtos.CreateVolumeResponse;
+import org.apache.hadoop.ozone.protocol.proto
+ .OzoneManagerProtocolProtos.LocateKeyRequest;
+import org.apache.hadoop.ozone.protocol.proto
+ .OzoneManagerProtocolProtos.LocateKeyResponse;
+import org.apache.hadoop.ozone.protocol.proto
+ .OzoneManagerProtocolProtos.RenameKeyRequest;
+import org.apache.hadoop.ozone.protocol.proto
+ .OzoneManagerProtocolProtos.RenameKeyResponse;
+import org.apache.hadoop.ozone.protocol.proto
+ .OzoneManagerProtocolProtos.KeyArgs;
+import org.apache.hadoop.ozone.protocol.proto
+ .OzoneManagerProtocolProtos.SetVolumePropertyRequest;
+import org.apache.hadoop.ozone.protocol.proto
+ .OzoneManagerProtocolProtos.SetVolumePropertyResponse;
+import org.apache.hadoop.ozone.protocol.proto
+ .OzoneManagerProtocolProtos.DeleteVolumeRequest;
+import org.apache.hadoop.ozone.protocol.proto
+ .OzoneManagerProtocolProtos.DeleteVolumeResponse;
+import org.apache.hadoop.ozone.protocol.proto
+ .OzoneManagerProtocolProtos.InfoVolumeRequest;
+import org.apache.hadoop.ozone.protocol.proto
+ .OzoneManagerProtocolProtos.InfoVolumeResponse;
+import org.apache.hadoop.ozone.protocol.proto
+ .OzoneManagerProtocolProtos.CheckVolumeAccessRequest;
+import org.apache.hadoop.ozone.protocol.proto
+ .OzoneManagerProtocolProtos.CheckVolumeAccessResponse;
+import org.apache.hadoop.ozone.protocol.proto
+ .OzoneManagerProtocolProtos.ListBucketsRequest;
+import org.apache.hadoop.ozone.protocol.proto
+ .OzoneManagerProtocolProtos.ListBucketsResponse;
+import org.apache.hadoop.ozone.protocol.proto
+ .OzoneManagerProtocolProtos.ListKeysRequest;
+import org.apache.hadoop.ozone.protocol.proto
+ .OzoneManagerProtocolProtos.ListKeysResponse;
+import org.apache.hadoop.ozone.protocol.proto
+ .OzoneManagerProtocolProtos.VolumeInfo;
+import org.apache.hadoop.ozone.protocol.proto
+ .OzoneManagerProtocolProtos.Status;
+import org.apache.hadoop.ozone.protocol.proto
+ .OzoneManagerProtocolProtos.OzoneAclInfo;
+import org.apache.hadoop.ozone.protocol.proto
+ .OzoneManagerProtocolProtos.ListVolumeRequest;
+import org.apache.hadoop.ozone.protocol.proto
+ .OzoneManagerProtocolProtos.ListVolumeResponse;
+import org.apache.hadoop.ozone.protocol.proto
+ .OzoneManagerProtocolProtos.ServiceListRequest;
+import org.apache.hadoop.ozone.protocol.proto
+ .OzoneManagerProtocolProtos.ServiceListResponse;
+
+import java.io.Closeable;
+import java.io.IOException;
+import java.util.List;
+import java.util.ArrayList;
+import java.util.stream.Collectors;
+
+/**
+ * The client side implementation of OzoneManagerProtocol.
+ */
+
+@InterfaceAudience.Private
+public final class OzoneManagerProtocolClientSideTranslatorPB
+ implements OzoneManagerProtocol, ProtocolTranslator, Closeable {
+
+ /**
+ * RpcController is not used and hence is set to null.
+ */
+ private static final RpcController NULL_RPC_CONTROLLER = null;
+
+ // Protobuf RPC proxy all protocol calls are forwarded to.
+ private final OzoneManagerProtocolPB rpcProxy;
+
+ /**
+ * Constructor for OzoneManager client.
+ * @param rpcProxy proxy used to forward requests to the OzoneManager server
+ */
+ public OzoneManagerProtocolClientSideTranslatorPB(
+ OzoneManagerProtocolPB rpcProxy) {
+ this.rpcProxy = rpcProxy;
+ }
+
+ /**
+ * Closes this stream and releases any system resources associated
+ * with it. If the stream is already closed then invoking this
+ * method has no effect.
+ * <p>
+ * <p> As noted in {@link AutoCloseable#close()}, cases where the
+ * close may fail require careful attention. It is strongly advised
+ * to relinquish the underlying resources and to internally
+ * <em>mark</em> the {@code Closeable} as closed, prior to throwing
+ * the {@code IOException}.
+ *
+ * @throws IOException if an I/O error occurs
+ */
+ @Override
+ public void close() throws IOException {
+ // No-op: this translator holds no resources of its own to release.
+ }
+
+ /**
+ * Creates a volume.
+ *
+ * @param args - Arguments to create Volume.
+ * @throws IOException if the RPC fails or the server returns a non-OK status
+ */
+ @Override
+ public void createVolume(OmVolumeArgs args) throws IOException {
+ CreateVolumeRequest.Builder req =
+ CreateVolumeRequest.newBuilder();
+ VolumeInfo volumeInfo = args.getProtobuf();
+ req.setVolumeInfo(volumeInfo);
+
+ final CreateVolumeResponse resp;
+ try {
+ resp = rpcProxy.createVolume(NULL_RPC_CONTROLLER,
+ req.build());
+ } catch (ServiceException e) {
+ // Unwrap so callers see the server-side remote exception as IOException.
+ throw ProtobufHelper.getRemoteException(e);
+ }
+
+ if (resp.getStatus() != Status.OK) {
+ throw new
+ IOException("Volume creation failed, error:" + resp.getStatus());
+ }
+ }
+
+ /**
+ * Changes the owner of a volume.
+ *
+ * @param volume - Name of the volume.
+ * @param owner - Name of the owner.
+ * @throws IOException if the RPC fails or the server returns a non-OK status
+ */
+ @Override
+ public void setOwner(String volume, String owner) throws IOException {
+ SetVolumePropertyRequest.Builder req =
+ SetVolumePropertyRequest.newBuilder();
+ req.setVolumeName(volume).setOwnerName(owner);
+ final SetVolumePropertyResponse resp;
+ try {
+ resp = rpcProxy.setVolumeProperty(NULL_RPC_CONTROLLER, req.build());
+ } catch (ServiceException e) {
+ // Unwrap so callers see the server-side remote exception as IOException.
+ throw ProtobufHelper.getRemoteException(e);
+ }
+ if (resp.getStatus() != Status.OK) {
+ throw new
+ IOException("Volume owner change failed, error:" + resp.getStatus());
+ }
+ }
+
+ /**
+ * Changes the Quota on a volume.
+ *
+ * @param volume - Name of the volume.
+ * @param quota - Quota in bytes.
+ * @throws IOException
+ */
+ @Override
+ public void setQuota(String volume, long quota) throws IOException {
+ SetVolumePropertyRequest.Builder req =
+ SetVolumePropertyRequest.newBuilder();
+ req.setVolumeName(volume).setQuotaInBytes(quota);
+ final SetVolumePropertyResponse resp;
+ try {
+ resp = rpcProxy.setVolumeProperty(NULL_RPC_CONTROLLER, req.build());
+ } catch (ServiceException e) {
+ throw ProtobufHelper.getRemoteException(e);
+ }
+ if (resp.getStatus() != Status.OK) {
+ throw new
+ IOException("Volume quota change failed, error:" + resp.getStatus());
+ }
+ }
+
+ /**
+ * Checks if the specified user can access this volume.
+ *
+ * @param volume - volume
+ * @param userAcl - user acls which needs to be checked for access
+ * @return true if the user has required access for the volume,
+ * false otherwise
+ * @throws IOException
+ */
+ @Override
+ public boolean checkVolumeAccess(String volume, OzoneAclInfo userAcl) throws
+ IOException {
+ CheckVolumeAccessRequest.Builder req =
+ CheckVolumeAccessRequest.newBuilder();
+ req.setVolumeName(volume).setUserAcl(userAcl);
+ final CheckVolumeAccessResponse resp;
+ try {
+ resp = rpcProxy.checkVolumeAccess(NULL_RPC_CONTROLLER, req.build());
+ } catch (ServiceException e) {
+ throw ProtobufHelper.getRemoteException(e);
+ }
+
+ if (resp.getStatus() == Status.ACCESS_DENIED) {
+ return false;
+ } else if (resp.getStatus() == Status.OK) {
+ return true;
+ } else {
+ throw new
+ IOException("Check Volume Access failed, error:" + resp.getStatus());
+ }
+ }
+
+ /**
+ * Gets the volume information.
+ *
+ * @param volume - Volume name.
+ * @return OmVolumeArgs or exception is thrown.
+ * @throws IOException
+ */
+ @Override
+ public OmVolumeArgs getVolumeInfo(String volume) throws IOException {
+ InfoVolumeRequest.Builder req = InfoVolumeRequest.newBuilder();
+ req.setVolumeName(volume);
+ final InfoVolumeResponse resp;
+ try {
+ resp = rpcProxy.infoVolume(NULL_RPC_CONTROLLER, req.build());
+ } catch (ServiceException e) {
+ throw ProtobufHelper.getRemoteException(e);
+ }
+ if (resp.getStatus() != Status.OK) {
+ throw new
+ IOException("Info Volume failed, error:" + resp.getStatus());
+ }
+ return OmVolumeArgs.getFromProtobuf(resp.getVolumeInfo());
+ }
+
+ /**
+ * Deletes an existing empty volume.
+ *
+ * @param volume - Name of the volume.
+ * @throws IOException
+ */
+ @Override
+ public void deleteVolume(String volume) throws IOException {
+ DeleteVolumeRequest.Builder req = DeleteVolumeRequest.newBuilder();
+ req.setVolumeName(volume);
+ final DeleteVolumeResponse resp;
+ try {
+ resp = rpcProxy.deleteVolume(NULL_RPC_CONTROLLER, req.build());
+ } catch (ServiceException e) {
+ throw ProtobufHelper.getRemoteException(e);
+ }
+ if (resp.getStatus() != Status.OK) {
+ throw new
+ IOException("Delete Volume failed, error:" + resp.getStatus());
+ }
+ }
+
+ /**
+ * Lists volume owned by a specific user.
+ *
+ * @param userName - user name
+ * @param prefix - Filter prefix -- Return only entries that match this.
+ * @param prevKey - Previous key -- List starts from the next from the
+ * prevkey
+ * @param maxKeys - Max number of keys to return.
+ * @return List of Volumes.
+ * @throws IOException
+ */
+ @Override
+ public List<OmVolumeArgs> listVolumeByUser(String userName, String prefix,
+ String prevKey, int maxKeys)
+ throws IOException {
+ ListVolumeRequest.Builder builder = ListVolumeRequest.newBuilder();
+ if (!Strings.isNullOrEmpty(prefix)) {
+ builder.setPrefix(prefix);
+ }
+ if (!Strings.isNullOrEmpty(prevKey)) {
+ builder.setPrevKey(prevKey);
+ }
+ builder.setMaxKeys(maxKeys);
+ builder.setUserName(userName);
+ builder.setScope(ListVolumeRequest.Scope.VOLUMES_BY_USER);
+ return listVolume(builder.build());
+ }
+
+ /**
+ * Lists volume all volumes in the cluster.
+ *
+ * @param prefix - Filter prefix -- Return only entries that match this.
+ * @param prevKey - Previous key -- List starts from the next from the
+ * prevkey
+ * @param maxKeys - Max number of keys to return.
+ * @return List of Volumes.
+ * @throws IOException
+ */
+ @Override
+ public List<OmVolumeArgs> listAllVolumes(String prefix, String prevKey,
+ int maxKeys) throws IOException {
+ ListVolumeRequest.Builder builder = ListVolumeRequest.newBuilder();
+ if (!Strings.isNullOrEmpty(prefix)) {
+ builder.setPrefix(prefix);
+ }
+ if (!Strings.isNullOrEmpty(prevKey)) {
+ builder.setPrevKey(prevKey);
+ }
+ builder.setMaxKeys(maxKeys);
+ builder.setScope(ListVolumeRequest.Scope.VOLUMES_BY_CLUSTER);
+ return listVolume(builder.build());
+ }
+
+ private List<OmVolumeArgs> listVolume(ListVolumeRequest request)
+ throws IOException {
+ final ListVolumeResponse resp;
+ try {
+ resp = rpcProxy.listVolumes(NULL_RPC_CONTROLLER, request);
+ } catch (ServiceException e) {
+ throw ProtobufHelper.getRemoteException(e);
+ }
+
+ if (resp.getStatus() != Status.OK) {
+ throw new IOException("List volume failed, error: "
+ + resp.getStatus());
+ }
+
+ List<OmVolumeArgs> result = Lists.newArrayList();
+ for (VolumeInfo volInfo : resp.getVolumeInfoList()) {
+ OmVolumeArgs volArgs = OmVolumeArgs.getFromProtobuf(volInfo);
+ result.add(volArgs);
+ }
+
+ return resp.getVolumeInfoList().stream()
+ .map(item -> OmVolumeArgs.getFromProtobuf(item))
+ .collect(Collectors.toList());
+ }
+
+ /**
+ * Creates a bucket.
+ *
+ * @param bucketInfo - BucketInfo to create bucket.
+ * @throws IOException
+ */
+ @Override
+ public void createBucket(OmBucketInfo bucketInfo) throws IOException {
+ CreateBucketRequest.Builder req =
+ CreateBucketRequest.newBuilder();
+ BucketInfo bucketInfoProtobuf = bucketInfo.getProtobuf();
+ req.setBucketInfo(bucketInfoProtobuf);
+
+ final CreateBucketResponse resp;
+ try {
+ resp = rpcProxy.createBucket(NULL_RPC_CONTROLLER,
+ req.build());
+ } catch (ServiceException e) {
+ throw ProtobufHelper.getRemoteException(e);
+ }
+ if (resp.getStatus() != Status.OK) {
+ throw new IOException("Bucket creation failed, error: "
+ + resp.getStatus());
+ }
+ }
+
+ /**
+ * Gets the bucket information.
+ *
+ * @param volume - Volume name.
+ * @param bucket - Bucket name.
+ * @return OmBucketInfo or exception is thrown.
+ * @throws IOException
+ */
+ @Override
+ public OmBucketInfo getBucketInfo(String volume, String bucket)
+ throws IOException {
+ InfoBucketRequest.Builder req =
+ InfoBucketRequest.newBuilder();
+ req.setVolumeName(volume);
+ req.setBucketName(bucket);
+
+ final InfoBucketResponse resp;
+ try {
+ resp = rpcProxy.infoBucket(NULL_RPC_CONTROLLER,
+ req.build());
+ } catch (ServiceException e) {
+ throw ProtobufHelper.getRemoteException(e);
+ }
+ if (resp.getStatus() == Status.OK) {
+ return OmBucketInfo.getFromProtobuf(resp.getBucketInfo());
+ } else {
+ throw new IOException("Info Bucket failed, error: "
+ + resp.getStatus());
+ }
+ }
+
+ /**
+ * Sets bucket property from args.
+ * @param args - BucketArgs.
+ * @throws IOException
+ */
+ @Override
+ public void setBucketProperty(OmBucketArgs args)
+ throws IOException {
+ SetBucketPropertyRequest.Builder req =
+ SetBucketPropertyRequest.newBuilder();
+ BucketArgs bucketArgs = args.getProtobuf();
+ req.setBucketArgs(bucketArgs);
+ final SetBucketPropertyResponse resp;
+ try {
+ resp = rpcProxy.setBucketProperty(NULL_RPC_CONTROLLER,
+ req.build());
+ } catch (ServiceException e) {
+ throw ProtobufHelper.getRemoteException(e);
+ }
+ if (resp.getStatus() != Status.OK) {
+ throw new IOException("Setting bucket property failed, error: "
+ + resp.getStatus());
+ }
+ }
+
+ /**
+ * List buckets in a volume.
+ *
+ * @param volumeName
+ * @param startKey
+ * @param prefix
+ * @param count
+ * @return
+ * @throws IOException
+ */
+ @Override
+ public List<OmBucketInfo> listBuckets(String volumeName,
+ String startKey, String prefix, int count) throws IOException {
+ List<OmBucketInfo> buckets = new ArrayList<>();
+ ListBucketsRequest.Builder reqBuilder = ListBucketsRequest.newBuilder();
+ reqBuilder.setVolumeName(volumeName);
+ reqBuilder.setCount(count);
+ if (startKey != null) {
+ reqBuilder.setStartKey(startKey);
+ }
+ if (prefix != null) {
+ reqBuilder.setPrefix(prefix);
+ }
+ ListBucketsRequest request = reqBuilder.build();
+ final ListBucketsResponse resp;
+ try {
+ resp = rpcProxy.listBuckets(NULL_RPC_CONTROLLER, request);
+ } catch (ServiceException e) {
+ throw ProtobufHelper.getRemoteException(e);
+ }
+
+ if (resp.getStatus() == Status.OK) {
+ buckets.addAll(
+ resp.getBucketInfoList().stream()
+ .map(OmBucketInfo::getFromProtobuf)
+ .collect(Collectors.toList()));
+ return buckets;
+ } else {
+ throw new IOException("List Buckets failed, error: "
+ + resp.getStatus());
+ }
+ }
+
+ /**
+ * Create a new open session of the key, then use the returned meta info to
+ * talk to data node to actually write the key.
+ * @param args the args for the key to be allocated
+ * @return a handler to the key, returned client
+ * @throws IOException
+ */
+ @Override
+ public OpenKeySession openKey(OmKeyArgs args) throws IOException {
+ LocateKeyRequest.Builder req = LocateKeyRequest.newBuilder();
+ KeyArgs.Builder keyArgs = KeyArgs.newBuilder()
+ .setVolumeName(args.getVolumeName())
+ .setBucketName(args.getBucketName())
+ .setFactor(args.getFactor())
+ .setType(args.getType())
+ .setKeyName(args.getKeyName());
+ if (args.getDataSize() > 0) {
+ keyArgs.setDataSize(args.getDataSize());
+ }
+ req.setKeyArgs(keyArgs.build());
+
+ final LocateKeyResponse resp;
+ try {
+ resp = rpcProxy.createKey(NULL_RPC_CONTROLLER, req.build());
+ } catch (ServiceException e) {
+ throw ProtobufHelper.getRemoteException(e);
+ }
+ if (resp.getStatus() != Status.OK) {
+ throw new IOException("Create key failed, error:" + resp.getStatus());
+ }
+ return new OpenKeySession(resp.getID(),
+ OmKeyInfo.getFromProtobuf(resp.getKeyInfo()), resp.getOpenVersion());
+ }
+
+ @Override
+ public OmKeyLocationInfo allocateBlock(OmKeyArgs args, int clientID)
+ throws IOException {
+ AllocateBlockRequest.Builder req = AllocateBlockRequest.newBuilder();
+ KeyArgs keyArgs = KeyArgs.newBuilder()
+ .setVolumeName(args.getVolumeName())
+ .setBucketName(args.getBucketName())
+ .setKeyName(args.getKeyName())
+ .setDataSize(args.getDataSize()).build();
+ req.setKeyArgs(keyArgs);
+ req.setClientID(clientID);
+
+ final AllocateBlockResponse resp;
+ try {
+ resp = rpcProxy.allocateBlock(NULL_RPC_CONTROLLER, req.build());
+ } catch (ServiceException e) {
+ throw ProtobufHelper.getRemoteException(e);
+ }
+ if (resp.getStatus() != Status.OK) {
+ throw new IOException("Allocate block failed, error:" +
+ resp.getStatus());
+ }
+ return OmKeyLocationInfo.getFromProtobuf(resp.getKeyLocation());
+ }
+
+ @Override
+ public void commitKey(OmKeyArgs args, int clientID)
+ throws IOException {
+ CommitKeyRequest.Builder req = CommitKeyRequest.newBuilder();
+ KeyArgs keyArgs = KeyArgs.newBuilder()
+ .setVolumeName(args.getVolumeName())
+ .setBucketName(args.getBucketName())
+ .setKeyName(args.getKeyName())
+ .setDataSize(args.getDataSize()).build();
+ req.setKeyArgs(keyArgs);
+ req.setClientID(clientID);
+
+ final CommitKeyResponse resp;
+ try {
+ resp = rpcProxy.commitKey(NULL_RPC_CONTROLLER, req.build());
+ } catch (ServiceException e) {
+ throw ProtobufHelper.getRemoteException(e);
+ }
+ if (resp.getStatus() != Status.OK) {
+ throw new IOException("Commit key failed, error:" +
+ resp.getStatus());
+ }
+ }
+
+
+ @Override
+ public OmKeyInfo lookupKey(OmKeyArgs args) throws IOException {
+ LocateKeyRequest.Builder req = LocateKeyRequest.newBuilder();
+ KeyArgs keyArgs = KeyArgs.newBuilder()
+ .setVolumeName(args.getVolumeName())
+ .setBucketName(args.getBucketName())
+ .setKeyName(args.getKeyName())
+ .setDataSize(args.getDataSize()).build();
+ req.setKeyArgs(keyArgs);
+
+ final LocateKeyResponse resp;
+ try {
+ resp = rpcProxy.lookupKey(NULL_RPC_CONTROLLER, req.build());
+ } catch (ServiceException e) {
+ throw ProtobufHelper.getRemoteException(e);
+ }
+ if (resp.getStatus() != Status.OK) {
+ throw new IOException("Lookup key failed, error:" +
+ resp.getStatus());
+ }
+ return OmKeyInfo.getFromProtobuf(resp.getKeyInfo());
+ }
+
+ @Override
+ public void renameKey(OmKeyArgs args, String toKeyName) throws IOException {
+ RenameKeyRequest.Builder req = RenameKeyRequest.newBuilder();
+ KeyArgs keyArgs = KeyArgs.newBuilder()
+ .setVolumeName(args.getVolumeName())
+ .setBucketName(args.getBucketName())
+ .setKeyName(args.getKeyName())
+ .setDataSize(args.getDataSize()).build();
+ req.setKeyArgs(keyArgs);
+ req.setToKeyName(toKeyName);
+
+ final RenameKeyResponse resp;
+ try {
+ resp = rpcProxy.renameKey(NULL_RPC_CONTROLLER, req.build());
+ } catch (ServiceException e) {
+ throw ProtobufHelper.getRemoteException(e);
+ }
+ if (resp.getStatus() != Status.OK) {
+ throw new IOException("Rename key failed, error:" +
+ resp.getStatus());
+ }
+ }
+
+ /**
+ * Deletes an existing key.
+ *
+ * @param args the args of the key.
+ * @throws IOException
+ */
+ @Override
+ public void deleteKey(OmKeyArgs args) throws IOException {
+ LocateKeyRequest.Builder req = LocateKeyRequest.newBuilder();
+ KeyArgs keyArgs = KeyArgs.newBuilder()
+ .setVolumeName(args.getVolumeName())
+ .setBucketName(args.getBucketName())
+ .setKeyName(args.getKeyName()).build();
+ req.setKeyArgs(keyArgs);
+
+ final LocateKeyResponse resp;
+ try {
+ resp = rpcProxy.deleteKey(NULL_RPC_CONTROLLER, req.build());
+ } catch (ServiceException e) {
+ throw ProtobufHelper.getRemoteException(e);
+ }
+ if (resp.getStatus() != Status.OK) {
+ throw new IOException("Delete key failed, error:" +
+ resp.getStatus());
+ }
+ }
+
+ /**
+ * Deletes an existing empty bucket from volume.
+ * @param volume - Name of the volume.
+ * @param bucket - Name of the bucket.
+ * @throws IOException
+ */
+ public void deleteBucket(String volume, String bucket) throws IOException {
+ DeleteBucketRequest.Builder req = DeleteBucketRequest.newBuilder();
+ req.setVolumeName(volume);
+ req.setBucketName(bucket);
+ final DeleteBucketResponse resp;
+ try {
+ resp = rpcProxy.deleteBucket(NULL_RPC_CONTROLLER, req.build());
+ } catch (ServiceException e) {
+ throw ProtobufHelper.getRemoteException(e);
+ }
+ if (resp.getStatus() != Status.OK) {
+ throw new
+ IOException("Delete Bucket failed, error:" + resp.getStatus());
+ }
+ }
+
+ /**
+ * List keys in a bucket.
+ */
+ @Override
+ public List<OmKeyInfo> listKeys(String volumeName, String bucketName,
+ String startKey, String prefix, int maxKeys) throws IOException {
+ List<OmKeyInfo> keys = new ArrayList<>();
+ ListKeysRequest.Builder reqBuilder = ListKeysRequest.newBuilder();
+ reqBuilder.setVolumeName(volumeName);
+ reqBuilder.setBucketName(bucketName);
+ reqBuilder.setCount(maxKeys);
+
+ if (startKey != null) {
+ reqBuilder.setStartKey(startKey);
+ }
+
+ if (prefix != null) {
+ reqBuilder.setPrefix(prefix);
+ }
+
+ ListKeysRequest request = reqBuilder.build();
+ final ListKeysResponse resp;
+ try {
+ resp = rpcProxy.listKeys(NULL_RPC_CONTROLLER, request);
+ } catch (ServiceException e) {
+ throw ProtobufHelper.getRemoteException(e);
+ }
+
+ if (resp.getStatus() == Status.OK) {
+ keys.addAll(
+ resp.getKeyInfoList().stream()
+ .map(OmKeyInfo::getFromProtobuf)
+ .collect(Collectors.toList()));
+ return keys;
+ } else {
+ throw new IOException("List Keys failed, error: "
+ + resp.getStatus());
+ }
+ }
+
+ @Override
+ public List<ServiceInfo> getServiceList() throws IOException {
+ ServiceListRequest request = ServiceListRequest.newBuilder().build();
+ final ServiceListResponse resp;
+ try {
+ resp = rpcProxy.getServiceList(NULL_RPC_CONTROLLER, request);
+ } catch (ServiceException e) {
+ throw ProtobufHelper.getRemoteException(e);
+ }
+
+ if (resp.getStatus() == Status.OK) {
+ return resp.getServiceInfoList().stream()
+ .map(ServiceInfo::getFromProtobuf)
+ .collect(Collectors.toList());
+ } else {
+ throw new IOException("Getting service list failed, error: "
+ + resp.getStatus());
+ }
+ }
+
+ /**
+ * Return the proxy object underlying this protocol translator.
+ *
+ * @return the proxy object underlying this protocol translator.
+ */
+ @Override
+ public Object getUnderlyingProxyObject() {
+ return null;
+ }
+}
http://git-wip-us.apache.org/repos/asf/hadoop/blob/061b1685/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/protocolPB/OzoneManagerProtocolPB.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/protocolPB/OzoneManagerProtocolPB.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/protocolPB/OzoneManagerProtocolPB.java
new file mode 100644
index 0000000..e0879d6
--- /dev/null
+++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/protocolPB/OzoneManagerProtocolPB.java
@@ -0,0 +1,34 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.ozone.om.protocolPB;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.ipc.ProtocolInfo;
+import org.apache.hadoop.ozone.protocol.proto
+ .OzoneManagerProtocolProtos.OzoneManagerService;
+
/**
 * Protobuf RPC service interface used to communicate with the Ozone
 * Manager (OM). Marker interface that binds the Hadoop RPC protocol
 * name/version to the generated blocking service stub.
 */
@ProtocolInfo(protocolName =
    "org.apache.hadoop.ozone.protocol.OzoneManagerProtocol",
    protocolVersion = 1)
@InterfaceAudience.Private
public interface OzoneManagerProtocolPB
    extends OzoneManagerService.BlockingInterface {
}
http://git-wip-us.apache.org/repos/asf/hadoop/blob/061b1685/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/protocolPB/package-info.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/protocolPB/package-info.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/protocolPB/package-info.java
new file mode 100644
index 0000000..d595edf
--- /dev/null
+++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/protocolPB/package-info.java
@@ -0,0 +1,19 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.ozone.om.protocolPB;
\ No newline at end of file
---------------------------------------------------------------------
To unsubscribe, e-mail: common-commits-unsubscribe@hadoop.apache.org
For additional commands, e-mail: common-commits-help@hadoop.apache.org