Posted to common-commits@hadoop.apache.org by ae...@apache.org on 2016/10/13 00:44:51 UTC

[1/2] hadoop git commit: HDFS-10995. Ozone: Move ozone XceiverClient to hdfs-client. Contributed by Chen Liang.

Repository: hadoop
Updated Branches:
  refs/heads/HDFS-7240 4217f8520 -> ef84ac469


http://git-wip-us.apache.org/repos/asf/hadoop/blob/ef84ac46/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/DatanodeContainerProtocol.proto
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/DatanodeContainerProtocol.proto b/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/DatanodeContainerProtocol.proto
deleted file mode 100644
index 04d77db..0000000
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/DatanodeContainerProtocol.proto
+++ /dev/null
@@ -1,320 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-/**
- * These .proto interfaces are private and Unstable.
- * Please see http://hadoop.apache.org/docs/stable/hadoop-project-dist/hadoop-common/InterfaceClassification.html
- * for what changes are allowed for an *Unstable* .proto interface.
- */
-
-// This file contains protocol buffers that are used to transfer data
-// to and from the datanode.
-option java_package = "org.apache.hadoop.hdfs.ozone.protocol.proto";
-option java_outer_classname = "ContainerProtos";
-option java_generate_equals_and_hash = true;
-package hadoop.hdfs.ozone;
-import "hdfs.proto";
-
-/**
- * Commands that are used to manipulate the state of containers on a datanode.
- *
- * These commands allow us to work against the datanode, from the
- * Storage Container Manager as well as from clients.
- *
- *  1. CreateContainer - This call is usually made by the Storage Container
- *     Manager when we need to create a new container on a given datanode.
- *
- *  2. ReadContainer - Allows an end user to stat a container. For example,
- *     this allows us to return the metadata of a container.
- *
- *  3. UpdateContainer - Updates a container's metadata.
- *
- *  4. DeleteContainer - This call is made to delete a container.
- *
- *  5. ListContainer - Returns the list of containers on this
- *     datanode. This will be used by tests and tools.
- *
- *  6. PutKey - Given a valid container, creates a key.
- *
- *  7. GetKey - Allows a user to read the metadata of a key.
- *
- *  8. DeleteKey - Deletes a given key.
- *
- *  9. ListKey - Returns a list of keys that are present inside
- *      a given container.
- *
- *  10. ReadChunk - Allows us to read a chunk.
- *
- *  11. DeleteChunk - Deletes an unused chunk.
- *
- *  12. WriteChunk - Allows us to write a chunk.
- *
- *  13. ListChunk - Given a Container/Key, returns the list of Chunks.
- *
- *  14. CompactChunk - Re-writes a chunk based on offsets.
- */
-
-enum Type {
-   CreateContainer = 1;
-   ReadContainer = 2;
-   UpdateContainer = 3;
-   DeleteContainer = 4;
-   ListContainer = 5;
-
-   PutKey = 6;
-   GetKey = 7;
-   DeleteKey = 8;
-   ListKey = 9;
-
-   ReadChunk = 10;
-   DeleteChunk = 11;
-   WriteChunk = 12;
-   ListChunk = 13;
-   CompactChunk = 14;
-}
-
-
-enum Result {
-  SUCCESS = 1;
-  UNSUPPORTED_REQUEST = 2;
-  MALFORMED_REQUEST = 3;
-  CONTAINER_INTERNAL_ERROR = 4;
-}
-
-message ContainerCommandRequestProto {
-  required Type cmdType = 1; // Type of the command
-
-  // A string that identifies this command; we generate a trace ID in the
-  // Ozone frontend, and this allows us to trace that command all over Ozone.
-  optional string traceID = 2;
-
-  // One of the following commands is available when the corresponding
-  // cmdType is set. At the protocol level we allow only
-  // one command in each packet.
-  // TODO : Upgrade to Protobuf 2.6 or later.
-  optional   CreateContainerRequestProto createContainer = 3;
-  optional   ReadContainerRequestProto readContainer = 4;
-  optional   UpdateContainerRequestProto updateContainer = 5;
-  optional   DeleteContainerRequestProto deleteContainer = 6;
-  optional   ListContainerRequestProto listContainer = 7;
-
-  optional   PutKeyRequestProto putKey = 8;
-  optional   GetKeyRequestProto getKey = 9;
-  optional   DeleteKeyRequestProto deleteKey = 10;
-  optional   ListKeyRequestProto listKey = 11;
-
-  optional   ReadChunkRequestProto readChunk = 12;
-  optional   WriteChunkRequestProto writeChunk = 13;
-  optional   DeleteChunkRequestProto deleteChunk = 14;
-  optional   ListChunkRequestProto listChunk = 15;
-}
-
-message ContainerCommandResponseProto {
-  required Type cmdType = 1;
-  optional string traceID = 2;
-
-  optional   CreateContainerResponseProto createContainer = 3;
-  optional   ReadContainerResponseProto readContainer = 4;
-  optional   UpdateContainerResponseProto updateContainer = 5;
-  optional   DeleteContainerResponseProto deleteContainer = 6;
-  optional   ListContainerResponseProto listContainer = 7;
-
-  optional   PutKeyResponseProto putKey = 8;
-  optional   GetKeyResponseProto getKey = 9;
-  optional   DeleteKeyResponseProto deleteKey = 10;
-  optional   ListKeyResponseProto listKey = 11;
-
-  optional  WriteChunkResponseProto writeChunk = 12;
-  optional  ReadChunkResponseProto readChunk = 13;
-  optional  DeleteChunkResponseProto deleteChunk = 14;
-  optional  ListChunkResponseProto listChunk = 15;
-
-  required Result result = 17;
-  optional string message = 18;
-
-}
-
-// A pipeline is composed of one or more datanodes that back a container.
-message Pipeline {
-  required string leaderID = 1;
-  repeated DatanodeIDProto members = 2;
-  required string containerName = 3;
-}
-
-message KeyValue {
-  required string key = 1;
-  optional string value = 2;
-}
-
-message ContainerData {
-  required string name = 1;
-  repeated KeyValue metadata = 2;
-  optional string dbPath = 3;
-  optional string containerPath = 4;
-}
-
-message ContainerMeta {
-  required string fileName = 1;
-  required string hash = 2;
-}
-
-// Container Messages.
-message  CreateContainerRequestProto {
-  required Pipeline pipeline = 1;
-  required ContainerData containerData = 2;
-}
-
-message  CreateContainerResponseProto {
-}
-
-message  ReadContainerRequestProto {
-  required Pipeline pipeline = 1;
-  required string name = 2;
-}
-
-message  ReadContainerResponseProto {
-  optional ContainerData containerData = 2;
-}
-
-message  UpdateContainerRequestProto {
-  required Pipeline pipeline = 1;
-  required ContainerData containerData = 2;
-}
-
-message  UpdateContainerResponseProto {
-}
-
-message  DeleteContainerRequestProto {
-  required Pipeline pipeline = 1;
-  required string name = 2;
-}
-
-message  DeleteContainerResponseProto {
-}
-
-message  ListContainerRequestProto {
-  required Pipeline pipeline = 1;
-  optional string prefix = 2;
-  required uint32 count = 3; // Max Results to return
-  optional string prevKey = 4;  // if this is not set, query from the start.
-}
-
-message  ListContainerResponseProto {
-  repeated ContainerData containerData = 1;
-}
-
-
-message KeyData {
-  required string containerName = 1;
-  required string name = 2;
-  optional int64 flags = 3; // for future use.
-  repeated KeyValue metadata = 4;
-  repeated ChunkInfo chunks = 5;
-}
-
-// Key Messages.
-message  PutKeyRequestProto {
-  required Pipeline pipeline = 1;
-  required KeyData keyData = 2;
-}
-
-message  PutKeyResponseProto {
-}
-
-message  GetKeyRequestProto  {
-  required Pipeline pipeline = 1;
-  required KeyData keyData = 2;
-}
-
-message  GetKeyResponseProto  {
-  required KeyData keyData = 1;
-}
-
-
-message  DeleteKeyRequestProto {
-  required Pipeline pipeline = 1;
-  required string name = 2;
-}
-
-message   DeleteKeyResponseProto {
-}
-
-message  ListKeyRequestProto {
-  required Pipeline pipeline = 1;
-  optional string prefix = 2; // if specified returns keys that match prefix.
-  required string prevKey = 3;
-  required uint32 count = 4;
-
-}
-
-message  ListKeyResponseProto {
-  repeated KeyData keyData = 1;
-}
-
-// Chunk Operations
-
-message ChunkInfo {
-  required string chunkName = 1;
-  required uint64 offset = 2;
-  required uint64 len = 3;
-  optional string checksum = 4;
-  repeated KeyValue metadata = 5;
-}
-
-message  WriteChunkRequestProto  {
-  required Pipeline pipeline = 1;
-  required string keyName = 2;
-  required ChunkInfo chunkData = 3;
-  required bytes data = 4;
-}
-
-message  WriteChunkResponseProto {
-}
-
-message  ReadChunkRequestProto  {
-  required Pipeline pipeline = 1;
-  required string keyName = 2;
-  required ChunkInfo chunkData = 3;
-}
-
-message  ReadChunkResponseProto {
-  required Pipeline pipeline = 1;
-  required ChunkInfo chunkData = 2;
-  required bytes data = 3;
-}
-
-message  DeleteChunkRequestProto {
-  required Pipeline pipeline = 1;
-  required string keyName = 2;
-  required ChunkInfo chunkData = 3;
-}
-
-message  DeleteChunkResponseProto {
-}
-
-message  ListChunkRequestProto {
-  required Pipeline pipeline = 1;
-  required string keyName = 2;
-  required string prevChunkName = 3;
-  required uint32 count = 4;
-}
-
-message  ListChunkResponseProto {
-  repeated ChunkInfo chunkData = 1;
-}
-

http://git-wip-us.apache.org/repos/asf/hadoop/blob/ef84ac46/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/ozone/container/ContainerTestHelper.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/ozone/container/ContainerTestHelper.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/ozone/container/ContainerTestHelper.java
index 3396eca..55f1311 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/ozone/container/ContainerTestHelper.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/ozone/container/ContainerTestHelper.java
@@ -28,7 +28,7 @@ import org.apache.hadoop.hdfs.ozone.protocol.proto.ContainerProtos
 import org.apache.hadoop.hdfs.protocol.DatanodeID;
 import org.apache.hadoop.ozone.OzoneConsts;
 import org.apache.hadoop.ozone.container.common.helpers.ChunkInfo;
-import org.apache.hadoop.ozone.container.common.helpers.Pipeline;
+import org.apache.hadoop.scm.container.common.helpers.Pipeline;
 import org.apache.hadoop.ozone.container.common.helpers.KeyData;
 import org.junit.Assert;
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/ef84ac46/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/ozone/container/common/impl/TestContainerPersistence.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/ozone/container/common/impl/TestContainerPersistence.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/ozone/container/common/impl/TestContainerPersistence.java
index 3cb91d7..9b0adf8 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/ozone/container/common/impl/TestContainerPersistence.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/ozone/container/common/impl/TestContainerPersistence.java
@@ -31,7 +31,7 @@ import org.apache.hadoop.ozone.container.common.helpers.ChunkInfo;
 import org.apache.hadoop.ozone.container.common.helpers.ContainerData;
 import org.apache.hadoop.ozone.container.common.helpers.ContainerUtils;
 import org.apache.hadoop.ozone.container.common.helpers.KeyData;
-import org.apache.hadoop.ozone.container.common.helpers.Pipeline;
+import org.apache.hadoop.scm.container.common.helpers.Pipeline;
 import org.apache.hadoop.ozone.container.common.utils.LevelDBStore;
 import org.apache.hadoop.ozone.web.utils.OzoneUtils;
 import org.junit.After;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/ef84ac46/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestOzoneContainer.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestOzoneContainer.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestOzoneContainer.java
index 306a2f3..e765ad6 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestOzoneContainer.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestOzoneContainer.java
@@ -22,8 +22,8 @@ import org.apache.hadoop.ozone.MiniOzoneCluster;
 import org.apache.hadoop.ozone.OzoneConfigKeys;
 import org.apache.hadoop.ozone.OzoneConfiguration;
 import org.apache.hadoop.ozone.container.ContainerTestHelper;
-import org.apache.hadoop.ozone.container.common.helpers.Pipeline;
-import org.apache.hadoop.ozone.container.common.transport.client.XceiverClient;
+import org.apache.hadoop.scm.container.common.helpers.Pipeline;
+import org.apache.hadoop.scm.XceiverClient;
 import org.apache.hadoop.ozone.web.utils.OzoneUtils;
 import org.junit.Assert;
 import org.junit.Rule;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/ef84ac46/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/ozone/container/transport/server/TestContainerServer.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/ozone/container/transport/server/TestContainerServer.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/ozone/container/transport/server/TestContainerServer.java
index a94b116..10ed9d1 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/ozone/container/transport/server/TestContainerServer.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/ozone/container/transport/server/TestContainerServer.java
@@ -25,11 +25,11 @@ import org.apache.hadoop.hdfs.ozone.protocol.proto.ContainerProtos.ContainerComm
 import org.apache.hadoop.ozone.OzoneConfigKeys;
 import org.apache.hadoop.ozone.OzoneConfiguration;
 import org.apache.hadoop.ozone.container.ContainerTestHelper;
-import org.apache.hadoop.ozone.container.common.helpers.Pipeline;
+import org.apache.hadoop.scm.container.common.helpers.Pipeline;
 import org.apache.hadoop.ozone.container.common.interfaces.ContainerDispatcher;
 import org.apache.hadoop.ozone.container.common.interfaces.ContainerManager;
 import org.apache.hadoop.ozone.container.common.impl.Dispatcher;
-import org.apache.hadoop.ozone.container.common.transport.client.XceiverClient;
+import org.apache.hadoop.scm.XceiverClient;
 import org.apache.hadoop.ozone.container.common.transport.server.XceiverServer;
 import org.apache.hadoop.ozone.container.common.transport.server.XceiverServerHandler;
 




[2/2] hadoop git commit: HDFS-10995. Ozone: Move ozone XceiverClient to hdfs-client. Contributed by Chen Liang.

Posted by ae...@apache.org.
HDFS-10995. Ozone: Move ozone XceiverClient to hdfs-client. Contributed by Chen Liang.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/ef84ac46
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/ef84ac46
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/ef84ac46

Branch: refs/heads/HDFS-7240
Commit: ef84ac46990338a10d62c07c0ce9a58774cec312
Parents: 4217f85
Author: Anu Engineer <ae...@apache.org>
Authored: Wed Oct 12 17:37:14 2016 -0700
Committer: Anu Engineer <ae...@apache.org>
Committed: Wed Oct 12 17:37:14 2016 -0700

----------------------------------------------------------------------
 hadoop-hdfs-project/hadoop-hdfs-client/pom.xml  |   5 +
 .../org/apache/hadoop/scm/ScmConfigKeys.java    |  32 ++
 .../org/apache/hadoop/scm/XceiverClient.java    | 134 ++++++++
 .../apache/hadoop/scm/XceiverClientHandler.java | 112 +++++++
 .../hadoop/scm/XceiverClientInitializer.java    |  68 ++++
 .../apache/hadoop/scm/XceiverClientManager.java |  84 +++++
 .../scm/container/common/helpers/Pipeline.java  | 128 ++++++++
 .../container/common/helpers/package-info.java  |  22 ++
 .../org/apache/hadoop/scm/package-info.java     |  24 ++
 .../main/proto/DatanodeContainerProtocol.proto  | 320 +++++++++++++++++++
 hadoop-hdfs-project/hadoop-hdfs/pom.xml         |   1 -
 .../container/common/helpers/ChunkUtils.java    |   1 +
 .../container/common/helpers/Pipeline.java      | 128 --------
 .../container/common/impl/ChunkManagerImpl.java |   2 +-
 .../common/impl/ContainerManagerImpl.java       |   2 +-
 .../ozone/container/common/impl/Dispatcher.java |   2 +-
 .../container/common/impl/KeyManagerImpl.java   |   2 +-
 .../common/interfaces/ChunkManager.java         |   2 +-
 .../common/interfaces/ContainerManager.java     |   2 +-
 .../container/common/interfaces/KeyManager.java |   2 +-
 .../common/transport/client/XceiverClient.java  | 135 --------
 .../transport/client/XceiverClientHandler.java  | 112 -------
 .../client/XceiverClientInitializer.java        |  68 ----
 .../transport/client/XceiverClientManager.java  |  83 -----
 .../common/transport/client/package-info.java   |  24 --
 .../ozone/storage/StorageContainerManager.java  |   6 +-
 .../ozone/web/storage/ChunkInputStream.java     |   4 +-
 .../ozone/web/storage/ChunkOutputStream.java    |   4 +-
 .../web/storage/ContainerProtocolCalls.java     |   2 +-
 .../web/storage/DistributedStorageHandler.java  |   6 +-
 .../main/proto/DatanodeContainerProtocol.proto  | 320 -------------------
 .../ozone/container/ContainerTestHelper.java    |   2 +-
 .../common/impl/TestContainerPersistence.java   |   2 +-
 .../container/ozoneimpl/TestOzoneContainer.java |   4 +-
 .../transport/server/TestContainerServer.java   |   4 +-
 35 files changed, 954 insertions(+), 895 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/ef84ac46/hadoop-hdfs-project/hadoop-hdfs-client/pom.xml
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/pom.xml b/hadoop-hdfs-project/hadoop-hdfs-client/pom.xml
index 1e38019..47692e2 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/pom.xml
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/pom.xml
@@ -111,6 +111,10 @@ http://maven.apache.org/xsd/maven-4.0.0.xsd">
           <groupId>com.fasterxml.jackson.core</groupId>
           <artifactId>jackson-annotations</artifactId>
       </dependency>
+      <dependency>
+          <groupId>io.netty</groupId>
+          <artifactId>netty-all</artifactId>
+      </dependency>
   </dependencies>
 
   <build>
@@ -158,6 +162,7 @@ http://maven.apache.org/xsd/maven-4.0.0.xsd">
                   <include>inotify.proto</include>
                   <include>erasurecoding.proto</include>
                   <include>ReconfigurationProtocol.proto</include>
+                  <include>DatanodeContainerProtocol.proto</include>
                 </includes>
               </source>
               <output>${project.build.directory}/generated-sources/java</output>

http://git-wip-us.apache.org/repos/asf/hadoop/blob/ef84ac46/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/scm/ScmConfigKeys.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/scm/ScmConfigKeys.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/scm/ScmConfigKeys.java
new file mode 100644
index 0000000..a1b2393
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/scm/ScmConfigKeys.java
@@ -0,0 +1,32 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ *  with the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ *  Unless required by applicable law or agreed to in writing, software
+ *  distributed under the License is distributed on an "AS IS" BASIS,
+ *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ *  See the License for the specific language governing permissions and
+ *  limitations under the License.
+ */
+package org.apache.hadoop.scm;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
+
+/**
+ * This class contains constants for configuration keys used in SCM.
+ */
+@InterfaceAudience.Public
+@InterfaceStability.Unstable
+public final class ScmConfigKeys {
+  public static final String DFS_CONTAINER_IPC_PORT =
+      "dfs.container.ipc";
+  public static final int DFS_CONTAINER_IPC_PORT_DEFAULT = 50011;
+}
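
For reference, the key above is what XceiverClient falls back to when a
datanode does not advertise its container port (see connect() below). A
minimal sketch of that lookup, assuming a plain Hadoop Configuration object:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.scm.ScmConfigKeys;

    public class ContainerPortLookup {
      public static void main(String[] args) {
        Configuration conf = new Configuration();
        // Returns 50011 unless "dfs.container.ipc" is set explicitly.
        int port = conf.getInt(ScmConfigKeys.DFS_CONTAINER_IPC_PORT,
            ScmConfigKeys.DFS_CONTAINER_IPC_PORT_DEFAULT);
        System.out.println("Container IPC port: " + port);
      }
    }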

http://git-wip-us.apache.org/repos/asf/hadoop/blob/ef84ac46/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/scm/XceiverClient.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/scm/XceiverClient.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/scm/XceiverClient.java
new file mode 100644
index 0000000..e1a1a8b
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/scm/XceiverClient.java
@@ -0,0 +1,134 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ *  with the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ *  Unless required by applicable law or agreed to in writing, software
+ *  distributed under the License is distributed on an "AS IS" BASIS,
+ *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ *  See the License for the specific language governing permissions and
+ *  limitations under the License.
+ */
+
+package org.apache.hadoop.scm;
+
+import com.google.common.base.Preconditions;
+import io.netty.bootstrap.Bootstrap;
+import io.netty.channel.ChannelFuture;
+import io.netty.channel.EventLoopGroup;
+import io.netty.channel.nio.NioEventLoopGroup;
+import io.netty.channel.socket.nio.NioSocketChannel;
+import io.netty.handler.logging.LogLevel;
+import io.netty.handler.logging.LoggingHandler;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hdfs.ozone.protocol.proto.ContainerProtos;
+import org.apache.hadoop.hdfs.protocol.DatanodeID;
+import org.apache.hadoop.scm.container.common.helpers.Pipeline;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.io.Closeable;
+import java.io.IOException;
+import java.util.concurrent.TimeUnit;
+
+/**
+ * A client for the storage container protocol.
+ */
+public class XceiverClient implements Closeable {
+  static final Logger LOG = LoggerFactory.getLogger(XceiverClient.class);
+  private final Pipeline pipeline;
+  private final Configuration config;
+  private ChannelFuture channelFuture;
+  private Bootstrap b;
+  private EventLoopGroup group;
+
+  /**
+   * Constructs a client that can communicate with the Container framework on
+   * data nodes.
+   * @param pipeline - Pipeline that defines the machines.
+   * @param config - Ozone configuration.
+   */
+  public XceiverClient(Pipeline pipeline, Configuration config) {
+    Preconditions.checkNotNull(pipeline);
+    Preconditions.checkNotNull(config);
+    this.pipeline = pipeline;
+    this.config = config;
+  }
+
+  /**
+   * Connects to the leader in the pipeline.
+   */
+  public void connect() throws Exception {
+    if (channelFuture != null
+        && channelFuture.channel() != null
+        && channelFuture.channel().isActive()) {
+      throw new IOException("This client is already connected to a host.");
+    }
+
+    group = new NioEventLoopGroup();
+    b = new Bootstrap();
+    b.group(group)
+        .channel(NioSocketChannel.class)
+        .handler(new LoggingHandler(LogLevel.INFO))
+        .handler(new XceiverClientInitializer(this.pipeline));
+    DatanodeID leader = this.pipeline.getLeader();
+
+    // Read the port from the datanode; on failure, use the default
+    // configured port.
+    int port = leader.getContainerPort();
+    if (port == 0) {
+      port = config.getInt(ScmConfigKeys.DFS_CONTAINER_IPC_PORT,
+          ScmConfigKeys.DFS_CONTAINER_IPC_PORT_DEFAULT);
+    }
+    LOG.debug("Connecting to server Port : " + port);
+    channelFuture = b.connect(leader.getHostName(), port).sync();
+  }
+
+  /**
+   * Close the client.
+   */
+  @Override
+  public void close() {
+    if(group != null) {
+      group.shutdownGracefully(0, 0, TimeUnit.SECONDS);
+    }
+
+    if (channelFuture != null) {
+      channelFuture.channel().close();
+    }
+  }
+
+  /**
+   * Returns the pipeline of machines that host the container used by this
+   * client.
+   *
+   * @return pipeline of machines that host the container
+   */
+  public Pipeline getPipeline() {
+    return pipeline;
+  }
+
+  /**
+   * Sends a given command to server and gets the reply back.
+   * @param request Request
+   * @return Response to the command
+   * @throws IOException
+   */
+  public ContainerProtos.ContainerCommandResponseProto sendCommand(
+      ContainerProtos.ContainerCommandRequestProto request)
+      throws IOException {
+    if((channelFuture == null) || (!channelFuture.channel().isActive())) {
+      throw new IOException("This channel is not connected.");
+    }
+    XceiverClientHandler handler =
+        channelFuture.channel().pipeline().get(XceiverClientHandler.class);
+
+    return handler.sendCommand(request);
+  }
+}
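
A typical round trip with this client, sketched under the assumption that a
Pipeline for the target container already exists and that the request was
built elsewhere (see the request-builder sketch after the .proto file below):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hdfs.ozone.protocol.proto.ContainerProtos;
    import org.apache.hadoop.scm.XceiverClient;
    import org.apache.hadoop.scm.container.common.helpers.Pipeline;

    public class XceiverClientUsage {
      static ContainerProtos.ContainerCommandResponseProto call(
          Pipeline pipeline,
          ContainerProtos.ContainerCommandRequestProto request)
          throws Exception {
        XceiverClient client = new XceiverClient(pipeline, new Configuration());
        try {
          client.connect();              // connects to pipeline.getLeader()
          return client.sendCommand(request);
        } finally {
          client.close();                // shuts down the netty event loop
        }
      }
    }

In practice callers should prefer XceiverClientManager (below), which wraps
exactly this acquire/connect/close sequence.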

http://git-wip-us.apache.org/repos/asf/hadoop/blob/ef84ac46/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/scm/XceiverClientHandler.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/scm/XceiverClientHandler.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/scm/XceiverClientHandler.java
new file mode 100644
index 0000000..23e7443
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/scm/XceiverClientHandler.java
@@ -0,0 +1,112 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ *  with the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ *  Unless required by applicable law or agreed to in writing, software
+ *  distributed under the License is distributed on an "AS IS" BASIS,
+ *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ *  See the License for the specific language governing permissions and
+ *  limitations under the License.
+ */
+package org.apache.hadoop.scm;
+
+import io.netty.channel.Channel;
+import io.netty.channel.ChannelHandlerContext;
+import io.netty.channel.SimpleChannelInboundHandler;
+import org.apache.hadoop.hdfs.ozone.protocol.proto.ContainerProtos;
+import org.apache.hadoop.scm.container.common.helpers.Pipeline;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.util.concurrent.BlockingQueue;
+import java.util.concurrent.LinkedBlockingQueue;
+
+/**
+ * Netty client handler.
+ */
+public class XceiverClientHandler extends
+    SimpleChannelInboundHandler<ContainerProtos.ContainerCommandResponseProto> {
+
+  static final Logger LOG = LoggerFactory.getLogger(XceiverClientHandler.class);
+  private final BlockingQueue<ContainerProtos.ContainerCommandResponseProto>
+      responses = new LinkedBlockingQueue<>();
+  private final Pipeline pipeline;
+  private volatile Channel channel;
+
+  /**
+   * Constructs a client that can communicate to a container server.
+   */
+  public XceiverClientHandler(Pipeline pipeline) {
+    super(false);
+    this.pipeline = pipeline;
+  }
+
+  /**
+   * <strong>Please keep in mind that this method will be renamed to {@code
+   * messageReceived(ChannelHandlerContext, I)} in 5.0.</strong>
+   * <p>
+   * Is called for each message of type {@link ContainerProtos
+   * .ContainerCommandResponseProto}.
+   *
+   * @param ctx the {@link ChannelHandlerContext} which this {@link
+   *            SimpleChannelInboundHandler} belongs to
+   * @param msg the message to handle
+   * @throws Exception is thrown if an error occurred
+   */
+  @Override
+  public void channelRead0(ChannelHandlerContext ctx,
+                           ContainerProtos.ContainerCommandResponseProto msg)
+      throws Exception {
+    responses.add(msg);
+  }
+
+  @Override
+  public void channelRegistered(ChannelHandlerContext ctx) {
+    LOG.debug("channelRegistered: Connected to ctx");
+    channel = ctx.channel();
+  }
+
+  @Override
+  public void exceptionCaught(ChannelHandlerContext ctx, Throwable cause) {
+    LOG.info("Exception in client " + cause.toString());
+    ctx.close();
+  }
+
+  /**
+   * Since netty is async, we send a work request and then wait until a
+   * response appears in the reply queue. This is a simple sync interface for
+   * clients. We should consider building an async interface for clients if
+   * this turns out to be a performance bottleneck.
+   *
+   * @param request - request.
+   * @return - response.
+   */
+  public ContainerProtos.ContainerCommandResponseProto
+      sendCommand(ContainerProtos.ContainerCommandRequestProto request) {
+
+    ContainerProtos.ContainerCommandResponseProto response;
+    channel.writeAndFlush(request);
+    boolean interrupted = false;
+    for (;;) {
+      try {
+        response = responses.take();
+        break;
+      } catch (InterruptedException ignore) {
+        interrupted = true;
+      }
+    }
+
+    if (interrupted) {
+      Thread.currentThread().interrupt();
+    }
+    return response;
+  }
+
+}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/ef84ac46/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/scm/XceiverClientInitializer.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/scm/XceiverClientInitializer.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/scm/XceiverClientInitializer.java
new file mode 100644
index 0000000..fbfb7ca
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/scm/XceiverClientInitializer.java
@@ -0,0 +1,68 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ *  with the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ *  Unless required by applicable law or agreed to in writing, software
+ *  distributed under the License is distributed on an "AS IS" BASIS,
+ *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ *  See the License for the specific language governing permissions and
+ *  limitations under the License.
+ */
+package org.apache.hadoop.scm;
+
+import io.netty.channel.ChannelInitializer;
+import io.netty.channel.ChannelPipeline;
+import io.netty.channel.socket.SocketChannel;
+import io.netty.handler.codec.protobuf.ProtobufDecoder;
+import io.netty.handler.codec.protobuf.ProtobufEncoder;
+import io.netty.handler.codec.protobuf.ProtobufVarint32FrameDecoder;
+import io.netty.handler.codec.protobuf.ProtobufVarint32LengthFieldPrepender;
+import org.apache.hadoop.hdfs.ozone.protocol.proto.ContainerProtos;
+import org.apache.hadoop.scm.container.common.helpers.Pipeline;
+
+/**
+ * Setup the netty pipeline.
+ */
+public class XceiverClientInitializer extends
+    ChannelInitializer<SocketChannel> {
+  private final Pipeline pipeline;
+
+  /**
+   * Constructs an Initializer for the client pipeline.
+   * @param pipeline  - Pipeline.
+   */
+  public XceiverClientInitializer(Pipeline pipeline) {
+    this.pipeline = pipeline;
+  }
+
+  /**
+   * This method will be called once when the Channel is registered. After
+   * the method returns this instance will be removed from the
+   * ChannelPipeline of the Channel.
+   *
+   * @param ch   Channel which was registered.
+   * @throws Exception is thrown if an error occurs. In that case the
+   *                   Channel will be closed.
+   */
+  @Override
+  protected void initChannel(SocketChannel ch) throws Exception {
+    ChannelPipeline p = ch.pipeline();
+
+    p.addLast(new ProtobufVarint32FrameDecoder());
+    p.addLast(new ProtobufDecoder(ContainerProtos
+        .ContainerCommandResponseProto.getDefaultInstance()));
+
+    p.addLast(new ProtobufVarint32LengthFieldPrepender());
+    p.addLast(new ProtobufEncoder());
+
+    p.addLast(new XceiverClientHandler(this.pipeline));
+
+  }
+}
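
The pipeline above implies the wire format: every message is a protobuf
preceded by a varint32 length prefix. The matching server-side initializer is
not part of this patch (it stays with XceiverServer/XceiverServerHandler in
hadoop-hdfs), but a hypothetical mirror of it would decode requests instead
of responses:

    import io.netty.channel.ChannelInitializer;
    import io.netty.channel.ChannelPipeline;
    import io.netty.channel.socket.SocketChannel;
    import io.netty.handler.codec.protobuf.ProtobufDecoder;
    import io.netty.handler.codec.protobuf.ProtobufEncoder;
    import io.netty.handler.codec.protobuf.ProtobufVarint32FrameDecoder;
    import io.netty.handler.codec.protobuf.ProtobufVarint32LengthFieldPrepender;
    import org.apache.hadoop.hdfs.ozone.protocol.proto.ContainerProtos;

    public class ServerInitializerSketch
        extends ChannelInitializer<SocketChannel> {
      @Override
      protected void initChannel(SocketChannel ch) {
        ChannelPipeline p = ch.pipeline();
        // Inbound: strip the varint32 length prefix, then decode requests.
        p.addLast(new ProtobufVarint32FrameDecoder());
        p.addLast(new ProtobufDecoder(
            ContainerProtos.ContainerCommandRequestProto.getDefaultInstance()));
        // Outbound: prepend the length prefix, then encode responses.
        p.addLast(new ProtobufVarint32LengthFieldPrepender());
        p.addLast(new ProtobufEncoder());
        // A request dispatcher handler would be added last.
      }
    }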

http://git-wip-us.apache.org/repos/asf/hadoop/blob/ef84ac46/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/scm/XceiverClientManager.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/scm/XceiverClientManager.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/scm/XceiverClientManager.java
new file mode 100644
index 0000000..b9d7765
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/scm/XceiverClientManager.java
@@ -0,0 +1,84 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ *  with the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ *  Unless required by applicable law or agreed to in writing, software
+ *  distributed under the License is distributed on an "AS IS" BASIS,
+ *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ *  See the License for the specific language governing permissions and
+ *  limitations under the License.
+ */
+
+package org.apache.hadoop.scm;
+
+import java.io.IOException;
+
+import com.google.common.base.Preconditions;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.scm.container.common.helpers.Pipeline;
+
+/**
+ * XceiverClientManager is responsible for the lifecycle of XceiverClient
+ * instances.  Callers use this class to acquire an XceiverClient instance
+ * connected to the desired container pipeline.  When done, the caller also uses
+ * this class to release the previously acquired XceiverClient instance.
+ *
+ * This class may evolve to implement efficient lifecycle management policies by
+ * caching container location information and pooling connected client instances
+ * for reuse without needing to reestablish a socket connection.  The current
+ * implementation simply allocates and closes a new instance every time.
+ */
+public class XceiverClientManager {
+
+  //TODO : change this to SCM configuration class
+  private final Configuration conf;
+
+  /**
+   * Creates a new XceiverClientManager.
+   *
+   * @param conf configuration
+   */
+  public XceiverClientManager(Configuration conf) {
+    Preconditions.checkNotNull(conf);
+    this.conf = conf;
+  }
+
+  /**
+   * Acquires a XceiverClient connected to a container capable of storing the
+   * specified key.
+   *
+   * @param pipeline the container pipeline for the client connection
+   * @return XceiverClient connected to a container
+   * @throws IOException if an XceiverClient cannot be acquired
+   */
+  public XceiverClient acquireClient(Pipeline pipeline) throws IOException {
+    Preconditions.checkNotNull(pipeline);
+    Preconditions.checkArgument(pipeline.getMachines() != null);
+    Preconditions.checkArgument(!pipeline.getMachines().isEmpty());
+    XceiverClient xceiverClient = new XceiverClient(pipeline, conf);
+    try {
+      xceiverClient.connect();
+    } catch (Exception e) {
+      throw new IOException("Exception connecting XceiverClient.", e);
+    }
+    return xceiverClient;
+  }
+
+  /**
+   * Releases an XceiverClient after use.
+   *
+   * @param xceiverClient client to release
+   */
+  public void releaseClient(XceiverClient xceiverClient) {
+    Preconditions.checkNotNull(xceiverClient);
+    xceiverClient.close();
+  }
+}
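
The javadoc above describes an acquire/use/release discipline; a short sketch
of what that looks like from a caller (the helper method is hypothetical):

    import java.io.IOException;
    import org.apache.hadoop.hdfs.ozone.protocol.proto.ContainerProtos;
    import org.apache.hadoop.scm.XceiverClient;
    import org.apache.hadoop.scm.XceiverClientManager;
    import org.apache.hadoop.scm.container.common.helpers.Pipeline;

    public class XceiverClientManagerUsage {
      static ContainerProtos.ContainerCommandResponseProto send(
          XceiverClientManager manager, Pipeline pipeline,
          ContainerProtos.ContainerCommandRequestProto request)
          throws IOException {
        XceiverClient client = manager.acquireClient(pipeline);
        try {
          return client.sendCommand(request);
        } finally {
          // Today this closes the socket; a future pooled implementation
          // could return the client to a cache instead.
          manager.releaseClient(client);
        }
      }
    }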

http://git-wip-us.apache.org/repos/asf/hadoop/blob/ef84ac46/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/scm/container/common/helpers/Pipeline.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/scm/container/common/helpers/Pipeline.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/scm/container/common/helpers/Pipeline.java
new file mode 100644
index 0000000..fe10ca2
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/scm/container/common/helpers/Pipeline.java
@@ -0,0 +1,128 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ *  with the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ *  Unless required by applicable law or agreed to in writing, software
+ *  distributed under the License is distributed on an "AS IS" BASIS,
+ *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ *  See the License for the specific language governing permissions and
+ *  limitations under the License.
+ */
+
+package org.apache.hadoop.scm.container.common.helpers;
+
+import com.google.common.base.Preconditions;
+import org.apache.hadoop.hdfs.ozone.protocol.proto.ContainerProtos;
+import org.apache.hadoop.hdfs.protocol.DatanodeID;
+import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos;
+
+import java.util.ArrayList;
+import java.util.List;
+import java.util.Map;
+import java.util.TreeMap;
+
+/**
+ * A pipeline represents the group of machines over which a container lives.
+ */
+public class Pipeline {
+  private String containerName;
+  private String leaderID;
+  private Map<String, DatanodeID> datanodes;
+
+  /**
+   * Constructs a new pipeline data structure.
+   *
+   * @param leaderID - First machine in this pipeline.
+   */
+  public Pipeline(String leaderID) {
+    this.leaderID = leaderID;
+    datanodes = new TreeMap<>();
+  }
+
+  /**
+   * Gets pipeline object from protobuf.
+   *
+   * @param pipeline - ProtoBuf definition for the pipeline.
+   * @return Pipeline Object
+   */
+  public static Pipeline getFromProtoBuf(ContainerProtos.Pipeline pipeline) {
+    Preconditions.checkNotNull(pipeline);
+    Pipeline newPipeline = new Pipeline(pipeline.getLeaderID());
+    for (HdfsProtos.DatanodeIDProto dataID : pipeline.getMembersList()) {
+      newPipeline.addMember(DatanodeID.getFromProtoBuf(dataID));
+    }
+
+    newPipeline.setContainerName(pipeline.getContainerName());
+    return newPipeline;
+  }
+
+  /**
+   * Adds a member to the pipeline.
+   *
+   * @param dataNodeId - Datanode to be added.
+   */
+  public void addMember(DatanodeID dataNodeId) {
+    datanodes.put(dataNodeId.getDatanodeUuid(), dataNodeId);
+  }
+
+  /**
+   * Returns the first machine in the set of datanodes.
+   *
+   * @return First Machine.
+   */
+  public DatanodeID getLeader() {
+    return datanodes.get(leaderID);
+  }
+
+  /**
+   * Returns all machines that make up this pipeline.
+   *
+   * @return List of Machines.
+   */
+  public List<DatanodeID> getMachines() {
+    return new ArrayList<>(datanodes.values());
+  }
+
+  /**
+   * Return a Protobuf Pipeline message from pipeline.
+   *
+   * @return Protobuf message
+   */
+  public ContainerProtos.Pipeline getProtobufMessage() {
+    ContainerProtos.Pipeline.Builder builder =
+        ContainerProtos.Pipeline.newBuilder();
+    for (DatanodeID datanode : datanodes.values()) {
+      builder.addMembers(datanode.getProtoBufMessage());
+    }
+    builder.setLeaderID(leaderID);
+    builder.setContainerName(this.containerName);
+    return builder.build();
+  }
+
+  /**
+   * Returns containerName if available.
+   *
+   * @return String.
+   */
+  public String getContainerName() {
+    return containerName;
+  }
+
+  /**
+   * Sets the container Name.
+   *
+   * @param containerName - Name of the container.
+   */
+  public void setContainerName(String containerName) {
+    this.containerName = containerName;
+  }
+}
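
A round-trip sketch for this helper, assuming an existing DatanodeID whose
UUID serves as the leader identifier (which is what getLeader() expects):

    import org.apache.hadoop.hdfs.ozone.protocol.proto.ContainerProtos;
    import org.apache.hadoop.hdfs.protocol.DatanodeID;
    import org.apache.hadoop.scm.container.common.helpers.Pipeline;

    public class PipelineRoundTrip {
      static ContainerProtos.Pipeline toProto(DatanodeID leader,
          String containerName) {
        Pipeline pipeline = new Pipeline(leader.getDatanodeUuid());
        pipeline.addMember(leader);
        pipeline.setContainerName(containerName);
        return pipeline.getProtobufMessage();
      }

      static Pipeline fromProto(ContainerProtos.Pipeline proto) {
        // Rebuilds the member map and container name from the wire form.
        return Pipeline.getFromProtoBuf(proto);
      }
    }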

http://git-wip-us.apache.org/repos/asf/hadoop/blob/ef84ac46/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/scm/container/common/helpers/package-info.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/scm/container/common/helpers/package-info.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/scm/container/common/helpers/package-info.java
new file mode 100644
index 0000000..3fa9663
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/scm/container/common/helpers/package-info.java
@@ -0,0 +1,22 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.scm.container.common.helpers;
+/**
+ Contains protocol buffer helper classes and utilities used in
+ the container implementation.
+ **/
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hadoop/blob/ef84ac46/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/scm/package-info.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/scm/package-info.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/scm/package-info.java
new file mode 100644
index 0000000..ad24f98
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/scm/package-info.java
@@ -0,0 +1,24 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.scm;
+
+/**
+ * This package contains classes for the client of the storage container
+ * protocol.
+ */

http://git-wip-us.apache.org/repos/asf/hadoop/blob/ef84ac46/hadoop-hdfs-project/hadoop-hdfs-client/src/main/proto/DatanodeContainerProtocol.proto
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/proto/DatanodeContainerProtocol.proto b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/proto/DatanodeContainerProtocol.proto
new file mode 100644
index 0000000..04d77db
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/proto/DatanodeContainerProtocol.proto
@@ -0,0 +1,320 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/**
+ * These .proto interfaces are private and Unstable.
+ * Please see http://hadoop.apache.org/docs/stable/hadoop-project-dist/hadoop-common/InterfaceClassification.html
+ * for what changes are allowed for an *Unstable* .proto interface.
+ */
+
+// This file contains protocol buffers that are used to transfer data
+// to and from the datanode.
+option java_package = "org.apache.hadoop.hdfs.ozone.protocol.proto";
+option java_outer_classname = "ContainerProtos";
+option java_generate_equals_and_hash = true;
+package hadoop.hdfs.ozone;
+import "hdfs.proto";
+
+/**
+ * Commands that are used to manipulate the state of containers on a datanode.
+ *
+ * These commands allow us to work against the datanode, from the
+ * Storage Container Manager as well as from clients.
+ *
+ *  1. CreateContainer - This call is usually made by the Storage Container
+ *     Manager when we need to create a new container on a given datanode.
+ *
+ *  2. ReadContainer - Allows an end user to stat a container. For example,
+ *     this allows us to return the metadata of a container.
+ *
+ *  3. UpdateContainer - Updates a container's metadata.
+ *
+ *  4. DeleteContainer - This call is made to delete a container.
+ *
+ *  5. ListContainer - Returns the list of containers on this
+ *     datanode. This will be used by tests and tools.
+ *
+ *  6. PutKey - Given a valid container, creates a key.
+ *
+ *  7. GetKey - Allows a user to read the metadata of a key.
+ *
+ *  8. DeleteKey - Deletes a given key.
+ *
+ *  9. ListKey - Returns a list of keys that are present inside
+ *      a given container.
+ *
+ *  10. ReadChunk - Allows us to read a chunk.
+ *
+ *  11. DeleteChunk - Deletes an unused chunk.
+ *
+ *  12. WriteChunk - Allows us to write a chunk.
+ *
+ *  13. ListChunk - Given a Container/Key, returns the list of Chunks.
+ *
+ *  14. CompactChunk - Re-writes a chunk based on offsets.
+ */
+
+enum Type {
+   CreateContainer = 1;
+   ReadContainer = 2;
+   UpdateContainer = 3;
+   DeleteContainer = 4;
+   ListContainer = 5;
+
+   PutKey = 6;
+   GetKey = 7;
+   DeleteKey = 8;
+   ListKey = 9;
+
+   ReadChunk = 10;
+   DeleteChunk = 11;
+   WriteChunk = 12;
+   ListChunk = 13;
+   CompactChunk = 14;
+}
+
+
+enum Result {
+  SUCCESS = 1;
+  UNSUPPORTED_REQUEST = 2;
+  MALFORMED_REQUEST = 3;
+  CONTAINER_INTERNAL_ERROR = 4;
+}
+
+message ContainerCommandRequestProto {
+  required Type cmdType = 1; // Type of the command
+
+  // A string that identifies this command; we generate a trace ID in the
+  // Ozone frontend, and this allows us to trace that command all over Ozone.
+  optional string traceID = 2;
+
+  // One of the following commands is available when the corresponding
+  // cmdType is set. At the protocol level we allow only
+  // one command in each packet.
+  // TODO : Upgrade to Protobuf 2.6 or later.
+  optional   CreateContainerRequestProto createContainer = 3;
+  optional   ReadContainerRequestProto readContainer = 4;
+  optional   UpdateContainerRequestProto updateContainer = 5;
+  optional   DeleteContainerRequestProto deleteContainer = 6;
+  optional   ListContainerRequestProto listContainer = 7;
+
+  optional   PutKeyRequestProto putKey = 8;
+  optional   GetKeyRequestProto getKey = 9;
+  optional   DeleteKeyRequestProto deleteKey = 10;
+  optional   ListKeyRequestProto listKey = 11;
+
+  optional   ReadChunkRequestProto readChunk = 12;
+  optional   WriteChunkRequestProto writeChunk = 13;
+  optional   DeleteChunkRequestProto deleteChunk = 14;
+  optional   ListChunkRequestProto listChunk = 15;
+}
+
+message ContainerCommandResponseProto {
+  required Type cmdType = 1;
+  optional string traceID = 2;
+
+  optional   CreateContainerResponseProto createContainer = 3;
+  optional   ReadContainerResponseProto readContainer = 4;
+  optional   UpdateContainerResponseProto updateContainer = 5;
+  optional   DeleteContainerResponseProto deleteContainer = 6;
+  optional   ListContainerResponseProto listContainer = 7;
+
+  optional   PutKeyResponseProto putKey = 8;
+  optional   GetKeyResponseProto getKey = 9;
+  optional   DeleteKeyResponseProto deleteKey = 10;
+  optional   ListKeyResponseProto listKey = 11;
+
+  optional  WriteChunkResponseProto writeChunk = 12;
+  optional  ReadChunkResponseProto readChunk = 13;
+  optional  DeleteChunkResponseProto deleteChunk = 14;
+  optional  ListChunkResponseProto listChunk = 15;
+
+  required Result result = 17;
+  optional string message = 18;
+
+}
+
+// A pipeline is composed of one or more datanodes that back a container.
+message Pipeline {
+  required string leaderID = 1;
+  repeated DatanodeIDProto members = 2;
+  required string containerName = 3;
+}
+
+message KeyValue {
+  required string key = 1;
+  optional string value = 2;
+}
+
+message ContainerData {
+  required string name = 1;
+  repeated KeyValue metadata = 2;
+  optional string dbPath = 3;
+  optional string containerPath = 4;
+}
+
+message ContainerMeta {
+  required string fileName = 1;
+  required string hash = 2;
+}
+
+// Container Messages.
+message  CreateContainerRequestProto {
+  required Pipeline pipeline = 1;
+  required ContainerData containerData = 2;
+}
+
+message  CreateContainerResponseProto {
+}
+
+message  ReadContainerRequestProto {
+  required Pipeline pipeline = 1;
+  required string name = 2;
+}
+
+message  ReadContainerResponseProto {
+  optional ContainerData containerData = 2;
+}
+
+message  UpdateContainerRequestProto {
+  required Pipeline pipeline = 1;
+  required ContainerData containerData = 2;
+}
+
+message  UpdateContainerResponseProto {
+}
+
+message  DeleteContainerRequestProto {
+  required Pipeline pipeline = 1;
+  required string name = 2;
+}
+
+message  DeleteContainerResponseProto {
+}
+
+message  ListContainerRequestProto {
+  required Pipeline pipeline = 1;
+  optional string prefix = 2;
+  required uint32 count = 3; // Max Results to return
+  optional string prevKey = 4;  // if this is not set, query from the start.
+}
+
+message  ListContainerResponseProto {
+  repeated ContainerData containerData = 1;
+}
+
+
+message KeyData {
+  required string containerName = 1;
+  required string name = 2;
+  optional int64 flags = 3; // for future use.
+  repeated KeyValue metadata = 4;
+  repeated ChunkInfo chunks = 5;
+}
+
+// Key Messages.
+message  PutKeyRequestProto {
+  required Pipeline pipeline = 1;
+  required KeyData keyData = 2;
+}
+
+message  PutKeyResponseProto {
+}
+
+message  GetKeyRequestProto  {
+  required Pipeline pipeline = 1;
+  required KeyData keyData = 2;
+}
+
+message  GetKeyResponseProto  {
+  required KeyData keyData = 1;
+}
+
+
+message  DeleteKeyRequestProto {
+  required Pipeline pipeline = 1;
+  required string name = 2;
+}
+
+message   DeleteKeyResponseProto {
+}
+
+message  ListKeyRequestProto {
+  required Pipeline pipeline = 1;
+  optional string prefix = 2; // if specified returns keys that match prefix.
+  required string prevKey = 3;
+  required uint32 count = 4;
+
+}
+
+message  ListKeyResponseProto {
+  repeated KeyData keyData = 1;
+}
+
+// Chunk Operations
+
+message ChunkInfo {
+  required string chunkName = 1;
+  required uint64 offset = 2;
+  required uint64 len = 3;
+  optional string checksum = 4;
+  repeated KeyValue metadata = 5;
+}
+
+message  WriteChunkRequestProto  {
+  required Pipeline pipeline = 1;
+  required string keyName = 2;
+  required ChunkInfo chunkData = 3;
+  required bytes data = 4;
+}
+
+message  WriteChunkResponseProto {
+}
+
+message  ReadChunkRequestProto  {
+  required Pipeline pipeline = 1;
+  required string keyName = 2;
+  required ChunkInfo chunkData = 3;
+}
+
+message  ReadChunkResponseProto {
+  required Pipeline pipeline = 1;
+  required ChunkInfo chunkData = 2;
+  required bytes data = 3;
+}
+
+message  DeleteChunkRequestProto {
+  required Pipeline pipeline = 1;
+  required string keyName = 2;
+  required ChunkInfo chunkData = 3;
+}
+
+message  DeleteChunkResponseProto {
+}
+
+message  ListChunkRequestProto {
+  required Pipeline pipeline = 1;
+  required string keyName = 2;
+  required string prevChunkName = 3;
+  required uint32 count = 4;
+}
+
+message  ListChunkResponseProto {
+  repeated ChunkInfo chunkData = 1;
+}
+
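
To make the message structure concrete, here is a sketch of building one of
these commands from Java through the generated ContainerProtos classes; the
trace ID shown is a placeholder:

    import org.apache.hadoop.hdfs.ozone.protocol.proto.ContainerProtos;

    public class ReadContainerRequestBuilder {
      static ContainerProtos.ContainerCommandRequestProto build(
          ContainerProtos.Pipeline pipeline, String containerName) {
        ContainerProtos.ReadContainerRequestProto read =
            ContainerProtos.ReadContainerRequestProto.newBuilder()
                .setPipeline(pipeline)
                .setName(containerName)
                .build();
        // Exactly one command field is set, and it must match cmdType.
        return ContainerProtos.ContainerCommandRequestProto.newBuilder()
            .setCmdType(ContainerProtos.Type.ReadContainer)
            .setTraceID("trace-0001")   // placeholder trace ID
            .setReadContainer(read)
            .build();
      }
    }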

http://git-wip-us.apache.org/repos/asf/hadoop/blob/ef84ac46/hadoop-hdfs-project/hadoop-hdfs/pom.xml
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/pom.xml b/hadoop-hdfs-project/hadoop-hdfs/pom.xml
index 89d766b..0075a0e 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/pom.xml
+++ b/hadoop-hdfs-project/hadoop-hdfs/pom.xml
@@ -352,7 +352,6 @@ http://maven.apache.org/xsd/maven-4.0.0.xsd">
                   <include>QJournalProtocol.proto</include>
                   <include>editlog.proto</include>
                   <include>fsimage.proto</include>
-                  <include>DatanodeContainerProtocol.proto</include>
                   <include>StorageContainerLocationProtocol.proto</include>
                 </includes>
               </source>

http://git-wip-us.apache.org/repos/asf/hadoop/blob/ef84ac46/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/container/common/helpers/ChunkUtils.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/container/common/helpers/ChunkUtils.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/container/common/helpers/ChunkUtils.java
index b4c8aa6..a38567b 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/container/common/helpers/ChunkUtils.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/container/common/helpers/ChunkUtils.java
@@ -25,6 +25,7 @@ import org.apache.hadoop.hdfs.ozone.protocol.proto.ContainerProtos;
 import org.apache.hadoop.io.IOUtils;
 import org.apache.hadoop.ozone.OzoneConsts;
 import org.apache.hadoop.ozone.container.common.impl.ChunkManagerImpl;
+import org.apache.hadoop.scm.container.common.helpers.Pipeline;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/ef84ac46/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/container/common/helpers/Pipeline.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/container/common/helpers/Pipeline.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/container/common/helpers/Pipeline.java
deleted file mode 100644
index c1ec48d..0000000
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/container/common/helpers/Pipeline.java
+++ /dev/null
@@ -1,128 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- *  with the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- *  Unless required by applicable law or agreed to in writing, software
- *  distributed under the License is distributed on an "AS IS" BASIS,
- *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- *  See the License for the specific language governing permissions and
- *  limitations under the License.
- */
-
-package org.apache.hadoop.ozone.container.common.helpers;
-
-import com.google.common.base.Preconditions;
-import org.apache.hadoop.hdfs.ozone.protocol.proto.ContainerProtos;
-import org.apache.hadoop.hdfs.protocol.DatanodeID;
-import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos;
-
-import java.util.ArrayList;
-import java.util.List;
-import java.util.Map;
-import java.util.TreeMap;
-
-/**
- * A pipeline represents the group of machines over which a container lives.
- */
-public class Pipeline {
-  private String containerName;
-  private String leaderID;
-  private Map<String, DatanodeID> datanodes;
-
-  /**
-   * Constructs a new pipeline data structure.
-   *
-   * @param leaderID - First machine in this pipeline.
-   */
-  public Pipeline(String leaderID) {
-    this.leaderID = leaderID;
-    datanodes = new TreeMap<>();
-  }
-
-  /**
-   * Gets pipeline object from protobuf.
-   *
-   * @param pipeline - ProtoBuf definition for the pipeline.
-   * @return Pipeline Object
-   */
-  public static Pipeline getFromProtoBuf(ContainerProtos.Pipeline pipeline) {
-    Preconditions.checkNotNull(pipeline);
-    Pipeline newPipeline = new Pipeline(pipeline.getLeaderID());
-    for (HdfsProtos.DatanodeIDProto dataID : pipeline.getMembersList()) {
-      newPipeline.addMember(DatanodeID.getFromProtoBuf(dataID));
-    }
-
-    newPipeline.setContainerName(pipeline.getContainerName());
-    return newPipeline;
-  }
-
-  /**
-   * Adds a member to the pipeline.
-   *
-   * @param dataNodeId - Datanode to be added.
-   */
-  public void addMember(DatanodeID dataNodeId) {
-    datanodes.put(dataNodeId.getDatanodeUuid(), dataNodeId);
-  }
-
-  /**
-   * Returns the first machine in the set of datanodes.
-   *
-   * @return First Machine.
-   */
-  public DatanodeID getLeader() {
-    return datanodes.get(leaderID);
-  }
-
-  /**
-   * Returns all machines that make up this pipeline.
-   *
-   * @return List of Machines.
-   */
-  public List<DatanodeID> getMachines() {
-    return new ArrayList<>(datanodes.values());
-  }
-
-  /**
-   * Return a Protobuf Pipeline message from pipeline.
-   *
-   * @return Protobuf message
-   */
-  public ContainerProtos.Pipeline getProtobufMessage() {
-    ContainerProtos.Pipeline.Builder builder =
-        ContainerProtos.Pipeline.newBuilder();
-    for (DatanodeID datanode : datanodes.values()) {
-      builder.addMembers(datanode.getProtoBufMessage());
-    }
-    builder.setLeaderID(leaderID);
-    builder.setContainerName(this.containerName);
-    return builder.build();
-  }
-
-  /**
-   * Returns containerName if available.
-   *
-   * @return String.
-   */
-  public String getContainerName() {
-    return containerName;
-  }
-
-  /**
-   * Sets the container Name.
-   *
-   * @param containerName - Name of the container.
-   */
-  public void setContainerName(String containerName) {
-    this.containerName = containerName;
-  }
-}
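
The Pipeline helper is not removed outright; as the import rewrites below show, it moves to org.apache.hadoop.scm.container.common.helpers. Assuming the relocated class keeps the API shown above, typical usage looks like this sketch, where leader is a hypothetical DatanodeID and the container name is made up:

    import org.apache.hadoop.hdfs.ozone.protocol.proto.ContainerProtos;
    import org.apache.hadoop.scm.container.common.helpers.Pipeline;

    // The pipeline is created with the leader's UUID, then populated.
    Pipeline pipeline = new Pipeline(leader.getDatanodeUuid());
    pipeline.addMember(leader);
    pipeline.setContainerName("container1");

    // Round-trip through the protobuf wire form.
    ContainerProtos.Pipeline proto = pipeline.getProtobufMessage();
    Pipeline restored = Pipeline.getFromProtoBuf(proto);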

http://git-wip-us.apache.org/repos/asf/hadoop/blob/ef84ac46/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/container/common/impl/ChunkManagerImpl.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/container/common/impl/ChunkManagerImpl.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/container/common/impl/ChunkManagerImpl.java
index c30fa8a..9a04a77 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/container/common/impl/ChunkManagerImpl.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/container/common/impl/ChunkManagerImpl.java
@@ -21,7 +21,7 @@ import com.google.common.base.Preconditions;
 import org.apache.hadoop.fs.FileUtil;
 import org.apache.hadoop.ozone.container.common.helpers.ChunkInfo;
 import org.apache.hadoop.ozone.container.common.helpers.ChunkUtils;
-import org.apache.hadoop.ozone.container.common.helpers.Pipeline;
+import org.apache.hadoop.scm.container.common.helpers.Pipeline;
 import org.apache.hadoop.ozone.container.common.interfaces.ChunkManager;
 import org.apache.hadoop.ozone.container.common.interfaces.ContainerManager;
 import org.slf4j.Logger;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/ef84ac46/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/container/common/impl/ContainerManagerImpl.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/container/common/impl/ContainerManagerImpl.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/container/common/impl/ContainerManagerImpl.java
index 0a9e22a..1b1c803 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/container/common/impl/ContainerManagerImpl.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/container/common/impl/ContainerManagerImpl.java
@@ -30,7 +30,7 @@ import org.apache.hadoop.io.IOUtils;
 import org.apache.hadoop.ozone.OzoneConsts;
 import org.apache.hadoop.ozone.container.common.helpers.ContainerData;
 import org.apache.hadoop.ozone.container.common.helpers.ContainerUtils;
-import org.apache.hadoop.ozone.container.common.helpers.Pipeline;
+import org.apache.hadoop.scm.container.common.helpers.Pipeline;
 import org.apache.hadoop.ozone.container.common.interfaces.ChunkManager;
 import org.apache.hadoop.ozone.container.common.interfaces.ContainerLocationManager;
 import org.apache.hadoop.ozone.container.common.interfaces.ContainerManager;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/ef84ac46/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/container/common/impl/Dispatcher.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/container/common/impl/Dispatcher.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/container/common/impl/Dispatcher.java
index bad1d23..88c5d98 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/container/common/impl/Dispatcher.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/container/common/impl/Dispatcher.java
@@ -31,7 +31,7 @@ import org.apache.hadoop.ozone.container.common.helpers.ContainerData;
 import org.apache.hadoop.ozone.container.common.helpers.ContainerUtils;
 import org.apache.hadoop.ozone.container.common.helpers.KeyData;
 import org.apache.hadoop.ozone.container.common.helpers.KeyUtils;
-import org.apache.hadoop.ozone.container.common.helpers.Pipeline;
+import org.apache.hadoop.scm.container.common.helpers.Pipeline;
 import org.apache.hadoop.ozone.container.common.interfaces.ContainerDispatcher;
 import org.apache.hadoop.ozone.container.common.interfaces.ContainerManager;
 import org.slf4j.Logger;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/ef84ac46/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/container/common/impl/KeyManagerImpl.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/container/common/impl/KeyManagerImpl.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/container/common/impl/KeyManagerImpl.java
index a1b6aa9..56f05c8 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/container/common/impl/KeyManagerImpl.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/container/common/impl/KeyManagerImpl.java
@@ -25,7 +25,7 @@ import org.apache.hadoop.ozone.OzoneConfigKeys;
 import org.apache.hadoop.ozone.container.common.helpers.ContainerData;
 import org.apache.hadoop.ozone.container.common.helpers.KeyData;
 import org.apache.hadoop.ozone.container.common.helpers.KeyUtils;
-import org.apache.hadoop.ozone.container.common.helpers.Pipeline;
+import org.apache.hadoop.scm.container.common.helpers.Pipeline;
 import org.apache.hadoop.ozone.container.common.interfaces.ContainerManager;
 import org.apache.hadoop.ozone.container.common.interfaces.KeyManager;
 import org.apache.hadoop.ozone.container.common.utils.ContainerCache;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/ef84ac46/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/container/common/interfaces/ChunkManager.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/container/common/interfaces/ChunkManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/container/common/interfaces/ChunkManager.java
index 5931c96..7adcd56 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/container/common/interfaces/ChunkManager.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/container/common/interfaces/ChunkManager.java
@@ -19,7 +19,7 @@
 package org.apache.hadoop.ozone.container.common.interfaces;
 
 import org.apache.hadoop.ozone.container.common.helpers.ChunkInfo;
-import org.apache.hadoop.ozone.container.common.helpers.Pipeline;
+import org.apache.hadoop.scm.container.common.helpers.Pipeline;
 
 import java.io.IOException;
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/ef84ac46/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/container/common/interfaces/ContainerManager.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/container/common/interfaces/ContainerManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/container/common/interfaces/ContainerManager.java
index 5602920..0524443 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/container/common/interfaces/ContainerManager.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/container/common/interfaces/ContainerManager.java
@@ -25,7 +25,7 @@ import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsDatasetSpi;
 import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsVolumeSpi;
 import org.apache.hadoop.hdfs.util.RwLock;
 import org.apache.hadoop.ozone.container.common.helpers.ContainerData;
-import org.apache.hadoop.ozone.container.common.helpers.Pipeline;
+import org.apache.hadoop.scm.container.common.helpers.Pipeline;
 
 import java.io.IOException;
 import java.nio.file.Path;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/ef84ac46/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/container/common/interfaces/KeyManager.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/container/common/interfaces/KeyManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/container/common/interfaces/KeyManager.java
index bd58a01..c6d51e3 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/container/common/interfaces/KeyManager.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/container/common/interfaces/KeyManager.java
@@ -18,7 +18,7 @@
 package org.apache.hadoop.ozone.container.common.interfaces;
 
 import org.apache.hadoop.ozone.container.common.helpers.KeyData;
-import org.apache.hadoop.ozone.container.common.helpers.Pipeline;
+import org.apache.hadoop.scm.container.common.helpers.Pipeline;
 
 import java.io.IOException;
 import java.util.List;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/ef84ac46/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/container/common/transport/client/XceiverClient.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/container/common/transport/client/XceiverClient.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/container/common/transport/client/XceiverClient.java
deleted file mode 100644
index d25119b..0000000
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/container/common/transport/client/XceiverClient.java
+++ /dev/null
@@ -1,135 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- *  with the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- *  Unless required by applicable law or agreed to in writing, software
- *  distributed under the License is distributed on an "AS IS" BASIS,
- *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- *  See the License for the specific language governing permissions and
- *  limitations under the License.
- */
-
-package org.apache.hadoop.ozone.container.common.transport.client;
-
-import com.google.common.base.Preconditions;
-import io.netty.bootstrap.Bootstrap;
-import io.netty.channel.ChannelFuture;
-import io.netty.channel.EventLoopGroup;
-import io.netty.channel.nio.NioEventLoopGroup;
-import io.netty.channel.socket.nio.NioSocketChannel;
-import io.netty.handler.logging.LogLevel;
-import io.netty.handler.logging.LoggingHandler;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hdfs.ozone.protocol.proto.ContainerProtos;
-import org.apache.hadoop.hdfs.protocol.DatanodeID;
-import org.apache.hadoop.ozone.OzoneConfigKeys;
-import org.apache.hadoop.ozone.container.common.helpers.Pipeline;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import java.io.Closeable;
-import java.io.IOException;
-import java.util.concurrent.TimeUnit;
-
-/**
- * A client for the storage container protocol.
- */
-public class XceiverClient implements Closeable {
-  static final Logger LOG = LoggerFactory.getLogger(XceiverClient.class);
-  private final Pipeline pipeline;
-  private final Configuration config;
-  private ChannelFuture channelFuture;
-  private Bootstrap b;
-  private EventLoopGroup group;
-
-  /**
-   * Constructs a client that can communicate with the Container framework on
-   * data nodes.
-   * @param pipeline - Pipeline that defines the machines.
-   * @param config -- Ozone Config
-   */
-  public XceiverClient(Pipeline pipeline, Configuration config) {
-    Preconditions.checkNotNull(pipeline);
-    Preconditions.checkNotNull(config);
-    this.pipeline = pipeline;
-    this.config = config;
-  }
-
-  /**
-   * Connects to the leader in the pipeline.
-   */
-  public void connect() throws Exception {
-    if (channelFuture != null
-        && channelFuture.channel() != null
-        && channelFuture.channel().isActive()) {
-      throw new IOException("This client is already connected to a host.");
-    }
-
-    group = new NioEventLoopGroup();
-    b = new Bootstrap();
-    b.group(group)
-        .channel(NioSocketChannel.class)
-        .handler(new LoggingHandler(LogLevel.INFO))
-        .handler(new XceiverClientInitializer(this.pipeline));
-    DatanodeID leader = this.pipeline.getLeader();
-
-    // read port from the data node, on failure use default configured
-    // port.
-    int port = leader.getContainerPort();
-    if (port == 0) {
-      port = config.getInt(OzoneConfigKeys.DFS_CONTAINER_IPC_PORT,
-          OzoneConfigKeys.DFS_CONTAINER_IPC_PORT_DEFAULT);
-    }
-    LOG.debug("Connecting to server Port : " + port);
-    channelFuture = b.connect(leader.getHostName(), port).sync();
-  }
-
-  /**
-   * Close the client.
-   */
-  @Override
-  public void close() {
-    if(group != null) {
-      group.shutdownGracefully(0, 0, TimeUnit.SECONDS);
-    }
-
-    if (channelFuture != null) {
-      channelFuture.channel().close();
-    }
-  }
-
-  /**
-   * Returns the pipeline of machines that host the container used by this
-   * client.
-   *
-   * @return pipeline of machines that host the container
-   */
-  public Pipeline getPipeline() {
-    return pipeline;
-  }
-
-  /**
-   * Sends a given command to server and gets the reply back.
-   * @param request Request
-   * @return Response to the command
-   * @throws IOException
-   */
-  public ContainerProtos.ContainerCommandResponseProto sendCommand(
-      ContainerProtos.ContainerCommandRequestProto request)
-      throws IOException {
-    if((channelFuture == null) || (!channelFuture.channel().isActive())) {
-      throw new IOException("This channel is not connected.");
-    }
-    XceiverClientHandler handler =
-        channelFuture.channel().pipeline().get(XceiverClientHandler.class);
-
-    return handler.sendCommand(request);
-  }
-}
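
XceiverClient likewise moves to org.apache.hadoop.scm.XceiverClient (see the updated imports further down). Here is a hedged sketch of the connect/send/close cycle, assuming the relocated class keeps the signatures shown above; the pipeline and request are built elsewhere:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hdfs.ozone.protocol.proto.ContainerProtos;
    import org.apache.hadoop.scm.XceiverClient;
    import org.apache.hadoop.scm.container.common.helpers.Pipeline;

    ContainerProtos.ContainerCommandResponseProto sendOnce(
        Pipeline pipeline,
        ContainerProtos.ContainerCommandRequestProto request) throws Exception {
      XceiverClient client = new XceiverClient(pipeline, new Configuration());
      try {
        client.connect();                 // connects to the pipeline leader
        return client.sendCommand(request);
      } finally {
        client.close();                   // shuts down the netty event loop
      }
    }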

http://git-wip-us.apache.org/repos/asf/hadoop/blob/ef84ac46/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/container/common/transport/client/XceiverClientHandler.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/container/common/transport/client/XceiverClientHandler.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/container/common/transport/client/XceiverClientHandler.java
deleted file mode 100644
index c9a3ad3..0000000
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/container/common/transport/client/XceiverClientHandler.java
+++ /dev/null
@@ -1,112 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- *  with the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- *  Unless required by applicable law or agreed to in writing, software
- *  distributed under the License is distributed on an "AS IS" BASIS,
- *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- *  See the License for the specific language governing permissions and
- *  limitations under the License.
- */
-package org.apache.hadoop.ozone.container.common.transport.client;
-
-import io.netty.channel.Channel;
-import io.netty.channel.ChannelHandlerContext;
-import io.netty.channel.SimpleChannelInboundHandler;
-import org.apache.hadoop.hdfs.ozone.protocol.proto.ContainerProtos;
-import org.apache.hadoop.ozone.container.common.helpers.Pipeline;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import java.util.concurrent.BlockingQueue;
-import java.util.concurrent.LinkedBlockingQueue;
-
-/**
- * Netty client handler.
- */
-public class XceiverClientHandler extends
-    SimpleChannelInboundHandler<ContainerProtos.ContainerCommandResponseProto> {
-
-  static final Logger LOG = LoggerFactory.getLogger(XceiverClientHandler.class);
-  private final BlockingQueue<ContainerProtos.ContainerCommandResponseProto>
-      responses = new LinkedBlockingQueue<>();
-  private final Pipeline pipeline;
-  private volatile Channel channel;
-
-  /**
-   * Constructs a client that can communicate to a container server.
-   */
-  public XceiverClientHandler(Pipeline pipeline) {
-    super(false);
-    this.pipeline = pipeline;
-  }
-
-  /**
-   * <strong>Please keep in mind that this method will be renamed to {@code
-   * messageReceived(ChannelHandlerContext, I)} in 5.0.</strong>
-   * <p>
-   * Is called for each message of type {@link ContainerProtos
-   * .ContainerCommandResponseProto}.
-   *
-   * @param ctx the {@link ChannelHandlerContext} which this {@link
-   *            SimpleChannelInboundHandler} belongs to
-   * @param msg the message to handle
-   * @throws Exception is thrown if an error occurred
-   */
-  @Override
-  public void channelRead0(ChannelHandlerContext ctx,
-                           ContainerProtos.ContainerCommandResponseProto msg)
-      throws Exception {
-    responses.add(msg);
-  }
-
-  @Override
-  public void channelRegistered(ChannelHandlerContext ctx) {
-    LOG.debug("channelRegistered: Connected to ctx");
-    channel = ctx.channel();
-  }
-
-  @Override
-  public void exceptionCaught(ChannelHandlerContext ctx, Throwable cause) {
-    LOG.info("Exception in client " + cause.toString());
-    ctx.close();
-  }
-
-  /**
-   * Since netty is async, we send a work request and then wait until a response
-   * appears in the reply queue. This is a simple sync interface for clients. We
-   * should consider building async interfaces for the client if this turns out
-   * to be a performance bottleneck.
-   *
-   * @param request - request.
-   * @return -- response
-   */
-  public ContainerProtos.ContainerCommandResponseProto
-      sendCommand(ContainerProtos.ContainerCommandRequestProto request) {
-
-    ContainerProtos.ContainerCommandResponseProto response;
-    channel.writeAndFlush(request);
-    boolean interrupted = false;
-    for (;;) {
-      try {
-        response = responses.take();
-        break;
-      } catch (InterruptedException ignore) {
-        interrupted = true;
-      }
-    }
-
-    if (interrupted) {
-      Thread.currentThread().interrupt();
-    }
-    return response;
-  }
-
-}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/ef84ac46/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/container/common/transport/client/XceiverClientInitializer.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/container/common/transport/client/XceiverClientInitializer.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/container/common/transport/client/XceiverClientInitializer.java
deleted file mode 100644
index cbf8ee9..0000000
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/container/common/transport/client/XceiverClientInitializer.java
+++ /dev/null
@@ -1,68 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- *  with the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- *  Unless required by applicable law or agreed to in writing, software
- *  distributed under the License is distributed on an "AS IS" BASIS,
- *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- *  See the License for the specific language governing permissions and
- *  limitations under the License.
- */
-package org.apache.hadoop.ozone.container.common.transport.client;
-
-import io.netty.channel.ChannelInitializer;
-import io.netty.channel.ChannelPipeline;
-import io.netty.channel.socket.SocketChannel;
-import io.netty.handler.codec.protobuf.ProtobufDecoder;
-import io.netty.handler.codec.protobuf.ProtobufEncoder;
-import io.netty.handler.codec.protobuf.ProtobufVarint32FrameDecoder;
-import io.netty.handler.codec.protobuf.ProtobufVarint32LengthFieldPrepender;
-import org.apache.hadoop.ozone.container.common.helpers.Pipeline;
-import org.apache.hadoop.hdfs.ozone.protocol.proto.ContainerProtos;
-
-/**
- * Setup the netty pipeline.
- */
-public class XceiverClientInitializer extends
-    ChannelInitializer<SocketChannel> {
-  private final Pipeline pipeline;
-
-  /**
-   * Constructs an Initializer for the client pipeline.
-   * @param pipeline  - Pipeline.
-   */
-  public XceiverClientInitializer(Pipeline pipeline) {
-    this.pipeline = pipeline;
-  }
-
-  /**
-   * This method will be called once when the Channel is registered. After
-   * the method returns this instance will be removed from the
-   * ChannelPipeline of the Channel.
-   *
-   * @param ch   Channel which was registered.
-   * @throws Exception is thrown if an error occurs. In that case the
-   *                   Channel will be closed.
-   */
-  @Override
-  protected void initChannel(SocketChannel ch) throws Exception {
-    ChannelPipeline p = ch.pipeline();
-
-    p.addLast(new ProtobufVarint32FrameDecoder());
-    p.addLast(new ProtobufDecoder(ContainerProtos
-        .ContainerCommandResponseProto.getDefaultInstance()));
-
-    p.addLast(new ProtobufVarint32LengthFieldPrepender());
-    p.addLast(new ProtobufEncoder());
-
-    p.addLast(new XceiverClientHandler(this.pipeline));
-
-  }
-}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/ef84ac46/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/container/common/transport/client/XceiverClientManager.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/container/common/transport/client/XceiverClientManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/container/common/transport/client/XceiverClientManager.java
deleted file mode 100644
index 8123ae9..0000000
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/container/common/transport/client/XceiverClientManager.java
+++ /dev/null
@@ -1,83 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- *  with the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- *  Unless required by applicable law or agreed to in writing, software
- *  distributed under the License is distributed on an "AS IS" BASIS,
- *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- *  See the License for the specific language governing permissions and
- *  limitations under the License.
- */
-
-package org.apache.hadoop.ozone.container.common.transport.client;
-
-import java.io.IOException;
-
-import com.google.common.base.Preconditions;
-
-import org.apache.hadoop.ozone.OzoneConfiguration;
-import org.apache.hadoop.ozone.container.common.helpers.Pipeline;
-
-/**
- * XceiverClientManager is responsible for the lifecycle of XceiverClient
- * instances.  Callers use this class to acquire an XceiverClient instance
- * connected to the desired container pipeline.  When done, the caller also uses
- * this class to release the previously acquired XceiverClient instance.
- *
- * This class may evolve to implement efficient lifecycle management policies by
- * caching container location information and pooling connected client instances
- * for reuse without needing to reestablish a socket connection.  The current
- * implementation simply allocates and closes a new instance every time.
- */
-public class XceiverClientManager {
-
-  private final OzoneConfiguration conf;
-
-  /**
-   * Creates a new XceiverClientManager.
-   *
-   * @param conf configuration
-   */
-  public XceiverClientManager(OzoneConfiguration conf) {
-    Preconditions.checkNotNull(conf);
-    this.conf = conf;
-  }
-
-  /**
-   * Acquires a XceiverClient connected to a container capable of storing the
-   * specified key.
-   *
-   * @param pipeline the container pipeline for the client connection
-   * @return XceiverClient connected to a container
-   * @throws IOException if an XceiverClient cannot be acquired
-   */
-  public XceiverClient acquireClient(Pipeline pipeline) throws IOException {
-    Preconditions.checkNotNull(pipeline);
-    Preconditions.checkArgument(pipeline.getMachines() != null);
-    Preconditions.checkArgument(!pipeline.getMachines().isEmpty());
-    XceiverClient xceiverClient = new XceiverClient(pipeline, conf);
-    try {
-      xceiverClient.connect();
-    } catch (Exception e) {
-      throw new IOException("Exception connecting XceiverClient.", e);
-    }
-    return xceiverClient;
-  }
-
-  /**
-   * Releases an XceiverClient after use.
-   *
-   * @param xceiverClient client to release
-   */
-  public void releaseClient(XceiverClient xceiverClient) {
-    Preconditions.checkNotNull(xceiverClient);
-    xceiverClient.close();
-  }
-}
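
The javadoc above spells out the intended lifecycle: acquire a client for a pipeline, use it, then release it. A minimal sketch of that pattern against the relocated org.apache.hadoop.scm.XceiverClientManager, assuming the same API; pipeline and request come from elsewhere, and the enclosing method propagates IOException:

    import org.apache.hadoop.ozone.OzoneConfiguration;
    import org.apache.hadoop.scm.XceiverClient;
    import org.apache.hadoop.scm.XceiverClientManager;

    XceiverClientManager manager =
        new XceiverClientManager(new OzoneConfiguration());
    XceiverClient client = manager.acquireClient(pipeline);
    try {
      client.sendCommand(request);       // any container command proto
    } finally {
      manager.releaseClient(client);     // always release, even on failure
    }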

http://git-wip-us.apache.org/repos/asf/hadoop/blob/ef84ac46/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/container/common/transport/client/package-info.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/container/common/transport/client/package-info.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/container/common/transport/client/package-info.java
deleted file mode 100644
index d3c0278..0000000
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/container/common/transport/client/package-info.java
+++ /dev/null
@@ -1,24 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.ozone.container.common.transport.client;
-
-/**
- * This package contains classes for the client of the storage container
- * protocol.
- */

http://git-wip-us.apache.org/repos/asf/hadoop/blob/ef84ac46/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/storage/StorageContainerManager.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/storage/StorageContainerManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/storage/StorageContainerManager.java
index 1d18b5b..6567ae4 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/storage/StorageContainerManager.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/storage/StorageContainerManager.java
@@ -78,9 +78,9 @@ import org.apache.hadoop.ipc.ProtobufRpcEngine;
 import org.apache.hadoop.ipc.RPC;
 import org.apache.hadoop.ozone.OzoneConfiguration;
 import org.apache.hadoop.ozone.OzoneClientUtils;
-import org.apache.hadoop.ozone.container.common.helpers.Pipeline;
-import org.apache.hadoop.ozone.container.common.transport.client.XceiverClient;
-import org.apache.hadoop.ozone.container.common.transport.client.XceiverClientManager;
+import org.apache.hadoop.scm.container.common.helpers.Pipeline;
+import org.apache.hadoop.scm.XceiverClient;
+import org.apache.hadoop.scm.XceiverClientManager;
 import org.apache.hadoop.ozone.protocol.LocatedContainer;
 import org.apache.hadoop.ozone.protocol.StorageContainerLocationProtocol;
 import org.apache.hadoop.ozone.protocol.proto.StorageContainerLocationProtocolProtos;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/ef84ac46/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/web/storage/ChunkInputStream.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/web/storage/ChunkInputStream.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/web/storage/ChunkInputStream.java
index 166e71c..f639b4a 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/web/storage/ChunkInputStream.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/web/storage/ChunkInputStream.java
@@ -29,8 +29,8 @@ import com.google.protobuf.ByteString;
 
 import org.apache.hadoop.hdfs.ozone.protocol.proto.ContainerProtos.ReadChunkResponseProto;
 import org.apache.hadoop.hdfs.ozone.protocol.proto.ContainerProtos.ChunkInfo;
-import org.apache.hadoop.ozone.container.common.transport.client.XceiverClient;
-import org.apache.hadoop.ozone.container.common.transport.client.XceiverClientManager;
+import org.apache.hadoop.scm.XceiverClient;
+import org.apache.hadoop.scm.XceiverClientManager;
 import org.apache.hadoop.ozone.web.exceptions.OzoneException;
 import org.apache.hadoop.ozone.web.handlers.UserArgs;
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/ef84ac46/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/web/storage/ChunkOutputStream.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/web/storage/ChunkOutputStream.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/web/storage/ChunkOutputStream.java
index e153bb2..1796a69 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/web/storage/ChunkOutputStream.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/web/storage/ChunkOutputStream.java
@@ -31,8 +31,8 @@ import com.google.protobuf.ByteString;
 
 import org.apache.hadoop.hdfs.ozone.protocol.proto.ContainerProtos.ChunkInfo;
 import org.apache.hadoop.hdfs.ozone.protocol.proto.ContainerProtos.KeyData;
-import org.apache.hadoop.ozone.container.common.transport.client.XceiverClient;
-import org.apache.hadoop.ozone.container.common.transport.client.XceiverClientManager;
+import org.apache.hadoop.scm.XceiverClient;
+import org.apache.hadoop.scm.XceiverClientManager;
 import org.apache.hadoop.ozone.web.exceptions.OzoneException;
 import org.apache.hadoop.ozone.web.handlers.UserArgs;
 import org.apache.hadoop.ozone.web.response.KeyInfo;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/ef84ac46/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/web/storage/ContainerProtocolCalls.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/web/storage/ContainerProtocolCalls.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/web/storage/ContainerProtocolCalls.java
index 4cb3ab9..c683a74 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/web/storage/ContainerProtocolCalls.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/web/storage/ContainerProtocolCalls.java
@@ -36,7 +36,7 @@ import org.apache.hadoop.hdfs.ozone.protocol.proto.ContainerProtos.ReadChunkRequ
 import org.apache.hadoop.hdfs.ozone.protocol.proto.ContainerProtos.ReadChunkResponseProto;
 import org.apache.hadoop.hdfs.ozone.protocol.proto.ContainerProtos.Type;
 import org.apache.hadoop.hdfs.ozone.protocol.proto.ContainerProtos.WriteChunkRequestProto;
-import org.apache.hadoop.ozone.container.common.transport.client.XceiverClient;
+import org.apache.hadoop.scm.XceiverClient;
 import org.apache.hadoop.ozone.web.exceptions.ErrorTable;
 import org.apache.hadoop.ozone.web.exceptions.OzoneException;
 import org.apache.hadoop.ozone.web.handlers.UserArgs;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/ef84ac46/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/web/storage/DistributedStorageHandler.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/web/storage/DistributedStorageHandler.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/web/storage/DistributedStorageHandler.java
index 8d8fac7..143d058 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/web/storage/DistributedStorageHandler.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/web/storage/DistributedStorageHandler.java
@@ -39,9 +39,9 @@ import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
 import org.apache.hadoop.hdfs.server.datanode.fsdataset.LengthInputStream;
 import org.apache.hadoop.ozone.OzoneConfiguration;
 import org.apache.hadoop.ozone.OzoneConsts;
-import org.apache.hadoop.ozone.container.common.helpers.Pipeline;
-import org.apache.hadoop.ozone.container.common.transport.client.XceiverClient;
-import org.apache.hadoop.ozone.container.common.transport.client.XceiverClientManager;
+import org.apache.hadoop.scm.container.common.helpers.Pipeline;
+import org.apache.hadoop.scm.XceiverClient;
+import org.apache.hadoop.scm.XceiverClientManager;
 import org.apache.hadoop.ozone.protocol.LocatedContainer;
 import org.apache.hadoop.ozone.protocolPB.StorageContainerLocationProtocolClientSideTranslatorPB;
 import org.apache.hadoop.ozone.web.exceptions.OzoneException;

