Posted to common-commits@hadoop.apache.org by ae...@apache.org on 2017/06/09 17:30:46 UTC

hadoop git commit: HDFS-11185. Ozone: remove disabled tests. Contributed by Anu Engineer.

Repository: hadoop
Updated Branches:
  refs/heads/HDFS-7240 2ec6464a1 -> 423c4bde7


HDFS-11185. Ozone: remove disabled tests. Contributed by Anu Engineer.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/423c4bde
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/423c4bde
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/423c4bde

Branch: refs/heads/HDFS-7240
Commit: 423c4bde74622944af9e8bfcca3fb6a6b18d96e4
Parents: 2ec6464
Author: Anu Engineer <ae...@apache.org>
Authored: Fri Jun 9 10:25:25 2017 -0700
Committer: Anu Engineer <ae...@apache.org>
Committed: Fri Jun 9 10:25:25 2017 -0700

----------------------------------------------------------------------
 .../StorageContainerLocationProtocol.java       | 13 ---
 ...rLocationProtocolClientSideTranslatorPB.java | 40 ---------
 .../StorageContainerLocationProtocol.proto      | 33 --------
 ...rLocationProtocolServerSideTranslatorPB.java | 48 +----------
 .../ozone/scm/StorageContainerManager.java      |  9 ---
 .../web/storage/DistributedStorageHandler.java  | 21 -----
 .../ozone/TestStorageContainerManager.java      | 85 --------------------
 7 files changed, 1 insertion(+), 248 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/423c4bde/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/scm/protocol/StorageContainerLocationProtocol.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/scm/protocol/StorageContainerLocationProtocol.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/scm/protocol/StorageContainerLocationProtocol.java
index c9fa712..23be9e5 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/scm/protocol/StorageContainerLocationProtocol.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/scm/protocol/StorageContainerLocationProtocol.java
@@ -19,7 +19,6 @@
 package org.apache.hadoop.scm.protocol;
 
 import java.io.IOException;
-import java.util.Set;
 
 import org.apache.hadoop.scm.client.ScmClient;
 import org.apache.hadoop.scm.container.common.helpers.Pipeline;
@@ -31,18 +30,6 @@ import org.apache.hadoop.scm.container.common.helpers.Pipeline;
 public interface StorageContainerLocationProtocol {
 
   /**
-   * Find the set of nodes that currently host the container of an object, as
-   * identified by the object key hash.  This method supports batch lookup by
-   * passing multiple key hashes.
-   *
-   * @param keys batch of object keys to find
-   * @return located containers for each object key
-   * @throws IOException if there is any failure
-   */
-  Set<LocatedContainer> getStorageContainerLocations(Set<String> keys)
-      throws IOException;
-
-  /**
    * Asks SCM where a container should be allocated. SCM responds with the
    * set of datanodes that should be used creating this container.
    * @param containerName - Name of the container.

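For context, here is a hedged sketch of how a caller would have used the batch lookup method deleted above, before this patch. The method signature and the LocatedContainer getters come from the removed code elsewhere in this diff; the helper method itself is hypothetical and only illustrates the call pattern.

import java.io.IOException;
import java.util.Set;
import org.apache.hadoop.scm.protocol.LocatedContainer;
import org.apache.hadoop.scm.protocol.StorageContainerLocationProtocol;

// Hypothetical caller, not part of this patch: batch-resolves container
// locations for a set of object keys using the interface method removed above.
static void printContainerLocations(StorageContainerLocationProtocol client,
    Set<String> keys) throws IOException {
  Set<LocatedContainer> containers = client.getStorageContainerLocations(keys);
  for (LocatedContainer container : containers) {
    // Each located container reports its name, replica locations and leader.
    System.out.println(container.getContainerName()
        + " with " + container.getLocations().size() + " location(s),"
        + " leader=" + container.getLeader());
  }
}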
http://git-wip-us.apache.org/repos/asf/hadoop/blob/423c4bde/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/scm/protocolPB/StorageContainerLocationProtocolClientSideTranslatorPB.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/scm/protocolPB/StorageContainerLocationProtocolClientSideTranslatorPB.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/scm/protocolPB/StorageContainerLocationProtocolClientSideTranslatorPB.java
index bf77907..2d5797a 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/scm/protocolPB/StorageContainerLocationProtocolClientSideTranslatorPB.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/scm/protocolPB/StorageContainerLocationProtocolClientSideTranslatorPB.java
@@ -18,24 +18,17 @@ package org.apache.hadoop.scm.protocolPB;
 
 import com.google.common.base.Preconditions;
 import com.google.common.base.Strings;
-import com.google.common.collect.Sets;
 import com.google.protobuf.RpcController;
 import com.google.protobuf.ServiceException;
 import org.apache.hadoop.classification.InterfaceAudience;
-import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
-import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto;
 import org.apache.hadoop.hdfs.protocolPB.PBHelperClient;
 import org.apache.hadoop.ipc.ProtobufHelper;
 import org.apache.hadoop.ipc.ProtocolTranslator;
 import org.apache.hadoop.ipc.RPC;
 import org.apache.hadoop.scm.client.ScmClient;
-import org.apache.hadoop.scm.protocol.LocatedContainer;
 import org.apache.hadoop.scm.protocol.StorageContainerLocationProtocol;
 import org.apache.hadoop.ozone.protocol.proto.StorageContainerLocationProtocolProtos.ContainerRequestProto;
 import org.apache.hadoop.ozone.protocol.proto.StorageContainerLocationProtocolProtos.ContainerResponseProto;
-import org.apache.hadoop.ozone.protocol.proto.StorageContainerLocationProtocolProtos.GetStorageContainerLocationsRequestProto;
-import org.apache.hadoop.ozone.protocol.proto.StorageContainerLocationProtocolProtos.GetStorageContainerLocationsResponseProto;
-import org.apache.hadoop.ozone.protocol.proto.StorageContainerLocationProtocolProtos.LocatedContainerProto;
 import org.apache.hadoop.ozone.protocol.proto.StorageContainerLocationProtocolProtos.GetContainerRequestProto;
 import org.apache.hadoop.ozone.protocol.proto.StorageContainerLocationProtocolProtos.GetContainerResponseProto;
 import org.apache.hadoop.ozone.protocol.proto.StorageContainerLocationProtocolProtos.DeleteContainerRequestProto;
@@ -43,7 +36,6 @@ import org.apache.hadoop.scm.container.common.helpers.Pipeline;
 
 import java.io.Closeable;
 import java.io.IOException;
-import java.util.Set;
 
 /**
  * This class is the client-side translator to translate the requests made on
@@ -71,38 +63,6 @@ public final class StorageContainerLocationProtocolClientSideTranslatorPB
     this.rpcProxy = rpcProxy;
   }
 
-  @Override
-  public Set<LocatedContainer> getStorageContainerLocations(Set<String> keys)
-      throws IOException {
-    GetStorageContainerLocationsRequestProto.Builder req =
-        GetStorageContainerLocationsRequestProto.newBuilder();
-    for (String key : keys) {
-      req.addKeys(key);
-    }
-    final GetStorageContainerLocationsResponseProto resp;
-    try {
-      resp = rpcProxy.getStorageContainerLocations(NULL_RPC_CONTROLLER,
-          req.build());
-    } catch (ServiceException e) {
-      throw ProtobufHelper.getRemoteException(e);
-    }
-    Set<LocatedContainer> locatedContainers =
-        Sets.newLinkedHashSetWithExpectedSize(resp.getLocatedContainersCount());
-    for (LocatedContainerProto locatedContainer :
-        resp.getLocatedContainersList()) {
-      Set<DatanodeInfo> locations = Sets.newLinkedHashSetWithExpectedSize(
-          locatedContainer.getLocationsCount());
-      for (DatanodeInfoProto location : locatedContainer.getLocationsList()) {
-        locations.add(PBHelperClient.convert(location));
-      }
-      locatedContainers.add(new LocatedContainer(locatedContainer.getKey(),
-          locatedContainer.getMatchedKeyPrefix(),
-          locatedContainer.getContainerName(), locations,
-          PBHelperClient.convert(locatedContainer.getLeader())));
-    }
-    return locatedContainers;
-  }
-
   /**
    * Asks SCM where a container should be allocated. SCM responds with the set
    * of datanodes that should be used creating this container.

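The translator methods that survive this patch follow the same client-side PB pattern the deleted method used: build a request proto, invoke rpcProxy with the null RPC controller, and unwrap ServiceException via ProtobufHelper. A minimal sketch of that shape follows; FooRequestProto, FooResponseProto and rpcProxy.foo(...) are placeholders for illustration, not names from this codebase.

// Generic shape of a client-side translator method; the proto types and the
// rpcProxy.foo(...) call are placeholders, not the actual remaining API.
public FooResponseProto callFoo(String name) throws IOException {
  FooRequestProto request = FooRequestProto.newBuilder()
      .setName(name)
      .build();
  try {
    return rpcProxy.foo(NULL_RPC_CONTROLLER, request);
  } catch (ServiceException e) {
    // Convert the protobuf transport exception back into the IOException
    // thrown by the server-side implementation.
    throw ProtobufHelper.getRemoteException(e);
  }
}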
http://git-wip-us.apache.org/repos/asf/hadoop/blob/423c4bde/hadoop-hdfs-project/hadoop-hdfs-client/src/main/proto/StorageContainerLocationProtocol.proto
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/proto/StorageContainerLocationProtocol.proto b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/proto/StorageContainerLocationProtocol.proto
index a6f64a1..ca8725b 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/proto/StorageContainerLocationProtocol.proto
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/proto/StorageContainerLocationProtocol.proto
@@ -32,32 +32,6 @@ import "hdfs.proto";
 import "Ozone.proto";
 
 /**
- * keys - batch of object keys to find
- */
-message GetStorageContainerLocationsRequestProto {
-  repeated string keys = 1;
-}
-
-/**
- * locatedContainers - for each requested hash, nodes that currently host the
- *     container for that object key hash
- */
-message GetStorageContainerLocationsResponseProto {
-  repeated LocatedContainerProto locatedContainers = 1;
-}
-
-/**
- * Holds the nodes that currently host the container for an object key.
- */
-message LocatedContainerProto {
-  required string key = 1;
-  required string matchedKeyPrefix = 2;
-  required string containerName = 3;
-  repeated DatanodeInfoProto locations = 4;
-  required DatanodeInfoProto leader = 5;
-}
-
-/**
 * Request send to SCM asking where the container should be created.
 */
 message ContainerRequestProto {
@@ -106,13 +80,6 @@ message DeleteContainerResponseProto {
  * and response messages for details of the RPC calls.
  */
 service StorageContainerLocationProtocolService {
-  /**
-   * Find the set of nodes that currently host the container of an object, as
-   * identified by the object key hash.  This method supports batch lookup by
-   * passing multiple key hashes.
-   */
-  rpc getStorageContainerLocations(GetStorageContainerLocationsRequestProto)
-      returns(GetStorageContainerLocationsResponseProto);
 
   /**
    * Creates a container entry in SCM.

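To connect the removed .proto schema to the Java side: protoc generates builder classes from these messages, and the deleted request message was populated through them in the client-side translator shown above. A repeated field such as "repeated string keys = 1" becomes an addKeys(...) builder call.

// How the removed request message was built via the generated builder
// (mirrors the client-side translator hunk above); shown only to relate the
// .proto schema to the generated Java API.
GetStorageContainerLocationsRequestProto request =
    GetStorageContainerLocationsRequestProto.newBuilder()
        .addKeys("/key1")   // repeated string keys = 1;
        .addKeys("/key2")
        .build();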
http://git-wip-us.apache.org/repos/asf/hadoop/blob/423c4bde/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/protocolPB/StorageContainerLocationProtocolServerSideTranslatorPB.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/protocolPB/StorageContainerLocationProtocolServerSideTranslatorPB.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/protocolPB/StorageContainerLocationProtocolServerSideTranslatorPB.java
index d65802c..60ef12e 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/protocolPB/StorageContainerLocationProtocolServerSideTranslatorPB.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/protocolPB/StorageContainerLocationProtocolServerSideTranslatorPB.java
@@ -18,25 +18,12 @@
 package org.apache.hadoop.ozone.protocolPB;
 
 import java.io.IOException;
-import java.util.Set;
-
-import com.google.common.collect.Sets;
 import com.google.protobuf.RpcController;
 import com.google.protobuf.ServiceException;
 
 import org.apache.hadoop.classification.InterfaceAudience;
-import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
-import org.apache.hadoop.hdfs.protocolPB.PBHelperClient;
-import org.apache.hadoop.scm.protocol.LocatedContainer;
 import org.apache.hadoop.scm.protocol.StorageContainerLocationProtocol;
-import org.apache.hadoop.ozone.protocol.proto
-    .StorageContainerLocationProtocolProtos
-    .GetStorageContainerLocationsRequestProto;
-import org.apache.hadoop.ozone.protocol.proto
-    .StorageContainerLocationProtocolProtos
-    .GetStorageContainerLocationsResponseProto;
-import org.apache.hadoop.ozone.protocol.proto
-    .StorageContainerLocationProtocolProtos.LocatedContainerProto;
+
 import static org.apache.hadoop.ozone.protocol.proto
     .StorageContainerLocationProtocolProtos.ContainerRequestProto;
 import org.apache.hadoop.ozone.protocol.proto
@@ -74,39 +61,6 @@ public final class StorageContainerLocationProtocolServerSideTranslatorPB
   }
 
   @Override
-  public GetStorageContainerLocationsResponseProto getStorageContainerLocations(
-      RpcController unused, GetStorageContainerLocationsRequestProto req)
-      throws ServiceException {
-    Set<String> keys = Sets.newLinkedHashSetWithExpectedSize(
-        req.getKeysCount());
-    for (String key : req.getKeysList()) {
-      keys.add(key);
-    }
-    final Set<LocatedContainer> locatedContainers;
-    try {
-      locatedContainers = impl.getStorageContainerLocations(keys);
-    } catch (IOException e) {
-      throw new ServiceException(e);
-    }
-    GetStorageContainerLocationsResponseProto.Builder resp =
-        GetStorageContainerLocationsResponseProto.newBuilder();
-    for (LocatedContainer locatedContainer : locatedContainers) {
-      LocatedContainerProto.Builder locatedContainerProto =
-          LocatedContainerProto.newBuilder()
-              .setKey(locatedContainer.getKey())
-              .setMatchedKeyPrefix(locatedContainer.getMatchedKeyPrefix())
-              .setContainerName(locatedContainer.getContainerName());
-      for (DatanodeInfo location : locatedContainer.getLocations()) {
-        locatedContainerProto.addLocations(PBHelperClient.convert(location));
-      }
-      locatedContainerProto.setLeader(
-          PBHelperClient.convert(locatedContainer.getLeader()));
-      resp.addLocatedContainers(locatedContainerProto.build());
-    }
-    return resp.build();
-  }
-
-  @Override
   public ContainerResponseProto allocateContainer(RpcController unused,
       ContainerRequestProto request) throws ServiceException {
     try {

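On the server side, the surviving methods keep the mirror-image pattern: unpack the request proto, delegate to the StorageContainerLocationProtocol implementation, and wrap any IOException in a ServiceException so it can cross the RPC boundary. A sketch with placeholder names:

// Generic shape of a server-side translator method; FooRequestProto,
// FooResponseProto and impl.foo(...) are placeholders for illustration.
public FooResponseProto foo(RpcController unused, FooRequestProto request)
    throws ServiceException {
  try {
    String result = impl.foo(request.getName());
    return FooResponseProto.newBuilder().setName(result).build();
  } catch (IOException e) {
    // IOExceptions from the implementation are tunneled to the client, which
    // re-throws them via ProtobufHelper.getRemoteException.
    throw new ServiceException(e);
  }
}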
http://git-wip-us.apache.org/repos/asf/hadoop/blob/423c4bde/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/scm/StorageContainerManager.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/scm/StorageContainerManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/scm/StorageContainerManager.java
index 3f0f6d3..729a12a 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/scm/StorageContainerManager.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/scm/StorageContainerManager.java
@@ -41,7 +41,6 @@ import org.apache.hadoop.scm.client.ScmClient;
 import org.apache.hadoop.scm.container.common.helpers.AllocatedBlock;
 import org.apache.hadoop.scm.container.common.helpers.DeleteBlockResult;
 import org.apache.hadoop.scm.protocol.ScmBlockLocationProtocol;
-import org.apache.hadoop.scm.protocol.LocatedContainer;
 import org.apache.hadoop.ozone.protocol.StorageContainerDatanodeProtocol;
 import org.apache.hadoop.scm.protocol.StorageContainerLocationProtocol;
 import org.apache.hadoop.ozone.protocol.commands.RegisteredCommand;
@@ -376,14 +375,6 @@ public class StorageContainerManager
         .setDatanodeUUID(rCmd.getDatanodeUUID()).build();
   }
 
-  // TODO : This code will move into KSM later. Write now this code is stubbed
-  // implementation that lets the ozone tests pass.
-  @Override
-  public Set<LocatedContainer> getStorageContainerLocations(Set<String> keys)
-      throws IOException {
-    throw new IOException("Not Implemented.");
-  }
-
   /**
    * Asks SCM where a container should be allocated. SCM responds with the set
    * of datanodes that should be used creating this container.

http://git-wip-us.apache.org/repos/asf/hadoop/blob/423c4bde/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/web/storage/DistributedStorageHandler.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/web/storage/DistributedStorageHandler.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/web/storage/DistributedStorageHandler.java
index d14bc93..468f22a 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/web/storage/DistributedStorageHandler.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/web/storage/DistributedStorageHandler.java
@@ -76,8 +76,6 @@ import java.util.Date;
 import java.util.Set;
 import java.util.TimeZone;
 import java.util.Locale;
-import java.util.HashSet;
-import java.util.Arrays;
 import java.util.List;
 
 /**
@@ -449,25 +447,6 @@ public final class DistributedStorageHandler implements StorageHandler {
   }
 
   /**
-   * Acquires an {@link XceiverClientSpi} connected to a {@link Pipeline}
-   * of nodes capable of serving container protocol operations.
-   * The container is selected based on the specified container key.
-   *
-   * @param containerKey container key
-   * @return XceiverClient connected to a container
-   * @throws IOException if an XceiverClient cannot be acquired
-   */
-  private XceiverClientSpi acquireXceiverClient(String containerKey)
-      throws IOException {
-    Set<LocatedContainer> locatedContainers =
-        storageContainerLocationClient.getStorageContainerLocations(
-            new HashSet<>(Arrays.asList(containerKey)));
-    Pipeline pipeline = newPipelineFromLocatedContainer(
-        locatedContainers.iterator().next());
-    return xceiverClientManager.acquireClient(pipeline);
-  }
-
-  /**
    * Creates a container key from any number of components by combining all
    * components with a delimiter.
    *

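With the key-based location lookup gone, the handler can no longer resolve a Pipeline from a container key this way; the remaining container APIs on StorageContainerLocationProtocol (for example a lookup by container name) would be used instead. A hedged sketch of that shape, assuming a getContainer(String) method returning a Pipeline -- that method is an assumption, not something confirmed by this diff:

// Assumed replacement shape (not shown in this patch): resolve the Pipeline
// by container name via the SCM location client, then acquire an XceiverClient.
private XceiverClientSpi acquireXceiverClient(String containerName)
    throws IOException {
  Pipeline pipeline =
      storageContainerLocationClient.getContainer(containerName);
  return xceiverClientManager.acquireClient(pipeline);
}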
http://git-wip-us.apache.org/repos/asf/hadoop/blob/423c4bde/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/ozone/TestStorageContainerManager.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/ozone/TestStorageContainerManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/ozone/TestStorageContainerManager.java
index dd8ed2a..0fb2289 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/ozone/TestStorageContainerManager.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/ozone/TestStorageContainerManager.java
@@ -159,89 +159,4 @@ public class TestStorageContainerManager {
     Assert.assertTrue(e instanceof IOException);
     Assert.assertEquals(expectedErrorMessage, e.getMessage());
   }
-
-  // TODO : Disabling this test after verifying that failure is due
-  // Not Implemented exception. Will turn on this test in next patch
-  //@Test
-  public void testLocationsForSingleKey() throws Exception {
-    cluster = new MiniOzoneCluster.Builder(conf).numDataNodes(1)
-        .setHandlerType(OzoneConsts.OZONE_HANDLER_DISTRIBUTED).build();
-    storageContainerLocationClient =
-        cluster.createStorageContainerLocationClient();
-    Set<LocatedContainer> containers =
-        storageContainerLocationClient.getStorageContainerLocations(
-            new LinkedHashSet<>(Arrays.asList("/key1")));
-    assertNotNull(containers);
-    assertEquals(1, containers.size());
-    assertLocatedContainer(containers, "/key1", 1);
-  }
-
-  // TODO : Disabling this test after verifying that failure is due
-  // Not Implemented exception. Will turn on this test in next patch
-  //@Test
-  public void testLocationsForMultipleKeys() throws Exception {
-    cluster = new MiniOzoneCluster.Builder(conf).numDataNodes(1)
-        .setHandlerType(OzoneConsts.OZONE_HANDLER_DISTRIBUTED).build();
-    storageContainerLocationClient =
-        cluster.createStorageContainerLocationClient();
-    Set<LocatedContainer> containers =
-        storageContainerLocationClient.getStorageContainerLocations(
-            new LinkedHashSet<>(Arrays.asList("/key1", "/key2", "/key3")));
-    assertNotNull(containers);
-    assertEquals(3, containers.size());
-    assertLocatedContainer(containers, "/key1", 1);
-    assertLocatedContainer(containers, "/key2", 1);
-    assertLocatedContainer(containers, "/key3", 1);
-  }
-  // TODO : Disabling this test after verifying that failure is due
-  // Not Implemented exception. Will turn on this test in next patch
-  //@Test
-  public void testNoDataNodes() throws Exception {
-    cluster = new MiniOzoneCluster.Builder(conf).numDataNodes(0)
-        .setHandlerType(OzoneConsts.OZONE_HANDLER_DISTRIBUTED)
-        .doNotwaitTobeOutofChillMode()
-        .build();
-    storageContainerLocationClient =
-        cluster.createStorageContainerLocationClient();
-    exception.expect(IOException.class);
-    exception.expectMessage("locations not found");
-    storageContainerLocationClient.getStorageContainerLocations(
-        new LinkedHashSet<>(Arrays.asList("/key1")));
-  }
-
-  // TODO : Disabling this test after verifying that failure is due
-  // Not Implemented exception. Will turn on this test in next patch
-  //@Test
-  public void testMultipleDataNodes() throws Exception {
-    cluster = new MiniOzoneCluster.Builder(conf).numDataNodes(3)
-        .setHandlerType(OzoneConsts.OZONE_HANDLER_DISTRIBUTED).build();
-    storageContainerLocationClient =
-        cluster.createStorageContainerLocationClient();
-    Set<LocatedContainer> containers =
-        storageContainerLocationClient.getStorageContainerLocations(
-            new LinkedHashSet<>(Arrays.asList("/key1")));
-    assertNotNull(containers);
-    assertEquals(1, containers.size());
-    assertLocatedContainer(containers, "/key1", 3);
-  }
-
-  private static void assertLocatedContainer(Set<LocatedContainer> containers,
-      String key, int expectedNumLocations) {
-    LocatedContainer container = null;
-    for (LocatedContainer curContainer: containers) {
-      if (key.equals(curContainer.getKey())) {
-        container = curContainer;
-        break;
-      }
-    }
-    assertNotNull("Container for key " + key + " not found.", container);
-    assertEquals(key, container.getKey());
-    assertNotNull(container.getMatchedKeyPrefix());
-    assertFalse(container.getMatchedKeyPrefix().isEmpty());
-    assertNotNull(container.getContainerName());
-    assertFalse(container.getContainerName().isEmpty());
-    assertNotNull(container.getLocations());
-    assertEquals(expectedNumLocations, container.getLocations().size());
-    assertNotNull(container.getLeader());
-  }
 }

